
Commit 4b4397c

Merge pull request #102 from danwinship/reconcile-deployments
Update to be able to track both DaemonSets and Deployments
2 parents 9db4d03 + 1493551 commit 4b4397c

File tree

5 files changed: +372 -66 lines


pkg/controller/networkconfig/daemonset_controller.go

Lines changed: 0 additions & 48 deletions
This file was deleted.

pkg/controller/networkconfig/networkconfig_controller.go

Lines changed: 19 additions & 6 deletions
@@ -53,7 +53,7 @@ func newReconciler(mgr manager.Manager, status *clusteroperator.StatusManager) *
 		scheme: mgr.GetScheme(),
 		status: status,
 
-		daemonSetReconciler: newDaemonSetReconciler(status),
+		podReconciler: newPodReconciler(status),
 	}
 }
 
@@ -71,15 +71,19 @@ func add(mgr manager.Manager, r *ReconcileNetworkConfig) error {
 		return err
 	}
 
-	// Likewise for the DaemonSet reconciler
-	c, err = controller.New("daemonset-controller", mgr, controller.Options{Reconciler: r.daemonSetReconciler})
+	// Likewise for the Pod reconciler
+	c, err = controller.New("pod-controller", mgr, controller.Options{Reconciler: r.podReconciler})
 	if err != nil {
 		return err
 	}
 	err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForObject{})
 	if err != nil {
 		return err
 	}
+	err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
 
 	return nil
 }
@@ -94,7 +98,7 @@ type ReconcileNetworkConfig struct {
 	scheme *runtime.Scheme
 	status *clusteroperator.StatusManager
 
-	daemonSetReconciler *ReconcileDaemonSets
+	podReconciler *ReconcilePods
 }
 
 // Reconcile updates the state of the cluster to match that which is desired
@@ -188,15 +192,24 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile
 	}
 	objs = append([]*uns.Unstructured{app}, objs...)
 
-	// Set up the DaemonSet reconciler before we start creating the DaemonSets
+	// Set up the Pod reconciler before we start creating DaemonSets/Deployments
 	r.status.SetConfigSuccess()
 	daemonSets := []types.NamespacedName{}
+	deployments := []types.NamespacedName{}
 	for _, obj := range objs {
 		if obj.GetAPIVersion() == "apps/v1" && obj.GetKind() == "DaemonSet" {
 			daemonSets = append(daemonSets, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()})
+		} else if obj.GetAPIVersion() == "apps/v1" && obj.GetKind() == "Deployment" {
+			deployments = append(deployments, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()})
 		}
 	}
-	r.daemonSetReconciler.SetDaemonSets(daemonSets)
+	r.status.SetDaemonSets(daemonSets)
+	r.status.SetDeployments(deployments)
+
+	allResources := []types.NamespacedName{}
+	allResources = append(allResources, daemonSets...)
+	allResources = append(allResources, deployments...)
+	r.podReconciler.SetResources(allResources)
 
 	// Apply the objects to the cluster
 	for _, obj := range objs {
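
Note (not part of the diff): handler.EnqueueRequestForObject enqueues requests keyed only by the object's namespace and name, so events for the watched DaemonSets and Deployments both reach the pod reconciler as plain NamespacedNames with no record of the kind. A rough sketch of what each event handler effectively does, assuming the controller-runtime version vendored here ("enqueueByName" and its parameter names are illustrative, not code from this PR):

    package networkconfig

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/types"
    	"k8s.io/client-go/util/workqueue"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // enqueueByName is a sketch (not part of this PR) of what
    // handler.EnqueueRequestForObject effectively does for every event on a
    // watched object: it enqueues only the namespace/name, so DaemonSet and
    // Deployment events look identical to the pod reconciler.
    func enqueueByName(obj metav1.Object, queue workqueue.RateLimitingInterface) {
    	queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
    		Namespace: obj.GetNamespace(),
    		Name:      obj.GetName(),
    	}})
    }

This is why the new ReconcilePods below keeps a single flat list of watched resources rather than separate DaemonSet and Deployment lists.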
Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+package networkconfig
+
+import (
+	"log"
+
+	"github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"
+
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// newPodReconciler returns a new reconcile.Reconciler
+func newPodReconciler(status *clusteroperator.StatusManager) *ReconcilePods {
+	return &ReconcilePods{status: status}
+}
+
+var _ reconcile.Reconciler = &ReconcilePods{}
+
+// ReconcilePods watches for updates to specified resources and then updates its StatusManager
+type ReconcilePods struct {
+	status *clusteroperator.StatusManager
+
+	resources []types.NamespacedName
+}
+
+func (r *ReconcilePods) SetResources(resources []types.NamespacedName) {
+	r.resources = resources
+}
+
+// Reconcile updates the ClusterOperator.Status to match the current state of the
+// watched Deployments/DaemonSets
+func (r *ReconcilePods) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	found := false
+	for _, name := range r.resources {
+		if name.Namespace == request.Namespace && name.Name == request.Name {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return reconcile.Result{}, nil
+	}
+
+	log.Printf("Reconciling update to %s/%s\n", request.Namespace, request.Name)
+	r.status.SetFromPods()
+
+	return reconcile.Result{RequeueAfter: ResyncPeriod}, nil
+}
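
For anyone who wants to exercise the filtering logic above in isolation: a request whose namespace/name is not in the watched set should return an empty result without ever touching the StatusManager. A minimal, hypothetical test sketch (not part of this PR; the resource names are arbitrary):

    package networkconfig

    import (
    	"testing"

    	"k8s.io/apimachinery/pkg/types"
    	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // Hypothetical test, not part of this PR. A nil StatusManager is tolerable
    // here only because an unwatched request returns before r.status is used.
    func TestReconcilePodsIgnoresUnwatchedResources(t *testing.T) {
    	r := newPodReconciler(nil)
    	r.SetResources([]types.NamespacedName{{Namespace: "example-ns", Name: "example-ds"}})

    	req := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "other-ns", Name: "other-thing"}}
    	res, err := r.Reconcile(req)
    	if err != nil || res.Requeue || res.RequeueAfter != 0 {
    		t.Fatalf("expected unwatched request to be ignored, got result=%+v err=%v", res, err)
    	}
    }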

pkg/util/clusteroperator/status_manager.go

Lines changed: 42 additions & 5 deletions
@@ -26,6 +26,9 @@ type StatusManager struct {
 	version string
 
 	configFailure bool
+
+	daemonSets  []types.NamespacedName
+	deployments []types.NamespacedName
 }
 
 func NewStatusManager(client client.Client, name, version string) *StatusManager {
@@ -114,17 +117,25 @@ func (status *StatusManager) SetFailing(reason string, err error) error {
 	)
 }
 
-// SetFromDaemonSets sets the operator status to Failing, Progressing, or Available, based
-// on the current status of the indicated DaemonSets. However, this is a no-op if the
-// StatusManager is currently marked as failing due to a configuration error.
-func (status *StatusManager) SetFromDaemonSets(daemonSets []types.NamespacedName) error {
+func (status *StatusManager) SetDaemonSets(daemonSets []types.NamespacedName) {
+	status.daemonSets = daemonSets
+}
+
+func (status *StatusManager) SetDeployments(deployments []types.NamespacedName) {
+	status.deployments = deployments
+}
+
+// SetFromPods sets the operator status to Failing, Progressing, or Available, based on
+// the current status of the manager's DaemonSets and Deployments. However, this is a
+// no-op if the StatusManager is currently marked as failing due to a configuration error.
+func (status *StatusManager) SetFromPods() error {
 	if status.configFailure {
 		return nil
 	}
 
 	progressing := []string{}
 
-	for _, dsName := range daemonSets {
+	for _, dsName := range status.daemonSets {
 		ns := &corev1.Namespace{}
 		if err := status.client.Get(context.TODO(), types.NamespacedName{Name: dsName.Namespace}, ns); err != nil {
 			if errors.IsNotFound(err) {
@@ -150,6 +161,32 @@ func (status *StatusManager) SetFromDaemonSets(daemonSets []types.NamespacedName
 		}
 	}
 
+	for _, depName := range status.deployments {
+		ns := &corev1.Namespace{}
+		if err := status.client.Get(context.TODO(), types.NamespacedName{Name: depName.Namespace}, ns); err != nil {
+			if errors.IsNotFound(err) {
+				return status.SetFailing("NoNamespace", fmt.Errorf("Namespace %q does not exist", depName.Namespace))
+			} else {
+				return status.SetFailing("InternalError", err)
+			}
+		}
+
+		dep := &appsv1.Deployment{}
+		if err := status.client.Get(context.TODO(), depName, dep); err != nil {
+			if errors.IsNotFound(err) {
+				return status.SetFailing("NoDeployment", fmt.Errorf("Deployment %q does not exist", depName.String()))
+			} else {
+				return status.SetFailing("InternalError", err)
+			}
+		}
+
+		if dep.Status.UnavailableReplicas > 0 {
+			progressing = append(progressing, fmt.Sprintf("Deployment %q is not available (awaiting %d nodes)", depName.String(), dep.Status.UnavailableReplicas))
+		} else if dep.Status.AvailableReplicas == 0 {
+			progressing = append(progressing, fmt.Sprintf("Deployment %q is not yet scheduled on any nodes", depName.String()))
+		}
+	}
+
 	if len(progressing) > 0 {
 		return status.Set(
 			&configv1.ClusterOperatorStatusCondition{
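
Putting the three files together, the intended call sequence introduced by this PR is: the NetworkConfig reconciler records the rendered DaemonSets and Deployments on the StatusManager, hands the combined list to the pod reconciler, and every subsequent event on one of those workloads triggers SetFromPods() to recompute the ClusterOperator conditions. An illustrative summary (not code from this PR; "wireStatusTracking" is a made-up helper name):

    package networkconfig

    import (
    	"github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"
    	"k8s.io/apimachinery/pkg/types"
    )

    // wireStatusTracking is an illustrative summary, not code from this PR: it
    // mirrors how the NetworkConfig reconciler now records the workloads it owns
    // so that later pod-controller events can recompute status via SetFromPods().
    func wireStatusTracking(status *clusteroperator.StatusManager, podReconciler *ReconcilePods,
    	daemonSets, deployments []types.NamespacedName) {
    	status.SetDaemonSets(daemonSets)
    	status.SetDeployments(deployments)

    	// The pod reconciler only needs namespace/name pairs, so both kinds go
    	// into one flat watch list.
    	allResources := []types.NamespacedName{}
    	allResources = append(allResources, daemonSets...)
    	allResources = append(allResources, deployments...)
    	podReconciler.SetResources(allResources)
    }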
