From 9d07cc986e8ffbbb26a34bb3c9ff67233aeb6bfa Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Mon, 14 Nov 2022 16:46:51 +0100 Subject: [PATCH 1/2] Remove leftovers after scaffolding Signed-off-by: Alexandr Demicev --- bootstrap/api/v1alpha1/rke2config_types.go | 3 --- bootstrap/api/v1alpha1/rke2configtemplate_types.go | 3 --- bootstrap/api/v1alpha1/webhook_suite_test.go | 3 --- 3 files changed, 9 deletions(-) diff --git a/bootstrap/api/v1alpha1/rke2config_types.go b/bootstrap/api/v1alpha1/rke2config_types.go index 457683e5..04179756 100644 --- a/bootstrap/api/v1alpha1/rke2config_types.go +++ b/bootstrap/api/v1alpha1/rke2config_types.go @@ -22,9 +22,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // RKE2ConfigSpec defines the desired state of RKE2Config. type RKE2ConfigSpec struct { // Files specifies extra files to be passed to user_data upon creation. diff --git a/bootstrap/api/v1alpha1/rke2configtemplate_types.go b/bootstrap/api/v1alpha1/rke2configtemplate_types.go index a8e59402..80800bc1 100644 --- a/bootstrap/api/v1alpha1/rke2configtemplate_types.go +++ b/bootstrap/api/v1alpha1/rke2configtemplate_types.go @@ -20,9 +20,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // RKE2ConfigTemplateSpec defines the desired state of RKE2ConfigTemplate type RKE2ConfigTemplateSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster diff --git a/bootstrap/api/v1alpha1/webhook_suite_test.go b/bootstrap/api/v1alpha1/webhook_suite_test.go index 606c0b87..c3292dc2 100644 --- a/bootstrap/api/v1alpha1/webhook_suite_test.go +++ b/bootstrap/api/v1alpha1/webhook_suite_test.go @@ -40,9 +40,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment From 3d4ffa44b5a8eeea4423064bf9dd1cc6a7bfe99a Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Mon, 14 Nov 2022 17:17:02 +0100 Subject: [PATCH 2/2] Improve main.go after scaffolding Signed-off-by: Alexandr Demicev --- bootstrap/main.go | 171 +++++++++++++++++++++++++++++------------- controlplane/main.go | 174 ++++++++++++++++++++++++++++++------------- go.mod | 4 +- 3 files changed, 245 insertions(+), 104 deletions(-) diff --git a/bootstrap/main.go b/bootstrap/main.go index 4e86c96a..a2de4f5c 100644 --- a/bootstrap/main.go +++ b/bootstrap/main.go @@ -18,94 +18,146 @@ package main import ( "flag" + "fmt" + "net/http" "os" + "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 
+	bootstrapv1 "github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/api/v1alpha1"
+	"github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/internal/controllers"
+	"github.com/spf13/pflag"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/klogr"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	bootstrapv1 "github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/api/v1alpha1"
-	"github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/internal/controllers"
 	//+kubebuilder:scaffold:imports
 )
 
 var (
 	scheme   = runtime.NewScheme()
 	setupLog = ctrl.Log.WithName("setup")
+
+	// flags.
+	metricsBindAddr             string
+	enableLeaderElection        bool
+	leaderElectionLeaseDuration time.Duration
+	leaderElectionRenewDeadline time.Duration
+	leaderElectionRetryPeriod   time.Duration
+	watchFilterValue            string
+	profilerAddress             string
+	concurrencyNumber           int
+	syncPeriod                  time.Duration
+	webhookPort                 int
+	webhookCertDir              string
+	healthAddr                  string
 )
 
 func init() {
+	klog.InitFlags(nil)
+
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(bootstrapv1.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 }
 
+// InitFlags initializes the flags.
+func InitFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&metricsBindAddr, "metrics-bind-addr", ":8080",
+		"The address the metric endpoint binds to.")
+
+	fs.BoolVar(&enableLeaderElection, "leader-elect", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+
+	fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second,
+		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
+
+	fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second,
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
+
+	fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second,
+		"Duration the LeaderElector clients should wait between tries of actions (duration string)")
+
+	fs.StringVar(&watchFilterValue, "watch-filter", "",
+		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel))
+
+	fs.StringVar(&profilerAddress, "profiler-address", "",
+		"Bind address to expose the pprof profiler (e.g. localhost:6060)")
+
+	fs.IntVar(&concurrencyNumber, "concurrency", 1,
+		"Number of core resources to process simultaneously")
+
+	fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute,
+		"The minimum interval at which watched resources are reconciled (e.g. 15m)")
+
+	fs.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port")
+
+	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
+		"Webhook cert dir, only used when webhook-port is specified.")
+
+	fs.StringVar(&healthAddr, "health-addr", ":9440",
+		"The address the health endpoint binds to.")
+}
+
 func main() {
-	var metricsAddr string
-	var enableLeaderElection bool
-	var probeAddr string
-	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
-	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
-	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
-		"Enable leader election for controller manager. "+
-			"Enabling this will ensure there is only one active controller manager.")
-	opts := zap.Options{
-		Development: true,
-	}
-	opts.BindFlags(flag.CommandLine)
-	flag.Parse()
+	InitFlags(pflag.CommandLine)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
 
-	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
+	ctrl.SetLogger(klogr.New())
+
+	if profilerAddress != "" {
+		klog.Infof("Profiler listening for requests at %s", profilerAddress)
+		go func() {
+			klog.Info(http.ListenAndServe(profilerAddress, nil))
+		}()
+	}
 
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-		Scheme:                 scheme,
-		MetricsBindAddress:     metricsAddr,
-		Port:                   9443,
-		HealthProbeBindAddress: probeAddr,
-		LeaderElection:         enableLeaderElection,
-		LeaderElectionID:       "b820bb25.cluster.x-k8s.io",
-		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
-		// when the Manager ends. This requires the binary to immediately end when the
-		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
-		// speeds up voluntary leader transitions as the new leader don't have to wait
-		// LeaseDuration time first.
-		//
-		// In the default scaffold provided, the program ends immediately after
-		// the manager stops, so would be fine to enable this option. However,
-		// if you are doing or is intended to do any operation such as perform cleanups
-		// after the manager stops then its usage might be unsafe.
- // LeaderElectionReleaseOnCancel: true, + Scheme: scheme, + MetricsBindAddress: metricsBindAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "rke2-bootstrap-manager-leader-election-capi", + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + SyncPeriod: &syncPeriod, + ClientDisableCacheFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + Port: webhookPort, + CertDir: webhookCertDir, + HealthProbeBindAddress: healthAddr, }) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } - if err = (&controllers.Rke2ConfigReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Rke2Config") - os.Exit(1) - } - if err = (&bootstrapv1.RKE2Config{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "Rke2Config") - os.Exit(1) - } - if err = (&bootstrapv1.RKE2ConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "Rke2ConfigTemplate") + setupChecks(mgr) + setupReconcilers(mgr) + setupWebhooks(mgr) + //+kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") os.Exit(1) } - //+kubebuilder:scaffold:builder +} +func setupChecks(mgr ctrl.Manager) { if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { setupLog.Error(err, "unable to set up health check") os.Exit(1) @@ -114,10 +166,25 @@ func main() { setupLog.Error(err, "unable to set up ready check") os.Exit(1) } +} - setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { - setupLog.Error(err, "problem running manager") +func setupReconcilers(mgr ctrl.Manager) { + if err := (&controllers.Rke2ConfigReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Rke2Config") + os.Exit(1) + } +} + +func setupWebhooks(mgr ctrl.Manager) { + if err := (&bootstrapv1.RKE2Config{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Rke2Config") + os.Exit(1) + } + if err := (&bootstrapv1.RKE2ConfigTemplate{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Rke2ConfigTemplate") os.Exit(1) } } diff --git a/controlplane/main.go b/controlplane/main.go index dd47813d..27972a52 100644 --- a/controlplane/main.go +++ b/controlplane/main.go @@ -18,94 +18,153 @@ package main import ( "flag" + "fmt" + "net/http" "os" + "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 
+	controlplanev1 "github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/api/v1alpha1"
+	"github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/internal/controllers"
+	"github.com/spf13/pflag"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/klog/v2"
+	"k8s.io/klog/v2/klogr"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
-	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-
-	controlplanev1 "github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/api/v1alpha1"
-	"github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/internal/controllers"
 	//+kubebuilder:scaffold:imports
 )
 
 var (
 	scheme   = runtime.NewScheme()
 	setupLog = ctrl.Log.WithName("setup")
+
+	// flags.
+	metricsBindAddr             string
+	enableLeaderElection        bool
+	leaderElectionLeaseDuration time.Duration
+	leaderElectionRenewDeadline time.Duration
+	leaderElectionRetryPeriod   time.Duration
+	watchFilterValue            string
+	profilerAddress             string
+	concurrencyNumber           int
+	syncPeriod                  time.Duration
+	webhookPort                 int
+	webhookCertDir              string
+	healthAddr                  string
 )
 
 func init() {
+	klog.InitFlags(nil)
+
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(controlplanev1.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 }
 
+// InitFlags initializes the flags.
+func InitFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&metricsBindAddr, "metrics-bind-addr", ":8080",
+		"The address the metric endpoint binds to.")
+
+	fs.BoolVar(&enableLeaderElection, "leader-elect", false,
+		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
+
+	fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second,
+		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)")
+
+	fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second,
+		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)")
+
+	fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second,
+		"Duration the LeaderElector clients should wait between tries of actions (duration string)")
+
+	fs.StringVar(&watchFilterValue, "watch-filter", "",
+		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel))
+
+	fs.StringVar(&profilerAddress, "profiler-address", "",
+		"Bind address to expose the pprof profiler (e.g. localhost:6060)")
+
+	fs.IntVar(&concurrencyNumber, "concurrency", 1,
+		"Number of core resources to process simultaneously")
+
+	fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute,
+		"The minimum interval at which watched resources are reconciled (e.g. 15m)")
+
+	fs.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port")
+
+	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
+		"Webhook cert dir, only used when webhook-port is specified.")
+
+	fs.StringVar(&healthAddr, "health-addr", ":9440",
+		"The address the health endpoint binds to.")
+}
+
 func main() {
-	var metricsAddr string
-	var enableLeaderElection bool
-	var probeAddr string
-	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
-	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
-	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
-		"Enable leader election for controller manager. "+
-			"Enabling this will ensure there is only one active controller manager.")
-	opts := zap.Options{
-		Development: true,
-	}
-	opts.BindFlags(flag.CommandLine)
-	flag.Parse()
+	InitFlags(pflag.CommandLine)
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
 
-	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
+	ctrl.SetLogger(klogr.New())
+
+	if profilerAddress != "" {
+		klog.Infof("Profiler listening for requests at %s", profilerAddress)
+		go func() {
+			klog.Info(http.ListenAndServe(profilerAddress, nil))
+		}()
+	}
 
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-		Scheme:                 scheme,
-		MetricsBindAddress:     metricsAddr,
-		Port:                   9443,
-		HealthProbeBindAddress: probeAddr,
-		LeaderElection:         enableLeaderElection,
-		LeaderElectionID:       "e629e1f4.cluster.x-k8s.io",
-		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
-		// when the Manager ends. This requires the binary to immediately end when the
-		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
-		// speeds up voluntary leader transitions as the new leader don't have to wait
-		// LeaseDuration time first.
-		//
-		// In the default scaffold provided, the program ends immediately after
-		// the manager stops, so would be fine to enable this option. However,
-		// if you are doing or is intended to do any operation such as perform cleanups
-		// after the manager stops then its usage might be unsafe.
-		// LeaderElectionReleaseOnCancel: true,
+		Scheme:                 scheme,
+		MetricsBindAddress:     metricsBindAddr,
+		LeaderElection:         enableLeaderElection,
+		LeaderElectionID:       "rke2-controlplane-manager-leader-election-capi",
+		LeaseDuration:          &leaderElectionLeaseDuration,
+		RenewDeadline:          &leaderElectionRenewDeadline,
+		RetryPeriod:            &leaderElectionRetryPeriod,
+		SyncPeriod:             &syncPeriod,
+		ClientDisableCacheFor: []client.Object{
+			&corev1.ConfigMap{},
+			&corev1.Secret{},
+		},
+		Port:                   webhookPort,
+		CertDir:                webhookCertDir,
+		HealthProbeBindAddress: healthAddr,
 	})
 	if err != nil {
 		setupLog.Error(err, "unable to start manager")
 		os.Exit(1)
 	}
-	if err = (&controllers.RKE2ControlPlaneReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "RKE2ControlPlane")
-		os.Exit(1)
-	}
-	if err = (&controlplanev1.RKE2ControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "RKE2ControlPlane")
-		os.Exit(1)
-	}
-	if err = (&controlplanev1.RKE2ControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "RKE2ControlPlaneTemplate")
+	setupChecks(mgr)
+	setupReconcilers(mgr)
+	setupWebhooks(mgr)
+	//+kubebuilder:scaffold:builder
+
+	setupLog.Info("starting manager")
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "problem running manager")
 		os.Exit(1)
 	}
-	//+kubebuilder:scaffold:builder
+}
 
+func setupChecks(mgr ctrl.Manager) {
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
 		setupLog.Error(err, "unable to set up health check")
 		os.Exit(1)
@@ -114,10 +173,25 @@ func main() {
 		setupLog.Error(err, "unable to set up ready check")
 		os.Exit(1)
 	}
+}
 
-	setupLog.Info("starting manager")
-	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
-		setupLog.Error(err, "problem running manager")
+func setupReconcilers(mgr ctrl.Manager) {
+	if err := (&controllers.RKE2ControlPlaneReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "RKE2ControlPlane")
+		os.Exit(1)
+	}
+}
+
+func setupWebhooks(mgr ctrl.Manager) {
+	if err := (&controlplanev1.RKE2ControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "RKE2ControlPlane")
+		os.Exit(1)
+	}
+	if err := (&controlplanev1.RKE2ControlPlaneTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "RKE2ControlPlaneTemplate")
 		os.Exit(1)
 	}
 }
diff --git a/go.mod b/go.mod
index e77dcb7a..6d369545 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,11 @@ require (
 	github.com/onsi/ginkgo v1.16.5
 	github.com/onsi/gomega v1.24.1
 	github.com/pkg/errors v0.9.1
+	github.com/spf13/pflag v1.0.5
 	k8s.io/api v0.25.4
 	k8s.io/apimachinery v0.25.4
 	k8s.io/client-go v0.25.4
+	k8s.io/klog/v2 v2.70.1
 	sigs.k8s.io/cluster-api v1.2.5
 	sigs.k8s.io/controller-runtime v0.13.1
 )
@@ -56,7 +58,6 @@ require (
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.21.0 // indirect @@ -76,7 +77,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.25.0 // indirect k8s.io/component-base v0.25.0 // indirect - k8s.io/klog/v2 v2.70.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
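
Note on the new flags (not part of the patch series itself): InitFlags registers --concurrency and --watch-filter in both managers, but concurrencyNumber and watchFilterValue are not referenced anywhere else in this diff, so the two flags are parsed without taking effect yet. Below is a minimal sketch of how they are typically consumed in Cluster API providers. The extended SetupWithManager signature is an assumption made for illustration; the controller-runtime builder, controller.Options and the cluster-api predicates helper are existing APIs.

// Illustrative sketch only, not part of this patch. The signature of
// SetupWithManager shown here is hypothetical; main() would call it from
// setupReconcilers as SetupWithManager(ctx, mgr, concurrencyNumber, watchFilterValue).
package controllers

import (
	"context"

	bootstrapv1 "github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/api/v1alpha1"
	"sigs.k8s.io/cluster-api/util/predicates"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// SetupWithManager builds the RKE2Config controller: concurrency bounds the number
// of parallel reconciles, watchFilterValue restricts reconciliation to objects
// carrying the matching cluster.x-k8s.io/watch-filter label.
func (r *Rke2ConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, concurrency int, watchFilterValue string) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&bootstrapv1.RKE2Config{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: concurrency}).
		WithEventFilter(predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), watchFilterValue)).
		Complete(r)
}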