From 1b7e32b9f856c3b6f30f10bf390521e6d989189d Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 8 Nov 2023 18:14:30 -0800 Subject: [PATCH 01/22] fix for adding validation --- cmd/otel-allocator/watcher/promOperator.go | 135 ++++++++++++++++++++- 1 file changed, 129 insertions(+), 6 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index b687b96048..41bc1c73f6 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -17,9 +17,12 @@ package watcher import ( "context" "fmt" + "os" + "regexp" "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/go-logr/logr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" @@ -27,8 +30,10 @@ import ( monitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" "github.com/prometheus-operator/prometheus-operator/pkg/informers" "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" + "github.com/prometheus/common/model" promconfig "github.com/prometheus/prometheus/config" kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes" + "github.com/prometheus/prometheus/model/relabel" "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -220,8 +225,12 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi smRetrieveErr := w.informers[monitoringv1.ServiceMonitorName].ListAll(w.serviceMonitorSelector, func(sm interface{}) { monitor := sm.(*monitoringv1.ServiceMonitor) key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) - w.addStoreAssetsForServiceMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.Endpoints, store) - serviceMonitorInstances[key] = monitor + validateError := w.addStoreAssetsForServiceMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.Endpoints, store) + if validateError != nil { + w.logger.Error(validateError, "Failed validating ServiceMonitor, skipping", "ServiceMonitor:", monitor.Name, "in namespace", monitor.Namespace) + } else { + serviceMonitorInstances[key] = monitor + } }) if smRetrieveErr != nil { return nil, smRetrieveErr @@ -231,8 +240,12 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi pmRetrieveErr := w.informers[monitoringv1.PodMonitorName].ListAll(w.podMonitorSelector, func(pm interface{}) { monitor := pm.(*monitoringv1.PodMonitor) key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) - w.addStoreAssetsForPodMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.PodMetricsEndpoints, store) - podMonitorInstances[key] = monitor + validateError := w.addStoreAssetsForPodMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.PodMetricsEndpoints, store) + if validateError != nil { + w.logger.Error(validateError, "Failed validating PodMonitor, skipping", "PodMonitor:", monitor.Name, "in namespace", monitor.Namespace) + } else { + podMonitorInstances[key] = monitor + } }) if pmRetrieveErr != nil { return nil, pmRetrieveErr @@ -288,8 +301,9 @@ func (w *PrometheusCRWatcher) addStoreAssetsForServiceMonitor( smName, smNamespace string, endps []monitoringv1.Endpoint, store *assets.Store, -) { +) error { var err error + var validateErr error for i, endp := range endps { objKey := fmt.Sprintf("serviceMonitor/%s/%s/%d", smNamespace, smName, i) @@ 
-315,11 +329,33 @@ func (w *PrometheusCRWatcher) addStoreAssetsForServiceMonitor( if err = store.AddSafeAuthorizationCredentials(ctx, smNamespace, endp.Authorization, smAuthKey); err != nil { break } + + for _, rl := range endp.RelabelConfigs { + if rl.Action != "" { + if validateErr = validateRelabelConfig(*rl); validateErr != nil { + break + } + } + } + + for _, rl := range endp.MetricRelabelConfigs { + if rl.Action != "" { + if validateErr = validateRelabelConfig(*rl); validateErr != nil { + break + } + } + } } if err != nil { w.logger.Error(err, "Failed to obtain credentials for a ServiceMonitor", "serviceMonitor", smName) } + + if validateErr != nil { + return validateErr + } + + return nil } // addStoreAssetsForServiceMonitor adds authentication / authorization related information to the assets store, @@ -331,7 +367,7 @@ func (w *PrometheusCRWatcher) addStoreAssetsForPodMonitor( pmName, pmNamespace string, podMetricsEndps []monitoringv1.PodMetricsEndpoint, store *assets.Store, -) { +) error { var err error for i, endp := range podMetricsEndps { objKey := fmt.Sprintf("podMonitor/%s/%s/%d", pmNamespace, pmName, i) @@ -358,9 +394,96 @@ func (w *PrometheusCRWatcher) addStoreAssetsForPodMonitor( if err = store.AddSafeAuthorizationCredentials(ctx, pmNamespace, endp.Authorization, smAuthKey); err != nil { break } + + for _, rl := range endp.RelabelConfigs { + if rl.Action != "" { + if validateErr = validateRelabelConfig(*rl); validateErr != nil { + break + } + } + } + + for _, rl := range endp.MetricRelabelConfigs { + if rl.Action != "" { + if validateErr = validateRelabelConfig(*rl); validateErr != nil { + break + } + } + } } if err != nil { w.logger.Error(err, "Failed to obtain credentials for a PodMonitor", "podMonitor", pmName) } + + if validateErr != nil { + return validateErr + } + + return nil +} + +// validateRelabelConfig validates relabel config for service and pod monitor, +// based on the service monitor and pod metrics endpoints specs. 
+// This code borrows from +// https://github.com/prometheus-operator/prometheus-operator/blob/ba536405154d18f3a6f312818283d671182af6f3/pkg/prometheus/resource_selector.go#L237 +func validateRelabelConfig(rc monitoringv1.RelabelConfig) error { + relabelTarget := regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + + if _, err := relabel.NewRegexp(rc.Regex); err != nil { + return fmt.Errorf("invalid regex %s for relabel configuration", rc.Regex) + } + + if rc.Modulus == 0 && rc.Action == string(relabel.HashMod) { + return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") + } + + if (rc.Action == string(relabel.Replace) || rc.Action == string(relabel.HashMod) || rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && rc.TargetLabel == "" { + return fmt.Errorf("relabel configuration for %s action needs targetLabel value", rc.Action) + } + + if (rc.Action == string(relabel.Replace) || rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && !relabelTarget.MatchString(rc.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", rc.TargetLabel, rc.Action) + } + + if (rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || rc.Replacement == "") { + return fmt.Errorf("'replacement' can not be set for %s action", rc.Action) + } + + if rc.Action == string(relabel.LabelMap) { + if rc.Replacement != "" && !relabelTarget.MatchString(rc.Replacement) { + return fmt.Errorf("%q is invalid 'replacement' for %s action", rc.Replacement, rc.Action) + } + } + + if rc.Action == string(relabel.HashMod) && !model.LabelName(rc.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", rc.TargetLabel, rc.Action) + } + + if rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual) { + if !(rc.Regex == "" || rc.Regex == relabel.DefaultRelabelConfig.Regex.String()) || + !(rc.Modulus == uint64(0) || + rc.Modulus == relabel.DefaultRelabelConfig.Modulus) || + !(rc.Separator == "" || + rc.Separator == relabel.DefaultRelabelConfig.Separator) || + !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || + rc.Replacement == "") { + return fmt.Errorf("%s action requires only 'source_labels' and `target_label`, and no other fields", rc.Action) + } + } + + if rc.Action == string(relabel.LabelDrop) || rc.Action == string(relabel.LabelKeep) { + if len(rc.SourceLabels) != 0 || + !(rc.TargetLabel == "" || + rc.TargetLabel == relabel.DefaultRelabelConfig.TargetLabel) || + !(rc.Modulus == uint64(0) || + rc.Modulus == relabel.DefaultRelabelConfig.Modulus) || + !(rc.Separator == "" || + rc.Separator == relabel.DefaultRelabelConfig.Separator) || + !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || + rc.Replacement == "") { + return fmt.Errorf("%s action requires only 'regex', and no other fields", rc.Action) + } + } + return nil } From a55e6887ff3533fa7855db0a2cde42aa96f25c0d Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 8 Nov 2023 18:23:38 -0800 Subject: [PATCH 02/22] removing unused references --- cmd/otel-allocator/watcher/promOperator.go | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 41bc1c73f6..4739f86e01 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -17,12 +17,10 @@ package watcher import ( "context" "fmt" - "os" "regexp" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-logr/logr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" From 401ad2c879d0c532ab0d13c9e4945e26cbbf8fe8 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Tue, 28 Nov 2023 19:54:44 -0800 Subject: [PATCH 03/22] adding tests --- .../watcher/promOperator_test.go | 684 +++++++++++++++--- 1 file changed, 603 insertions(+), 81 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index 5aa05c23f8..d949364572 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -16,14 +16,21 @@ package watcher import ( "context" + "fmt" + "os" "testing" "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" + allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/prometheus-operator/prometheus-operator/pkg/assets" fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" "github.com/prometheus-operator/prometheus-operator/pkg/informers" + "github.com/prometheus-operator/prometheus-operator/pkg/operator" "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" + prometheusgoclient "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" promconfig "github.com/prometheus/prometheus/config" @@ -35,46 +42,53 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + fcache "k8s.io/client-go/tools/cache/testing" ) func TestLoadConfig(t *testing.T) { tests := []struct { - name string - serviceMonitor *monitoringv1.ServiceMonitor - podMonitor *monitoringv1.PodMonitor - want *promconfig.Config - wantErr bool + name string + serviceMonitors []*monitoringv1.ServiceMonitor + podMonitors []*monitoringv1.PodMonitor + want *promconfig.Config + wantErr bool + cfg allocatorconfig.Config }{ { name: "simple test", - serviceMonitor: &monitoringv1.ServiceMonitor{ - ObjectMeta: metav1.ObjectMeta{ - Name: "simple", - Namespace: "test", - }, - Spec: monitoringv1.ServiceMonitorSpec{ - JobLabel: "test", - Endpoints: []monitoringv1.Endpoint{ - { - Port: "web", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, }, }, }, }, - podMonitor: &monitoringv1.PodMonitor{ - ObjectMeta: metav1.ObjectMeta{ - Name: "simple", - Namespace: "test", - }, - Spec: monitoringv1.PodMonitorSpec{ - JobLabel: "test", - PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ - { - Port: "web", + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "test", + }, + 
Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, }, }, }, }, + cfg: allocatorconfig.Config{}, want: &promconfig.Config{ ScrapeConfigs: []*promconfig.ScrapeConfig{ { @@ -122,39 +136,42 @@ func TestLoadConfig(t *testing.T) { }, { name: "basic auth (serviceMonitor)", - serviceMonitor: &monitoringv1.ServiceMonitor{ - ObjectMeta: metav1.ObjectMeta{ - Name: "auth", - Namespace: "test", - }, - Spec: monitoringv1.ServiceMonitorSpec{ - JobLabel: "auth", - Endpoints: []monitoringv1.Endpoint{ - { - Port: "web", - BasicAuth: &monitoringv1.BasicAuth{ - Username: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: "basic-auth", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "auth", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "auth", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + BasicAuth: &monitoringv1.BasicAuth{ + Username: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "basic-auth", + }, + Key: "username", }, - Key: "username", - }, - Password: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: "basic-auth", + Password: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "basic-auth", + }, + Key: "password", }, - Key: "password", }, }, }, - }, - Selector: metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "auth", + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "auth", + }, }, }, }, }, + cfg: allocatorconfig.Config{}, want: &promconfig.Config{ GlobalConfig: promconfig.GlobalConfig{}, ScrapeConfigs: []*promconfig.ScrapeConfig{ @@ -190,26 +207,29 @@ func TestLoadConfig(t *testing.T) { }, { name: "bearer token (podMonitor)", - podMonitor: &monitoringv1.PodMonitor{ - ObjectMeta: metav1.ObjectMeta{ - Name: "bearer", - Namespace: "test", - }, - Spec: monitoringv1.PodMonitorSpec{ - JobLabel: "bearer", - PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ - { - Port: "web", - BearerTokenSecret: v1.SecretKeySelector{ - LocalObjectReference: v1.LocalObjectReference{ - Name: "bearer", + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "bearer", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "bearer", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + BearerTokenSecret: v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "bearer", + }, + Key: "token", }, - Key: "token", }, }, }, }, }, + cfg: allocatorconfig.Config{}, want: &promconfig.Config{ GlobalConfig: promconfig.GlobalConfig{}, ScrapeConfigs: []*promconfig.ScrapeConfig{ @@ -243,10 +263,475 @@ func TestLoadConfig(t *testing.T) { }, }, }, + { + name: "invalid pod monitor test", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-sm", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + }, + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pm", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-pm", + Namespace: "test", + 
}, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + RelabelConfigs: []*monitoringv1.RelabelConfig{ + { + Action: "keep", + Regex: ".*(", + Replacement: "invalid", + TargetLabel: "city", + }, + }, + }, + }, + }, + }, + }, + cfg: allocatorconfig.Config{}, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/test/valid-sm/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + { + JobName: "podMonitor/test/valid-pm/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "invalid service monitor test", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-sm", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-sm", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + RelabelConfigs: []*monitoringv1.RelabelConfig{ + { + Action: "keep", + Regex: ".*(", + Replacement: "invalid", + TargetLabel: "city", + }, + }, + }, + }, + }, + }, + }, + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pm", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + }, + cfg: allocatorconfig.Config{}, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/test/valid-sm/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + { + JobName: "podMonitor/test/valid-pm/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + 
Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "service monitor selector test", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "sm-1", + Namespace: "test", + Labels: map[string]string{ + "testsvc": "testsvc", + }, + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "sm-2", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + }, + cfg: allocatorconfig.Config{ + ServiceMonitorSelector: map[string]string{ + "testsvc": "testsvc", + }, + }, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/test/sm-1/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "pod monitor selector test", + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-1", + Namespace: "test", + Labels: map[string]string{ + "testpod": "testpod", + }, + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-2", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + }, + cfg: allocatorconfig.Config{ + PodMonitorSelector: map[string]string{ + "testpod": "testpod", + }, + }, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "podMonitor/test/pm-1/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"test"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "service monitor namespace selector test", + serviceMonitors: []*monitoringv1.ServiceMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "sm-1", + Namespace: "labellednamespace", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "sm-2", + Namespace: "test", + }, + Spec: monitoringv1.ServiceMonitorSpec{ + JobLabel: "test", + Endpoints: []monitoringv1.Endpoint{ + { + Port: "web", + }, + }, + }, + }, 
+ }, + cfg: allocatorconfig.Config{ + ServiceMonitorNamespaceSelector: map[string]string{ + "label1": "label1", + }, + }, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "serviceMonitor/labellednamespace/sm-1/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "endpointslice", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"labellednamespace"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, + { + name: "pod monitor namespace selector test", + podMonitors: []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-1", + Namespace: "labellednamespace", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-2", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + }, + cfg: allocatorconfig.Config{ + PodMonitorNamespaceSelector: map[string]string{ + "label1": "label1", + }, + }, + want: &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "podMonitor/labellednamespace/pm-1/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"labellednamespace"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - w := getTestPrometheusCRWatcher(t, tt.serviceMonitor, tt.podMonitor) + w := getTestPrometheuCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg) + + // Start namespace informers in order to populate cache. + go w.nsInformer.Run(w.stopChannel) + for !w.nsInformer.HasSynced() { + time.Sleep(50 * time.Millisecond) + } + for _, informer := range w.informers { // Start informers in order to populate cache. informer.Start(w.stopChannel) @@ -350,18 +835,22 @@ func TestRateLimit(t *testing.T) { // getTestPrometheuCRWatcher creates a test instance of PrometheusCRWatcher with fake clients // and test secrets. 
-func getTestPrometheusCRWatcher(t *testing.T, sm *monitoringv1.ServiceMonitor, pm *monitoringv1.PodMonitor) *PrometheusCRWatcher { +func getTestPrometheuCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) *PrometheusCRWatcher { mClient := fakemonitoringclient.NewSimpleClientset() - if sm != nil { - _, err := mClient.MonitoringV1().ServiceMonitors("test").Create(context.Background(), sm, metav1.CreateOptions{}) - if err != nil { - t.Fatal(t, err) + for _, sm := range svcMonitors { + if sm != nil { + _, err := mClient.MonitoringV1().ServiceMonitors(sm.Namespace).Create(context.Background(), sm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(t, err) + } } } - if pm != nil { - _, err := mClient.MonitoringV1().PodMonitors("test").Create(context.Background(), pm, metav1.CreateOptions{}) - if err != nil { - t.Fatal(t, err) + for _, pm := range podMonitors { + if pm != nil { + _, err := mClient.MonitoringV1().PodMonitors(pm.Namespace).Create(context.Background(), pm, metav1.CreateOptions{}) + if err != nil { + t.Fatal(t, err) + } } } @@ -397,24 +886,57 @@ func getTestPrometheusCRWatcher(t *testing.T, sm *monitoringv1.ServiceMonitor, p Spec: monitoringv1.PrometheusSpec{ CommonPrometheusFields: monitoringv1.CommonPrometheusFields{ ScrapeInterval: monitoringv1.Duration("30s"), + ServiceMonitorSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorSelector, + }, + PodMonitorSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorSelector, + }, + ServiceMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorNamespaceSelector, + }, + PodMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorNamespaceSelector, + }, }, }, } - generator, err := prometheus.NewConfigGenerator(log.NewNopLogger(), prom, true) + promOperatorLogger := level.NewFilter(log.NewLogfmtLogger(os.Stderr), level.AllowWarn()) + + generator, err := prometheus.NewConfigGenerator(promOperatorLogger, prom, true) if err != nil { t.Fatal(t, err) } + store := assets.NewStore(k8sClient.CoreV1(), k8sClient.CoreV1()) + promRegisterer := prometheusgoclient.NewRegistry() + operatorMetrics := operator.NewMetrics(promRegisterer) + + source := fcache.NewFakeControllerSource() + source.Add(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test"}}) + source.Add(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: "labellednamespace", + Labels: map[string]string{ + "label1": "label1", + }}}) + + // create the shared informer and resync every 1s + nsMonInf := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(cache.SharedIndexInformer) + + resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) + return &PrometheusCRWatcher{ - kubeMonitoringClient: mClient, - k8sClient: k8sClient, - informers: informers, - configGenerator: generator, - serviceMonitorSelector: getSelector(nil), - podMonitorSelector: getSelector(nil), - stopChannel: make(chan struct{}), + kubeMonitoringClient: mClient, + k8sClient: k8sClient, + informers: informers, + nsInformer: nsMonInf, + stopChannel: make(chan struct{}), + configGenerator: generator, + resourceSelector: resourceSelector, + store: store, } + } // Remove relable configs fields from scrape configs for testing, From b582198de01bc08665278ee8c4410cce85b99f5c Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 29 Nov 2023 11:14:20 -0800 Subject: [PATCH 04/22] adding some changes --- 
cmd/otel-allocator/config/config.go | 24 +- cmd/otel-allocator/main.go | 2 +- cmd/otel-allocator/watcher/promOperator.go | 410 +++++++----------- .../watcher/promOperator_test.go | 11 +- 4 files changed, 172 insertions(+), 275 deletions(-) diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go index 96748c1e4d..4480cb52e3 100644 --- a/cmd/otel-allocator/config/config.go +++ b/cmd/otel-allocator/config/config.go @@ -39,17 +39,19 @@ const DefaultConfigFilePath string = "/conf/targetallocator.yaml" const DefaultCRScrapeInterval model.Duration = model.Duration(time.Second * 30) type Config struct { - ListenAddr string `yaml:"listen_addr,omitempty"` - KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` - ClusterConfig *rest.Config `yaml:"-"` - RootLogger logr.Logger `yaml:"-"` - LabelSelector map[string]string `yaml:"label_selector,omitempty"` - PromConfig *promconfig.Config `yaml:"config"` - AllocationStrategy *string `yaml:"allocation_strategy,omitempty"` - FilterStrategy *string `yaml:"filter_strategy,omitempty"` - PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` - PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` - ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` + ListenAddr string `yaml:"listen_addr,omitempty"` + KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` + ClusterConfig *rest.Config `yaml:"-"` + RootLogger logr.Logger `yaml:"-"` + LabelSelector map[string]string `yaml:"label_selector,omitempty"` + PromConfig *promconfig.Config `yaml:"config"` + AllocationStrategy *string `yaml:"allocation_strategy,omitempty"` + FilterStrategy *string `yaml:"filter_strategy,omitempty"` + PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` + PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` + ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` + ServiceMonitorNamespaceSelector map[string]string `yaml:"service_monitor_namespace_selector,omitempty"` + PodMonitorNamespaceSelector map[string]string `yaml:"pod_monitor_namespace_selector,omitempty"` } type PrometheusCRConfig struct { diff --git a/cmd/otel-allocator/main.go b/cmd/otel-allocator/main.go index 297b7d58da..6492197651 100644 --- a/cmd/otel-allocator/main.go +++ b/cmd/otel-allocator/main.go @@ -107,7 +107,7 @@ func main() { defer close(interrupts) if cfg.PrometheusCR.Enabled { - promWatcher, err = allocatorWatcher.NewPrometheusCRWatcher(setupLog.WithName("prometheus-cr-watcher"), *cfg) + promWatcher, err = allocatorWatcher.NewPrometheusCRWatcher(ctx, setupLog.WithName("prometheus-cr-watcher"), *cfg) if err != nil { setupLog.Error(err, "Can't start the prometheus watcher") os.Exit(1) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 4739f86e01..7322af26d0 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -17,24 +17,33 @@ package watcher import ( "context" "fmt" - "regexp" + "os" "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/go-logr/logr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" promv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" monitoringclient 
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" "github.com/prometheus-operator/prometheus-operator/pkg/informers" + "github.com/prometheus-operator/prometheus-operator/pkg/k8sutil" + "github.com/prometheus-operator/prometheus-operator/pkg/listwatch" + "github.com/prometheus-operator/prometheus-operator/pkg/operator" "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" - "github.com/prometheus/common/model" + prometheusgoclient "github.com/prometheus/client_golang/prometheus" + + // "github.com/prometheus/common/model" promconfig "github.com/prometheus/prometheus/config" kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes" - "github.com/prometheus/prometheus/model/relabel" + // "github.com/prometheus/prometheus/model/relabel" "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + // "k8s.io/apimachinery/pkg/fields" + // "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -42,8 +51,11 @@ import ( ) const minEventInterval = time.Second * 5 +const ( + resyncPeriod = 5 * time.Minute +) -func NewPrometheusCRWatcher(logger logr.Logger, cfg allocatorconfig.Config) (*PrometheusCRWatcher, error) { +func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocatorconfig.Config) (*PrometheusCRWatcher, error) { mClient, err := monitoringclient.NewForConfig(cfg.ClusterConfig) if err != nil { return nil, err @@ -66,52 +78,113 @@ func NewPrometheusCRWatcher(logger logr.Logger, cfg allocatorconfig.Config) (*Pr Spec: monitoringv1.PrometheusSpec{ CommonPrometheusFields: monitoringv1.CommonPrometheusFields{ ScrapeInterval: monitoringv1.Duration(cfg.PrometheusCR.ScrapeInterval.String()), + ServiceMonitorSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorSelector, + }, + PodMonitorSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorSelector, + }, + ServiceMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorNamespaceSelector, + }, + PodMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorNamespaceSelector, + }, }, }, } - generator, err := prometheus.NewConfigGenerator(log.NewNopLogger(), prom, true) // TODO replace Nop? 
+ promOperatorLogger := level.NewFilter(log.NewLogfmtLogger(os.Stderr), level.AllowWarn()) + + generator, err := prometheus.NewConfigGenerator(promOperatorLogger, prom, true) if err != nil { return nil, err } - servMonSelector := getSelector(cfg.ServiceMonitorSelector) + store := assets.NewStore(clientset.CoreV1(), clientset.CoreV1()) + promRegisterer := prometheusgoclient.NewRegistry() + operatorMetrics := operator.NewMetrics(promRegisterer) + + nsMonInf, err := getNamespaceInformer(ctx, map[string]struct{}{v1.NamespaceAll: {}}, promOperatorLogger, clientset, operatorMetrics) + if err != nil { + return nil, err + } - podMonSelector := getSelector(cfg.PodMonitorSelector) + resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) return &PrometheusCRWatcher{ - logger: logger, - kubeMonitoringClient: mClient, - k8sClient: clientset, - informers: monitoringInformers, - stopChannel: make(chan struct{}), - eventInterval: minEventInterval, - configGenerator: generator, - kubeConfigPath: cfg.KubeConfigFilePath, - serviceMonitorSelector: servMonSelector, - podMonitorSelector: podMonSelector, + logger: logger, + kubeMonitoringClient: mClient, + k8sClient: clientset, + informers: monitoringInformers, + nsInformer: nsMonInf, + stopChannel: make(chan struct{}), + eventInterval: minEventInterval, + configGenerator: generator, + kubeConfigPath: cfg.KubeConfigFilePath, + podMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorNamespaceSelector, + }, + serviceMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorNamespaceSelector, + }, + resourceSelector: resourceSelector, + store: store, }, nil } type PrometheusCRWatcher struct { - logger logr.Logger - kubeMonitoringClient monitoringclient.Interface - k8sClient kubernetes.Interface - informers map[string]*informers.ForResource - eventInterval time.Duration - stopChannel chan struct{} - configGenerator *prometheus.ConfigGenerator - kubeConfigPath string - - serviceMonitorSelector labels.Selector - podMonitorSelector labels.Selector + logger logr.Logger + kubeMonitoringClient monitoringclient.Interface + k8sClient kubernetes.Interface + informers map[string]*informers.ForResource + nsInformer cache.SharedIndexInformer + eventInterval time.Duration + stopChannel chan struct{} + configGenerator *prometheus.ConfigGenerator + kubeConfigPath string + podMonitorNamespaceSelector *metav1.LabelSelector + serviceMonitorNamespaceSelector *metav1.LabelSelector + resourceSelector *prometheus.ResourceSelector + store *assets.Store } -func getSelector(s map[string]string) labels.Selector { - if s == nil { - return labels.NewSelector() +func getNamespaceInformer(ctx context.Context, allowList map[string]struct{}, promOperatorLogger log.Logger, clientset kubernetes.Interface, operatorMetrics *operator.Metrics) (cache.SharedIndexInformer, error) { + + kubernetesVersion, err := clientset.Discovery().ServerVersion() + if err != nil { + // level.Error(logger).Log("msg", "failed to request Kubernetes server version", "err", err) + // cancel() + return nil, err + } + // cfg.KubernetesVersion = *kubernetesVersion + lw, _, err := listwatch.NewNamespaceListWatchFromClient( + ctx, + promOperatorLogger, + *kubernetesVersion, + clientset.CoreV1(), + clientset.AuthorizationV1().SelfSubjectAccessReviews(), + allowList, + map[string]struct{}{}, + ) + if err != nil { + return nil, err } - return labels.SelectorFromSet(s) + + // level.Debug(o.logger).Log("msg", "creating namespace 
informer", "privileged", privileged) + return cache.NewSharedIndexInformer( + operatorMetrics.NewInstrumentedListerWatcher(lw), + &v1.Namespace{}, resyncPeriod, cache.Indexers{}, + ), nil + + // nsInf := cache.NewSharedIndexInformer( + // operatorMetrics.NewInstrumentedListerWatcher( + // listwatch.NewUnprivilegedNamespaceListWatchFromClient(ctx, promOperatorLogger, clientset.CoreV1().RESTClient(), allowList, map[string]struct{}{}, fields.Everything()), + // ), + // &v1.Namespace{}, resyncPeriod, cache.Indexers{}, + // ) + + // return nsInf, nil } // getInformers returns a map of informers for the given resources. @@ -138,6 +211,44 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch // this channel needs to be buffered because notifications are asynchronous and neither producers nor consumers wait notifyEvents := make(chan struct{}, 1) + go w.nsInformer.Run(w.stopChannel) + if ok := cache.WaitForNamedCacheSync("namespace", w.stopChannel, w.nsInformer.HasSynced); !ok { + success = false + } + + w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + old := oldObj.(*v1.Namespace) + cur := newObj.(*v1.Namespace) + + // Periodic resync may resend the Namespace without changes + // in-between. + if old.ResourceVersion == cur.ResourceVersion { + return + } + + for name, selector := range map[string]*metav1.LabelSelector{ + "PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector, + "ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector, + } { + + sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector) + if err != nil { + w.logger.Error(err, "Failed to check label selection between namespaces while handling namespace updates", "selector", name) + return + } + + if sync { + select { + case notifyEvents <- struct{}{}: + default: + } + return + } + } + }, + }) + for name, resource := range w.informers { resource.Start(w.stopChannel) @@ -170,7 +281,7 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch }) } if !success { - return fmt.Errorf("failed to sync cache") + return fmt.Errorf("failed to sync one of the caches") } // limit the rate of outgoing events @@ -218,35 +329,14 @@ func (w *PrometheusCRWatcher) Close() error { } func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Config, error) { - store := assets.NewStore(w.k8sClient.CoreV1(), w.k8sClient.CoreV1()) - serviceMonitorInstances := make(map[string]*monitoringv1.ServiceMonitor) - smRetrieveErr := w.informers[monitoringv1.ServiceMonitorName].ListAll(w.serviceMonitorSelector, func(sm interface{}) { - monitor := sm.(*monitoringv1.ServiceMonitor) - key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) - validateError := w.addStoreAssetsForServiceMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.Endpoints, store) - if validateError != nil { - w.logger.Error(validateError, "Failed validating ServiceMonitor, skipping", "ServiceMonitor:", monitor.Name, "in namespace", monitor.Namespace) - } else { - serviceMonitorInstances[key] = monitor - } - }) - if smRetrieveErr != nil { - return nil, smRetrieveErr + serviceMonitorInstances, err := w.resourceSelector.SelectServiceMonitors(ctx, w.informers[monitoringv1.ServiceMonitorName].ListAllByNamespace) + if err != nil { + return nil, err } - podMonitorInstances := make(map[string]*monitoringv1.PodMonitor) - pmRetrieveErr := w.informers[monitoringv1.PodMonitorName].ListAll(w.podMonitorSelector, func(pm 
interface{}) { - monitor := pm.(*monitoringv1.PodMonitor) - key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(monitor) - validateError := w.addStoreAssetsForPodMonitor(ctx, monitor.Name, monitor.Namespace, monitor.Spec.PodMetricsEndpoints, store) - if validateError != nil { - w.logger.Error(validateError, "Failed validating PodMonitor, skipping", "PodMonitor:", monitor.Name, "in namespace", monitor.Namespace) - } else { - podMonitorInstances[key] = monitor - } - }) - if pmRetrieveErr != nil { - return nil, pmRetrieveErr + podMonitorInstances, err := w.resourceSelector.SelectPodMonitors(ctx, w.informers[monitoringv1.PodMonitorName].ListAllByNamespace) + if err != nil { + return nil, err } generatedConfig, err := w.configGenerator.GenerateServerConfiguration( @@ -262,7 +352,7 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi podMonitorInstances, map[string]*monitoringv1.Probe{}, map[string]*promv1alpha1.ScrapeConfig{}, - store, + w.store, nil, nil, nil, @@ -289,199 +379,3 @@ func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Confi } return promCfg, nil } - -// addStoreAssetsForServiceMonitor adds authentication / authorization related information to the assets store, -// based on the service monitor and endpoints specs. -// This code borrows from -// https://github.com/prometheus-operator/prometheus-operator/blob/06b5c4189f3f72737766d86103d049115c3aff48/pkg/prometheus/resource_selector.go#L73. -func (w *PrometheusCRWatcher) addStoreAssetsForServiceMonitor( - ctx context.Context, - smName, smNamespace string, - endps []monitoringv1.Endpoint, - store *assets.Store, -) error { - var err error - var validateErr error - for i, endp := range endps { - objKey := fmt.Sprintf("serviceMonitor/%s/%s/%d", smNamespace, smName, i) - - if err = store.AddBearerToken(ctx, smNamespace, endp.BearerTokenSecret, objKey); err != nil { - break - } - - if err = store.AddBasicAuth(ctx, smNamespace, endp.BasicAuth, objKey); err != nil { - break - } - - if endp.TLSConfig != nil { - if err = store.AddTLSConfig(ctx, smNamespace, endp.TLSConfig); err != nil { - break - } - } - - if err = store.AddOAuth2(ctx, smNamespace, endp.OAuth2, objKey); err != nil { - break - } - - smAuthKey := fmt.Sprintf("serviceMonitor/auth/%s/%s/%d", smNamespace, smName, i) - if err = store.AddSafeAuthorizationCredentials(ctx, smNamespace, endp.Authorization, smAuthKey); err != nil { - break - } - - for _, rl := range endp.RelabelConfigs { - if rl.Action != "" { - if validateErr = validateRelabelConfig(*rl); validateErr != nil { - break - } - } - } - - for _, rl := range endp.MetricRelabelConfigs { - if rl.Action != "" { - if validateErr = validateRelabelConfig(*rl); validateErr != nil { - break - } - } - } - } - - if err != nil { - w.logger.Error(err, "Failed to obtain credentials for a ServiceMonitor", "serviceMonitor", smName) - } - - if validateErr != nil { - return validateErr - } - - return nil -} - -// addStoreAssetsForServiceMonitor adds authentication / authorization related information to the assets store, -// based on the service monitor and pod metrics endpoints specs. -// This code borrows from -// https://github.com/prometheus-operator/prometheus-operator/blob/06b5c4189f3f72737766d86103d049115c3aff48/pkg/prometheus/resource_selector.go#L314. 
-func (w *PrometheusCRWatcher) addStoreAssetsForPodMonitor( - ctx context.Context, - pmName, pmNamespace string, - podMetricsEndps []monitoringv1.PodMetricsEndpoint, - store *assets.Store, -) error { - var err error - for i, endp := range podMetricsEndps { - objKey := fmt.Sprintf("podMonitor/%s/%s/%d", pmNamespace, pmName, i) - - if err = store.AddBearerToken(ctx, pmNamespace, &endp.BearerTokenSecret, objKey); err != nil { - break - } - - if err = store.AddBasicAuth(ctx, pmNamespace, endp.BasicAuth, objKey); err != nil { - break - } - - if endp.TLSConfig != nil { - if err = store.AddSafeTLSConfig(ctx, pmNamespace, &endp.TLSConfig.SafeTLSConfig); err != nil { - break - } - } - - if err = store.AddOAuth2(ctx, pmNamespace, endp.OAuth2, objKey); err != nil { - break - } - - smAuthKey := fmt.Sprintf("podMonitor/auth/%s/%s/%d", pmNamespace, pmName, i) - if err = store.AddSafeAuthorizationCredentials(ctx, pmNamespace, endp.Authorization, smAuthKey); err != nil { - break - } - - for _, rl := range endp.RelabelConfigs { - if rl.Action != "" { - if validateErr = validateRelabelConfig(*rl); validateErr != nil { - break - } - } - } - - for _, rl := range endp.MetricRelabelConfigs { - if rl.Action != "" { - if validateErr = validateRelabelConfig(*rl); validateErr != nil { - break - } - } - } - } - - if err != nil { - w.logger.Error(err, "Failed to obtain credentials for a PodMonitor", "podMonitor", pmName) - } - - if validateErr != nil { - return validateErr - } - - return nil -} - -// validateRelabelConfig validates relabel config for service and pod monitor, -// based on the service monitor and pod metrics endpoints specs. -// This code borrows from -// https://github.com/prometheus-operator/prometheus-operator/blob/ba536405154d18f3a6f312818283d671182af6f3/pkg/prometheus/resource_selector.go#L237 -func validateRelabelConfig(rc monitoringv1.RelabelConfig) error { - relabelTarget := regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) - - if _, err := relabel.NewRegexp(rc.Regex); err != nil { - return fmt.Errorf("invalid regex %s for relabel configuration", rc.Regex) - } - - if rc.Modulus == 0 && rc.Action == string(relabel.HashMod) { - return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") - } - - if (rc.Action == string(relabel.Replace) || rc.Action == string(relabel.HashMod) || rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && rc.TargetLabel == "" { - return fmt.Errorf("relabel configuration for %s action needs targetLabel value", rc.Action) - } - - if (rc.Action == string(relabel.Replace) || rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && !relabelTarget.MatchString(rc.TargetLabel) { - return fmt.Errorf("%q is invalid 'target_label' for %s action", rc.TargetLabel, rc.Action) - } - - if (rc.Action == string(relabel.Lowercase) || rc.Action == string(relabel.Uppercase) || rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual)) && !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || rc.Replacement == "") { - return fmt.Errorf("'replacement' can not be set for %s action", rc.Action) - } - - if rc.Action == string(relabel.LabelMap) { - if rc.Replacement != "" && !relabelTarget.MatchString(rc.Replacement) { - return fmt.Errorf("%q is invalid 'replacement' for %s action", rc.Replacement, 
rc.Action) - } - } - - if rc.Action == string(relabel.HashMod) && !model.LabelName(rc.TargetLabel).IsValid() { - return fmt.Errorf("%q is invalid 'target_label' for %s action", rc.TargetLabel, rc.Action) - } - - if rc.Action == string(relabel.KeepEqual) || rc.Action == string(relabel.DropEqual) { - if !(rc.Regex == "" || rc.Regex == relabel.DefaultRelabelConfig.Regex.String()) || - !(rc.Modulus == uint64(0) || - rc.Modulus == relabel.DefaultRelabelConfig.Modulus) || - !(rc.Separator == "" || - rc.Separator == relabel.DefaultRelabelConfig.Separator) || - !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || - rc.Replacement == "") { - return fmt.Errorf("%s action requires only 'source_labels' and `target_label`, and no other fields", rc.Action) - } - } - - if rc.Action == string(relabel.LabelDrop) || rc.Action == string(relabel.LabelKeep) { - if len(rc.SourceLabels) != 0 || - !(rc.TargetLabel == "" || - rc.TargetLabel == relabel.DefaultRelabelConfig.TargetLabel) || - !(rc.Modulus == uint64(0) || - rc.Modulus == relabel.DefaultRelabelConfig.Modulus) || - !(rc.Separator == "" || - rc.Separator == relabel.DefaultRelabelConfig.Separator) || - !(rc.Replacement == relabel.DefaultRelabelConfig.Replacement || - rc.Replacement == "") { - return fmt.Errorf("%s action requires only 'regex', and no other fields", rc.Action) - } - } - return nil -} diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index d949364572..cf0dc72b35 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -16,7 +16,7 @@ package watcher import ( "context" - "fmt" + // "fmt" "os" "testing" "time" @@ -724,7 +724,7 @@ func TestLoadConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - w := getTestPrometheuCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg) + w := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg) // Start namespace informers in order to populate cache. go w.nsInformer.Run(w.stopChannel) @@ -771,8 +771,9 @@ func TestRateLimit(t *testing.T) { } events := make(chan Event, 1) eventInterval := 5 * time.Millisecond + cfg := allocatorconfig.Config{} - w := getTestPrometheusCRWatcher(t, nil, nil) + w := getTestPrometheusCRWatcher(t, nil, nil, cfg) defer w.Close() w.eventInterval = eventInterval @@ -833,9 +834,9 @@ func TestRateLimit(t *testing.T) { } -// getTestPrometheuCRWatcher creates a test instance of PrometheusCRWatcher with fake clients +// getTestPrometheusCRWatcher creates a test instance of PrometheusCRWatcher with fake clients // and test secrets. 
-func getTestPrometheuCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) *PrometheusCRWatcher { +func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) *PrometheusCRWatcher { mClient := fakemonitoringclient.NewSimpleClientset() for _, sm := range svcMonitors { if sm != nil { From 2c276dc617736645e4aa2a0527f0d66ef1d63dd9 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 29 Nov 2023 11:24:20 -0800 Subject: [PATCH 05/22] cleaning up --- cmd/otel-allocator/watcher/promOperator.go | 16 ---------------- cmd/otel-allocator/watcher/promOperator_test.go | 1 - 2 files changed, 17 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index b3a53c6bf9..946ad44777 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -34,15 +34,11 @@ import ( "github.com/prometheus-operator/prometheus-operator/pkg/prometheus" prometheusgoclient "github.com/prometheus/client_golang/prometheus" - // "github.com/prometheus/common/model" promconfig "github.com/prometheus/prometheus/config" kubeDiscovery "github.com/prometheus/prometheus/discovery/kubernetes" - // "github.com/prometheus/prometheus/model/relabel" "gopkg.in/yaml.v2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - // "k8s.io/apimachinery/pkg/fields" - // "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -152,11 +148,8 @@ func getNamespaceInformer(ctx context.Context, allowList map[string]struct{}, pr kubernetesVersion, err := clientset.Discovery().ServerVersion() if err != nil { - // level.Error(logger).Log("msg", "failed to request Kubernetes server version", "err", err) - // cancel() return nil, err } - // cfg.KubernetesVersion = *kubernetesVersion lw, _, err := listwatch.NewNamespaceListWatchFromClient( ctx, promOperatorLogger, @@ -170,20 +163,11 @@ func getNamespaceInformer(ctx context.Context, allowList map[string]struct{}, pr return nil, err } - // level.Debug(o.logger).Log("msg", "creating namespace informer", "privileged", privileged) return cache.NewSharedIndexInformer( operatorMetrics.NewInstrumentedListerWatcher(lw), &v1.Namespace{}, resyncPeriod, cache.Indexers{}, ), nil - // nsInf := cache.NewSharedIndexInformer( - // operatorMetrics.NewInstrumentedListerWatcher( - // listwatch.NewUnprivilegedNamespaceListWatchFromClient(ctx, promOperatorLogger, clientset.CoreV1().RESTClient(), allowList, map[string]struct{}{}, fields.Everything()), - // ), - // &v1.Namespace{}, resyncPeriod, cache.Indexers{}, - // ) - - // return nsInf, nil } // getInformers returns a map of informers for the given resources. 
diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index cf0dc72b35..4f05ff87e2 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -16,7 +16,6 @@ package watcher import ( "context" - // "fmt" "os" "testing" "time" From e97f969661d29750b3c1a30bce9f23c7f6f02b82 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 29 Nov 2023 11:30:25 -0800 Subject: [PATCH 06/22] adding change log --- ...commended-prometheus-operator-interfaces.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml diff --git a/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml b/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml new file mode 100644 index 0000000000..52e4a35018 --- /dev/null +++ b/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action) +component: target allocator + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Use recommended interfaces(resource selector) by the prometheus-operator for watching CRs. + +# One or more tracking issues related to the change +issues: [2309] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: \ No newline at end of file From 533dff81f473d6387af63b9de82aac3bc2ed6dde Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Mon, 4 Dec 2023 12:38:30 -0800 Subject: [PATCH 07/22] running goimports and adding return value check code for namespace informer --- cmd/otel-allocator/watcher/promOperator.go | 7 ++++++- cmd/otel-allocator/watcher/promOperator_test.go | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 946ad44777..b9430e200e 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -199,7 +199,12 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch success = false } - w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + // The controller needs to watch the namespaces in which the service/pod + // monitors live because a label change on a namespace may + // trigger a configuration change. + // It doesn't need to watch on addition/deletion though because it's + // already covered by the event handlers on service/pod monitors. 
+ _, _ = w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { old := oldObj.(*v1.Namespace) cur := newObj.(*v1.Namespace) diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index 4f05ff87e2..1359cc5ed0 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -22,7 +22,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "github.com/prometheus-operator/prometheus-operator/pkg/assets" fakemonitoringclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" @@ -42,6 +41,8 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" fcache "k8s.io/client-go/tools/cache/testing" + + allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) func TestLoadConfig(t *testing.T) { From 537a2bbae69f417c74fcb5a5560382b315170683 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Mon, 4 Dec 2023 12:44:30 -0800 Subject: [PATCH 08/22] fixing lint error --- cmd/otel-allocator/config/config.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go index 790c70e4dd..f26ee058d6 100644 --- a/cmd/otel-allocator/config/config.go +++ b/cmd/otel-allocator/config/config.go @@ -39,18 +39,18 @@ const DefaultConfigFilePath string = "/conf/targetallocator.yaml" const DefaultCRScrapeInterval model.Duration = model.Duration(time.Second * 30) type Config struct { - ListenAddr string `yaml:"listen_addr,omitempty"` - KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` - ClusterConfig *rest.Config `yaml:"-"` - RootLogger logr.Logger `yaml:"-"` - ReloadConfig bool `yaml:"-"` - LabelSelector map[string]string `yaml:"label_selector,omitempty"` - PromConfig *promconfig.Config `yaml:"config"` - AllocationStrategy *string `yaml:"allocation_strategy,omitempty"` - FilterStrategy *string `yaml:"filter_strategy,omitempty"` - PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` - PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` - ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` + ListenAddr string `yaml:"listen_addr,omitempty"` + KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` + ClusterConfig *rest.Config `yaml:"-"` + RootLogger logr.Logger `yaml:"-"` + ReloadConfig bool `yaml:"-"` + LabelSelector map[string]string `yaml:"label_selector,omitempty"` + PromConfig *promconfig.Config `yaml:"config"` + AllocationStrategy *string `yaml:"allocation_strategy,omitempty"` + FilterStrategy *string `yaml:"filter_strategy,omitempty"` + PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` + PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` + ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` ServiceMonitorNamespaceSelector map[string]string `yaml:"service_monitor_namespace_selector,omitempty"` PodMonitorNamespaceSelector map[string]string `yaml:"pod_monitor_namespace_selector,omitempty"` } From 90bcb75c363a81201fe2f714efb040bfe88b26df Mon Sep 17 00:00:00 2001 From: Rashmi 
Chandrashekar Date: Mon, 4 Dec 2023 13:20:40 -0800 Subject: [PATCH 09/22] fixing tests and comment --- cmd/otel-allocator/watcher/promOperator.go | 4 ++-- .../00-promreceiver-allocatorconfig.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index b9430e200e..7e00260f98 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -45,9 +45,9 @@ import ( allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) -const minEventInterval = time.Second * 5 const ( - resyncPeriod = 5 * time.Minute + resyncPeriod = 5 * time.Minute + minEventInterval = time.Second * 5 ) func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocatorconfig.Config) (*PrometheusCRWatcher, error) { diff --git a/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml b/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml index b248e0f30e..fe6d8e33f6 100644 --- a/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml +++ b/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml @@ -10,7 +10,7 @@ metadata: name: pod-view rules: - apiGroups: [""] - resources: [ "pods" ] + resources: [ "pods", "namespaces" ] verbs: [ "get", "list", "watch"] --- apiVersion: kuttl.dev/v1beta1 From dfcc7463daf9da98fcedfde28b1f67a1601c306e Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Mon, 4 Dec 2023 14:38:32 -0800 Subject: [PATCH 10/22] adding permissions for e2e tests --- tests/e2e/smoke-targetallocator/00-install.yaml | 2 +- tests/e2e/targetallocator-features/00-install.yaml | 2 +- tests/e2e/targetallocator-prometheuscr/00-install.yaml | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/e2e/smoke-targetallocator/00-install.yaml b/tests/e2e/smoke-targetallocator/00-install.yaml index e61d014c5f..f8d3c98ba4 100644 --- a/tests/e2e/smoke-targetallocator/00-install.yaml +++ b/tests/e2e/smoke-targetallocator/00-install.yaml @@ -10,7 +10,7 @@ metadata: name: pod-view rules: - apiGroups: [""] - resources: [ "pods" ] + resources: [ "pods", "namespaces" ] verbs: [ "get", "list", "watch"] --- apiVersion: kuttl.dev/v1beta1 diff --git a/tests/e2e/targetallocator-features/00-install.yaml b/tests/e2e/targetallocator-features/00-install.yaml index 9b75be12db..021ca631ca 100644 --- a/tests/e2e/targetallocator-features/00-install.yaml +++ b/tests/e2e/targetallocator-features/00-install.yaml @@ -10,7 +10,7 @@ metadata: name: pod-view rules: - apiGroups: [""] - resources: [ "pods" ] + resources: [ "pods", "namespaces" ] verbs: [ "get", "list", "watch"] --- apiVersion: kuttl.dev/v1beta1 diff --git a/tests/e2e/targetallocator-prometheuscr/00-install.yaml b/tests/e2e/targetallocator-prometheuscr/00-install.yaml index 148763a3f6..7c937672fd 100644 --- a/tests/e2e/targetallocator-prometheuscr/00-install.yaml +++ b/tests/e2e/targetallocator-prometheuscr/00-install.yaml @@ -74,6 +74,7 @@ rules: - nodes/metrics - services - endpoints + - namespaces verbs: - get - watch From 5406145d1fea05a261e0cf00aad05d208466b135 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Mon, 4 Dec 2023 15:50:20 -0800 Subject: [PATCH 11/22] adding cluster roles instead of roles --- .../00-promreceiver-allocatorconfig.yaml | 4 ++-- tests/e2e/smoke-targetallocator/00-install.yaml | 4 ++-- tests/e2e/targetallocator-features/00-install.yaml | 4 ++-- 3 files 
changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml b/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml index fe6d8e33f6..96ae50355d 100644 --- a/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml +++ b/tests/e2e/prometheus-config-validation/00-promreceiver-allocatorconfig.yaml @@ -5,7 +5,7 @@ metadata: automountServiceAccountToken: true --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: name: pod-view rules: @@ -16,7 +16,7 @@ rules: apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl -n $NAMESPACE create rolebinding default-view-$NAMESPACE --role=pod-view --serviceaccount=$NAMESPACE:ta + - command: kubectl -n $NAMESPACE create clusterrolebinding default-view-$NAMESPACE --clusterrole=pod-view --serviceaccount=$NAMESPACE:ta --- apiVersion: opentelemetry.io/v1alpha1 kind: OpenTelemetryCollector diff --git a/tests/e2e/smoke-targetallocator/00-install.yaml b/tests/e2e/smoke-targetallocator/00-install.yaml index f8d3c98ba4..7395af45ec 100644 --- a/tests/e2e/smoke-targetallocator/00-install.yaml +++ b/tests/e2e/smoke-targetallocator/00-install.yaml @@ -5,7 +5,7 @@ metadata: automountServiceAccountToken: true --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: name: pod-view rules: @@ -16,7 +16,7 @@ rules: apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl -n $NAMESPACE create rolebinding default-view-$NAMESPACE --role=pod-view --serviceaccount=$NAMESPACE:ta + - command: kubectl -n $NAMESPACE create clusterrolebinding default-view-$NAMESPACE --clusterrole=pod-view --serviceaccount=$NAMESPACE:ta --- apiVersion: opentelemetry.io/v1alpha1 kind: OpenTelemetryCollector diff --git a/tests/e2e/targetallocator-features/00-install.yaml b/tests/e2e/targetallocator-features/00-install.yaml index 021ca631ca..bae9097065 100644 --- a/tests/e2e/targetallocator-features/00-install.yaml +++ b/tests/e2e/targetallocator-features/00-install.yaml @@ -5,7 +5,7 @@ metadata: automountServiceAccountToken: true --- apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: name: pod-view rules: @@ -16,7 +16,7 @@ rules: apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: kubectl -n $NAMESPACE create rolebinding default-view-$NAMESPACE --role=pod-view --serviceaccount=$NAMESPACE:ta + - command: kubectl -n $NAMESPACE create clusterrolebinding default-view-$NAMESPACE --clusterrole=pod-view --serviceaccount=$NAMESPACE:ta --- apiVersion: opentelemetry.io/v1alpha1 kind: OpenTelemetryCollector From 883b583c4c93038d6fddc55b192659c4aa90f0fe Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Mon, 4 Dec 2023 15:55:34 -0800 Subject: [PATCH 12/22] updaintg readme --- cmd/otel-allocator/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/otel-allocator/README.md b/cmd/otel-allocator/README.md index 0f4419e8ef..37c7ba9c54 100644 --- a/cmd/otel-allocator/README.md +++ b/cmd/otel-allocator/README.md @@ -124,7 +124,7 @@ to collector instance pods by default. ### RBAC -The ServiceAccount that the TargetAllocator runs as, has to have access to the CRs. A role like this will provide that +The ServiceAccount that the TargetAllocator runs as, has to have access to the CRs and the namespaces to watch for the pod and service monitors. A role like this will provide that access. 
```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -139,6 +139,10 @@ rules: - podmonitors verbs: - '*' +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] ``` In addition, the TargetAllocator needs the same permissions as a Prometheus instance would to find the matching targets from the CR instances. From 7885db1c1b28bede22e0bb532644f5adec621635 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Tue, 5 Dec 2023 12:38:45 -0800 Subject: [PATCH 13/22] fixing comments --- cmd/otel-allocator/watcher/promOperator.go | 171 +++++++++++---------- 1 file changed, 89 insertions(+), 82 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 7e00260f98..1f6918e146 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -45,12 +45,13 @@ import ( allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) +const minEventInterval = time.Second * 5 const ( - resyncPeriod = 5 * time.Minute - minEventInterval = time.Second * 5 + resyncPeriod = 5 * time.Minute ) func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocatorconfig.Config) (*PrometheusCRWatcher, error) { + var resourceSelector *prometheus.ResourceSelector mClient, err := monitoringclient.NewForConfig(cfg.ClusterConfig) if err != nil { return nil, err @@ -102,10 +103,11 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat nsMonInf, err := getNamespaceInformer(ctx, map[string]struct{}{v1.NamespaceAll: {}}, promOperatorLogger, clientset, operatorMetrics) if err != nil { - return nil, err - } + logger.Error(err, "Failed to create namespace informer in promOperator CRD watcher") - resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) + } else { + resourceSelector = prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) + } return &PrometheusCRWatcher{ logger: logger, @@ -194,48 +196,47 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch // this channel needs to be buffered because notifications are asynchronous and neither producers nor consumers wait notifyEvents := make(chan struct{}, 1) - go w.nsInformer.Run(w.stopChannel) - if ok := cache.WaitForNamedCacheSync("namespace", w.stopChannel, w.nsInformer.HasSynced); !ok { - success = false - } - - // The controller needs to watch the namespaces in which the service/pod - // monitors live because a label change on a namespace may - // trigger a configuration change. - // It doesn't need to watch on addition/deletion though because it's - // already covered by the event handlers on service/pod monitors. - _, _ = w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(*v1.Namespace) - cur := newObj.(*v1.Namespace) - - // Periodic resync may resend the Namespace without changes - // in-between. 
- if old.ResourceVersion == cur.ResourceVersion { - return - } + if w.nsInformer != nil { + go w.nsInformer.Run(w.stopChannel) + if ok := cache.WaitForNamedCacheSync("namespace", w.stopChannel, w.nsInformer.HasSynced); !ok { + success = false + } - for name, selector := range map[string]*metav1.LabelSelector{ - "PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector, - "ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector, - } { + w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + UpdateFunc: func(oldObj, newObj interface{}) { + old := oldObj.(*v1.Namespace) + cur := newObj.(*v1.Namespace) - sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector) - if err != nil { - w.logger.Error(err, "Failed to check label selection between namespaces while handling namespace updates", "selector", name) + // Periodic resync may resend the Namespace without changes + // in-between. + if old.ResourceVersion == cur.ResourceVersion { return } - if sync { - select { - case notifyEvents <- struct{}{}: - default: + for name, selector := range map[string]*metav1.LabelSelector{ + "PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector, + "ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector, + } { + + sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector) + if err != nil { + w.logger.Error(err, "Failed to check label selection between namespaces while handling namespace updates", "selector", name) + return + } + + if sync { + select { + case notifyEvents <- struct{}{}: + default: + } + return } - return } - } - }, - }) + }, + }) + } else { + w.logger.Info("Unable to watch namespaces since namespace informer is nil") + } for name, resource := range w.informers { resource.Start(w.stopChannel) @@ -317,53 +318,59 @@ func (w *PrometheusCRWatcher) Close() error { } func (w *PrometheusCRWatcher) LoadConfig(ctx context.Context) (*promconfig.Config, error) { - serviceMonitorInstances, err := w.resourceSelector.SelectServiceMonitors(ctx, w.informers[monitoringv1.ServiceMonitorName].ListAllByNamespace) - if err != nil { - return nil, err - } + promCfg := &promconfig.Config{} - podMonitorInstances, err := w.resourceSelector.SelectPodMonitors(ctx, w.informers[monitoringv1.PodMonitorName].ListAllByNamespace) - if err != nil { - return nil, err - } + if w.resourceSelector != nil { + serviceMonitorInstances, err := w.resourceSelector.SelectServiceMonitors(ctx, w.informers[monitoringv1.ServiceMonitorName].ListAllByNamespace) + if err != nil { + return nil, err + } - generatedConfig, err := w.configGenerator.GenerateServerConfiguration( - ctx, - "30s", - "", - nil, - nil, - monitoringv1.TSDBSpec{}, - nil, - nil, - serviceMonitorInstances, - podMonitorInstances, - map[string]*monitoringv1.Probe{}, - map[string]*promv1alpha1.ScrapeConfig{}, - w.store, - nil, - nil, - nil, - []string{}) - if err != nil { - return nil, err - } + podMonitorInstances, err := w.resourceSelector.SelectPodMonitors(ctx, w.informers[monitoringv1.PodMonitorName].ListAllByNamespace) + if err != nil { + return nil, err + } - promCfg := &promconfig.Config{} - unmarshalErr := yaml.Unmarshal(generatedConfig, promCfg) - if unmarshalErr != nil { - return nil, unmarshalErr - } + generatedConfig, err := w.configGenerator.GenerateServerConfiguration( + ctx, + "30s", + "", + nil, + nil, + monitoringv1.TSDBSpec{}, + nil, + nil, + serviceMonitorInstances, + podMonitorInstances, + map[string]*monitoringv1.Probe{}, + map[string]*promv1alpha1.ScrapeConfig{}, + w.store, + 
nil, + nil, + nil, + []string{}) + if err != nil { + return nil, err + } - // set kubeconfig path to service discovery configs, else kubernetes_sd will always attempt in-cluster - // authentication even if running with a detected kubeconfig - for _, scrapeConfig := range promCfg.ScrapeConfigs { - for _, serviceDiscoveryConfig := range scrapeConfig.ServiceDiscoveryConfigs { - if serviceDiscoveryConfig.Name() == "kubernetes" { - sdConfig := interface{}(serviceDiscoveryConfig).(*kubeDiscovery.SDConfig) - sdConfig.KubeConfig = w.kubeConfigPath + unmarshalErr := yaml.Unmarshal(generatedConfig, promCfg) + if unmarshalErr != nil { + return nil, unmarshalErr + } + + // set kubeconfig path to service discovery configs, else kubernetes_sd will always attempt in-cluster + // authentication even if running with a detected kubeconfig + for _, scrapeConfig := range promCfg.ScrapeConfigs { + for _, serviceDiscoveryConfig := range scrapeConfig.ServiceDiscoveryConfigs { + if serviceDiscoveryConfig.Name() == "kubernetes" { + sdConfig := interface{}(serviceDiscoveryConfig).(*kubeDiscovery.SDConfig) + sdConfig.KubeConfig = w.kubeConfigPath + } } } + return promCfg, nil + } else { + w.logger.Info("Unable to load config since resource selector is nil, returning empty prometheus config") + return promCfg, nil } - return promCfg, nil } From c9f3aa5a1a7b54c4e8948978d019f3280bb9536a Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Tue, 2 Jan 2024 13:39:14 -0800 Subject: [PATCH 14/22] adding contant to same block --- cmd/otel-allocator/watcher/promOperator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 1f6918e146..37176dae69 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -45,9 +45,9 @@ import ( allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) -const minEventInterval = time.Second * 5 const ( resyncPeriod = 5 * time.Minute + minEventInterval = time.Second * 5 ) func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocatorconfig.Config) (*PrometheusCRWatcher, error) { From 4fc140349c8d6c40fd3706e68accb80912a712bf Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Fri, 12 Jan 2024 15:54:26 -0800 Subject: [PATCH 15/22] fixing lint errors --- cmd/otel-allocator/watcher/promOperator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index 37176dae69..c0df1b5a0d 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -46,7 +46,7 @@ import ( ) const ( - resyncPeriod = 5 * time.Minute + resyncPeriod = 5 * time.Minute minEventInterval = time.Second * 5 ) @@ -202,7 +202,7 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch success = false } - w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + _, _ = w.nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { old := oldObj.(*v1.Namespace) cur := newObj.(*v1.Namespace) From a943cdb600b4fb08dcb48800780e4a62379c2c26 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Fri, 12 Jan 2024 17:34:35 -0800 Subject: [PATCH 16/22] running go import --- cmd/otel-allocator/config/config.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff 
--git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go index 4256e75b12..1d56f4f836 100644 --- a/cmd/otel-allocator/config/config.go +++ b/cmd/otel-allocator/config/config.go @@ -44,19 +44,19 @@ const ( ) type Config struct { - ListenAddr string `yaml:"listen_addr,omitempty"` - KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` - ClusterConfig *rest.Config `yaml:"-"` - RootLogger logr.Logger `yaml:"-"` - CollectorSelector *metav1.LabelSelector `yaml:"collector_selector,omitempty"` - PromConfig *promconfig.Config `yaml:"config"` - AllocationStrategy string `yaml:"allocation_strategy,omitempty"` - FilterStrategy string `yaml:"filter_strategy,omitempty"` - PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` - PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` - ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` - ServiceMonitorNamespaceSelector map[string]string `yaml:"service_monitor_namespace_selector,omitempty"` - PodMonitorNamespaceSelector map[string]string `yaml:"pod_monitor_namespace_selector,omitempty"` + ListenAddr string `yaml:"listen_addr,omitempty"` + KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"` + ClusterConfig *rest.Config `yaml:"-"` + RootLogger logr.Logger `yaml:"-"` + CollectorSelector *metav1.LabelSelector `yaml:"collector_selector,omitempty"` + PromConfig *promconfig.Config `yaml:"config"` + AllocationStrategy string `yaml:"allocation_strategy,omitempty"` + FilterStrategy string `yaml:"filter_strategy,omitempty"` + PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` + PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` + ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` + ServiceMonitorNamespaceSelector map[string]string `yaml:"service_monitor_namespace_selector,omitempty"` + PodMonitorNamespaceSelector map[string]string `yaml:"pod_monitor_namespace_selector,omitempty"` } type PrometheusCRConfig struct { From bf33615e30ba8961fd181bae00b38a080d2dbb8a Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Fri, 12 Jan 2024 17:55:11 -0800 Subject: [PATCH 17/22] adding namespaces since that is required for informer --- apis/v1alpha1/collector_webhook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apis/v1alpha1/collector_webhook.go b/apis/v1alpha1/collector_webhook.go index 48aeb34f6e..5e9af9d3ad 100644 --- a/apis/v1alpha1/collector_webhook.go +++ b/apis/v1alpha1/collector_webhook.go @@ -47,7 +47,7 @@ var ( Verbs: []string{"*"}, }, { APIGroups: []string{""}, - Resources: []string{"nodes", "nodes/metrics", "services", "endpoints", "pods"}, + Resources: []string{"nodes", "nodes/metrics", "services", "endpoints", "pods", "namespaces"}, Verbs: []string{"get", "list", "watch"}, }, { APIGroups: []string{""}, From bc6dc56fadcbe59cabac8c80e4a1ab8003ac4298 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Sat, 13 Jan 2024 11:55:13 -0800 Subject: [PATCH 18/22] adding extected warnings --- apis/v1alpha1/collector_webhook_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/apis/v1alpha1/collector_webhook_test.go b/apis/v1alpha1/collector_webhook_test.go index ea67ac5886..286e9a5aa8 100644 --- a/apis/v1alpha1/collector_webhook_test.go +++ b/apis/v1alpha1/collector_webhook_test.go @@ -563,6 +563,7 @@ func TestOTELColValidatingWebhook(t *testing.T) { "missing the following rules for nodes/metrics: [get,list,watch]", "missing the following rules for 
services: [get,list,watch]", "missing the following rules for endpoints: [get,list,watch]", + "missing the following rules for namespaces: [get,list,watch]", "missing the following rules for networking.k8s.io/ingresses: [get,list,watch]", "missing the following rules for nodes: [get,list,watch]", "missing the following rules for pods: [get,list,watch]", From b742f0a4838e6e40bee67ec29926d70cb43d370f Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Wed, 17 Jan 2024 16:47:58 -0800 Subject: [PATCH 19/22] addressing comments --- ...mended-prometheus-operator-interfaces.yaml | 4 +- cmd/otel-allocator/README.md | 2 +- cmd/otel-allocator/watcher/promOperator.go | 4 +- .../watcher/promOperator_test.go | 65 ++++++++++++++++++- 4 files changed, 70 insertions(+), 5 deletions(-) diff --git a/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml b/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml index 52e4a35018..2a5afa0682 100644 --- a/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml +++ b/.chloggen/target-allocator-use-recommended-prometheus-operator-interfaces.yaml @@ -1,5 +1,5 @@ # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement +change_type: breaking # The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action) component: target allocator @@ -13,4 +13,4 @@ issues: [2309] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: \ No newline at end of file +subtext: The target allocator now requires get/list/watch permissions for namespaces. Update your RBAC permissions for the attached role, if necessary. \ No newline at end of file diff --git a/cmd/otel-allocator/README.md b/cmd/otel-allocator/README.md index 37c7ba9c54..44b545efcc 100644 --- a/cmd/otel-allocator/README.md +++ b/cmd/otel-allocator/README.md @@ -142,7 +142,7 @@ rules: - apiGroups: [""] resources: - namespaces - verbs: ["list", "watch"] + verbs: ["get", "list", "watch"] ``` In addition, the TargetAllocator needs the same permissions as a Prometheus instance would to find the matching targets from the CR instances. 
diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index c0df1b5a0d..bf222d91a9 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -100,13 +100,15 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat store := assets.NewStore(clientset.CoreV1(), clientset.CoreV1()) promRegisterer := prometheusgoclient.NewRegistry() operatorMetrics := operator.NewMetrics(promRegisterer) + // eventRecorderFactory := operator.NewEventRecorderFactory(false) + eventRecorder := operator.NewEventRecorder(clientset, "target-allocator") nsMonInf, err := getNamespaceInformer(ctx, map[string]struct{}{v1.NamespaceAll: {}}, promOperatorLogger, clientset, operatorMetrics) if err != nil { logger.Error(err, "Failed to create namespace informer in promOperator CRD watcher") } else { - resourceSelector = prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) + resourceSelector = prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics, eventRecorder) } return &PrometheusCRWatcher{ diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index aef37fa5f4..c3ee2631dc 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -724,6 +724,68 @@ func TestLoadConfig(t *testing.T) { }, }, }, + // { + // name: "pod monitor namespace label update test", + // podMonitors: []*monitoringv1.PodMonitor{ + // { + // ObjectMeta: metav1.ObjectMeta{ + // Name: "pm-1", + // Namespace: "labellednamespace", + // }, + // Spec: monitoringv1.PodMonitorSpec{ + // JobLabel: "test", + // PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + // { + // Port: "web", + // }, + // }, + // }, + // }, + // { + // ObjectMeta: metav1.ObjectMeta{ + // Name: "pm-2", + // Namespace: "test", + // }, + // Spec: monitoringv1.PodMonitorSpec{ + // JobLabel: "test", + // PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + // { + // Port: "web", + // }, + // }, + // }, + // }, + // }, + // cfg: allocatorconfig.Config{ + // PodMonitorNamespaceSelector: map[string]string{ + // "label1": "label1", + // }, + // }, + // want: &promconfig.Config{ + // ScrapeConfigs: []*promconfig.ScrapeConfig{ + // { + // JobName: "podMonitor/labellednamespace/pm-1/0", + // ScrapeInterval: model.Duration(30 * time.Second), + // ScrapeTimeout: model.Duration(10 * time.Second), + // HonorTimestamps: true, + // HonorLabels: false, + // Scheme: "http", + // MetricsPath: "/metrics", + // ServiceDiscoveryConfigs: []discovery.Config{ + // &kubeDiscovery.SDConfig{ + // Role: "pod", + // NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + // Names: []string{"labellednamespace"}, + // IncludeOwnNamespace: false, + // }, + // HTTPClientConfig: config.DefaultHTTPClientConfig, + // }, + // }, + // HTTPClientConfig: config.DefaultHTTPClientConfig, + // }, + // }, + // }, + // }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -916,6 +978,7 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic store := assets.NewStore(k8sClient.CoreV1(), k8sClient.CoreV1()) promRegisterer := prometheusgoclient.NewRegistry() operatorMetrics := operator.NewMetrics(promRegisterer) + eventRecorder := operator.NewEventRecorder(k8sClient, "target-allocator") source := fcache.NewFakeControllerSource() source.Add(&v1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: "test"}}) @@ -928,7 +991,7 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic // create the shared informer and resync every 1s nsMonInf := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(cache.SharedIndexInformer) - resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics) + resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics, eventRecorder) return &PrometheusCRWatcher{ kubeMonitoringClient: mClient, From d062dbde7dbbc4084752626a91f36fd6b20a6c28 Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Thu, 18 Jan 2024 15:36:43 -0800 Subject: [PATCH 20/22] adding test for namespace label update --- cmd/otel-allocator/watcher/promOperator.go | 2 - .../watcher/promOperator_test.go | 205 +++++++++++------- 2 files changed, 132 insertions(+), 75 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index bf222d91a9..e3abd27c62 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -100,7 +100,6 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat store := assets.NewStore(clientset.CoreV1(), clientset.CoreV1()) promRegisterer := prometheusgoclient.NewRegistry() operatorMetrics := operator.NewMetrics(promRegisterer) - // eventRecorderFactory := operator.NewEventRecorderFactory(false) eventRecorder := operator.NewEventRecorder(clientset, "target-allocator") nsMonInf, err := getNamespaceInformer(ctx, map[string]struct{}{v1.NamespaceAll: {}}, promOperatorLogger, clientset, operatorMetrics) @@ -219,7 +218,6 @@ func (w *PrometheusCRWatcher) Watch(upstreamEvents chan Event, upstreamErrors ch "PodMonitorNamespaceSelector": w.podMonitorNamespaceSelector, "ServiceMonitorNamespaceSelector": w.serviceMonitorNamespaceSelector, } { - sync, err := k8sutil.LabelSelectionHasChanged(old.Labels, cur.Labels, selector) if err != nil { w.logger.Error(err, "Failed to check label selection between namespaces while handling namespace updates", "selector", name) diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index c3ee2631dc..b68b542686 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -16,10 +16,6 @@ package watcher import ( "context" - "os" - "testing" - "time" - "github.com/go-kit/log" "github.com/go-kit/log/level" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -41,6 +37,9 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" fcache "k8s.io/client-go/tools/cache/testing" + "os" + "testing" + "time" allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) @@ -724,72 +723,10 @@ func TestLoadConfig(t *testing.T) { }, }, }, - // { - // name: "pod monitor namespace label update test", - // podMonitors: []*monitoringv1.PodMonitor{ - // { - // ObjectMeta: metav1.ObjectMeta{ - // Name: "pm-1", - // Namespace: "labellednamespace", - // }, - // Spec: monitoringv1.PodMonitorSpec{ - // JobLabel: "test", - // PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ - // { - // Port: "web", - // }, - // }, - // }, - // }, - // { - // ObjectMeta: metav1.ObjectMeta{ - // Name: "pm-2", - // Namespace: "test", - // }, - // Spec: monitoringv1.PodMonitorSpec{ - // 
JobLabel: "test", - // PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ - // { - // Port: "web", - // }, - // }, - // }, - // }, - // }, - // cfg: allocatorconfig.Config{ - // PodMonitorNamespaceSelector: map[string]string{ - // "label1": "label1", - // }, - // }, - // want: &promconfig.Config{ - // ScrapeConfigs: []*promconfig.ScrapeConfig{ - // { - // JobName: "podMonitor/labellednamespace/pm-1/0", - // ScrapeInterval: model.Duration(30 * time.Second), - // ScrapeTimeout: model.Duration(10 * time.Second), - // HonorTimestamps: true, - // HonorLabels: false, - // Scheme: "http", - // MetricsPath: "/metrics", - // ServiceDiscoveryConfigs: []discovery.Config{ - // &kubeDiscovery.SDConfig{ - // Role: "pod", - // NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ - // Names: []string{"labellednamespace"}, - // IncludeOwnNamespace: false, - // }, - // HTTPClientConfig: config.DefaultHTTPClientConfig, - // }, - // }, - // HTTPClientConfig: config.DefaultHTTPClientConfig, - // }, - // }, - // }, - // }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - w := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg) + w, _ := getTestPrometheusCRWatcher(t, tt.serviceMonitors, tt.podMonitors, tt.cfg) // Start namespace informers in order to populate cache. go w.nsInformer.Run(w.stopChannel) @@ -818,6 +755,122 @@ func TestLoadConfig(t *testing.T) { } } +func TestNamespaceLabelUpdate(t *testing.T) { + var err error + podMonitors := []*monitoringv1.PodMonitor{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-1", + Namespace: "labellednamespace", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pm-2", + Namespace: "test", + }, + Spec: monitoringv1.PodMonitorSpec{ + JobLabel: "test", + PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{ + { + Port: "web", + }, + }, + }, + }, + } + + cfg := allocatorconfig.Config{ + PodMonitorNamespaceSelector: map[string]string{ + "label1": "label1", + }, + } + + want_before := &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{ + { + JobName: "podMonitor/labellednamespace/pm-1/0", + ScrapeInterval: model.Duration(30 * time.Second), + ScrapeTimeout: model.Duration(10 * time.Second), + HonorTimestamps: true, + HonorLabels: false, + Scheme: "http", + MetricsPath: "/metrics", + ServiceDiscoveryConfigs: []discovery.Config{ + &kubeDiscovery.SDConfig{ + Role: "pod", + NamespaceDiscovery: kubeDiscovery.NamespaceDiscovery{ + Names: []string{"labellednamespace"}, + IncludeOwnNamespace: false, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + }, + }, + } + + want_after := &promconfig.Config{ + ScrapeConfigs: []*promconfig.ScrapeConfig{}, + } + + w, source := getTestPrometheusCRWatcher(t, nil, podMonitors, cfg) + events := make(chan Event, 1) + eventInterval := 5 * time.Millisecond + + defer w.Close() + w.eventInterval = eventInterval + + go func() { + watchErr := w.Watch(events, make(chan error)) + require.NoError(t, watchErr) + }() + + if success := cache.WaitForNamedCacheSync("namespace", w.stopChannel, w.nsInformer.HasSynced); !success { + require.True(t, success) + } + + for _, informer := range w.informers { + success := cache.WaitForCacheSync(w.stopChannel, informer.HasSynced) + require.True(t, success) + } + + got, err := w.LoadConfig(context.Background()) + assert.NoError(t, err) + + 
sanitizeScrapeConfigsForTest(got.ScrapeConfigs) + assert.Equal(t, want_before.ScrapeConfigs, got.ScrapeConfigs) + + require.Eventually(t, func() bool { + source.Modify(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: "labellednamespace", + Labels: map[string]string{ + "label2": "label2", + }, + }}) + + select { + case <-events: + got, err := w.LoadConfig(context.Background()) + assert.NoError(t, err) + + sanitizeScrapeConfigsForTest(got.ScrapeConfigs) + return assert.Equal(t, want_after.ScrapeConfigs, got.ScrapeConfigs) + default: + return false + } + }, eventInterval*2, time.Millisecond) +} + func TestRateLimit(t *testing.T) { var err error serviceMonitor := &monitoringv1.ServiceMonitor{ @@ -838,7 +891,7 @@ func TestRateLimit(t *testing.T) { eventInterval := 5 * time.Millisecond cfg := allocatorconfig.Config{} - w := getTestPrometheusCRWatcher(t, nil, nil, cfg) + w, _ := getTestPrometheusCRWatcher(t, nil, nil, cfg) defer w.Close() w.eventInterval = eventInterval @@ -901,7 +954,7 @@ func TestRateLimit(t *testing.T) { // getTestPrometheusCRWatcher creates a test instance of PrometheusCRWatcher with fake clients // and test secrets. -func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) *PrometheusCRWatcher { +func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.ServiceMonitor, podMonitors []*monitoringv1.PodMonitor, cfg allocatorconfig.Config) (*PrometheusCRWatcher, *fcache.FakeControllerSource) { mClient := fakemonitoringclient.NewSimpleClientset() for _, sm := range svcMonitors { if sm != nil { @@ -989,7 +1042,7 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic }}}) // create the shared informer and resync every 1s - nsMonInf := cache.NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(cache.SharedIndexInformer) + nsMonInf := cache.NewSharedInformer(source, &v1.Namespace{}, 1*time.Second).(cache.SharedIndexInformer) resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics, eventRecorder) @@ -1000,9 +1053,15 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic nsInformer: nsMonInf, stopChannel: make(chan struct{}), configGenerator: generator, - resourceSelector: resourceSelector, - store: store, - } + podMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.PodMonitorNamespaceSelector, + }, + serviceMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: cfg.ServiceMonitorNamespaceSelector, + }, + resourceSelector: resourceSelector, + store: store, + }, source } From da40b956220fa71dcd0fc8607c360c029bb33f8a Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Thu, 18 Jan 2024 15:55:42 -0800 Subject: [PATCH 21/22] fixing goimports --- cmd/otel-allocator/watcher/promOperator_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/otel-allocator/watcher/promOperator_test.go b/cmd/otel-allocator/watcher/promOperator_test.go index b68b542686..068c3973fc 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -16,6 +16,10 @@ package watcher import ( "context" + "os" + "testing" + "time" + "github.com/go-kit/log" "github.com/go-kit/log/level" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -37,9 +41,6 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" 
fcache "k8s.io/client-go/tools/cache/testing" - "os" - "testing" - "time" allocatorconfig "github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/config" ) From f0bc9d2dd314d5ae14fd7778be9ee0e267d4de2f Mon Sep 17 00:00:00 2001 From: Rashmi Chandrashekar Date: Fri, 19 Jan 2024 18:21:54 -0800 Subject: [PATCH 22/22] making namespaceselectores as labelselectors --- cmd/otel-allocator/config/config.go | 4 +- cmd/otel-allocator/watcher/promOperator.go | 38 ++++++-------- .../watcher/promOperator_test.go | 52 +++++++++---------- 3 files changed, 42 insertions(+), 52 deletions(-) diff --git a/cmd/otel-allocator/config/config.go b/cmd/otel-allocator/config/config.go index 1d56f4f836..edbed7e794 100644 --- a/cmd/otel-allocator/config/config.go +++ b/cmd/otel-allocator/config/config.go @@ -55,8 +55,8 @@ type Config struct { PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"` PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"` ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"` - ServiceMonitorNamespaceSelector map[string]string `yaml:"service_monitor_namespace_selector,omitempty"` - PodMonitorNamespaceSelector map[string]string `yaml:"pod_monitor_namespace_selector,omitempty"` + ServiceMonitorNamespaceSelector *metav1.LabelSelector `yaml:"service_monitor_namespace_selector,omitempty"` + PodMonitorNamespaceSelector *metav1.LabelSelector `yaml:"pod_monitor_namespace_selector,omitempty"` } type PrometheusCRConfig struct { diff --git a/cmd/otel-allocator/watcher/promOperator.go b/cmd/otel-allocator/watcher/promOperator.go index e3abd27c62..464ac74ea8 100644 --- a/cmd/otel-allocator/watcher/promOperator.go +++ b/cmd/otel-allocator/watcher/promOperator.go @@ -80,12 +80,8 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat PodMonitorSelector: &metav1.LabelSelector{ MatchLabels: cfg.PodMonitorSelector, }, - ServiceMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.ServiceMonitorNamespaceSelector, - }, - PodMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.PodMonitorNamespaceSelector, - }, + ServiceMonitorNamespaceSelector: cfg.ServiceMonitorNamespaceSelector, + PodMonitorNamespaceSelector: cfg.PodMonitorNamespaceSelector, }, }, } @@ -111,23 +107,19 @@ func NewPrometheusCRWatcher(ctx context.Context, logger logr.Logger, cfg allocat } return &PrometheusCRWatcher{ - logger: logger, - kubeMonitoringClient: mClient, - k8sClient: clientset, - informers: monitoringInformers, - nsInformer: nsMonInf, - stopChannel: make(chan struct{}), - eventInterval: minEventInterval, - configGenerator: generator, - kubeConfigPath: cfg.KubeConfigFilePath, - podMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.PodMonitorNamespaceSelector, - }, - serviceMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.ServiceMonitorNamespaceSelector, - }, - resourceSelector: resourceSelector, - store: store, + logger: logger, + kubeMonitoringClient: mClient, + k8sClient: clientset, + informers: monitoringInformers, + nsInformer: nsMonInf, + stopChannel: make(chan struct{}), + eventInterval: minEventInterval, + configGenerator: generator, + kubeConfigPath: cfg.KubeConfigFilePath, + podMonitorNamespaceSelector: cfg.PodMonitorNamespaceSelector, + serviceMonitorNamespaceSelector: cfg.ServiceMonitorNamespaceSelector, + resourceSelector: resourceSelector, + store: store, }, nil } diff --git a/cmd/otel-allocator/watcher/promOperator_test.go 
b/cmd/otel-allocator/watcher/promOperator_test.go index 068c3973fc..21c0f3982e 100644 --- a/cmd/otel-allocator/watcher/promOperator_test.go +++ b/cmd/otel-allocator/watcher/promOperator_test.go @@ -633,8 +633,10 @@ func TestLoadConfig(t *testing.T) { }, }, cfg: allocatorconfig.Config{ - ServiceMonitorNamespaceSelector: map[string]string{ - "label1": "label1", + ServiceMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label1": "label1", + }, }, }, want: &promconfig.Config{ @@ -695,8 +697,10 @@ func TestLoadConfig(t *testing.T) { }, }, cfg: allocatorconfig.Config{ - PodMonitorNamespaceSelector: map[string]string{ - "label1": "label1", + PodMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label1": "label1", + }, }, }, want: &promconfig.Config{ @@ -790,8 +794,10 @@ func TestNamespaceLabelUpdate(t *testing.T) { } cfg := allocatorconfig.Config{ - PodMonitorNamespaceSelector: map[string]string{ - "label1": "label1", + PodMonitorNamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "label1": "label1", + }, }, } @@ -869,7 +875,7 @@ func TestNamespaceLabelUpdate(t *testing.T) { default: return false } - }, eventInterval*2, time.Millisecond) + }, eventInterval*5, time.Millisecond) } func TestRateLimit(t *testing.T) { @@ -1012,12 +1018,8 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic PodMonitorSelector: &metav1.LabelSelector{ MatchLabels: cfg.PodMonitorSelector, }, - ServiceMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.ServiceMonitorNamespaceSelector, - }, - PodMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.PodMonitorNamespaceSelector, - }, + ServiceMonitorNamespaceSelector: cfg.ServiceMonitorNamespaceSelector, + PodMonitorNamespaceSelector: cfg.PodMonitorNamespaceSelector, }, }, } @@ -1048,20 +1050,16 @@ func getTestPrometheusCRWatcher(t *testing.T, svcMonitors []*monitoringv1.Servic resourceSelector := prometheus.NewResourceSelector(promOperatorLogger, prom, store, nsMonInf, operatorMetrics, eventRecorder) return &PrometheusCRWatcher{ - kubeMonitoringClient: mClient, - k8sClient: k8sClient, - informers: informers, - nsInformer: nsMonInf, - stopChannel: make(chan struct{}), - configGenerator: generator, - podMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.PodMonitorNamespaceSelector, - }, - serviceMonitorNamespaceSelector: &metav1.LabelSelector{ - MatchLabels: cfg.ServiceMonitorNamespaceSelector, - }, - resourceSelector: resourceSelector, - store: store, + kubeMonitoringClient: mClient, + k8sClient: k8sClient, + informers: informers, + nsInformer: nsMonInf, + stopChannel: make(chan struct{}), + configGenerator: generator, + podMonitorNamespaceSelector: cfg.PodMonitorNamespaceSelector, + serviceMonitorNamespaceSelector: cfg.ServiceMonitorNamespaceSelector, + resourceSelector: resourceSelector, + store: store, }, source }
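
Note on the final patch: switching `PodMonitorNamespaceSelector` and `ServiceMonitorNamespaceSelector` from `map[string]string` to `*metav1.LabelSelector` means namespace matching is no longer limited to exact label maps; set-based `matchExpressions` become possible as well. The sketch below is not part of the patch series — it is a minimal, illustrative example (the `environment` label key and its values are made up) of how such a selector is built and evaluated with the standard apimachinery helpers, which is broadly how label selectors end up being checked against namespace labels in Kubernetes client code.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A selector with only MatchLabels behaves like the old map[string]string
	// form; MatchExpressions adds set-based requirements the old form could
	// not express.
	podMonitorNamespaceSelector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"label1": "label1"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{
				Key:      "environment", // hypothetical label key, for illustration only
				Operator: metav1.LabelSelectorOpIn,
				Values:   []string{"dev", "staging"},
			},
		},
	}

	// Convert the API selector into a labels.Selector and test it against a
	// namespace's label set.
	selector, err := metav1.LabelSelectorAsSelector(podMonitorNamespaceSelector)
	if err != nil {
		panic(err)
	}

	nsLabels := labels.Set{"label1": "label1", "environment": "dev"}
	fmt.Println(selector.Matches(nsLabels)) // prints: true
}
```

This is also why the test fixtures in the last patch simply wrap the previous `map[string]string` values in `MatchLabels`: that wrapping is the drop-in equivalent of the old behaviour, while `MatchExpressions` remains available for richer namespace selection.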