Description of problem: enforcedTargetLimit: 0 does not take effect. NOTE: non-zero settings take effect, this bug is only about enforcedTargetLimit: 0 reproduce steps: 1. enable UserWorkload ****************************** apiVersion: v1 kind: ConfigMap metadata: name: cluster-monitoring-config namespace: openshift-monitoring data: config.yaml: | enableUserWorkload: true ****************************** 2. set enforcedTargetLimit to 0 ****************************** apiVersion: v1 kind: ConfigMap metadata: name: user-workload-monitoring-config namespace: openshift-user-workload-monitoring data: config.yaml: | prometheus: enforcedTargetLimit: 0 ****************************** # oc -n openshift-user-workload-monitoring get prometheus user-workload -oyaml | grep enforcedTargetLimit enforcedTargetLimit: 0 3. deploy pod under user namespace ****************************** # oc new-project test # oc -n test create -f - << EOF apiVersion: apps/v1 kind: Deployment metadata: labels: app: prometheus-example-app name: prometheus-example-app spec: replicas: 1 selector: matchLabels: app: prometheus-example-app template: metadata: labels: app: prometheus-example-app spec: containers: - image: ghcr.io/rhobs/prometheus-example-app:0.3.0 imagePullPolicy: IfNotPresent name: prometheus-example-app --- apiVersion: v1 kind: Service metadata: labels: app: prometheus-example-app name: prometheus-example-app spec: ports: - port: 8080 protocol: TCP targetPort: 8080 name: web selector: app: prometheus-example-app type: ClusterIP --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: prometheus-example-monitor spec: endpoints: - interval: 5s port: web selector: matchLabels: app: prometheus-example-app EOF ****************************** 4. 
check metrics exposed by the user project; could see the user metrics, which should not be visible # token=`oc sa get-token prometheus-k8s -n openshift-monitoring` # oc -n openshift-monitoring exec -c prometheus prometheus-k8s-0 -- curl -k -H "Authorization: Bearer $token" 'https://thanos-querier.openshift-monitoring.svc:9091/api/v1/query?query=version' | jq { "status": "success", "data": { "resultType": "vector", "result": [ { "metric": { "__name__": "version", "endpoint": "web", "instance": "10.129.2.59:8080", "job": "prometheus-example-app", "namespace": "test", "pod": "prometheus-example-app-d748cfb54-j2t4r", "prometheus": "openshift-user-workload-monitoring/user-workload", "service": "prometheus-example-app", "version": "v0.3.0" }, "value": [ 1626406238.152, "1" ] } ] } } # oc -n openshift-user-workload-monitoring exec -c prometheus prometheus-user-workload-0 -- cat /etc/prometheus/config_out/prometheus.env.yaml global: evaluation_interval: 30s scrape_interval: 30s external_labels: prometheus: openshift-user-workload-monitoring/user-workload prometheus_replica: prometheus-user-workload-0 rule_files: - /etc/prometheus/rules/prometheus-user-workload-rulefiles-0/*.yaml scrape_configs: - job_name: serviceMonitor/test/prometheus-example-monitor/0 honor_labels: false honor_timestamps: false kubernetes_sd_configs: - role: endpoints namespaces: names: - test scrape_interval: 5s relabel_configs: - source_labels: - job target_label: __tmp_prometheus_job_name - action: keep source_labels: - __meta_kubernetes_service_label_app regex: prometheus-example-app - action: keep source_labels: - __meta_kubernetes_endpoint_port_name regex: web - source_labels: - __meta_kubernetes_endpoint_address_target_kind - __meta_kubernetes_endpoint_address_target_name separator: ; regex: Node;(.*) replacement: ${1} target_label: node - source_labels: - __meta_kubernetes_endpoint_address_target_kind - __meta_kubernetes_endpoint_address_target_name separator: ; regex: Pod;(.*) replacement: ${1} 
target_label: pod - source_labels: - __meta_kubernetes_namespace target_label: namespace - source_labels: - __meta_kubernetes_service_name target_label: service - source_labels: - __meta_kubernetes_pod_name target_label: pod - source_labels: - __meta_kubernetes_pod_container_name target_label: container - source_labels: - __meta_kubernetes_service_name target_label: job replacement: ${1} - target_label: endpoint replacement: web - target_label: namespace replacement: test - source_labels: - __address__ target_label: __tmp_hash modulus: 1 action: hashmod - source_labels: - __tmp_hash regex: 0 action: keep target_limit: 0 alerting: alert_relabel_configs: - action: labeldrop regex: prometheus_replica alertmanagers: - path_prefix: / scheme: https tls_config: insecure_skip_verify: false server_name: alertmanager-main.openshift-monitoring.svc ca_file: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt kubernetes_sd_configs: - role: endpoints namespaces: names: - openshift-monitoring bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token api_version: v2 relabel_configs: - action: keep source_labels: - __meta_kubernetes_service_name regex: alertmanager-main - action: keep source_labels: - __meta_kubernetes_endpoint_port_name regex: web Version-Release number of selected component (if applicable): 4.9.0-0.nightly-2021-07-15-015134 How reproducible: always Steps to Reproduce: 1. see the description 2. 3. Actual results: enforcedTargetLimit: 0 does not take effect Expected results: with enforcedTargetLimit: 0, the user-project targets should be dropped and their metrics should not be queryable (or, if 0 intentionally means "no limit", that behavior should be documented) Additional info: Is this by design? Does enforcedTargetLimit: 0 mean no limit on targets?