Targets


monitoring/alertmanager/0 (0/0 up): no active targets

monitoring/coredns/0 (1/1 up)

  http://10.42.0.2:9153/metrics (up)
    labels: container="coredns" endpoint="metrics" instance="10.42.0.2:9153" job="kube-dns" namespace="kube-system" pod="coredns-695cbbfcb9-sktdt" service="kube-dns"
    last scrape: 566ms ago; scrape duration: 2.756ms; error: none

monitoring/grafana/0 (0/0 up): no active targets

monitoring/kube-apiserver/0 (1/1 up)

  https://172.16.0.26:6443/metrics (up)
    labels: endpoint="https" instance="172.16.0.26:6443" job="apiserver" namespace="default" service="kubernetes"
    last scrape: 19.196s ago; scrape duration: 147ms; error: none

monitoring/kube-controller-manager/0 (0/0 up): no active targets

monitoring/kube-scheduler/0 (0/0 up): no active targets

monitoring/kube-state-metrics/0 (1/1 up)

  https://10.42.3.9:8443/metrics (up)
    labels: container="kube-rbac-proxy-main" instance="10.42.3.9:8443" job="kube-state-metrics"
    last scrape: 25.365s ago; scrape duration: 9.765ms; error: none

monitoring/kube-state-metrics/1 (1/1 up)

  https://10.42.3.9:9443/metrics (up)
    labels: container="kube-rbac-proxy-self" endpoint="https-self" instance="10.42.3.9:9443" job="kube-state-metrics" namespace="monitoring" pod="kube-state-metrics-8587c4b6b9-t4949" service="kube-state-metrics"
    last scrape: 9.054s ago; scrape duration: 3.529ms; error: none

monitoring/kubelet/0 (3/3 up)

  https://172.16.0.24:10250/metrics (up)
    labels: endpoint="https-metrics" instance="172.16.0.24:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="node01" service="kubelet"
    last scrape: 23.722s ago; scrape duration: 22.19ms; error: none
  https://172.16.0.25:10250/metrics (up)
    labels: endpoint="https-metrics" instance="172.16.0.25:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="node02" service="kubelet"
    last scrape: 8.27s ago; scrape duration: 33.46ms; error: none
  https://172.16.0.26:10250/metrics (up)
    labels: endpoint="https-metrics" instance="172.16.0.26:10250" job="kubelet" metrics_path="/metrics" namespace="kube-system" node="master" service="kubelet"
    last scrape: 24.931s ago; scrape duration: 153ms; error: none

monitoring/kubelet/1 (3/3 up)

  https://172.16.0.24:10250/metrics/cadvisor (up)
    labels: endpoint="https-metrics" instance="172.16.0.24:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="node01" service="kubelet"
    last scrape: 16.718s ago; scrape duration: 35ms; error: none
  https://172.16.0.25:10250/metrics/cadvisor (up)
    labels: endpoint="https-metrics" instance="172.16.0.25:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="node02" service="kubelet"
    last scrape: 26.209s ago; scrape duration: 46.98ms; error: none
  https://172.16.0.26:10250/metrics/cadvisor (up)
    labels: endpoint="https-metrics" instance="172.16.0.26:10250" job="kubelet" metrics_path="/metrics/cadvisor" namespace="kube-system" node="master" service="kubelet"
    last scrape: 15.91s ago; scrape duration: 32.38ms; error: none

monitoring/kubelet/2 (3/3 up)

  https://172.16.0.24:10250/metrics/probes (up)
    labels: endpoint="https-metrics" instance="172.16.0.24:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="node01" service="kubelet"
    last scrape: 9.012s ago; scrape duration: 1.505ms; error: none
  https://172.16.0.25:10250/metrics/probes (up)
    labels: endpoint="https-metrics" instance="172.16.0.25:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="node02" service="kubelet"
    last scrape: 95ms ago; scrape duration: 2.339ms; error: none
  https://172.16.0.26:10250/metrics/probes (up)
    labels: endpoint="https-metrics" instance="172.16.0.26:10250" job="kubelet" metrics_path="/metrics/probes" namespace="kube-system" node="master" service="kubelet"
    last scrape: 9.017s ago; scrape duration: 1.317ms; error: none

monitoring/node-exporter/0 (3/3 up)

  https://172.16.0.26:9103/metrics (up)
    labels: container="kube-rbac-proxy" endpoint="https" instance="master" job="node-exporter" namespace="monitoring" pod="node-exporter-k9zl6" service="node-exporter"
    last scrape: 2.362s ago; scrape duration: 25.67ms; error: none
  https://172.16.0.24:9103/metrics (up)
    labels: container="kube-rbac-proxy" endpoint="https" instance="node01" job="node-exporter" namespace="monitoring" pod="node-exporter-8x5qx" service="node-exporter"
    last scrape: 3.256s ago; scrape duration: 37.82ms; error: none
  https://172.16.0.25:9103/metrics (up)
    labels: container="kube-rbac-proxy" endpoint="https" instance="node02" job="node-exporter" namespace="monitoring" pod="node-exporter-m8hcq" service="node-exporter"
    last scrape: 11.904s ago; scrape duration: 147.2ms; error: none

monitoring/prometheus-adapter/0 (0/0 up): no active targets

monitoring/prometheus-operator/0 (1/1 up)

  https://10.42.1.12:8443/metrics (up)
    labels: container="kube-rbac-proxy" endpoint="https" instance="10.42.1.12:8443" job="prometheus-operator" namespace="monitoring" pod="prometheus-operator-57b74654f7-klb7k" service="prometheus-operator"
    last scrape: 12.945s ago; scrape duration: 2.322ms; error: none

monitoring/prometheus/0 (1/1 up)

  http://10.42.0.25:9090/metrics (up)
    labels: container="prometheus" endpoint="web" instance="10.42.0.25:9090" job="prometheus-k8s" namespace="monitoring" pod="prometheus-k8s-0" service="prometheus-k8s"
    last scrape: 21.489s ago; scrape duration: 5.746ms; error: none

polardbx/pxc-prod-monitor-cdc/0 (0/0 up): no active targets

polardbx/pxc-prod-monitor-cn/0 (2/2 up)

  http://10.42.0.27:8081/metrics (up)
    labels: container="exporter" endpoint="metrics" instance="10.42.0.27:8081" job="pxc-prod" namespace="polardbx" pod="pxc-prod-wmpd-cn-default-5dcd4b7f64-f7w6d" polardbx_cn_type="rw" polardbx_name="pxc-prod" polardbx_role="cn" service="pxc-prod"
    last scrape: 27.268s ago; scrape duration: 4.905ms; error: none
  http://10.42.3.13:8081/metrics (up)
    labels: container="exporter" endpoint="metrics" instance="10.42.3.13:8081" job="pxc-prod" namespace="polardbx" pod="pxc-prod-wmpd-cn-default-5dcd4b7f64-88mmc" polardbx_cn_type="rw" polardbx_name="pxc-prod" polardbx_role="cn" service="pxc-prod"
    last scrape: 14.731s ago; scrape duration: 7.072ms; error: none

polardbx/pxc-prod-monitor-columnar/0 (0/0 up): no active targets

polardbx/pxc-prod-monitor-dn/0 (1/1 up)

  http://10.42.1.5:8000/metrics (up)
    labels: container="exporter" endpoint="metrics" instance="10.42.1.5:8000" job="pxc-prod-wmpd-dn-0-metrics" namespace="polardbx" pod="pxc-prod-wmpd-dn-0-single-0" polardbx_dn_index="0" polardbx_name="pxc-prod" polardbx_role="dn" service="pxc-prod-wmpd-dn-0-metrics" xstore_name="pxc-prod-wmpd-dn-0" xstore_node_role="candidate" xstore_node_set="single" xstore_role="follower"
    last scrape: 5.247s ago; scrape duration: 33.6ms; error: none

polardbx/pxc-prod-monitor-gms/0 (1/1 up)

  http://10.42.3.5:8000/metrics (up)
    labels: container="exporter" endpoint="metrics" instance="10.42.3.5:8000" job="pxc-prod-wmpd-gms-metrics" namespace="polardbx" pod="pxc-prod-wmpd-gms-single-0" polardbx_name="pxc-prod" polardbx_role="gms" service="pxc-prod-wmpd-gms-metrics" xstore_name="pxc-prod-wmpd-gms" xstore_node_role="candidate" xstore_node_set="single" xstore_role="leader"
    last scrape: 6.921s ago; scrape duration: 78.16ms; error: none