Merge pull request #857 from paulfantom/globals_experiment_1

Remove mutating global state in node-exporter objects
Authored by Paweł Krupa, committed by GitHub on 2021-01-07 09:08:59 +01:00.
13 changed files with 381 additions and 309 deletions
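At its core, the refactor replaces objects that read (and let any later mixin mutate) the shared `$._config` global with functions that take their configuration as an explicit parameter merged over local defaults. A minimal sketch of the two styles, using a hypothetical `exampleComponent` rather than code from this PR:

// Sketch only; 'exampleComponent' is hypothetical.
// Before: configuration flows through a shared, mutable $._config global;
// any mixin merged into the final object can silently change what the
// component sees.
local beforeStyle = {
  _config+:: { namespace: 'default' },
  exampleComponent: {
    metadata: { name: 'example', namespace: $._config.namespace },
  },
};

// After: configuration enters once, as an explicit parameter merged over
// local defaults; required fields fail loudly via `error`.
local defaults = {
  namespace: error 'must provide namespace',
};
local afterStyle = function(params) {
  local cfg = defaults + params,
  exampleComponent: {
    metadata: { name: 'example', namespace: cfg.namespace },
  },
};

afterStyle({ namespace: 'monitoring' })

The function style makes required inputs fail loudly and keeps each component's configuration local to it, which is exactly the shape the files below adopt.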

View File

@@ -18,9 +18,14 @@ Components included in this package:
 This stack is meant for cluster monitoring, so it is pre-configured to collect metrics from all Kubernetes components. In addition, it delivers a default set of dashboards and alerting rules. Many of the useful dashboards and alerts come from the [kubernetes-mixin project](https://github.com/kubernetes-monitoring/kubernetes-mixin), which, like this project, provides composable jsonnet as a library for users to customize to their needs.
+
+## Warning
+
+The `master` branch is under heavy refactoring. Please use the `release-0.7` branch until the refactoring is complete and this notice is removed.
+
 ## Table of contents
 - [kube-prometheus](#kube-prometheus)
+  - [Warning](#warning)
   - [Table of contents](#table-of-contents)
   - [Prerequisites](#prerequisites)
     - [minikube](#minikube)
@@ -53,8 +58,9 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m
   - [Stripping container resource limits](#stripping-container-resource-limits)
   - [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
   - [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
-  - [Setting up a blackbox exporter](#setting-up-a-blackbox exporter)
+  - [Setting up a blackbox exporter](#setting-up-a-blackbox-exporter)
   - [Minikube Example](#minikube-example)
+  - [Continuous Delivery](#continuous-delivery)
   - [Troubleshooting](#troubleshooting)
     - [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
       - [Authentication problem](#authentication-problem)

View File

@@ -1,4 +1,4 @@
-local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import '../kube-rbac-proxy/containerMixin.libsonnet';
 {
   _config+:: {

View File

@@ -1,9 +1,10 @@
-local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import './kube-rbac-proxy/containerMixin.libsonnet';
+local nodeExporter = import './node-exporter/node-exporter.libsonnet';
 (import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet') +
 (import './kube-state-metrics/kube-state-metrics.libsonnet') +
 (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
-(import './node-exporter/node-exporter.libsonnet') +
 (import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
 (import './blackbox-exporter/blackbox-exporter.libsonnet') +
 (import './alertmanager/alertmanager.libsonnet') +
@@ -17,6 +18,11 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
 (import './alerts/alerts.libsonnet') +
 (import './rules/rules.libsonnet') +
 {
+  nodeExporter: nodeExporter({
+    namespace: $._config.namespace,
+    version: '1.0.1',
+    image: 'quay.io/prometheus/node-exporter:v1.0.1',
+  }),
   kubePrometheus+:: {
     namespace: {
       apiVersion: 'v1',
@@ -84,7 +90,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
     },
   }).deploymentMixin,
   grafana+:: {
     local dashboardDefinitions = super.dashboardDefinitions,
@@ -197,10 +202,6 @@ local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';
         requests: { cpu: '100m', memory: '150Mi' },
         limits: { cpu: '100m', memory: '150Mi' },
       },
-      'node-exporter': {
-        requests: { cpu: '102m', memory: '180Mi' },
-        limits: { cpu: '250m', memory: '180Mi' },
-      },
     },
     prometheus+:: { rules: $.prometheusRules + $.prometheusAlerts },
     grafana+:: {
grafana+:: { grafana+:: {

View File

@@ -1,93 +1,64 @@
The mixin-style object is replaced wholesale with a parameterized function; the removed content survives verbatim in the new containerMixin.libsonnet below.

local defaults = {
  local defaults = self,
  namespace: error 'must provide namespace',
  image: 'quay.io/brancz/kube-rbac-proxy:v0.8.0',
  ports: error 'must provide ports',
  secureListenAddress: error 'must provide secureListenAddress',
  upstream: error 'must provide upstream',
  resources: {
    requests: { cpu: '10m', memory: '20Mi' },
    limits: { cpu: '20m', memory: '40Mi' },
  },
  tlsCipherSuites: [
    'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721
    'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721
    // 'TLS_RSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
    // 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',  // insecure: https://access.redhat.com/articles/2548661
    // 'TLS_RSA_WITH_AES_128_CBC_SHA',  // disabled by h2
    // 'TLS_RSA_WITH_AES_256_CBC_SHA',  // disabled by h2
    // 'TLS_RSA_WITH_AES_128_CBC_SHA256',  // insecure: https://access.redhat.com/security/cve/cve-2013-0169
    // 'TLS_RSA_WITH_AES_128_GCM_SHA256',  // disabled by h2
    // 'TLS_RSA_WITH_AES_256_GCM_SHA384',  // disabled by h2
    // 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
    // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA',  // disabled by h2
    // 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA',  // disabled by h2
    // 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
    // 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',  // insecure: https://access.redhat.com/articles/2548661
    // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',  // disabled by h2
    // 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',  // disabled by h2
    // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256',  // insecure: https://access.redhat.com/security/cve/cve-2013-0169
    // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',  // insecure: https://access.redhat.com/security/cve/cve-2013-0169
    // disabled by h2 means: https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/http2/ciphers.go
    'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',
    'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
    'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305',
    'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',
  ],
};

function(params) {
  local krp = self,
  config:: defaults + params,
  // Safety check
  assert std.isObject(krp.config.resources),

  name: krp.config.name,
  image: krp.config.image,
  args: [
    '--logtostderr',
    '--secure-listen-address=' + krp.config.secureListenAddress,
    '--tls-cipher-suites=' + std.join(',', krp.config.tlsCipherSuites),
    '--upstream=' + krp.config.upstream,
  ],
  resources: krp.config.resources,
  ports: krp.config.ports,
  securityContext: {
    runAsUser: 65532,
    runAsGroup: 65532,
    runAsNonRoot: true,
  },
}
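For reference, a caller now builds the proxy container by invoking this function. A sketch, not part of the commit; the 9100 port values mirror the node-exporter usage later in this PR, and because jsonnet evaluates lazily, `error` defaults such as `namespace` only fire if the field is actually read:

// Sketch: instantiating the new kube-rbac-proxy container function.
local krp = import './kube-rbac-proxy/container.libsonnet';

krp({
  name: 'kube-rbac-proxy',  // no default; must be provided
  secureListenAddress: '[$(IP)]:9100',
  upstream: 'http://127.0.0.1:9100/',
  ports: [
    { name: 'https', containerPort: 9100 },
  ],
  // image, resources, and tlsCipherSuites fall back to the defaults above.
})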

View File

@@ -0,0 +1,96 @@
// TODO(paulfantom): remove this file after all usage of kube-rbac-proxy/containerMixin.libsonnet
// is converted to use kube-rbac-proxy/container.libsonnet
{
  local krp = self,
  config+:: {
    kubeRbacProxy: {
      image: error 'must provide image',
      name: error 'must provide name',
      securePortName: error 'must provide securePortName',
      securePort: error 'must provide securePort',
      secureListenAddress: error 'must provide secureListenAddress',
      upstream: error 'must provide upstream',
      tlsCipherSuites: error 'must provide tlsCipherSuites',
    },
  },
  specMixin:: {
    local sm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    spec+: {
      template+: {
        spec+: {
          containers+: [{
            name: krp.config.kubeRbacProxy.name,
            image: krp.config.kubeRbacProxy.image,
            args: [
              '--logtostderr',
              '--secure-listen-address=' + krp.config.kubeRbacProxy.secureListenAddress,
              '--tls-cipher-suites=' + std.join(',', krp.config.kubeRbacProxy.tlsCipherSuites),
              '--upstream=' + krp.config.kubeRbacProxy.upstream,
            ],
            ports: [
              { name: krp.config.kubeRbacProxy.securePortName, containerPort: krp.config.kubeRbacProxy.securePort },
            ],
            securityContext: {
              runAsUser: 65532,
              runAsGroup: 65532,
              runAsNonRoot: true,
            },
          }],
        },
      },
    },
  },
  deploymentMixin:: {
    local dm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    deployment+: krp.specMixin {
      config+:: {
        kubeRbacProxy+: dm.config.kubeRbacProxy,
      },
    },
  },
  statefulSetMixin:: {
    local sm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    statefulSet+: krp.specMixin {
      config+:: {
        kubeRbacProxy+: sm.config.kubeRbacProxy,
      },
    },
  },
}

View File

@@ -1,4 +1,4 @@
-local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
+local kubeRbacProxyContainer = import '../kube-rbac-proxy/containerMixin.libsonnet';
 local ksm = import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet';
 {

View File

@@ -1,214 +1,212 @@
The `_config`-driven mixin is rewritten as a self-contained `function(params)` component:

local krp = (import '../kube-rbac-proxy/container.libsonnet');

local defaults = {
  local defaults = self,
  namespace: error 'must provide namespace',
  version: error 'must provide version',
  image: error 'must provide image',
  resources: {
    requests: { cpu: '102m', memory: '180Mi' },
    limits: { cpu: '250m', memory: '180Mi' },
  },
  listenAddress: '127.0.0.1',
  port: 9100,
  commonLabels:: {
    'app.kubernetes.io/name': 'node-exporter',
    'app.kubernetes.io/version': defaults.version,
    'app.kubernetes.io/component': 'exporter',
    'app.kubernetes.io/part-of': 'kube-prometheus',
  },
  selectorLabels:: {
    [labelName]: defaults.commonLabels[labelName]
    for labelName in std.objectFields(defaults.commonLabels)
    if !std.setMember(labelName, ['app.kubernetes.io/version'])
  },
};

function(params) {
  local ne = self,
  config:: defaults + params,
  // Safety check
  assert std.isObject(ne.config.resources),

  clusterRoleBinding: {
    apiVersion: 'rbac.authorization.k8s.io/v1',
    kind: 'ClusterRoleBinding',
    metadata: {
      name: 'node-exporter',
      labels: ne.config.commonLabels,
    },
    roleRef: {
      apiGroup: 'rbac.authorization.k8s.io',
      kind: 'ClusterRole',
      name: 'node-exporter',
    },
    subjects: [{
      kind: 'ServiceAccount',
      name: 'node-exporter',
      namespace: ne.config.namespace,
    }],
  },

  clusterRole: {
    apiVersion: 'rbac.authorization.k8s.io/v1',
    kind: 'ClusterRole',
    metadata: {
      name: 'node-exporter',
      labels: ne.config.commonLabels,
    },
    rules: [
      {
        apiGroups: ['authentication.k8s.io'],
        resources: ['tokenreviews'],
        verbs: ['create'],
      },
      {
        apiGroups: ['authorization.k8s.io'],
        resources: ['subjectaccessreviews'],
        verbs: ['create'],
      },
    ],
  },

  serviceAccount: {
    apiVersion: 'v1',
    kind: 'ServiceAccount',
    metadata: {
      name: 'node-exporter',
      namespace: ne.config.namespace,
      labels: ne.config.commonLabels,
    },
  },

  service: {
    apiVersion: 'v1',
    kind: 'Service',
    metadata: {
      name: 'node-exporter',
      namespace: ne.config.namespace,
      labels: ne.config.commonLabels,
    },
    spec: {
      ports: [
        { name: 'https', targetPort: 'https', port: ne.config.port },
      ],
      selector: ne.config.selectorLabels,
      clusterIP: 'None',
    },
  },

  serviceMonitor: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'ServiceMonitor',
    metadata: {
      name: 'node-exporter',
      namespace: ne.config.namespace,
      labels: ne.config.commonLabels,
    },
    spec: {
      jobLabel: 'app.kubernetes.io/name',
      selector: {
        matchLabels: ne.config.selectorLabels,
      },
      endpoints: [{
        port: 'https',
        scheme: 'https',
        interval: '15s',
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        relabelings: [
          {
            action: 'replace',
            regex: '(.*)',
            replacement: '$1',
            sourceLabels: ['__meta_kubernetes_pod_node_name'],
            targetLabel: 'instance',
          },
        ],
        tlsConfig: {
          insecureSkipVerify: true,
        },
      }],
    },
  },

  daemonset:
    local nodeExporter = {
      name: 'node-exporter',
      image: ne.config.image,
      args: [
        '--web.listen-address=' + std.join(':', [ne.config.listenAddress, std.toString(ne.config.port)]),
        '--path.sysfs=/host/sys',
        '--path.rootfs=/host/root',
        '--no-collector.wifi',
        '--no-collector.hwmon',
        '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
      ],
      volumeMounts: [
        { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true },
        { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true },
      ],
      resources: ne.config.resources,
    };

    local kubeRbacProxy = krp({
      name: 'kube-rbac-proxy',
      //image: krpImage,
      upstream: 'http://127.0.0.1:' + ne.config.port + '/',
      secureListenAddress: '[$(IP)]:' + ne.config.port,
      // Keep `hostPort` here, rather than in the node-exporter container
      // because Kubernetes mandates that if you define a `hostPort` then
      // `containerPort` must match. In our case, we are splitting the
      // host port and container port between the two containers.
      // We'll keep the port specification here so that the named port
      // used by the service is tied to the proxy container. We *could*
      // forgo declaring the host port, however it is important to declare
      // it so that the scheduler can decide if the pod is schedulable.
      ports: [
        { name: 'https', containerPort: ne.config.port, hostPort: ne.config.port },
      ],
    }) + {
      env: [
        { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } },
      ],
    };

    {
      apiVersion: 'apps/v1',
      kind: 'DaemonSet',
      metadata: {
        name: 'node-exporter',
        namespace: ne.config.namespace,
        labels: ne.config.commonLabels,
      },
      spec: {
        selector: { matchLabels: ne.config.selectorLabels },
        updateStrategy: {
          type: 'RollingUpdate',
          rollingUpdate: { maxUnavailable: '10%' },
        },
        template: {
          metadata: { labels: ne.config.commonLabels },
          spec: {
            nodeSelector: { 'kubernetes.io/os': 'linux' },
            tolerations: [{
              operator: 'Exists',
            }],
            containers: [nodeExporter, kubeRbacProxy],
            volumes: [
              { name: 'sys', hostPath: { path: '/sys' } },
              { name: 'root', hostPath: { path: '/' } },
            ],
            serviceAccountName: 'node-exporter',
            securityContext: {
              runAsUser: 65534,
              runAsNonRoot: true,
            },
            hostPID: true,
            hostNetwork: true,
          },
        },
      },
    },
}
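Consumers instantiate the component exactly as kube-prometheus.libsonnet does above; overriding any default now means passing it through `params`. A sketch, not part of the commit, with illustrative resource values:

// Sketch: building just the DaemonSet from the new component function.
local nodeExporter = import './node-exporter/node-exporter.libsonnet';

nodeExporter({
  namespace: 'monitoring',
  version: '1.0.1',
  image: 'quay.io/prometheus/node-exporter:v1.0.1',
  // Any default can be overridden alongside the required params:
  resources: {
    requests: { cpu: '200m', memory: '200Mi' },
    limits: { cpu: '300m', memory: '200Mi' },
  },
}).daemonset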

View File

@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
 rules:
 - apiGroups:

View File

@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
 roleRef:
   apiGroup: rbac.authorization.k8s.io

View File

@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec:
@@ -20,7 +20,7 @@ spec:
         app.kubernetes.io/component: exporter
         app.kubernetes.io/name: node-exporter
         app.kubernetes.io/part-of: kube-prometheus
-        app.kubernetes.io/version: v1.0.1
+        app.kubernetes.io/version: 1.0.1
     spec:
       containers:
       - args:

View File

@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec:

View File

@@ -5,6 +5,6 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
     app.kubernetes.io/component: exporter
     app.kubernetes.io/name: node-exporter
     app.kubernetes.io/part-of: kube-prometheus
-    app.kubernetes.io/version: v1.0.1
+    app.kubernetes.io/version: 1.0.1
   name: node-exporter
   namespace: monitoring
 spec: