jsonnet: refactor the rest of mixins and grafana inclusion

Signed-off-by: paulfantom <pawel@krupa.net.pl>
This commit is contained in:
paulfantom
2021-01-15 09:26:57 +01:00
parent 86d4571aea
commit 5624c5a9a8
5 changed files with 23 additions and 18 deletions

View File

@@ -14,7 +14,7 @@ local kp =
},
};
{ ['setup/0namespace-namespace']: kp.kubePrometheus.namespace } +
{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))

View File

@@ -60,7 +60,7 @@ function(params) {
dashboardDefinitions: if std.length(g.config.dashboards) > 0 then {
apiVersion: 'v1',
kind: 'ConfigMapList',
items: g.dashboardDefinitions,
items: glib.grafana.dashboardDefinitions,
},
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',

View File

@@ -11,6 +11,7 @@ local prometheus = import './prometheus/prometheus.libsonnet';
local prometheusOperator = import './prometheus-operator/prometheus-operator.libsonnet';
{
local all = self,
alertmanager: alertmanager({
name: $._config.alertmanagerName,
namespace: $._config.namespace,
@@ -25,12 +26,16 @@ local prometheusOperator = import './prometheus-operator/prometheus-operator.lib
version: '0.18.0',
image: 'quay.io/prometheus/blackbox-exporter:v0.18.0',
}),
// TODO(paulfantom) This should be done by iterating over all objects and looking for object.mixin.grafanaDashboards
local allDashboards = $.nodeExporter.mixin.grafanaDashboards +
$.prometheus.mixin.grafanaDashboards +
$.kubernetesMixin.mixin.grafanaDashboards,
grafana: grafana({
namespace: $._config.namespace,
version: '7.3.7',
image: 'grafana/grafana:v7.3.7',
dashboards: {},
prometheusName: $._config.prometheusName,
dashboards: allDashboards,
}),
kubeStateMetrics: kubeStateMetrics({
namespace: $._config.namespace,
@@ -106,4 +111,4 @@ local prometheusOperator = import './prometheus-operator/prometheus-operator.lib
prometheus: $._config.prometheusName,
},
},
}
}

View File

@@ -9,9 +9,9 @@ local defaults = {
mixin: {
ruleLabels: {},
_config: {
nodeExporterSelector: 'job="node-exporter"',
hostNetworkInterfaceSelector: 'device!~"veth.+"',
},
nodeExporterSelector: 'job="node-exporter"',
hostNetworkInterfaceSelector: 'device!~"veth.+"',
},
},
};

View File

@@ -9,17 +9,17 @@ local defaults = {
mixin: {
ruleLabels: {},
_config: {
cadvisorSelector: 'job="kubelet", metrics_path="/metrics/cadvisor"',
kubeletSelector: 'job="kubelet", metrics_path="/metrics"',
kubeStateMetricsSelector: 'job="kube-state-metrics"',
nodeExporterSelector: 'job="node-exporter"',
kubeSchedulerSelector: 'job="kube-scheduler"',
kubeControllerManagerSelector: 'job="kube-controller-manager"',
kubeApiserverSelector: 'job="apiserver"',
podLabel: 'pod',
runbookURLPattern: 'https://github.com/prometheus-operator/kube-prometheus/wiki/%s',
diskDeviceSelector: 'device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"',
hostNetworkInterfaceSelector: 'device!~"veth.+"',
cadvisorSelector: 'job="kubelet", metrics_path="/metrics/cadvisor"',
kubeletSelector: 'job="kubelet", metrics_path="/metrics"',
kubeStateMetricsSelector: 'job="kube-state-metrics"',
nodeExporterSelector: 'job="node-exporter"',
kubeSchedulerSelector: 'job="kube-scheduler"',
kubeControllerManagerSelector: 'job="kube-controller-manager"',
kubeApiserverSelector: 'job="apiserver"',
podLabel: 'pod',
runbookURLPattern: 'https://github.com/prometheus-operator/kube-prometheus/wiki/%s',
diskDeviceSelector: 'device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"',
hostNetworkInterfaceSelector: 'device!~"veth.+"',
},
},
};