regenerate
README.md
@@ -233,14 +233,14 @@ local kp =
 { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
 { 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
 { 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
-{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
-{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
-{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
 { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['kubernetes-' + name]: kp.kubernetesMixin[name] for name in std.objectFields(kp.kubernetesMixin) }
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
 { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
 { ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
-{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
-{ ['kubernetes-' + name]: kp.kubernetesMixin[name] for name in std.objectFields(kp.kubernetesMixin) }
 ```

 And here's the [build.sh](build.sh) script (which uses `vendor/` to render all manifests in a json structure of `{filename: manifest-content}`):
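Since build.sh itself is not part of this diff, here is a rough, hypothetical illustration of the `{filename: manifest-content}` idea: with `jsonnet -J vendor -m manifests example.jsonnet`, every top-level key of the evaluated object becomes one output file in `manifests/`. The file names and the trivial objects below are made up for illustration only:

```jsonnet
// Evaluate with: jsonnet -m manifests thisfile.jsonnet
// Each top-level key becomes one output file under manifests/.
{
  'example-namespace': {
    apiVersion: 'v1',
    kind: 'Namespace',
    metadata: { name: 'monitoring' },
  },
  'example-configmap': {
    apiVersion: 'v1',
    kind: 'ConfigMap',
    metadata: { name: 'example', namespace: 'monitoring' },
    data: { key: 'value' },
  },
}
```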
@@ -483,10 +483,12 @@ Then to generate manifests with `internal-registry.com/organization`, use the `w
 [embedmd]:# (examples/internal-registry.jsonnet)
 ```jsonnet
-local mixin = import 'kube-prometheus/kube-prometheus-config-mixins.libsonnet';
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local mixin = import 'kube-prometheus/addons/config-mixins.libsonnet';
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
 } + mixin.withImageRepository('internal-registry.com/organization');
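Assembled from the lines added above, the post-migration internal-registry example reads as one piece; the trailing render expression is added here for illustration only, reusing the `std.objectFields` pattern from example.jsonnet:

```jsonnet
local mixin = import 'kube-prometheus/addons/config-mixins.libsonnet';
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
} + mixin.withImageRepository('internal-registry.com/organization');

// Illustrative render expression, not part of the diff.
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```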
@@ -515,7 +517,7 @@ To give another customization example, the name of the `Prometheus` object provi
 [embedmd]:# (examples/prometheus-name-override.jsonnet)
 ```jsonnet
-((import 'kube-prometheus/kube-prometheus.libsonnet') + {
+((import 'kube-prometheus/main.libsonnet') + {
   prometheus+: {
     prometheus+: {
       metadata+: {
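The hunk is cut off inside `metadata+:`; a minimal sketch of a complete override under the new import, with a hypothetical name and selecting the Prometheus custom resource the way kube-prometheus exposes it (`.prometheus.prometheus`):

```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
  prometheus+: {
    prometheus+: {
      metadata+: {
        name: 'my-prometheus',  // hypothetical name, not taken from the diff
      },
    },
  },
}).prometheus.prometheus
```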
@@ -532,7 +534,7 @@ Standard Kubernetes manifests are all written using [ksonnet-lib](https://github
 [embedmd]:# (examples/ksonnet-example.jsonnet)
 ```jsonnet
-((import 'kube-prometheus/kube-prometheus.libsonnet') + {
+((import 'kube-prometheus/main.libsonnet') + {
   nodeExporter+: {
     daemonset+: {
       metadata+: {
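The same pattern applies to plain Kubernetes objects; a sketch that merges a hypothetical label into the node-exporter DaemonSet metadata (label key and value are not from the diff):

```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
  nodeExporter+: {
    daemonset+: {
      metadata+: {
        labels+: {
          'example-label': 'example-value',  // hypothetical label
        },
      },
    },
  },
}).nodeExporter.daemonset
```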
@@ -549,8 +551,8 @@ The Alertmanager configuration is located in the `_config.alertmanager.config` c
 [embedmd]:# (examples/alertmanager-config.jsonnet)
 ```jsonnet
-((import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
+((import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
     alertmanager+: {
       config: |||
         global:
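Assembled from the new-style lines, with a minimal placeholder Alertmanager configuration filled in (the hunk truncates the real configuration right after `global:`); the trailing render expression reuses the pattern from example.jsonnet and is illustrative only:

```jsonnet
local kp = ((import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    alertmanager+: {
      // Placeholder config for illustration; the real example's config is not shown in the hunk.
      config: |||
        global:
          resolve_timeout: 5m
        route:
          receiver: 'null'
        receivers:
        - name: 'null'
      |||,
    },
  },
});

{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) }
```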
@@ -577,7 +579,7 @@ In the above example the configuration has been inlined, but can just as well be
 [embedmd]:# (examples/alertmanager-config-external.jsonnet)
 ```jsonnet
-((import 'kube-prometheus/kube-prometheus.libsonnet') + {
+((import 'kube-prometheus/main.libsonnet') + {
   _config+:: {
     alertmanager+: {
       config: importstr 'alertmanager-config.yaml',
@@ -592,9 +594,11 @@ In order to monitor additional namespaces, the Prometheus server requires the ap
 [embedmd]:# (examples/additional-namespaces.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },

     prometheus+:: {
       namespaces+: ['my-namespace', 'my-second-namespace'],
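Assembled, the new-style additional-namespaces example from the hunk above; the render expression at the end is illustrative only:

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },

    prometheus+:: {
      namespaces+: ['my-namespace', 'my-second-namespace'],
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```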
@@ -621,14 +625,16 @@ You can define ServiceMonitor resources in your `jsonnet` spec. See the snippet
 [embedmd]:# (examples/additional-namespaces-servicemonitor.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
     prometheus+:: {
       namespaces+: ['my-namespace', 'my-second-namespace'],
     },
   },
-  prometheus+:: {
+  prometheus+: {
     serviceMonitorMyNamespace: {
       apiVersion: 'monitoring.coreos.com/v1',
       kind: 'ServiceMonitor',
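The hunk stops at `kind: 'ServiceMonitor'`; a hedged sketch of how such a ServiceMonitor is typically completed — the application name, port, and selector labels below are hypothetical, not taken from the diff:

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
    prometheus+:: {
      namespaces+: ['my-namespace'],
    },
  },
  prometheus+: {
    serviceMonitorMyNamespace: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'my-servicemonitor',  // hypothetical
        namespace: 'monitoring',
      },
      spec: {
        jobLabel: 'app',  // hypothetical job label
        endpoints: [{ port: 'web' }],  // hypothetical port name
        selector: {
          matchLabels: { app: 'my-app' },  // hypothetical selector
        },
      },
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```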
@@ -671,12 +677,13 @@ In case you want to monitor all namespaces in a cluster, you can add the followi
 [embedmd]:# (examples/all-namespaces.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-all-namespaces.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
-
-    prometheus+:: {
+local kp = (import 'kube-prometheus/main.libsonnet') +
+  (import 'kube-prometheus/addons/all-namespaces.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
+    prometheus+: {
       namespaces: [],
     },
   },
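Assembled new-style version of the all-namespaces example (render expression added for illustration):

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/all-namespaces.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
    prometheus+: {
      namespaces: [],
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```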
@@ -718,10 +725,12 @@ To do that, one can import the following mixin
 [embedmd]:# (examples/strip-limits.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-strip-limits.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') +
+  (import 'kube-prometheus/addons/strip-limits.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
 };
@@ -7,23 +7,29 @@ One fatal issue that can occur is that you run out of IP addresses in your eks c
 You can monitor the `awscni` using kube-prometheus with:
 [embedmd]:# (../examples/eks-cni-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-eks.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') +
+  (import 'kube-prometheus/platforms/eks.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
-  prometheusRules+:: {
-    groups+: [
-      {
-        name: 'example-group',
-        rules: [
+  prometheus+: {
+    prometheusRuleEksCNI+: {
+      spec+: {
+        groups+: [
           {
-            record: 'aws_eks_available_ip',
-            expr: 'sum by(instance) (awscni_total_ip_addresses) - sum by(instance) (awscni_assigned_ip_addresses) < 10',
+            name: 'example-group',
+            rules: [
+              {
+                record: 'aws_eks_available_ip',
+                expr: 'sum by(instance) (awscni_total_ip_addresses) - sum by(instance) (awscni_assigned_ip_addresses) < 10',
+              },
+            ],
           },
         ],
       },
-    ],
+    },
   },
 };
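Assembled from the lines added in the hunk, the complete post-migration EKS example (render expression added for illustration):

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/platforms/eks.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
  prometheus+: {
    prometheusRuleEksCNI+: {
      spec+: {
        groups+: [
          {
            name: 'example-group',
            rules: [
              {
                record: 'aws_eks_available_ip',
                expr: 'sum by(instance) (awscni_total_ip_addresses) - sum by(instance) (awscni_assigned_ip_addresses) < 10',
              },
            ],
          },
        ],
      },
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```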
@@ -37,14 +37,14 @@ local kp =
 { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
 { 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
 { 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
-{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
-{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
-{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
 { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['kubernetes-' + name]: kp.kubernetesMixin[name] for name in std.objectFields(kp.kubernetesMixin) }
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
 { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
 { ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
-{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
-{ ['kubernetes-' + name]: kp.kubernetesMixin[name] for name in std.objectFields(kp.kubernetesMixin) }
 ```

 ## Prometheus rules
@@ -59,28 +59,34 @@ The format is exactly the Prometheus format, so there should be no changes neces
 [embedmd]:# (../examples/prometheus-additional-alert-rule-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
-  prometheusAlerts+:: {
-    groups+: [
-      {
-        name: 'example-group',
-        rules: [
+  prometheus+: {
+    prometheusRule+: {
+      spec+: {
+        groups+: [
           {
-            alert: 'Watchdog',
-            expr: 'vector(1)',
-            labels: {
-              severity: 'none',
-            },
-            annotations: {
-              description: 'This is a Watchdog meant to ensure that the entire alerting pipeline is functional.',
-            },
+            name: 'example-group',
+            rules: [
+              {
+                alert: 'Watchdog',
+                expr: 'vector(1)',
+                labels: {
+                  severity: 'none',
+                },
+                annotations: {
+                  description: 'This is a Watchdog meant to ensure that the entire alerting pipeline is functional.',
+                },
+              },
+            ],
           },
         ],
       },
-    ],
+    },
   },
 };
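Assembled from the added lines, the new-style alerting-rule example in one piece (render expression added for illustration):

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
  prometheus+: {
    prometheusRule+: {
      spec+: {
        groups+: [
          {
            name: 'example-group',
            rules: [
              {
                alert: 'Watchdog',
                expr: 'vector(1)',
                labels: {
                  severity: 'none',
                },
                annotations: {
                  description: 'This is a Watchdog meant to ensure that the entire alerting pipeline is functional.',
                },
              },
            ],
          },
        ],
      },
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```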
@@ -102,22 +108,28 @@ In order to add a recording rule, simply do the same with the `prometheusRules`
 [embedmd]:# (../examples/prometheus-additional-recording-rule-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
-  prometheusRules+:: {
-    groups+: [
-      {
-        name: 'example-group',
-        rules: [
+  prometheus+: {
+    prometheusRule+: {
+      spec+: {
+        groups+: [
           {
-            record: 'some_recording_rule_name',
-            expr: 'vector(1)',
+            name: 'example-group',
+            rules: [
+              {
+                record: 'some_recording_rule_name',
+                expr: 'vector(1)',
+              },
+            ],
           },
         ],
       },
-    ],
+    },
   },
 };
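The same change reduced to just the overlay object that the new style merges into `main.libsonnet` — a compact way to see the nesting `prometheus` → `prometheusRule` → `spec` → `groups`:

```jsonnet
{
  prometheus+: {
    prometheusRule+: {
      spec+: {
        groups+: [
          {
            name: 'example-group',
            rules: [
              { record: 'some_recording_rule_name', expr: 'vector(1)' },
            ],
          },
        ],
      },
    },
  },
}
```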
@@ -149,12 +161,18 @@ Then import it in jsonnet:
 [embedmd]:# (../examples/prometheus-additional-rendered-rule-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
-  prometheusAlerts+:: {
-    groups+: (import 'existingrule.json').groups,
+  prometheus+: {
+    prometheusRule+: {
+      spec+: {
+        groups+: (import 'existingrule.json').groups,
+      },
+    },
   },
 };
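Assembled new-style version of the rendered-rules example; it assumes the `existingrule.json` file produced in the preceding step is present next to the jsonnet file (render expression added for illustration):

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
  prometheus+: {
    prometheusRule+: {
      spec+: {
        groups+: (import 'existingrule.json').groups,
      },
    },
  },
};

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```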
@@ -255,7 +273,7 @@ local prometheus = grafana.prometheus;
 local template = grafana.template;
 local graphPanel = grafana.graphPanel;

-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
+local kp = (import 'kube-prometheus/main.libsonnet') + {
   _config+:: {
     namespace: 'monitoring',
   },
@@ -303,7 +321,7 @@ As jsonnet is a superset of json, the jsonnet `import` function can be used to i
 [embedmd]:# (../examples/grafana-additional-rendered-dashboard-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
+local kp = (import 'kube-prometheus/main.libsonnet') + {
   _config+:: {
     namespace: 'monitoring',
   },
@@ -329,7 +347,7 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
 In case you have lots of JSON dashboards exported from the Grafana UI, the above approach can take a lot of time; to improve performance, we can use the `rawDashboards` field and provide its value as a JSON string by using `importstr`.
 [embedmd]:# (../examples/grafana-additional-rendered-dashboard-example-2.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
+local kp = (import 'kube-prometheus/main.libsonnet') + {
   _config+:: {
     namespace: 'monitoring',
   },
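A hedged sketch of the `rawDashboards`/`importstr` approach described above, assuming `rawDashboards` is nested under the `grafana` section of `_config` (which the hunk does not show) and using a hypothetical dashboard file name:

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    grafana+:: {
      rawDashboards+:: {
        // Hypothetical file; importstr embeds the JSON verbatim as a string.
        'my-dashboard.json': (importstr 'my-dashboard.json'),
      },
    },
  },
};

{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```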
@@ -17,36 +17,42 @@ Using kube-prometheus and kubectl you will be able install the following for mon
 [embedmd]:# (../examples/weave-net-example.jsonnet)
 ```jsonnet
-local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
-  (import 'kube-prometheus/kube-prometheus-weave-net.libsonnet') + {
-  _config+:: {
-    namespace: 'monitoring',
+local kp = (import 'kube-prometheus/main.libsonnet') +
+  (import 'kube-prometheus/addons/weave-net.libsonnet') + {
+  values+:: {
+    common+: {
+      namespace: 'monitoring',
+    },
   },
-  prometheusAlerts+:: {
-    groups: std.map(
-      function(group)
-        if group.name == 'weave-net' then
-          group {
-            rules: std.map(
-              function(rule)
-                if rule.alert == 'WeaveNetFastDPFlowsLow' then
-                  rule {
-                    expr: 'sum(weave_flows) < 20000',
-                  }
-                else if rule.alert == 'WeaveNetIPAMUnreachable' then
-                  rule {
-                    expr: 'weave_ipam_unreachable_percentage > 25',
-                  }
-                else
-                  rule
-              ,
-              group.rules
-            ),
-          }
-        else
-          group,
-      super.groups
-    ),
+  prometheus+: {
+    prometheusRule+: {
+      spec+: {
+        groups: std.map(
+          function(group)
+            if group.name == 'weave-net' then
+              group {
+                rules: std.map(
+                  function(rule)
+                    if rule.alert == 'WeaveNetFastDPFlowsLow' then
+                      rule {
+                        expr: 'sum(weave_flows) < 20000',
+                      }
+                    else if rule.alert == 'WeaveNetIPAMUnreachable' then
+                      rule {
+                        expr: 'weave_ipam_unreachable_percentage > 25',
+                      }
+                    else
+                      rule
+                  ,
+                  group.rules
+                ),
+              }
+            else
+              group,
+          super.groups
+        ),
+      },
+    },
   },
 };