diff --git a/jsonnet/kube-prometheus/components/mixin/alerts/general.libsonnet b/jsonnet/kube-prometheus/components/mixin/alerts/general.libsonnet
index 16f3e39c..cd5c7165 100644
--- a/jsonnet/kube-prometheus/components/mixin/alerts/general.libsonnet
+++ b/jsonnet/kube-prometheus/components/mixin/alerts/general.libsonnet
@@ -7,7 +7,8 @@
           {
             alert: 'TargetDown',
             annotations: {
-              message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.',
+              summary: 'One or more targets are unreachable.',
+              description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.',
             },
             expr: '100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10',
             'for': '10m',
@@ -18,7 +19,8 @@
           {
             alert: 'Watchdog',
             annotations: {
-              message: |||
+              summary: 'An alert that should always be firing to certify that Alertmanager is working properly.',
+              description: |||
                This is an alert meant to ensure that the entire alerting pipeline is functional.
                This alert is always firing, therefore it should always be firing in Alertmanager
                and always fire against a receiver. There are integrations with various notification
diff --git a/manifests/kube-prometheus-prometheusRule.yaml b/manifests/kube-prometheus-prometheusRule.yaml
index 530dc0cd..e3ee47fa 100644
--- a/manifests/kube-prometheus-prometheusRule.yaml
+++ b/manifests/kube-prometheus-prometheusRule.yaml
@@ -15,21 +15,23 @@ spec:
     rules:
     - alert: TargetDown
       annotations:
-        message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.'
+        description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.'
         runbook_url: https://github.com/prometheus-operator/kube-prometheus/wiki/targetdown
+        summary: One or more targets are unreachable.
       expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10
       for: 10m
       labels:
        severity: warning
     - alert: Watchdog
       annotations:
-        message: |
+        description: |
          This is an alert meant to ensure that the entire alerting pipeline is functional.
          This alert is always firing, therefore it should always be firing in Alertmanager
          and always fire against a receiver. There are integrations with various notification
          mechanisms that send a notification when this alert is not firing. For example the
          "DeadMansSnitch" integration in PagerDuty.
         runbook_url: https://github.com/prometheus-operator/kube-prometheus/wiki/watchdog
+        summary: An alert that should always be firing to certify that Alertmanager is working properly.
       expr: vector(1)
       labels:
         severity: none
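
Context, not part of the patch: splitting the old "message" annotation into "summary" and "description" lets downstream notification templates render a short title line and a longer body separately. A minimal sketch of an Alertmanager receiver that consumes both keys; the receiver name, channel, and webhook URL below are hypothetical placeholders, only the summary/description annotation keys come from this change:

receivers:
- name: team-slack                                        # hypothetical receiver name
  slack_configs:
  - api_url: https://hooks.slack.com/services/EXAMPLE     # placeholder webhook URL
    channel: '#alerts'                                    # hypothetical channel
    # one-line notification title taken from the new summary annotation
    title: '{{ .CommonAnnotations.summary }}'
    # longer body assembled from each firing alert's description annotation
    text: '{{ range .Alerts }}{{ .Annotations.description }}{{ "\n" }}{{ end }}'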