Compare commits

..

15 Commits

Author SHA1 Message Date
Paweł Krupa
864ca1e773 Merge pull request #1448 from andrein/cherry-pick-1445
Cherry-pick grafana LDAP into release-0.9
2021-10-20 13:50:04 +02:00
Andrei Nistor
822f885d67 add grafana ldap example
(cherry picked from commit 882484daf1)
2021-10-19 17:08:11 +03:00
machinly
184a6a452b add grafana ldap support
(cherry picked from commit ce7007c568)
2021-10-19 17:08:11 +03:00
Paweł Krupa
b6ab321ac8 Merge pull request #1443 from prometheus-operator/automated-updates-release-0.9 2021-10-18 10:42:13 +02:00
dgrisonnet
6e67e7fdbb [bot] [release-0.9] Automated version update 2021-10-18 07:39:34 +00:00
Damien Grisonnet
ad19693121 Merge pull request #1432 from prometheus-operator/automated-updates-release-0.9
[bot] [release-0.9] Automated version update
2021-10-12 09:20:41 +02:00
dgrisonnet
8ccd82e40a [bot] [release-0.9] Automated version update 2021-10-11 07:39:30 +00:00
Damien Grisonnet
c1fc78c979 Merge pull request #1405 from PhilipGough/bp-9
Adjust dropped metrics from cAdvisor
2021-09-28 12:00:16 +02:00
Philip Gough
4e96f7bed6 Adjust dropped metrics from cAdvisor
This change drops pod-centric metrics without a non-empty 'container' label.

Previously we dropped pod-centric metrics without a (pod, namespace) label set
however these can be critical for debugging.

Keep 'container_fs_.*' metrics from cAdvisor
2021-09-28 10:17:59 +01:00
Damien Grisonnet
49eb7c66f6 Merge pull request #1400 from prometheus-operator/automated-updates-release-0.9
[bot] [release-0.9] Automated version update
2021-09-27 11:11:29 +02:00
dgrisonnet
b4b365cead [bot] [release-0.9] Automated version update 2021-09-27 07:39:22 +00:00
Damien Grisonnet
fdcff9a224 Merge pull request #1366 from dgrisonnet/pin-kubernetes-grafana
Pin kubernetes-grafana on release-0.9
2021-09-07 09:17:15 +02:00
Damien Grisonnet
2640b11d77 jsonnet: pin kubernetes-grafana on release-0.9
Signed-off-by: Damien Grisonnet <dgrisonn@redhat.com>
2021-09-06 20:07:07 +02:00
Simon Pasquier
9ead6ebc53 Merge pull request #1349 from prometheus-operator/automated-updates-release-0.9
[bot] [release-0.9] Automated version update
2021-08-25 12:03:25 +02:00
simonpasquier
62a5b28b55 [bot] [release-0.9] Automated version update 2021-08-25 09:37:18 +00:00
179 changed files with 14014 additions and 26840 deletions

.github/env vendored

@@ -1,2 +0,0 @@
golang-version=1.16
kind-version=v0.11.1


@@ -22,17 +22,6 @@ jobs:
with:
go-version: ${{ env.golang-version }}
- run: make --always-make generate validate && git diff --exit-code
check-docs:
runs-on: ubuntu-latest
name: Check Documentation formatting and links
steps:
- uses: actions/checkout@v2
with:
persist-credentials: false
- uses: actions/setup-go@v2
with:
go-version: ${{ env.golang-version }}
- run: make check-docs
lint:
runs-on: ubuntu-latest
name: Jsonnet linter
@@ -63,8 +52,8 @@ jobs:
strategy:
matrix:
kind-image:
- 'kindest/node:v1.23.0'
- 'kindest/node:v1.22.4'
- 'kindest/node:v1.21.1'
- 'kindest/node:v1.22.0'
steps:
- uses: actions/checkout@v2
with:
@@ -86,18 +75,3 @@ jobs:
run: |
export KUBECONFIG="${HOME}/.kube/config"
make test-e2e
# Added to summarize the matrix and allow easy branch protection rules setup
e2e-tests-result:
name: End-to-End Test Results
if: always()
needs:
- e2e-tests
runs-on: ubuntu-latest
steps:
- name: Mark the job as a success
if: needs.e2e-tests.result == 'success'
run: exit 0
- name: Mark the job as a failure
if: needs.e2e-tests.result != 'success'
run: exit 1


@@ -1,20 +0,0 @@
name: 'Close stale issues and PRs'
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
with:
stale-issue-message: 'This issue has been automatically marked as stale because it has not had any activity in the last 60 days. Thank you for your contributions.'
close-issue-message: 'This issue was closed because it has not had any activity in the last 120 days. Please reopen if you feel this is still valid.'
days-before-stale: 60
days-before-issue-close: 120
days-before-pr-close: -1 # Prevent closing PRs
exempt-issue-labels: 'kind/feature,help wanted,kind/bug'
stale-issue-label: 'stale'
stale-pr-label: 'stale'
exempt-draft-pr: true


@@ -10,10 +10,10 @@ jobs:
strategy:
matrix:
branch:
- 'release-0.5'
- 'release-0.6'
- 'release-0.7'
- 'release-0.8'
- 'release-0.9'
- 'main'
steps:
- uses: actions/checkout@v2
@@ -23,15 +23,11 @@ jobs:
with:
go-version: 1.16
- name: Upgrade versions
id: versions
run: |
export GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}
# Write to temporary file to make update atomic
scripts/generate-versions.sh > /tmp/versions.json
mv /tmp/versions.json jsonnet/kube-prometheus/versions.json
# Get the links to the changelogs of the updated versions and make them
# available to the reviewers
echo ::set-output name=new_changelogs::$(scripts/get-new-changelogs.sh)
if: matrix.branch == 'main'
- name: Update jsonnet dependencies
run: |
@@ -53,12 +49,7 @@ jobs:
This is an automated version and jsonnet dependencies update performed from CI.
Please review the following changelogs to make sure that we don't miss any important
changes before merging this PR.
${{ steps.versions.outputs.new_changelogs }}
Configuration of the workflow is located in `.github/workflows/versions.yaml`.
Configuration of the workflow is located in `.github/workflows/versions.yaml`
## Type of change
@@ -70,8 +61,6 @@ jobs:
```
team-reviewers: kube-prometheus-reviewers
committer: Prometheus Operator Bot <prom-op-bot@users.noreply.github.com>
author: Prometheus Operator Bot <prom-op-bot@users.noreply.github.com>
branch: automated-updates-${{ matrix.branch }}
delete-branch: true
# GITHUB_TOKEN cannot be used as it won't trigger CI in a created PR

.gitignore vendored

@@ -5,5 +5,4 @@ vendor/
.swp
crdschemas/
developer-workspace/gitpod/_output
kind
.gitpod/_output/


@@ -24,17 +24,17 @@ tasks:
chmod +x ${PWD}/.git/hooks/pre-commit
- name: run kube-prometheus
command: |
developer-workspace/gitpod/prepare-k3s.sh
developer-workspace/common/deploy-kube-prometheus.sh
.gitpod/prepare-k3s.sh
.gitpod/deploy-kube-prometheus.sh
- name: kernel dev environment
init: |
sudo apt update -y
sudo apt install qemu qemu-system-x86 linux-image-$(uname -r) libguestfs-tools sshpass netcat -y
sudo curl -o /usr/bin/kubectl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo chmod +x /usr/bin/kubectl
developer-workspace/gitpod/prepare-rootfs.sh
.gitpod/prepare-rootfs.sh
command: |
developer-workspace/gitpod/qemu.sh
.gitpod/qemu.sh
ports:
- port: 3000
onOpen: open-browser
@@ -44,4 +44,4 @@ ports:
onOpen: open-browser
vscode:
extensions:
- heptio.jsonnet
- heptio.jsonnet@0.1.0:woEDU5N62LRdgdz0g/I6sQ==


@@ -1,13 +1,9 @@
#!/bin/bash
kubectl apply --server-side -f manifests/setup
kubectl apply -f manifests/setup
# Safety wait for CRDs to be working
sleep 30
kubectl apply -f manifests/
sleep 30
# Safety wait for resources to be created
kubectl rollout status -n monitoring daemonset node-exporter
kubectl rollout status -n monitoring statefulset alertmanager-main
@@ -17,4 +13,4 @@ kubectl rollout status -n monitoring deployment kube-state-metrics
kubectl port-forward -n monitoring svc/grafana 3000 > /dev/null 2>&1 &
kubectl port-forward -n monitoring svc/alertmanager-main 9093 > /dev/null 2>&1 &
kubectl port-forward -n monitoring svc/prometheus-k8s 9090 > /dev/null 2>&1 &


@@ -1,9 +0,0 @@
version: 1
validators:
# Ignore localhost links.
- regex: 'localhost'
type: "ignore"
# Ignore release links.
- regex: 'https:\/\/github\.com\/prometheus-operator\/kube-prometheus\/releases'
type: "ignore"


@@ -1,30 +1,3 @@
## release-0.10 / 2021-12-17
* [CHANGE] Adjust node filesystem space filling up warning threshold to 20% [#1357](https://github.com/prometheus-operator/kube-prometheus/pull/1357)
* [CHANGE] Always generate grafana-config secret [#1373](https://github.com/prometheus-operator/kube-prometheus/pull/1373)
* [CHANGE] Make filesystem ignored mount points configurable for node-exporter [#1376](https://github.com/prometheus-operator/kube-prometheus/pull/1376)
* [CHANGE] Drop some high cardinality cAdvisor metrics [#1406](https://github.com/prometheus-operator/kube-prometheus/pull/1406), [#1396](https://github.com/prometheus-operator/kube-prometheus/pull/1396)
* [CHANGE] Use `--collector.filesystem.mount-points-exclude` instead of deprecated `--collector.filesystem.ignored-mount-points` argument for `node-exporter` [#1407](https://github.com/prometheus-operator/kube-prometheus/pull/1407)
* [CHANGE] Drop some of prometheus-adapter metrics that are inherited from the apiserver code but aren't useful in the context of prometheus-adapter [#1409](https://github.com/prometheus-operator/kube-prometheus/pull/1409)
* [CHANGE] Remove "app" label selector deprecated by Prometheus-operator [#1420](https://github.com/prometheus-operator/kube-prometheus/pull/1420)
* [CHANGE] Use recommended instance label for Prometheus/Alertmanager resources [#1520](https://github.com/prometheus-operator/kube-prometheus/pull/1520)
* [CHANGE] Drop deprecated apiserver_longrunning_gauge and apiserver_registered_watchers metrics [#1553](https://github.com/prometheus-operator/kube-prometheus/pull/1553)
* [CHANGE] Drop deprecated coredns_cache_misses_total [#1553](https://github.com/prometheus-operator/kube-prometheus/pull/1553)
* [ENHANCEMENT] Add support for LDAP authentication in Grafana [#1445](https://github.com/prometheus-operator/kube-prometheus/pull/1445)
* [ENHANCEMENT] Include rewritten kubernetes-grafana for easier usage of new library features [#1450](https://github.com/prometheus-operator/kube-prometheus/pull/1450)
* [ENHANCEMENT] Specify default container in node-exporter pod [#1462](https://github.com/prometheus-operator/kube-prometheus/pull/1462)
* [ENHANCEMENT] Make metadata consistent across objects in the same component [#1471](https://github.com/prometheus-operator/kube-prometheus/pull/1471)
* [ENHANCEMENT] Establish convention for default field types [#1475](https://github.com/prometheus-operator/kube-prometheus/pull/1475)
* [ENHANCEMENT] Exclude k3s containerd mountpoints [#1497](https://github.com/prometheus-operator/kube-prometheus/pull/1497)
* [ENHANCEMENT] Alertmanager now uses the new `matcher` syntax in the routing tree and inhibition rules [#1508](https://github.com/prometheus-operator/kube-prometheus/pull/1508)
* [ENHANCEMENT] Deprecate `thanosSelector` and expose `mixin._config.thanos` config variable for thanos sidecar [#1543](https://github.com/prometheus-operator/kube-prometheus/pull/1543)
* [FEATURE] Support scraping config-reloader sidecar for Prometheus and AlertManager StatefulSets [#1344](https://github.com/prometheus-operator/kube-prometheus/pull/1344)
* [FEATURE] Expose prometheus alerting configuration in $.values.prometheus configuration [#1476](https://github.com/prometheus-operator/kube-prometheus/pull/1476)
* [BUGFIX] Remove deprecated policy/v1beta1 Kubernetes API [#1433](https://github.com/prometheus-operator/kube-prometheus/pull/1433)
* [BUGFIX] Fix prometheus URL in prometheus-adapter [#1463](https://github.com/prometheus-operator/kube-prometheus/pull/1463)
* [BUGFIX] Always use proper values scope for namespace in addons [#1518](https://github.com/prometheus-operator/kube-prometheus/pull/1518)
* [BUGFIX] Fix default empty groups for k8s PrometheusRule [#1534](https://github.com/prometheus-operator/kube-prometheus/pull/1534)
## release-0.9 / 2021-08-19
* [CHANGE] Test against Kubernetes 1.21 and 1.22. #1161 #1337


@@ -1,102 +0,0 @@
# Contributing
This project is licensed under the [Apache 2.0 license](LICENSE) and accepts
contributions via GitHub pull requests. This document outlines some of the
conventions on development workflow, commit message formatting, contact points
and other resources to make it easier to get your contribution accepted.
To maintain a safe and welcoming community, all participants must adhere to the
project's [Code of Conduct](code-of-conduct.md).
## Certificate of Origin
By contributing to this project you agree to the Developer Certificate of
Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution. See the [DCO](DCO) file for details.
## Community
The project is developed in the open. Here are some of the channels we use to communicate and contribute:
[**Kubernetes Slack**](https://slack.k8s.io/): [#prometheus-operator](https://kubernetes.slack.com/archives/CFFDS2Z7F) -
General discussions channel
[**Kubernetes Slack**](https://slack.k8s.io/): [#prometheus-operator-dev](https://kubernetes.slack.com/archives/C01B03QCSMN) -
Channel used for project developers discussions
**Discussion forum**: [GitHub discussions](https://github.com/prometheus-operator/kube-prometheus/discussions)
**Twitter**: [@PromOperator](https://twitter.com/promoperator)
**GitHub**: To file bugs and feature requests. For questions and discussions use the GitHub discussions. Generally,
the other community channels listed here are best suited to get support or discuss overarching topics.
Please avoid emailing maintainers directly.
We host public bi-weekly meetings focused on project development and contributions. They're meant for developers
and maintainers to meet and get unblocked, pair review, and discuss development aspects of this project and related
projects (e.g. kubernetes-mixin). The document linked below contains all the details, including how to register.
**Office Hours**: [Prometheus Operator & Kube-prometheus Contributor Office Hours](https://docs.google.com/document/d/1-fjJmzrwRpKmSPHtXN5u6VZnn39M28KqyQGBEJsqUOk)
## Getting Started
- Fork the repository on GitHub
- Read the [README](README.md) for build and test instructions
- Play with the project, submit bug fixes, submit patches!
## Contribution Flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work (usually `main`).
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure the tests pass, and add any new tests as appropriate.
- Submit a pull request to the original repository.
Thanks for your contributions!
### Generated Files
All `.yaml` files in the `/manifests` folder are generated via
[Jsonnet](https://jsonnet.org/). Contributing changes will most likely include
the following process:
1. Make your changes in the respective `*.jsonnet` or `*.libsonnet` file.
2. Commit your changes (This is currently necessary due to our vendoring
process. This is likely to change in the future).
3. Generate dependent `*.yaml` files: `make generate`
4. Commit the generated changes.
### Format of the Commit Message
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.


@@ -2,39 +2,31 @@ SHELL=/bin/bash -o pipefail
BIN_DIR?=$(shell pwd)/tmp/bin
MDOX_BIN=$(BIN_DIR)/mdox
EMBEDMD_BIN=$(BIN_DIR)/embedmd
JB_BIN=$(BIN_DIR)/jb
GOJSONTOYAML_BIN=$(BIN_DIR)/gojsontoyaml
JSONNET_BIN=$(BIN_DIR)/jsonnet
JSONNETLINT_BIN=$(BIN_DIR)/jsonnet-lint
JSONNETFMT_BIN=$(BIN_DIR)/jsonnetfmt
KUBECONFORM_BIN=$(BIN_DIR)/kubeconform
TOOLING=$(JB_BIN) $(GOJSONTOYAML_BIN) $(JSONNET_BIN) $(JSONNETLINT_BIN) $(JSONNETFMT_BIN) $(KUBECONFORM_BIN) $(MDOX_BIN)
TOOLING=$(EMBEDMD_BIN) $(JB_BIN) $(GOJSONTOYAML_BIN) $(JSONNET_BIN) $(JSONNETLINT_BIN) $(JSONNETFMT_BIN) $(KUBECONFORM_BIN)
JSONNETFMT_ARGS=-n 2 --max-blank-lines 2 --string-style s --comment-style s
MDOX_VALIDATE_CONFIG?=.mdox.validate.yaml
MD_FILES_TO_FORMAT=$(shell find docs developer-workspace examples experimental jsonnet manifests -name "*.md") $(shell ls *.md)
KUBE_VERSION?="1.20.0"
all: generate fmt test docs
all: generate fmt test
.PHONY: clean
clean:
# Remove all files and directories ignored by git.
git clean -Xfd .
.PHONY: docs
docs: $(MDOX_BIN) $(shell find examples) build.sh example.jsonnet
@echo ">> formatting and local/remote links"
$(MDOX_BIN) fmt --soft-wraps -l --links.localize.address-regex="https://prometheus-operator.dev/.*" --links.validate.config-file=$(MDOX_VALIDATE_CONFIG) $(MD_FILES_TO_FORMAT)
.PHONY: check-docs
check-docs: $(MDOX_BIN) $(shell find examples) build.sh example.jsonnet
@echo ">> checking formatting and local/remote links"
$(MDOX_BIN) fmt --soft-wraps --check -l --links.localize.address-regex="https://prometheus-operator.dev/.*" --links.validate.config-file=$(MDOX_VALIDATE_CONFIG) $(MD_FILES_TO_FORMAT)
.PHONY: generate
generate: manifests
generate: manifests **.md
**.md: $(EMBEDMD_BIN) $(shell find examples) build.sh example.jsonnet
$(EMBEDMD_BIN) -w `find . -name "*.md" | grep -v vendor`
manifests: examples/kustomize.jsonnet $(GOJSONTOYAML_BIN) vendor
./build.sh $<
@@ -51,16 +43,7 @@ update: $(JB_BIN)
$(JB_BIN) update
.PHONY: validate
validate: validate-1.21 validate-1.22
validate-1.21:
KUBE_VERSION=1.21.1 $(MAKE) kubeconform
validate-1.22:
KUBE_VERSION=1.22.0 $(MAKE) kubeconform
.PHONY: kubeconform
kubeconform: crdschemas manifests $(KUBECONFORM_BIN)
validate: crdschemas manifests $(KUBECONFORM_BIN)
$(KUBECONFORM_BIN) -kubernetes-version $(KUBE_VERSION) -schema-location 'default' -schema-location 'crdschemas/{{ .ResourceKind }}.json' -skip CustomResourceDefinition manifests/
.PHONY: fmt
@@ -88,8 +71,3 @@ $(BIN_DIR):
$(TOOLING): $(BIN_DIR)
@echo Installing tools from scripts/tools.go
@cd scripts && cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -tI % go build -modfile=go.mod -o $(BIN_DIR) %
.PHONY: deploy
deploy:
./developer-workspace/codespaces/prepare-kind.sh
./developer-workspace/common/deploy-kube-prometheus.sh

README.md

@@ -48,6 +48,21 @@ If you are migrating from `release-0.7` branch or earlier please read [what chan
- [Compile the manifests and apply](#compile-the-manifests-and-apply)
- [Configuration](#configuration)
- [Customization Examples](#customization-examples)
- [Cluster Creation Tools](#cluster-creation-tools)
- [Internal Registry](#internal-registry)
- [NodePorts](#nodeports)
- [Prometheus Object Name](#prometheus-object-name)
- [node-exporter DaemonSet namespace](#node-exporter-daemonset-namespace)
- [Alertmanager configuration](#alertmanager-configuration)
- [Adding additional namespaces to monitor](#adding-additional-namespaces-to-monitor)
- [Defining the ServiceMonitor for each additional Namespace](#defining-the-servicemonitor-for-each-additional-namespace)
- [Monitoring all namespaces](#monitoring-all-namespaces)
- [Static etcd configuration](#static-etcd-configuration)
- [Pod Anti-Affinity](#pod-anti-affinity)
- [Stripping container resource limits](#stripping-container-resource-limits)
- [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
- [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
- [Setting up a blackbox exporter](#setting-up-a-blackbox-exporter)
- [Minikube Example](#minikube-example)
- [Continuous Delivery](#continuous-delivery)
- [Troubleshooting](#troubleshooting)
@@ -56,7 +71,7 @@ If you are migrating from `release-0.7` branch or earlier please read [what chan
- [Authorization problem](#authorization-problem)
- [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
- [Error retrieving kube-proxy metrics](#error-retrieving-kube-proxy-metrics)
- [Contributing](CONTRIBUTING.md)
- [Contributing](#contributing)
- [License](#license)
## Prerequisites
@@ -65,8 +80,8 @@ You will need a Kubernetes cluster, that's it! By default it is assumed, that th
This means the kubelet configuration must contain these flags:
* `--authentication-token-webhook=true` This flag enables the use of a `ServiceAccount` token to authenticate against the kubelet(s). This can also be enabled by setting the kubelet configuration value `authentication.webhook.enabled` to `true`.
* `--authorization-mode=Webhook` This flag makes the kubelet perform an RBAC request against the API to determine whether the requesting entity (Prometheus in this case) is allowed to access a resource, specifically the `/metrics` endpoint for this project. This can also be enabled by setting the kubelet configuration value `authorization.mode` to `Webhook`.
This stack provides [resource metrics](https://github.com/kubernetes/metrics#resource-metrics-api) by deploying the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/).
This adapter is an Extension API Server and Kubernetes needs to have this feature enabled, otherwise the adapter has no effect, but is still deployed.
@@ -76,7 +91,7 @@ This adapter is an Extension API Server and Kubernetes needs to be have this fea
To try out this stack, start [minikube](https://github.com/kubernetes/minikube) with the following command:
```shell
$ minikube delete && minikube start --kubernetes-version=v1.20.0 --memory=6g --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.bind-address=0.0.0.0 --extra-config=controller-manager.bind-address=0.0.0.0
$ minikube delete && minikube start --kubernetes-version=v1.20.0 --memory=6g --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
```
The kube-prometheus stack includes a resource metrics API server, so the metrics-server addon is not necessary. Ensure the metrics-server addon is disabled on minikube:
@@ -91,37 +106,36 @@ $ minikube addons disable metrics-server
The following versions are supported and work as we test against these versions in their respective branches. But note that other versions might work!
| kube-prometheus stack | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 |
|--------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| [`release-0.7`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.7) | | ✔ | ✗ | ✗ | ✗ |
| [`release-0.8`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.8) | ✗ | ✔ | ✔ | ✗ | ✗ |
| [`release-0.9`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.9) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [`release-0.10`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.10) | ✗ | ✗ | ✗ | ✔ | ✔ |
| [`main`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | ✗ | ✔ | ✔ |
| kube-prometheus stack | Kubernetes 1.18 | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 |
|------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| [`release-0.6`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.6) | | ✔ | ✗ | ✗ | ✗ |
| [`release-0.7`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.7) | ✗ | ✔ | ✔ | ✗ | ✗ |
| [`release-0.8`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.8) | ✗ | ✗ | ✔ | ✔ | ✗ |
| [`release-0.9`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.9) | ✗ | ✗ | ✗ | ✔ | ✔ |
| [`HEAD`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | ✗ | ✔ | ✔ |
## Quickstart
> Note: For versions before Kubernetes v1.21.z refer to the [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix) in order to choose a compatible branch.
>Note: For versions before Kubernetes v1.21.z refer to the [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix) in order to choose a compatible branch.
This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).
Though for a quickstart, a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository so that the content can be tried out quickly. To try out the stack un-customized, run:
* Create the monitoring stack using the config in the `manifests` directory:
```shell
# Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources
kubectl apply --server-side -f manifests/setup
kubectl create -f manifests/setup
until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
kubectl apply -f manifests/
kubectl create -f manifests/
```
We create the namespace and CustomResourceDefinitions first to avoid race conditions when deploying the monitoring components.
Alternatively, the resources in both folders can be applied with a single command
`kubectl apply --server-side -f manifests/setup -f manifests`, but it may be necessary to run the command multiple times for all components to
`kubectl create -f manifests/setup -f manifests`, but it may be necessary to run the command multiple times for all components to
be created successfully.
* And to teardown the stack:
```shell
kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
```
@@ -130,7 +144,7 @@ kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
Prometheus, Grafana, and Alertmanager dashboards can be accessed quickly using `kubectl port-forward` after running the quickstart via the commands below. Kubernetes 1.10 or later is required.
> Note: There are instructions on how to route to these pods behind an ingress controller in the [Exposing Prometheus/Alermanager/Grafana via Ingress](docs/customizations/exposing-prometheus-alertmanager-grafana-ingress.md) section.
> Note: There are instructions on how to route to these pods behind an ingress controller in the [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress) section.
Prometheus
@@ -159,31 +173,29 @@ Then access via [http://localhost:9093](http://localhost:9093)
## Customizing Kube-Prometheus
This section:
* describes how to customize the kube-prometheus library via compiling the kube-prometheus manifests yourself (as an alternative to the [Quickstart section](#quickstart)).
* still doesn't require you to make a copy of this entire repository, but rather only a copy of a few select files.
* describes how to customize the kube-prometheus library via compiling the kube-prometheus manifests yourself (as an alternative to the [Quickstart section](#Quickstart)).
* still doesn't require you to make a copy of this entire repository, but rather only a copy of a few select files.
### Installing
The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.
Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install) (the jsonnet package manager):
```shell
$ mkdir my-kube-prometheus; cd my-kube-prometheus
$ jb init # Creates the initial/empty `jsonnetfile.json`
# Install the kube-prometheus dependency
$ jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
$ jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.7 # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/example.jsonnet -O example.jsonnet
$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/build.sh -O build.sh
$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.7/example.jsonnet -O example.jsonnet
$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.7/build.sh -O build.sh
```
> `jb` can be installed with `go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest`
> `jb` can be installed with `go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb`
> An e.g. of how to install a given version of this library: `jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main`
> An e.g. of how to install a given version of this library: `jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.7`
In order to update the kube-prometheus dependency, simply use the jsonnet-bundler update functionality:
```shell
$ jb update
```
@@ -192,13 +204,14 @@ $ jb update
e.g. of how to compile the manifests: `./build.sh example.jsonnet`
> before compiling, install `gojsontoyaml` tool with `go install github.com/brancz/gojsontoyaml@latest` and `jsonnet` with `go install github.com/google/go-jsonnet/cmd/jsonnet@latest`
> before compiling, install `gojsontoyaml` tool with `go get github.com/brancz/gojsontoyaml` and `jsonnet` with `go get github.com/google/go-jsonnet/cmd/jsonnet`
Here's [example.jsonnet](example.jsonnet):
> Note: some of the following components must be configured beforehand. See [configuration](#configuration) and [customization-examples](#customization-examples).
```jsonnet mdox-exec="cat example.jsonnet"
[embedmd]:# (example.jsonnet)
```jsonnet
local kp =
(import 'kube-prometheus/main.libsonnet') +
// Uncomment the following imports to enable its patches
@@ -237,7 +250,8 @@ local kp =
And here's the [build.sh](build.sh) script (which uses `vendor/` to render all manifests in a json structure of `{filename: manifest-content}`):
```sh mdox-exec="cat build.sh"
[embedmd]:# (build.sh)
```sh
#!/usr/bin/env bash
# This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files.
@@ -268,22 +282,16 @@ rm -f kustomization
This script runs the jsonnet code, then reads each key of the generated JSON, uses that key as the file name, writes the value of that key to that file, and converts each JSON manifest to YAML.
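To make that concrete, here is a minimal sketch (the file names and manifest contents are illustrative only) of the object shape that the jsonnet evaluation produces, with one key per output file:

```jsonnet
// Illustrative only: the top-level object maps output file names to
// Kubernetes manifests; build.sh writes each value out as a YAML
// manifest named after the key.
{
  '00namespace-namespace': {
    apiVersion: 'v1',
    kind: 'Namespace',
    metadata: { name: 'monitoring' },
  },
  'prometheus-prometheus': {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'Prometheus',
    // ...remaining fields of the Prometheus manifest...
  },
  // ...one entry per generated manifest...
}
```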
### Apply the kube-prometheus stack
The previous steps (compilation) have created a number of manifest files in the manifests/ folder.
Now simply use `kubectl` to install Prometheus and Grafana as per your configuration:
```shell
# Update the namespace and CRDs, and then wait for them to be available before creating the remaining resources
$ kubectl apply --server-side -f manifests/setup
$ kubectl apply -f manifests/setup
$ kubectl apply -f manifests/
```
> Note that due to the size of some CRDs we are using the kubectl server-side apply feature, which is generally available since
> Kubernetes 1.22. If you are using an earlier Kubernetes version this feature may not be available and you would need to
> use `kubectl create` instead.
Alternatively, the resources in both folders can be applied with a single command
`kubectl apply --server-side -Rf manifests`, but it may be necessary to run the command multiple times for all components to
`kubectl apply -Rf manifests`, but it may be necessary to run the command multiple times for all components to
be created successfully.
Check the monitoring namespace (or the namespace you have specified in `namespace: `) and make sure the pods are running. Prometheus and Grafana should be up and running soon.
@@ -291,18 +299,15 @@ Check the monitoring namespace (or the namespace you have specific in `namespace
### Containerized Installing and Compiling
If you prefer not to install `jb`, `jsonnet`, or `gojsontoyaml`, use the `quay.io/coreos/jsonnet-ci` container image. Do the following from this `kube-prometheus` directory:
```shell
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci jb update
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci ./build.sh example.jsonnet
```
## Update from upstream project
You may wish to fetch changes made on this project so they are available to you.
### Update jb
`jb` may have been updated so it's a good idea to get the latest version of this binary:
```shell
@@ -310,17 +315,15 @@ $ go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
```
### Update kube-prometheus
The command below will sync with upstream project:
```shell
$ jb update
```
### Compile the manifests and apply
Once updated, just follow the instructions under "Compiling" and "Apply the kube-prometheus stack" to apply the changes to your cluster.
## Configuration
Jsonnet has the concept of hidden fields. These are fields that are not rendered in the result. This is used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Customizing Kube-Prometheus section](#customizing-kube-prometheus), you can see an example of this, where the `namespace` is being configured to be `monitoring`. In order not to override the whole object, use the `+::` construct of jsonnet to merge objects; this way you can override individual settings while retaining all other settings and defaults, as sketched below.
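For example, a minimal sketch (mirroring the snippet above; the rendered component is arbitrary) of overriding one setting while keeping every other default:

```jsonnet
// `values+::` merges into the hidden configuration object instead of
// replacing it, so only `namespace` changes; all other defaults remain.
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
};

// Render one component to see the effect of the override.
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```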
@@ -340,8 +343,7 @@ Configuration is mainly done in the `values` map. You can see this being used in
},
```
The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana ), but needed configuration can be customized from the same top level `values` field. For example to allow anonymous access to grafana, add the following `values` section:
The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana), but needed configuration can be customized from the same top level `values` field. For example to allow anonymous access to grafana, add the following `values` section:
```
grafana+:: {
config: { // http://docs.grafana.org/installation/configuration/
@@ -356,7 +358,368 @@ The grafana definition is located in a different project (https://github.com/bra
Jsonnet is a Turing-complete language, so any logic can be expressed in it. It also has powerful merge functionalities, allowing sophisticated customizations of any kind simply by merging them into the object the library provides.
To get started, we provide several customization examples in the [docs/customizations/](docs/customizations) section.
### Cluster Creation Tools
A common example is that not all Kubernetes clusters are created exactly the same way, meaning the configuration to monitor them may be slightly different. For the following clusters there are mixins available to easily configure them:
* aws
* bootkube
* eks
* gke
* kops-coredns
* kubeadm
* kubespray
These mixins are selectable via the `platform` field of kubePrometheus:
[embedmd]:# (examples/jsonnet-snippets/platform.jsonnet)
```jsonnet
(import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
platform: 'example-platform',
},
},
}
```
### Internal Registry
Some Kubernetes installations source all their images from an internal registry. kube-prometheus supports this use case and helps the user synchronize every image it uses to the internal registry and generate manifests pointing at the internal registry.
To produce the `docker pull/tag/push` commands that will synchronize upstream images to `internal-registry.com/organization` (after having run the `jb` command to populate the vendor directory):
```shell
$ jsonnet -J vendor -S --tla-str repository=internal-registry.com/organization sync-to-internal-registry.jsonnet
$ docker pull k8s.gcr.io/addon-resizer:1.8.4
$ docker tag k8s.gcr.io/addon-resizer:1.8.4 internal-registry.com/organization/addon-resizer:1.8.4
$ docker push internal-registry.com/organization/addon-resizer:1.8.4
$ docker pull quay.io/prometheus/alertmanager:v0.16.2
$ docker tag quay.io/prometheus/alertmanager:v0.16.2 internal-registry.com/organization/alertmanager:v0.16.2
$ docker push internal-registry.com/organization/alertmanager:v0.16.2
...
```
The output of this command can be piped to a shell to be executed by appending `| sh`.
Then to generate manifests with `internal-registry.com/organization`, use the `withImageRepository` mixin:
[embedmd]:# (examples/internal-registry.jsonnet)
```jsonnet
local mixin = import 'kube-prometheus/addons/config-mixins.libsonnet';
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
} + mixin.withImageRepository('internal-registry.com/organization');
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
### NodePorts
Another mixin that may be useful for exploring the stack is to expose the UIs of Prometheus, Alertmanager and Grafana on NodePorts:
[embedmd]:# (examples/jsonnet-snippets/node-ports.jsonnet)
```jsonnet
(import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/node-ports.libsonnet')
```
### Prometheus Object Name
To give another customization example, the name of the `Prometheus` object provided by this library can be overridden:
[embedmd]:# (examples/prometheus-name-override.jsonnet)
```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
prometheus+: {
prometheus+: {
metadata+: {
name: 'my-name',
},
},
},
}).prometheus.prometheus
```
### node-exporter DaemonSet namespace
Standard Kubernetes manifests are all written using [ksonnet-lib](https://github.com/ksonnet/ksonnet-lib/), so they can be modified with the mixins supplied by ksonnet-lib. For example to override the namespace of the node-exporter DaemonSet:
[embedmd]:# (examples/ksonnet-example.jsonnet)
```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
nodeExporter+: {
daemonset+: {
metadata+: {
namespace: 'my-custom-namespace',
},
},
},
}).nodeExporter.daemonset
```
### Alertmanager configuration
The Alertmanager configuration is located in the `values.alertmanager.config` configuration field. In order to set a custom Alertmanager configuration simply set this field.
[embedmd]:# (examples/alertmanager-config.jsonnet)
```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
values+:: {
alertmanager+: {
config: |||
global:
resolve_timeout: 10m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- match:
alertname: Watchdog
receiver: 'null'
receivers:
- name: 'null'
|||,
},
},
}).alertmanager.secret
```
In the above example the configuration has been inlined, but can just as well be an external file imported in jsonnet via the `importstr` function.
[embedmd]:# (examples/alertmanager-config-external.jsonnet)
```jsonnet
((import 'kube-prometheus/main.libsonnet') + {
values+:: {
alertmanager+: {
config: importstr 'alertmanager-config.yaml',
},
},
}).alertmanager.secret
```
### Adding additional namespaces to monitor
In order to monitor additional namespaces, the Prometheus server requires the appropriate `Role` and `RoleBinding` to be able to discover targets from that namespace. By default the Prometheus server is limited to the three namespaces it requires: default, kube-system and the namespace you configure the stack to run in via `$.values.namespace`. This is specified in `$.values.prometheus.namespaces`; to add new namespaces to monitor, simply append the additional namespaces:
[embedmd]:# (examples/additional-namespaces.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespaces+: ['my-namespace', 'my-second-namespace'],
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
#### Defining the ServiceMonitor for each additional Namespace
In order for Prometheus to be able to discover and scrape services inside the additional namespaces specified in the previous step, you need to define a ServiceMonitor resource.
> Typically it is up to the users of a namespace to provision the ServiceMonitor resource, but in case you want to generate it with the same tooling as the rest of the cluster monitoring infrastructure, this is a guide on how to achieve this.
You can define ServiceMonitor resources in your `jsonnet` spec. See the snippet below:
[embedmd]:# (examples/additional-namespaces-servicemonitor.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+:: {
namespaces+: ['my-namespace', 'my-second-namespace'],
},
},
exampleApplication: {
serviceMonitorMyNamespace: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'my-servicemonitor',
namespace: 'my-namespace',
},
spec: {
jobLabel: 'app',
endpoints: [
{
port: 'http-metrics',
},
],
selector: {
matchLabels: {
app: 'myapp',
},
},
},
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['example-application-' + name]: kp.exampleApplication[name] for name in std.objectFields(kp.exampleApplication) }
```
> NOTE: make sure your service resources have the right labels (e.g. `'app': 'myapp'`) applied. Prometheus uses Kubernetes labels to discover resources inside the namespaces.
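For illustration (the `myapp` names are hypothetical and match the snippet above), the scraped Service needs both the label the ServiceMonitor selects on and a port whose name matches the endpoint:

```jsonnet
// Hypothetical Service for the ServiceMonitor above: `app: 'myapp'` is
// matched by selector.matchLabels, and the port name must equal the
// endpoint's `port` ('http-metrics').
{
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: 'myapp',
    namespace: 'my-namespace',
    labels: { app: 'myapp' },
  },
  spec: {
    selector: { app: 'myapp' },
    ports: [
      { name: 'http-metrics', port: 8080, targetPort: 8080 },
    ],
  },
}
```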
### Monitoring all namespaces
In case you want to monitor all namespaces in a cluster, you can add the following mixin. Also, make sure to empty the namespaces defined in prometheus so that roleBindings are not created against them.
[embedmd]:# (examples/all-namespaces.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/all-namespaces.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespaces: [],
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
> NOTE: This configuration can potentially make your cluster insecure, especially in a multi-tenant cluster, because it gives Prometheus visibility over the whole cluster, which might not be expected when certain namespaces are locked down for security reasons.
Proceed with [creating ServiceMonitors for the services in the namespaces](#defining-the-servicemonitor-for-each-additional-namespace) you actually want to monitor.
### Static etcd configuration
In order to configure a static etcd cluster to scrape there is a simple [kube-prometheus-static-etcd.libsonnet](jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet) mixin prepared - see [etcd.jsonnet](examples/etcd.jsonnet) for an example of how to use that mixin, and [Monitoring external etcd](docs/monitoring-external-etcd.md) for more information.
> Note that monitoring etcd in minikube is currently not possible because of how etcd is setup. (minikube's etcd binds to 127.0.0.1:2379 only, and within host networking namespace.)
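A rough sketch of wiring that mixin in is shown below; the import path, field names, IP, and certificate files are assumptions based on the addon's examples — check [etcd.jsonnet](examples/etcd.jsonnet) for the exact fields supported by your branch:

```jsonnet
// Sketch only: the addon path and field names may differ between
// releases; the IP, certificates, and serverName are placeholders.
local kp = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/static-etcd.libsonnet') + {
    values+:: {
      common+: { namespace: 'monitoring' },
      etcd+: {
        ips: ['192.168.1.10'],
        clientCA: importstr 'etcd-client-ca.crt',
        clientKey: importstr 'etcd-client.key',
        clientCert: importstr 'etcd-client.crt',
        serverName: 'etcd.example.local',
      },
    },
  };

{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
```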
### Pod Anti-Affinity
To prevent `Prometheus` and `Alertmanager` instances from being deployed onto the same node when
possible, one can include the [kube-prometheus-anti-affinity.libsonnet](jsonnet/kube-prometheus/addons/anti-affinity.libsonnet) mixin:
[embedmd]:# (examples/anti-affinity.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/anti-affinity.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
### Stripping container resource limits
Sometimes in small clusters, the CPU/memory limits can get high enough for alerts to be fired continuously. To prevent this, one can strip off the predefined limits.
To do that, one can import the following mixin:
[embedmd]:# (examples/strip-limits.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/strip-limits.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
### Customizing Prometheus alerting/recording rules and Grafana dashboards
See [developing Prometheus rules and Grafana dashboards](docs/developing-prometheus-rules-and-grafana-dashboards.md) guide.
### Exposing Prometheus/Alermanager/Grafana via Ingress
See [exposing Prometheus/Alertmanager/Grafana](docs/exposing-prometheus-alertmanager-grafana-ingress.md) guide.
### Setting up a blackbox exporter
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
// ... all necessary mixins ...
{
values+:: {
// ... configuration for other features ...
blackboxExporter+:: {
modules+:: {
tls_connect: {
prober: 'tcp',
tcp: {
tls: true
}
}
}
}
}
};
{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
// ... other rendering blocks ...
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
```
Then describe the actual blackbox checks you want to run using `Probe` resources. Specify `blackbox-exporter.<namespace>.svc.cluster.local:9115` as the `spec.prober.url` field of the `Probe` resource.
See the [blackbox exporter guide](docs/blackbox-exporter.md) for the list of configurable options and a complete example.
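For reference, a minimal `Probe` sketch (the namespace, module, and target are placeholders tied to the hypothetical `tls_connect` module above):

```jsonnet
// Hypothetical Probe that runs the `tls_connect` module defined above
// against one static target; blackbox-exporter is assumed to live in
// the 'monitoring' namespace.
{
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'Probe',
  metadata: {
    name: 'example-tls-probe',
    namespace: 'monitoring',
  },
  spec: {
    prober: {
      url: 'blackbox-exporter.monitoring.svc.cluster.local:9115',
    },
    module: 'tls_connect',
    targets: {
      staticConfig: {
        static: ['example.com:443'],
      },
    },
  },
}
```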
## Minikube Example
@@ -395,11 +758,11 @@ resources. One driver for more resource needs, is a high number of
namespaces. There may be others.
kube-state-metrics resource allocation is managed by
[addon-resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer/nanny)
[addon-resizer](https://github.com/kubernetes/autoscaler/tree/main/addon-resizer/nanny)
You can control its parameters by setting variables in the
config. They default to:
```jsonnet
``` jsonnet
kubeStateMetrics+:: {
baseCPU: '100m',
cpuPerNode: '2m',
@@ -409,12 +772,24 @@ config. They default to:
```
### Error retrieving kube-proxy metrics
By default, kubeadm will configure kube-proxy to listen on 127.0.0.1 for metrics. Because of this, Prometheus is not able to scrape these metrics. This has to be changed to 0.0.0.0 in one of the following two places:
1. Before cluster initialization, the config file passed to `kubeadm init` should contain a KubeProxyConfiguration manifest with the field `metricsBindAddress` set to `0.0.0.0:10249`
2. If the k8s cluster is already up and running, modify the configmap kube-proxy in the namespace kube-system and set the `metricsBindAddress` field. After this, the kube-proxy daemonset has to be restarted with
`kubectl -n kube-system rollout restart daemonset kube-proxy`
## Contributing
All `.yaml` files in the `/manifests` folder are generated via
[Jsonnet](https://jsonnet.org/). Contributing changes will most likely include
the following process:
1. Make your changes in the respective `*.jsonnet` file.
2. Commit your changes (This is currently necessary due to our vendoring
process. This is likely to change in the future).
3. Update the pinned kube-prometheus dependency in `jsonnetfile.lock.json`: `jb update`
3. Generate dependent `*.yaml` files: `make generate`
4. Commit the generated changes.
## License


@@ -1,120 +0,0 @@
# Release schedule
Kube-prometheus has a somewhat predictable release schedule: releases were
historically cut in sync with OpenShift releases as per downstream needs. So
far there hasn't been any problem with this schedule since it is also in sync
with Kubernetes releases. So for every new Kubernetes release, there is a new
release of kube-prometheus, although it tends to happen later.
# How to cut a new release
> This guide is strongly based on the [prometheus-operator release
> instructions](https://github.com/prometheus-operator/prometheus-operator/blob/master/RELEASE.md).
## Branch management and versioning strategy
We use [Semantic Versioning](http://semver.org/).
We maintain a separate branch for each minor release, named
`release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`.
The usual flow is to merge new features and changes into the master branch and
to merge bug fixes into the latest release branch. Bug fixes are then merged
into master from the latest release branch. The master branch should always
contain all commits from the latest release branch.
If a bug fix got accidentally merged into master, cherry-pick commits have to be
created in the latest release branch, which then has to be merged back into
master. Try to avoid that situation.
Maintaining the release branches for older minor releases happens on a best
effort basis.
## Cut a release of kubernetes-mixins
kube-prometheus and kubernetes-mixins releases are tied, so before cutting the
release of kube-prometheus we should make sure that the same release of
kubernetes-mixins exists.
## Update components version
Every release of kube-prometheus should include the latest versions of each
component. Updating them is automated via a CI job that can be triggered
manually from this
[workflow](https://github.com/prometheus-operator/kube-prometheus/actions/workflows/versions.yaml).
Once the workflow is completed, the prometheus-operator bot will create some
PRs. You should merge the one prefixed by `[bot][main]` if created before
proceeding. If the bot didn't create the PR, it is either because the workflow
failed or because the main branch was already up-to-date.
## Update Kubernetes supported versions
The main branch of kube-prometheus should support the last 2 versions of
Kubernetes. We need to make sure that the CI on the main branch is testing the
kube-prometheus configuration against both of these versions by updating the [CI
workflow](.github/workflows/ci.yaml) to include the latest kind version and the
two latest image versions that are attached to the kind release. Once that is
done, the [compatibility matrix](README.md#kubernetes-compatibility-matrix) in
the README should also be updated to reflect the CI changes.
## Create pull request to cut the release
### Pin Jsonnet dependencies
Pin jsonnet dependencies in
[jsonnetfile.json](jsonnet/kube-prometheus/jsonnetfile.json). Each dependency
should be pinned to the latest release branch or if it doesn't have one, pinned
to the latest commit.
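As an illustration only, a pinned dependency entry in `jsonnetfile.json` might look like the sketch below (the remote, subdir, and branch name are placeholders; the real file lists several dependencies):

```jsonnet
{
  "version": 1,
  "dependencies": [
    {
      "source": {
        "git": {
          "remote": "https://github.com/prometheus-operator/prometheus-operator.git",
          "subdir": "jsonnet/prometheus-operator"
        }
      },
      "version": "release-0.52"
    }
  ],
  "legacyImports": true
}
```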
### Start with a fresh environment
```bash
make clean
```
### Update Jsonnet dependencies
```bash
make update
```
### Generate manifests
```bash
make generate
```
### Update the compatibility matrix
Update the [compatibility matrix](README.md#kubernetes-compatibility-matrix) in
the README, by adding the new release based on the `main` branch compatibility
and removing the oldest release branch to only keep the latest 5 branches in the
matrix.
### Update changelog
Iterate over the PRs that were merged between the latest release of kube-prometheus and the HEAD and add the changelog entries to the [CHANGELOG](CHANGELOG.md).
## Create release branch
Once the PR cutting the release is merged, pull the changes, create a new
release branch named `release-x.y` based on the latest changes and push it to
the upstream repository.
## Create follow-up pull request
### Unpin Jsonnet dependencies
Revert previous changes made when pinning the jsonnet dependencies since we want
the main branch to be in sync with the latest changes of its dependencies.
### Update CI workflow
Update the [versions workflow](.github/workflows/versions.yaml) to include the latest release branch and remove the oldest one to reflect the list of supported releases.
### Update Kubernetes versions used by kubeconform
Update the versions of Kubernetes used when validating manifests with
kubeconform in the [Makefile](Makefile) to align with the compatibility
matrix.


@@ -33,8 +33,8 @@ This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer listed in
https://github.com/prometheus-operator/prometheus-operator/blob/master/MAINTAINERS.md.
reported by contacting a project maintainer listed in
https://github.com/prometheus-operator/prometheus-operator/blob/master/MAINTAINERS.md.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at

View File

@@ -1,33 +0,0 @@
# Ephemeral developer workspaces
Aiming to provide a better developer experience when making contributions to kube-prometheus, whether by actively developing new features/bug fixes or by reviewing pull requests, we want to provide ephemeral developer workspaces with everything already configured (as far as tooling makes it possible).
Those developer workspaces should provide a brand new kubernetes cluster, where kube-prometheus can be easily deployed and the contributor can easily see the impact of the changes that a pull request proposes.
Today there are 2 providers on the market:
* [Github Codespaces](https://github.com/features/codespaces)
* [Gitpod](https://www.gitpod.io/)
## Codespaces
Unfortunately, Codespaces is not available for everyone. If you are fortunate enough to have access to it, you can open a new workspace from a specific branch, or even from Pull Requests.
![image](https://user-images.githubusercontent.com/24193764/135522435-44b177b4-00d4-4863-b45b-2db47c8c70d0.png)
![image](https://user-images.githubusercontent.com/24193764/135522560-c64968ab-3b4e-4639-893a-c4d0a14421aa.png)
After your workspace starts, you can deploy kube-prometheus inside a kind cluster by running `make deploy`.
If you are reviewing a PR, you'll have a fully-functional kubernetes cluster, generating real monitoring data that can be used to verify that the proposed changes work as described.
If you are working on new features/bug fixes, you can regenerate kube-prometheus's YAML manifests with `make generate` and deploy it again with `make deploy`.
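For example, the typical inner loop inside the workspace is:
```bash
# Regenerate the YAML manifests from jsonnet and redeploy them to the cluster.
make generate && make deploy
```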
## Gitpod
Gitpod is already available to everyone to use for free. It can also run commands that we specify in the `.gitpod.yml` file located in the root directory of the git repository, so even the cluster creation can be fully automated.
You can use the same workflow as mentioned in the [Codespaces](#codespaces) section; however, Gitpod doesn't have native support for any kubernetes distribution. The workaround is to create a full QEMU Virtual Machine and deploy [k3s](https://github.com/k3s-io/k3s) inside this VM. Don't worry, this whole process is already fully automated, but due to the workaround the whole workspace may be very slow.
To open up a workspace with Gitpod, you can install the [Google Chrome extension](https://www.gitpod.io/docs/browser-extension/), which adds a new button to the GitHub UI that can be used on PRs or from the main page. Alternatively, open `http://gitpod.io/#https://github.com/prometheus-operator/kube-prometheus/pull/<Pull Request Number>` or just `http://gitpod.io/#https://github.com/prometheus-operator/kube-prometheus` directly in the browser.
![image](https://user-images.githubusercontent.com/24193764/135534546-4f6bf0e5-57cd-4e35-ad80-88bd47d64276.png)

View File

@@ -1,20 +0,0 @@
#!/bin/bash
kind=$(which kind)
if [[ $? != 0 ]]; then
    echo 'kind not available in $PATH, installing latest kind'
    # Install latest kind into the current directory
    curl -s https://api.github.com/repos/kubernetes-sigs/kind/releases/latest \
        | grep "browser_download_url.*kind-linux-amd64" \
        | cut -d : -f 2,3 \
        | tr -d \" \
        | wget -qi -
    mv kind-linux-amd64 kind && chmod +x kind
    kind="$PWD/kind"
fi
cluster_created=$("$kind" get clusters 2>&1)
if [[ "$cluster_created" == "No kind clusters found." ]]; then
    "$kind" create cluster
else
    echo "Cluster '$cluster_created' already present"
fi

View File

@@ -4,9 +4,9 @@ AWS EKS uses [CNI](https://github.com/aws/amazon-vpc-cni-k8s) networking plugin
One fatal issue that can occur is that you run out of IP addresses in your EKS cluster. (This generally happens due to misconfigurations where pods keep getting scheduled.)
You can monitor the `awscni` using kube-prometheus with:
```jsonnet mdox-exec="cat examples/eks-cni-example.jsonnet"
You can monitor the `awscni` using kube-prometheus with:
[embedmd]:# (../examples/eks-cni-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {

View File

@@ -5,7 +5,6 @@ authentication. Until it does, Prometheus must use HTTP (not HTTPS)
for scraping.
You can configure this behavior through kube-prometheus with:
```
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
(import 'kube-prometheus/kube-prometheus-insecure-kubelet.libsonnet') +

View File

@@ -1,16 +1,16 @@
---
title: "Blackbox Exporter"
description: "Generated API docs for the Prometheus Operator"
lead: "This Document documents the types introduced by the Prometheus Operator to be consumed by users."
date: 2021-03-08T08:49:31+00:00
lastmod: 2021-03-08T08:49:31+00:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 630
toc: true
title: Blackbox Exporter
menu:
docs:
parent: kube
lead: This Document documents the types introduced by the Prometheus Operator to be consumed by users.
lastmod: "2021-03-08T08:49:31+00:00"
images: []
draft: false
description: Generated API docs for the Prometheus Operator
date: "2021-03-08T08:49:31+00:00"
---
# Setting up a blackbox exporter
@@ -21,7 +21,6 @@ The `prometheus-operator` defines a `Probe` resource type that can be used to de
1. Override blackbox-related configuration parameters as needed.
2. Add the following to the list of renderers to render the blackbox exporter manifests:
```
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) }
```

View File

@@ -4,7 +4,7 @@ For bugs, you can use the GitHub [issue tracker](https://github.com/prometheus-o
For questions, you can use the GitHub [discussions forum](https://github.com/prometheus-operator/kube-prometheus/discussions).
Many of the `kube-prometheus` project's contributors and users can also be found on the #prometheus-operator channel of the [Kubernetes Slack](https://slack.k8s.io/).
Many of the `kube-prometheus` project's contributors and users can also be found on the #prometheus-operator channel of the [Kubernetes Slack][Kubernetes Slack].
`kube-prometheus` is the aggregation of many projects that all have different
channels to reach out for help and support. This community strives at
@@ -18,7 +18,7 @@ if applicable.
For documentation, check the project's [documentation directory](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation).
For questions, use the #prometheus-operator channel on the [Kubernetes Slack](https://slack.k8s.io/).
For questions, use the #prometheus-operator channel on the [Kubernetes Slack][Kubernetes Slack].
For bugs, use the GitHub [issue tracker](https://github.com/prometheus-operator/prometheus-operator/issues/new/choose).
@@ -26,19 +26,19 @@ For bugs, use the GitHub [issue tracker](https://github.com/prometheus-operator/
For documentation, check the Prometheus [online docs](https://prometheus.io/docs/). There is a
[section](https://prometheus.io/docs/introduction/media/) with links to blog
posts, recorded talks and presentations. This [repository](https://github.com/roaldnefs/awesome-prometheus)
posts, recorded talks and presentations. This [repository](https://github.com/roaldnefs/awesome-prometheus)
(not affiliated to the Prometheus project) has also a list of curated resources
related to the Prometheus ecosystem.
For questions, see the Prometheus [community page](https://prometheus.io/community/) for the various channels.
There is also a #prometheus channel on the [CNCF Slack](https://slack.cncf.io/).
There is also a #prometheus channel on the [CNCF Slack][CNCF Slack].
## kube-state-metrics
For documentation, see the project's [docs directory](https://github.com/kubernetes/kube-state-metrics/tree/master/docs).
For questions, use the #kube-state-metrics channel on the [Kubernetes Slack](https://slack.k8s.io/).
For questions, use the #kube-state-metrics channel on the [Kubernetes Slack][Kubernetes Slack].
For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kube-state-metrics/issues/new/choose).
@@ -46,7 +46,7 @@ For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kube-stat
For documentation, check the [Kubernetes docs](https://kubernetes.io/docs/home/).
For questions, use the [community forums](https://discuss.kubernetes.io/) and the [Kubernetes Slack](https://slack.k8s.io/). Check also the [community page](https://kubernetes.io/community/#discuss).
For questions, use the [community forums](https://discuss.kubernetes.io/) and the [Kubernetes Slack][Kubernetes Slack]. Check also the [community page](https://kubernetes.io/community/#discuss).
For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kubernetes/issues/new/choose).
@@ -54,7 +54,7 @@ For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kubernete
For documentation, check the project's [README](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/README.md).
For questions, use the #sig-instrumentation channel on the [Kubernetes Slack](https://slack.k8s.io/).
For questions, use the #sig-instrumentation channel on the [Kubernetes Slack][Kubernetes Slack].
For bugs, use the GitHub [issue tracker](https://github.com/DirectXMan12/k8s-prometheus-adapter/issues/new).
@@ -70,7 +70,7 @@ For bugs, use the GitHub [issue tracker](https://github.com/grafana/grafana/issu
For documentation, check the project's [README](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/README.md).
For questions, use #monitoring-mixins channel on the [Kubernetes Slack](https://slack.k8s.io/).
For questions, use #monitoring-mixins channel on the [Kubernetes Slack][Kubernetes Slack].
For bugs, use the GitHub [issue tracker](https://github.com/kubernetes-monitoring/kubernetes-mixin/issues/new).
@@ -79,3 +79,6 @@ For bugs, use the GitHub [issue tracker](https://github.com/kubernetes-monitorin
For documentation, check the [Jsonnet](https://jsonnet.org/) website.
For questions, use the [mailing list](https://groups.google.com/forum/#!forum/jsonnet).
[Kubernetes Slack]: https://slack.k8s.io/
[CNCF Slack]: https://slack.cncf.io/

View File

@@ -1,40 +0,0 @@
### Alertmanager configuration
The Alertmanager configuration is located in the `values.alertmanager.config` configuration field. In order to set a custom Alertmanager configuration simply set this field.
```jsonnet mdox-exec="cat examples/alertmanager-config.jsonnet"
((import 'kube-prometheus/main.libsonnet') + {
values+:: {
alertmanager+: {
config: |||
global:
resolve_timeout: 10m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- match:
alertname: Watchdog
receiver: 'null'
receivers:
- name: 'null'
|||,
},
},
}).alertmanager.secret
```
In the above example the configuration has been inlined, but can just as well be an external file imported in jsonnet via the `importstr` function.
```jsonnet mdox-exec="cat examples/alertmanager-config-external.jsonnet"
((import 'kube-prometheus/main.libsonnet') + {
values+:: {
alertmanager+: {
config: importstr 'alertmanager-config.yaml',
},
},
}).alertmanager.secret
```

View File

@@ -1,56 +0,0 @@
### Components' name and namespace overrides
It is possible to override the namespace where kube-prometheus is going to be deployed, like the example below:
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
```
If preferred, it can be changed individually by component. It is also possible to change the name of the Prometheus and Alertmanager Custom Resources, as shown below:
```jsonnet mdox-exec="cat examples/name-namespace-overrides.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespace: 'foo',
name: 'bar',
},
alertmanager+: {
namespace: 'bar',
name: 'foo',
},
},
};
{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
// Add the restricted psp to setup
{
['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
```

View File

@@ -1,81 +0,0 @@
### Monitoring additional namespaces
In order to monitor additional namespaces, the Prometheus server requires the appropriate `Role` and `RoleBinding` to be able to discover targets from that namespace. By default the Prometheus server is limited to the three namespaces it requires: default, kube-system and the namespace you configure the stack to run in via `$.values.namespace`. This is specified in `$.values.prometheus.namespaces`; to add new namespaces to monitor, simply append the additional namespaces:
```jsonnet mdox-exec="cat examples/additional-namespaces.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespaces+: ['my-namespace', 'my-second-namespace'],
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
#### Defining the ServiceMonitor for each additional Namespace
In order for Prometheus to be able to discover and scrape services inside the additional namespaces specified in the previous step, you need to define a ServiceMonitor resource.
> Typically it is up to the users of a namespace to provision the ServiceMonitor resource, but in case you want to generate it with the same tooling as the rest of the cluster monitoring infrastructure, this is a guide on how to achieve this.
You can define ServiceMonitor resources in your `jsonnet` spec. See the snippet below:
```jsonnet mdox-exec="cat examples/additional-namespaces-servicemonitor.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+:: {
namespaces+: ['my-namespace', 'my-second-namespace'],
},
},
exampleApplication: {
serviceMonitorMyNamespace: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'my-servicemonitor',
namespace: 'my-namespace',
},
spec: {
jobLabel: 'app',
endpoints: [
{
port: 'http-metrics',
},
],
selector: {
matchLabels: {
'app.kubernetes.io/name': 'myapp',
},
},
},
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['example-application-' + name]: kp.exampleApplication[name] for name in std.objectFields(kp.exampleApplication) }
```
> NOTE: make sure your service resources have the right labels (e.g. `'app.kubernetes.io/name': 'myapp'`) applied. Prometheus uses Kubernetes labels to discover resources inside the namespaces.

View File

@@ -1,29 +0,0 @@
### Monitoring all namespaces
In case you want to monitor all namespaces in a cluster, you can add the following mixin. Also, make sure to empty the namespaces defined in prometheus so that roleBindings are not created against them.
```jsonnet mdox-exec="cat examples/all-namespaces.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/all-namespaces.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespaces: [],
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
> NOTE: This configuration can potentially make your cluster insecure especially in a multi-tenant cluster. This is because this gives Prometheus visibility over the whole cluster which might not be expected in a scenario when certain namespaces are locked down for security reasons.
Proceed with [creating ServiceMonitors for the services in the namespaces](monitoring-additional-namespaces.md#defining-the-servicemonitor-for-each-additional-namespace) you actually want to monitor.

View File

@@ -1,8 +0,0 @@
### NodePorts
Another mixin that may be useful for exploring the stack is to expose the UIs of Prometheus, Alertmanager and Grafana on NodePorts:
```jsonnet mdox-exec="cat examples/jsonnet-snippets/node-ports.jsonnet"
(import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/node-ports.libsonnet')
```

View File

@@ -1,25 +0,0 @@
### Running kube-prometheus on specific platforms
A common example is that not all Kubernetes clusters are created exactly the same way, meaning the configuration to monitor them may be slightly different. For the following clusters there are mixins available to easily configure them:
* aws
* bootkube
* eks
* gke
* kops
* kops_coredns
* kubeadm
* kubespray
These mixins are selectable via the `platform` field of kubePrometheus:
```jsonnet mdox-exec="cat examples/jsonnet-snippets/platform.jsonnet"
(import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
platform: 'example-platform',
},
},
}
```

View File

@@ -1,23 +0,0 @@
### Pod Anti-Affinity
To prevent `Prometheus` and `Alertmanager` instances from being deployed onto the same node when
possible, one can include the [kube-prometheus-anti-affinity.libsonnet](../../jsonnet/kube-prometheus/addons/anti-affinity.libsonnet) mixin:
```jsonnet mdox-exec="cat examples/anti-affinity.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/anti-affinity.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

View File

@@ -1,66 +0,0 @@
### Static etcd configuration
In order to configure a static etcd cluster to scrape there is a simple [static-etcd.libsonnet](../../jsonnet/kube-prometheus/addons/static-etcd.libsonnet) mixin prepared.
An example of how to use it can be seen below:
```jsonnet mdox-exec="cat examples/etcd.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/static-etcd.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
etcd+: {
// Configure this to be the IP(s) to scrape - i.e. your etcd node(s) (use commas to separate multiple values).
ips: ['127.0.0.1'],
// Reference info:
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec (has endpoints)
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint (has tlsConfig)
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig (has: caFile, certFile, keyFile, serverName, & insecureSkipVerify)
// Set these three variables to the fully qualified directory path on your work machine to the certificate files that are valid to scrape etcd metrics with (check the apiserver container).
// Most likely these certificates are generated somewhere in an infrastructure repository, so using the jsonnet `importstr` function can
// be useful here. (Kube-aws stores these three files inside the credential folder.)
// All the sensitive information on the certificates will end up in a Kubernetes Secret.
clientCA: importstr 'etcd-client-ca.crt',
clientKey: importstr 'etcd-client.key',
clientCert: importstr 'etcd-client.crt',
// Note that you should specify a value EITHER for 'serverName' OR for 'insecureSkipVerify'. (Don't specify a value for both of them, and don't specify a value for neither of them.)
// * Specifying serverName: Ideally you should provide a valid value for serverName (and then insecureSkipVerify should be left as false - so that serverName gets used).
// * Specifying insecureSkipVerify: insecureSkipVerify is only to be used (i.e. set to true) if you cannot (based on how your etcd certificates were created) use a Subject Alternative Name.
// * If you specify a value:
// ** for both of these variables: When 'insecureSkipVerify: true' is specified, then also specifying a value for serverName won't hurt anything but it will be ignored.
// ** for neither of these variables: then you'll get authentication errors on the prom '/targets' page with your etcd targets.
// A valid name (DNS or Subject Alternative Name) that the client (i.e. prometheus) will use to verify the etcd TLS certificate.
// * Note that doing `nslookup etcd.kube-system.svc.cluster.local` (on a pod in a K8s cluster where kube-prometheus has been installed) shows that kube-prometheus sets up this hostname.
// * `openssl x509 -noout -text -in etcd-client.pem` will print the Subject Alternative Names.
serverName: 'etcd.kube-system.svc.cluster.local',
// When insecureSkipVerify isn't specified, the default value is "false".
//insecureSkipVerify: true,
// In case you have generated the etcd certificate with kube-aws:
// * If you only have one etcd node, you can use the value from 'etcd.internalDomainName' (specified in your kube-aws cluster.yaml) as the value for 'serverName'.
// * But if you have multiple etcd nodes, you will need to use 'insecureSkipVerify: true' (if using default certificate generators method), as the valid certificate domain
// will be different for each etcd node. (kube-aws default certificates are not valid against the IP - they were created for the DNS.)
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
If you'd like to monitor an etcd instance that lives outside the cluster, see [Monitoring external etcd](../monitoring-external-etcd.md) for more information.
> Note that monitoring etcd in minikube is currently not possible because of how etcd is set up. (minikube's etcd binds to 127.0.0.1:2379 only, and within the host networking namespace.)

View File

@@ -1,23 +0,0 @@
### Stripping container resource limits
Sometimes in small clusters, the CPU/memory limits can get high enough for alerts to be fired continuously. To prevent this, one can strip off the predefined limits.
To do that, one can import the following mixin
```jsonnet mdox-exec="cat examples/strip-limits.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/strip-limits.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
};
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

View File

@@ -1,39 +0,0 @@
### Internal Registry
Some Kubernetes installations source all their images from an internal registry. kube-prometheus supports this use case and helps the user synchronize every image it uses to the internal registry and generate manifests pointing at the internal registry.
To produce the `docker pull/tag/push` commands that will synchronize upstream images to `internal-registry.com/organization` (after having run the `jb` command to populate the vendor directory):
```shell
$ jsonnet -J vendor -S --tla-str repository=internal-registry.com/organization sync-to-internal-registry.jsonnet
$ docker pull k8s.gcr.io/addon-resizer:1.8.4
$ docker tag k8s.gcr.io/addon-resizer:1.8.4 internal-registry.com/organization/addon-resizer:1.8.4
$ docker push internal-registry.com/organization/addon-resizer:1.8.4
$ docker pull quay.io/prometheus/alertmanager:v0.16.2
$ docker tag quay.io/prometheus/alertmanager:v0.16.2 internal-registry.com/organization/alertmanager:v0.16.2
$ docker push internal-registry.com/organization/alertmanager:v0.16.2
...
```
The output of this command can be piped to a shell to be executed by appending `| sh`.
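For example, to execute the generated commands directly:
```shell
$ jsonnet -J vendor -S --tla-str repository=internal-registry.com/organization sync-to-internal-registry.jsonnet | sh
```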
Then to generate manifests with `internal-registry.com/organization`, use the `withImageRepository` mixin:
```jsonnet mdox-exec="cat examples/internal-registry.jsonnet"
local mixin = import 'kube-prometheus/addons/config-mixins.libsonnet';
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
},
} + mixin.withImageRepository('internal-registry.com/organization');
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

View File

@@ -1,15 +1,15 @@
---
title: "Deploy to kind"
description: "Deploy kube-prometheus to Kubernets kind."
lead: "Deploy kube-prometheus to Kubernets kind."
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 500
toc: true
title: Deploy to kind
menu:
docs:
parent: kube
lead: Deploy kube-prometheus to Kubernetes kind.
images: []
draft: false
description: Deploy kube-prometheus to Kubernetes kind.
date: "2021-03-08T23:04:32+01:00"
---
---

View File

@@ -1,15 +1,15 @@
---
title: "Prometheus Rules and Grafana Dashboards"
description: "Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus"
lead: "Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus"
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 650
toc: true
title: Prometheus Rules and Grafana Dashboards
menu:
docs:
parent: kube
lead: Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus
images: []
draft: false
description: Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus
date: "2021-03-08T23:04:32+01:00"
---
`kube-prometheus` ships with a set of default [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [Grafana](http://grafana.com/) dashboards. At some point one might like to extend them, the purpose of this document is to explain how to do this.
@@ -18,9 +18,10 @@ All manifests of kube-prometheus are generated using [jsonnet](https://jsonnet.o
For both the Prometheus rules and the Grafana dashboards Kubernetes `ConfigMap`s are generated within kube-prometheus. In order to add additional rules and dashboards simply merge them onto the existing json objects. This document illustrates examples for rules as well as dashboards.
As a basis, all examples in this guide are based on the base example of the kube-prometheus [readme](../../README.md):
As a basis, all examples in this guide are based on the base example of the kube-prometheus [readme](../README.md):
```jsonnet mdox-exec="cat example.jsonnet"
[embedmd]:# (../example.jsonnet)
```jsonnet
local kp =
(import 'kube-prometheus/main.libsonnet') +
// Uncomment the following imports to enable its patches
@@ -67,7 +68,8 @@ The format is exactly the Prometheus format, so there should be no changes neces
> Note that alerts can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.
```jsonnet mdox-exec="cat examples/prometheus-additional-alert-rule-example.jsonnet"
[embedmd]:# (../examples/prometheus-additional-alert-rule-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
@@ -122,7 +124,8 @@ In order to add a recording rule, simply do the same with the `prometheusRules`
> Note that rules can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.
```jsonnet mdox-exec="cat examples/prometheus-additional-recording-rule-example.jsonnet"
[embedmd]:# (../examples/prometheus-additional-recording-rule-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
@@ -181,7 +184,8 @@ cat existingrule.yaml | gojsontoyaml -yamltojson > existingrule.json
Then import it in jsonnet:
```jsonnet mdox-exec="cat examples/prometheus-additional-rendered-rule-example.jsonnet"
[embedmd]:# (../examples/prometheus-additional-rendered-rule-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
@@ -213,17 +217,14 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['example-application-' + name]: kp.exampleApplication[name] for name in std.objectFields(kp.exampleApplication) }
```
### Changing default rules
Along with adding additional rules, we give the user the option to filter or adjust the existing rules imported by `kube-prometheus/main.libsonnet`. The recording rules can be found in [kube-prometheus/components/mixin/rules](../../jsonnet/kube-prometheus/components/mixin/rules) and [kubernetes-mixin/rules](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules) while the alerting rules can be found in [kube-prometheus/components/mixin/alerts](../../jsonnet/kube-prometheus/components/mixin/alerts) and [kubernetes-mixin/alerts](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/alerts).
Along with adding additional rules, we give the user the option to filter or adjust the existing rules imported by `kube-prometheus/main.libsonnet`. The recording rules can be found in [kube-prometheus/components/mixin/rules](../jsonnet/kube-prometheus/components/mixin/rules) and [kubernetes-mixin/rules](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules) while the alerting rules can be found in [kube-prometheus/components/mixin/alerts](../jsonnet/kube-prometheus/components/mixin/alerts) and [kubernetes-mixin/alerts](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/alerts).
Knowing which rules to change, the user can now use functions from the [Jsonnet standard library](https://jsonnet.org/ref/stdlib.html) to make these changes. Below are examples of both a filter and an adjustment being made to the default rules. These changes can be assigned to a local variable and then added to the `local kp` object as seen in the examples above.
#### Filter
Here the alert `KubeStatefulSetReplicasMismatch` is being filtered out of the group `kubernetes-apps`. The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet). You first need to find out in which component the rule is defined (here it is kubernetesControlPlane).
```jsonnet
local filter = {
kubernetesControlPlane+: {
@@ -250,9 +251,7 @@ local filter = {
```
#### Adjustment
Here the expression for another alert in the same component is updated from its previous value. The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet).
```jsonnet
local update = {
kubernetesControlPlane+: {
@@ -284,7 +283,6 @@ local update = {
```
Using the example from above about adding in pre-rendered rules, the new local variables can be added in as follows:
```jsonnet
local add = {
exampleApplication:: {
@@ -329,7 +327,6 @@ local kp = (import 'kube-prometheus/main.libsonnet') +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['exampleApplication-' + name]: kp.exampleApplication[name] for name in std.objectFields(kp.exampleApplication) }
```
## Dashboards
Dashboards can either be added using jsonnet or simply a pre-rendered json dashboard.
@@ -340,7 +337,8 @@ We recommend using the [grafonnet](https://github.com/grafana/grafonnet-lib/) li
> Note that dashboards can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.
```jsonnet mdox-exec="cat examples/grafana-additional-jsonnet-dashboard-example.jsonnet"
[embedmd]:# (../examples/grafana-additional-jsonnet-dashboard-example.jsonnet)
```jsonnet
local grafana = import 'grafonnet/grafana.libsonnet';
local dashboard = grafana.dashboard;
local row = grafana.row;
@@ -394,9 +392,10 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
### Pre-rendered Grafana dashboards
As jsonnet is a superset of json, the jsonnet `import` function can be used to include Grafana dashboard json blobs. In this example we are importing a [provided example dashboard](../../examples/example-grafana-dashboard.json).
As jsonnet is a superset of json, the jsonnet `import` function can be used to include Grafana dashboard json blobs. In this example we are importing a [provided example dashboard](../examples/example-grafana-dashboard.json).
```jsonnet mdox-exec="cat examples/grafana-additional-rendered-dashboard-example.jsonnet"
[embedmd]:# (../examples/grafana-additional-rendered-dashboard-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+:: {
@@ -420,8 +419,8 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
```
In case you have lots of JSON dashboards exported from the Grafana UI, the above approach is going to take a lot of time. To improve performance, we can use the `rawDashboards` field and provide its value as a JSON string by using `importstr`:
```jsonnet mdox-exec="cat examples/grafana-additional-rendered-dashboard-example-2.jsonnet"
[embedmd]:# (../examples/grafana-additional-rendered-dashboard-example-2.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+:: {
@@ -524,7 +523,8 @@ values+:: {
Full example of including etcd mixin using method described above:
```jsonnet mdox-exec="cat examples/mixin-inclusion.jsonnet"
[embedmd]:# (../examples/mixin-inclusion.jsonnet)
```jsonnet
local addMixin = (import 'kube-prometheus/lib/mixin.libsonnet');
local etcdMixin = addMixin({
name: 'etcd',

View File

@@ -1,15 +1,15 @@
---
title: "Expose via Ingress"
description: "How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana."
lead: "How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana."
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 500
toc: true
title: Expose via Ingress
menu:
docs:
parent: kube
lead: How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana.
images: []
draft: false
description: How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana.
date: "2021-03-08T23:04:32+01:00"
---
In order to access the web interfaces via the Internet, [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) is a popular option. This guide explains how Kubernetes Ingress can be set up in order to expose the Prometheus, Alertmanager and Grafana UIs that are included in the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project.
@@ -102,9 +102,9 @@ k.core.v1.list.new([
])
```
In order to expose Alertmanager and Grafana, simply create additional fields containing an ingress object, but simply pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively, for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL. Note that the external URL for grafana is set in a different way than the external URL for Prometheus or Alertmanager. See [ingress.jsonnet](../../examples/ingress.jsonnet) for how to set the Grafana external URL.
In order to expose Alertmanager and Grafana, simply create additional fields containing an ingress object, but simply pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively, for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL. Note that the external URL for grafana is set in a different way than the external URL for Prometheus or Alertmanager. See [ingress.jsonnet](../examples/ingress.jsonnet) for how to set the Grafana external URL.
In order to render the ingress objects similarly to the other objects, proceed as demonstrated in the [main readme](../../README.md):
In order to render the ingress objects similarly to the other objects, proceed as demonstrated in the [main readme](../README.md#usage):
```
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
@@ -119,4 +119,4 @@ In order to render the ingress objects similar to the other objects use as demon
Note, that in comparison only the last line was added, the rest is identical to the original.
See [ingress.jsonnet](../../examples/ingress.jsonnet) for an example implementation.
See [ingress.jsonnet](../examples/ingress.jsonnet) for an example implementation.

View File

@@ -1,15 +1,15 @@
---
title: "Deploy to kubeadm"
description: "Deploy kube-prometheus to Kubernets kubeadm."
lead: "Deploy kube-prometheus to Kubernets kubeadm."
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 500
toc: true
title: Deploy to kubeadm
menu:
docs:
parent: kube
lead: Deploy kube-prometheus to Kubernetes kubeadm.
images: []
draft: false
description: Deploy kube-prometheus to Kubernetes kubeadm.
date: "2021-03-08T23:04:32+01:00"
---
The [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) tool is linked by Kubernetes as the official way to deploy and manage self-hosted clusters. kubeadm does a lot of heavy lifting by automatically configuring your Kubernetes cluster with some common options. This guide is intended to show you how to deploy Prometheus, Prometheus Operator and Kube Prometheus to get you started monitoring your cluster that was deployed with kubeadm.
@@ -93,6 +93,7 @@ Once you complete this guide you will monitor the following:
* kube-scheduler
* kube-controller-manager
## Getting Up and Running Fast with Kube-Prometheus
To help get started more quickly with monitoring Kubernetes clusters, [kube-prometheus](https://github.com/coreos/kube-prometheus) was created. It is a collection of manifests including dashboards and alerting rules that can easily be deployed. It utilizes the Prometheus Operator and all the manifests demonstrated in this guide.

View File

@@ -2,9 +2,9 @@
An example conversion of a legacy custom jsonnet file to release-0.8
format can be seen by viewing and comparing this
[release-0.3 jsonnet file](my.release-0.3.jsonnet) (when the github
[release-0.3 jsonnet file](./my.release-0.3.jsonnet) (when the github
repo was under `https://github.com/coreos/kube-prometheus...`)
and the corresponding [release-0.8 jsonnet file](my.release-0.8.jsonnet).
and the corresponding [release-0.8 jsonnet file](./my.release-0.8.jsonnet).
These two files have had necessary blank lines added so that they
can be compared side-by-side and line-by-line on screen.
@@ -16,9 +16,8 @@ release-0.3 and also the major migration after release-0.7 as described in
The sample files are intended as an example of format conversion and
not necessarily best practice for the files in release-0.3 or release-0.8.
Below are three sample extracts of the conversion as an indication of the
Below are three sample extracts of the conversion as an indication of the
changes required.
<table>
<tr>
<th> release-0.3 </th>

View File

@@ -33,14 +33,14 @@ Thanks to our community we identified a lot of shortcomings of the previous design
Those concepts were already present in the repository but it wasn't clear which file was holding what. After refactoring, we categorized the jsonnet code into 3 buckets and put them into separate directories:
- `components` - main building blocks for kube-prometheus, written as functions responsible for creating multiple objects representing kubernetes manifests. For example all objects for node_exporter deployment are bundled in `components/node_exporter.libsonnet` library
- `addons` - everything that can enhance kube-prometheus deployment. Those are small snippets of code adding a small feature, for example adding anti-affinity to pods via [`addons/anti-affinity.libsonnet`](https://github.com/prometheus-operator/kube-prometheus/blob/main/jsonnet/kube-prometheus/addons/anti-affinity.libsonnet). Addons are meant to be used in object-oriented way like `local kp = (import 'kube-prometheus/main.libsonnet') + (import 'kube-prometheus/addons/all-namespaces.libsonnet')`
- `addons` - everything that can enhance kube-prometheus deployment. Those are small snippets of code adding a small feature, for example adding anti-affinity to pods via [`addons/anti-affinity.libsonnet`][antiaffinity]. Addons are meant to be used in object-oriented way like `local kp = (import 'kube-prometheus/main.libsonnet') + (import 'kube-prometheus/addons/all-namespaces.libsonnet')`
- `platforms` - currently those are `addons` specialized to allow deploying kube-prometheus project on a specific platform.
### Component configuration
Refactoring main components to use functions allowed us to define APIs for said components. Each function has a default set of parameters that can be overridden or that are required to be set by a user. Those default parameters are represented in each component by `defaults` map at the top of each library file, for example in [`node_exporter.libsonnet`](https://github.com/prometheus-operator/kube-prometheus/blob/1d2a0e275af97948667777739a18b24464480dc8/jsonnet/kube-prometheus/components/node-exporter.libsonnet#L3-L34).
Refactoring main components to use functions allowed us to define APIs for said components. Each function has a default set of parameters that can be overridden or that are required to be set by a user. Those default parameters are represented in each component by `defaults` map at the top of each library file, for example in [`node_exporter.libsonnet`][node_exporter_defaults_example].
This API is meant to ease the use of kube-prometheus as parameters can be passed from a JSON file and don't need to be in jsonnet format. However, if you need to modify particular parts of the stack, jsonnet allows you to do this and we are also not restricting such access in any way. An example of such modifications can be seen in any of our `addons`, like the [`addons/anti-affinity.libsonnet`](https://github.com/prometheus-operator/kube-prometheus/blob/main/jsonnet/kube-prometheus/addons/anti-affinity.libsonnet) one.
This API is meant to ease the use of kube-prometheus as parameters can be passed from a JSON file and don't need to be in jsonnet format. However, if you need to modify particular parts of the stack, jsonnet allows you to do this and we are also not restricting such access in any way. An example of such modifications can be seen in any of our `addons`, like the [`addons/anti-affinity.libsonnet`][antiaffinity] one.
### Mixin integration
@@ -63,14 +63,25 @@ All examples from `examples/` directory were adapted to the new codebase. [Pleas
## Legacy migration
An example of conversion of a legacy release-0.3 my.jsonnet file to release-0.8 can be found in [migration-example](migration-example)
An example of conversion of a legacy release-0.3 my.jsonnet file to release-0.8 can be found in [migration-example](./migration-example)
## Advanced usage examples
For more advanced usage examples you can take a look at those two, open to public, implementations:
- [thaum-xyz/ankhmorpork](https://github.com/thaum-xyz/ankhmorpork/blob/master/apps/monitoring/jsonnet) - extending kube-prometheus to adapt to a required environment
- [openshift/cluster-monitoring-operator](https://github.com/openshift/cluster-monitoring-operator/pull/1044) - using kube-prometheus components as standalone libraries to build a custom solution
- [thaum-xyz/ankhmorpork][thaum] - extending kube-prometheus to adapt to a required environment
- [openshift/cluster-monitoring-operator][openshift] - using kube-prometheus components as standalone libraries to build a custom solution
## Final note
Refactoring was a huge undertaking and possibly this document didn't describe in enough detail how to help you with migration to the new stack. If that is the case, please reach out to us by using [GitHub discussions](https://github.com/prometheus-operator/kube-prometheus/discussions) feature or directly on [#prometheus-operator kubernetes slack channel](http://slack.k8s.io/).
Refactoring was a huge undertaking and possibly this document didn't describe in enough detail how to help you with migration to the new stack. If that is the case, please reach out to us by using [GitHub discussions][discussions] feature or directly on [#prometheus-operator kubernetes slack channel][slack].
[antiaffinity]: https://github.com/prometheus-operator/kube-prometheus/blob/main/jsonnet/kube-prometheus/addons/anti-affinity.libsonnet
[node_exporter_defaults_example]: https://github.com/prometheus-operator/kube-prometheus/blob/1d2a0e275af97948667777739a18b24464480dc8/jsonnet/kube-prometheus/components/node-exporter.libsonnet#L3-L34
[openshift]: https://github.com/openshift/cluster-monitoring-operator/pull/1044
[thaum]: https://github.com/thaum-xyz/ankhmorpork/blob/master/apps/monitoring/jsonnet
[discussions]: https://github.com/prometheus-operator/kube-prometheus/discussions
[slack]: http://slack.k8s.io/

View File

@@ -1,23 +1,23 @@
---
title: "Monitoring external etcd"
description: "This guide will help you monitor an external etcd cluster."
lead: "This guide will help you monitor an external etcd cluster."
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 640
toc: true
title: Monitoring external etcd
menu:
docs:
parent: kube
lead: This guide will help you monitor an external etcd cluster.
images: []
draft: false
description: This guide will help you monitor an external etcd cluster.
date: "2021-03-08T23:04:32+01:00"
---
When the etcd cluster is not hosted inside Kubernetes.
This is often the case with Kubernetes setups. This approach has been tested with kube-aws but the same principles apply to other tools.
Note that [etcd.jsonnet](../examples/etcd.jsonnet) & [static-etcd.libsonnet](../jsonnet/kube-prometheus/addons/static-etcd.libsonnet) (which are described by a section of the [customization](customizations/static-etcd-configuration.md)) do the following:
* Put the three etcd TLS client files (CA & cert & key) into a secret in the namespace, and have Prometheus Operator load the secret.
* Create the following (to expose etcd metrics - port 2379): a Service, Endpoint, & ServiceMonitor.
Note that [etcd.jsonnet](../examples/etcd.jsonnet) & [kube-prometheus-static-etcd.libsonnet](../jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet) (which are described by a section of the [Readme](../README.md#static-etcd-configuration)) do the following:
* Put the three etcd TLS client files (CA & cert & key) into a secret in the namespace, and have Prometheus Operator load the secret.
* Create the following (to expose etcd metrics - port 2379): a Service, Endpoint, & ServiceMonitor.
# Step 1: Open the port
@@ -26,7 +26,6 @@ You now need to allow the nodes Prometheus are running on to talk to the etcd on
If using kube-aws, you will need to edit the etcd security group inbound, specifying the security group of your Kubernetes node (worker) as the source.
## kube-aws and EIP or ENI inconsistency
With kube-aws, each etcd node has two IP addresses:
* EC2 instance IP
@@ -41,7 +40,6 @@ Another idea would be to use the DNS entries of etcd, but those are not currently
# Step 2: verify
Go to the Prometheus UI on :9090/config and check that you have an etcd job entry:
```
- job_name: monitoring/etcd-k8s/0
scrape_interval: 30s
@@ -50,5 +48,6 @@ Go to the Prometheus UI on :9090/config and check that you have an etcd job entr
```
On the :9090/targets page:
* You should see "etcd" with the UP state. If not, check the Error column for more information.
* If no "etcd" targets are even shown on this page, prometheus isn't attempting to scrape it.
* You should see "etcd" with the UP state. If not, check the Error column for more information.
* If no "etcd" targets are even shown on this page, prometheus isn't attempting to scrape it.

View File

@@ -1,28 +1,26 @@
---
title: "Monitoring other Namespaces"
description: "This guide will help you monitor applications in other Namespaces."
lead: "This guide will help you monitor applications in other Namespaces."
date: 2021-03-08T23:04:32+01:00
draft: false
images: []
menu:
docs:
parent: "kube"
weight: 640
toc: true
title: Monitoring other Namespaces
menu:
docs:
parent: kube
lead: This guide will help you monitor applications in other Namespaces.
images: []
draft: false
description: This guide will help you monitor applications in other Namespaces.
date: "2021-03-08T23:04:32+01:00"
---
This guide will help you monitor applications in other Namespaces. By default the RBAC rules are only enabled for the `default` and `kube-system` namespaces during install.
# Setup
You have to give the list of the Namespaces that you want to be able to monitor.
This is done in the variable `prometheus.roleSpecificNamespaces`. You usually set this in your `.jsonnet` file when building the manifests.
Example to create the needed `Role` and `RoleBinding` for the Namespace `foo`:
Example to create the needed `Role` and `RoleBinding` for the Namespace `foo`:
```
local kp = (import 'kube-prometheus/main.libsonnet') + {
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
_config+:: {
namespace: 'monitoring',

View File

@@ -1,11 +1,9 @@
# Setup Weave Net monitoring using kube-prometheus
[Weave Net](https://kubernetes.io/docs/concepts/cluster-administration/networking/#weave-net-from-weaveworks) is a resilient and simple to use CNI provider for Kubernetes. A well monitored and observed CNI provider helps in troubleshooting Kubernetes networking problems. [Weave Net](https://www.weave.works/docs/net/latest/concepts/how-it-works/) emits [prometheus metrics](https://www.weave.works/docs/net/latest/tasks/manage/metrics/) for monitoring Weave Net. There are many ways to install Weave Net in your cluster. One of them is using [kops](https://github.com/kubernetes/kops/blob/master/docs/networking.md).
Following this document, you can set up Weave Net monitoring for your cluster using kube-prometheus.
## Contents
Using kube-prometheus and kubectl you will be able to install the following for monitoring Weave Net in your cluster:
1. [Service for Weave Net](https://gist.github.com/alok87/379c6234b582f555c141f6fddea9fbce) The service which the [service monitor](https://coreos.com/operators/prometheus/docs/latest/user-guides/cluster-monitoring.html) scrapes.
@@ -17,7 +15,8 @@ Using kube-prometheus and kubectl you will be able install the following for mon
## Instructions
- You can monitor Weave Net using an example like below. **Please note that some alert configurations are environment specific and may require modifications of alert thresholds**. For example: The FastDP flows have never gone below 15000 for us. But if this value is say 20000 for you then you can use an example like below to update the alert. The alerts which may require threshold modifications are `WeaveNetFastDPFlowsLow` and `WeaveNetIPAMUnreachable`.
```jsonnet mdox-exec="cat examples/weave-net-example.jsonnet"
[embedmd]:# (../examples/weave-net-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/addons/weave-net/weave-net.libsonnet') + {
values+:: {
@@ -67,7 +66,6 @@ local kp = (import 'kube-prometheus/main.libsonnet') +
```
- After you have the required yamls file please run
```
kubectl create -f prometheus-serviceWeaveNet.yaml
kubectl create -f prometheus-serviceMonitorWeaveNet.yaml

View File

@@ -1,10 +1,11 @@
# Windows
The [Windows addon](../examples/windows.jsonnet) adds the dashboards and rules from [kubernetes-monitoring/kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin#dashboards-for-windows-nodes).
The [Windows addon](../examples/windows.jsonnet) adds the dashboards and rules from [kubernetes-monitoring/kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin#dashboards-for-windows-nodes).
Currently, Windows does not support running [windows_exporter](https://github.com/prometheus-community/windows_exporter) in a pod, so this addon uses an [additional scrape configuration](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/additional-scrape-config.md) to set up a static config that scrapes the node ports where windows_exporter is configured.
The addon requires you to specify the node ips and ports where it can find the windows_exporter. See the [full example](../examples/windows.jsonnet) for setup.
The addon requires you to specify the node ips and ports where it can find the windows_exporter. See the [full example](../examples/windows.jsonnet) for setup.
```
local kp = (import 'kube-prometheus/main.libsonnet') +

View File

@@ -24,7 +24,7 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
],
selector: {
matchLabels: {
'app.kubernetes.io/name': 'myapp',
app: 'myapp',
},
},
},

View File

@@ -1,37 +0,0 @@
# to know more about custom template language read alertmanager documentation
# inspired by : https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512
{{ define "slack.title" -}}
[{{ .Status | toUpper -}}
{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{- end -}}
] {{ template "__alert_severity_prefix_title" . }} {{ .CommonLabels.alertname }}
{{- end }}
{{ define "slack.color" -}}
{{ if eq .Status "firing" -}}
{{ if eq .CommonLabels.severity "warning" -}}
warning
{{- else if eq .CommonLabels.severity "critical" -}}
danger
{{- else -}}
#439FE0
{{- end -}}
{{ else -}}
good
{{- end }}
{{- end }}
{{ define "slack.icon_emoji" }}:prometheus:{{ end }}
{{/* The text to display in the alert */}}
{{ define "slack.text" -}}
{{ range .Alerts }}
{{- if .Annotations.message }}
{{ .Annotations.message }}
{{- end }}
{{- if .Annotations.description }}
{{ .Annotations.description }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,35 +0,0 @@
local configmap(name, namespace, data) = {
apiVersion: 'v1',
kind: 'ConfigMap',
metadata: {
name: name,
namespace: namespace,
},
data: data,
};
local kp =
// different libsonnet imported
{
values+:: {
common+: {
namespace: 'monitoring',
},
},
alertmanager+:: {
alertmanager+: {
spec+: {
// the important field configmaps:
configMaps: ['alert-templates'], // mounted under /etc/alertmanager/configmaps
},
},
},
configmap+:: {
'alert-templates': configmap(
  'alert-templates',  // must match the name referenced in spec.configMaps above
  $.values.common.namespace,  // could be $._config.namespace to assign the namespace once
  { 'alertmanager-alert-template.tmpl': importstr 'alertmanager-alert-template.tmpl' },
),
},
};
{ [name + '-configmap']: kp.configmap[name] for name in std.objectFields(kp.configmap) }

View File

@@ -1,27 +0,0 @@
# external alertmanager yaml
global:
resolve_timeout: 10m
slack_api_url: url
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- match:
alertname: Watchdog
receiver: 'null'
receivers:
- name: 'null'
- name: slack
slack_configs:
- channel: '#alertmanager-testing'
send_resolved: true
title: '{{ template "slack.title" . }}'
icon_emoji: '{{ template "slack.icon_emoji" . }}'
color: '{{ template "slack.color" . }}'
text: '{{ template "slack.text" . }}'
templates:
- '/etc/alertmanager/configmaps/alert-templates/alertmanager-alert-template.tmpl'

View File

@@ -19,4 +19,4 @@ spec:
- logging
selector:
matchLabels:
app.kubernetes.io/name: myapp
app: myapp

View File

@@ -5,14 +5,15 @@ local kp = (import 'kube-prometheus/main.libsonnet') +
namespace: 'monitoring',
},
// Reference info: https://github.com/coreos/kube-prometheus/blob/master/README.md#static-etcd-configuration
etcd+: {
// Configure this to be the IP(s) to scrape - i.e. your etcd node(s) (use commas to separate multiple values).
ips: ['127.0.0.1'],
// Reference info:
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec (has endpoints)
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint (has tlsConfig)
// * https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig (has: caFile, certFile, keyFile, serverName, & insecureSkipVerify)
// * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec (has endpoints)
// * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint (has tlsConfig)
// * https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig (has: caFile, certFile, keyFile, serverName, & insecureSkipVerify)
// Set these three variables to the fully qualified directory path on your work machine to the certificate files that are valid to scrape etcd metrics with (check the apiserver container).
// Most likely these certificates are generated somewhere in an infrastructure repository, so using the jsonnet `importstr` function can
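// Editorial sketch, not part of the original file: a rough illustration of wiring the
// certificates in with `importstr`, as the comment above suggests. The `ips` and `clientCA`
// fields are visible in this diff (the static-etcd secret reads $.values.etcd.clientCA);
// the `clientCert`, `clientKey`, and `serverName` field names and the addon path are
// assumptions, and the certificate file names are placeholders.
local kpEtcdSketch = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/static-etcd.libsonnet') +  // assumed addon path
  {
    values+:: {
      common+: { namespace: 'monitoring' },
      etcd+: {
        ips: ['10.0.0.10'],  // your etcd node IP(s)
        clientCA: importstr 'etcd-client-ca.crt',
        clientCert: importstr 'etcd-client.crt',
        clientKey: importstr 'etcd-client.key',
        serverName: 'etcd.kube-system.svc.cluster.local',
      },
    },
  };
kpEtcdSketch.prometheus.prometheus  // render just the Prometheus object as a smoke test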

View File

@@ -7,7 +7,7 @@ metadata:
namespace: default
spec:
selector:
app.kubernetes.io/name: example-app
app: example-app
ports:
- name: web
protocol: TCP
@@ -22,17 +22,17 @@ metadata:
spec:
selector:
matchLabels:
app.kubernetes.io/name: example-app
app: example-app
version: 1.1.3
replicas: 4
template:
metadata:
labels:
app.kubernetes.io/name: example-app
app: example-app
version: 1.1.3
spec:
containers:
- name: example-app
- name: example-app
image: quay.io/fabxc/prometheus_demo_service
ports:
- name: web

View File

@@ -1,111 +0,0 @@
local ingress(name, namespace, rules) = {
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
name: name,
namespace: namespace,
annotations: {
'nginx.ingress.kubernetes.io/auth-type': 'basic',
'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
},
},
spec: { rules: rules },
};
local kp =
(import 'kube-prometheus/main.libsonnet') +
{
_config+:: {
namespace: 'monitoring',
grafana+:: {
config+: {
sections+: {
server+: {
root_url: 'http://grafana.example.com/',
},
},
},
},
},
// Configure External URL's per application
alertmanager+:: {
alertmanager+: {
spec+: {
externalUrl: 'http://alertmanager.example.com',
},
},
},
prometheus+:: {
prometheus+: {
spec+: {
externalUrl: 'http://prometheus.example.com',
},
},
},
// Create one ingress object that routes to each individual application
ingress+:: {
'kube-prometheus': ingress(
'kube-prometheus',
$._config.namespace,
[
{
host: 'alertmanager.example.com',
http: {
paths: [{
backend: {
service: {
name: 'alertmanager-main',
port: 'web',
},
},
}],
},
},
{
host: 'grafana.example.com',
http: {
paths: [{
backend: {
service: {
name: 'grafana',
port: 'http',
},
},
}],
},
},
{
host: 'prometheus.example.com',
http: {
paths: [{
backend: {
service: {
name: 'prometheus-k8s',
port: 'web',
},
},
}],
},
},
]
),
},
} + {
// Create basic auth secret - replace 'auth' file with your own
ingress+:: {
'basic-auth-secret': {
apiVersion: 'v1',
kind: 'Secret',
metadata: {
name: 'basic-auth',
namespace: $._config.namespace,
},
data: { auth: std.base64(importstr 'auth') },
type: 'Opaque',
},
},
};
{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) }

View File

@@ -0,0 +1,9 @@
((import 'kube-prometheus/main.libsonnet') + {
nodeExporter+: {
daemonset+: {
metadata+: {
namespace: 'my-custom-namespace',
},
},
},
}).nodeExporter.daemonset

View File

@@ -8,22 +8,25 @@ local kp =
};
local manifests =
// Uncomment line below to enable vertical auto scaling of kube-state-metrics
//{ ['ksm-autoscaler-' + name]: kp.ksmAutoscaler[name] for name in std.objectFields(kp.ksmAutoscaler) } +
{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
['setup/' + resource]: kp[component][resource]
for component in std.objectFields(kp)
for resource in std.filter(
function(resource)
kp[component][resource].kind == 'CustomResourceDefinition' || kp[component][resource].kind == 'Namespace', std.objectFields(kp[component])
)
['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
{
[component + '-' + resource]: kp[component][resource]
for component in std.objectFields(kp)
for resource in std.filter(
function(resource)
kp[component][resource].kind != 'CustomResourceDefinition' && kp[component][resource].kind != 'Namespace', std.objectFields(kp[component])
)
};
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) };
local kustomizationResourceFile(name) = './manifests/' + name + '.yaml';
local kustomization = {

View File

@@ -1,37 +0,0 @@
local kp = (import 'kube-prometheus/main.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
prometheus+: {
namespace: 'foo',
name: 'bar',
},
alertmanager+: {
namespace: 'bar',
name: 'foo',
},
},
};
{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
// Add the restricted psp to setup
{
['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }

View File

@@ -0,0 +1,9 @@
((import 'kube-prometheus/main.libsonnet') + {
prometheus+: {
prometheus+: {
metadata+: {
name: 'my-name',
},
},
},
}).prometheus.prometheus

View File

@@ -1,5 +1,5 @@
{
prometheus+: {
prometheus+:: {
clusterRole+: {
rules+: [
{

View File

@@ -46,7 +46,7 @@
spec+:
$.antiaffinity(
$.alertmanager._config.selectorLabels,
$.values.alertmanager.namespace,
$.values.common.namespace,
$.values.alertmanager.podAntiAffinity,
$.values.alertmanager.podAntiAffinityTopologyKey,
),
@@ -58,7 +58,7 @@
spec+:
$.antiaffinity(
$.prometheus._config.selectorLabels,
$.values.prometheus.namespace,
$.values.common.namespace,
$.values.prometheus.podAntiAffinity,
$.values.prometheus.podAntiAffinityTopologyKey,
),
@@ -72,7 +72,7 @@
spec+:
$.antiaffinity(
$.blackboxExporter._config.selectorLabels,
$.values.blackboxExporter.namespace,
$.values.common.namespace,
$.values.blackboxExporter.podAntiAffinity,
$.values.blackboxExporter.podAntiAffinityTopologyKey,
),
@@ -88,7 +88,7 @@
spec+:
$.antiaffinity(
$.prometheusAdapter._config.selectorLabels,
$.values.prometheusAdapter.namespace,
$.values.common.namespace,
$.values.prometheusAdapter.podAntiAffinity,
$.values.prometheusAdapter.podAntiAffinityTopologyKey,
),

View File

@@ -37,7 +37,7 @@
kind: 'ServiceMonitor',
metadata: {
name: 'aws-node',
namespace: $.values.kubernetesControlPlane.namespace,
namespace: $.values.common.namespace,
labels: {
'app.kubernetes.io/name': 'aws-node',
},

View File

@@ -31,10 +31,6 @@ local withImageRepository(repository) = {
},
};
{
imageName:: imageName,
}
{
withImageRepository:: withImageRepository,
}

View File

@@ -4,7 +4,7 @@
{
values+:: {
prometheusAdapter+: {
namespace: $.values.prometheusAdapter.namespace,
namespace: $.values.common.namespace,
// Rules for custom-metrics
config+:: {
rules+: [

View File

@@ -14,7 +14,7 @@
// Drop all apiserver metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)',
regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs)',
action: 'drop',
},
// Drop all docker metrics which are deprecated in kubernetes.

View File

@@ -4,6 +4,7 @@
{
values+:: {
prometheusAdapter+: {
namespace: $.values.common.namespace,
// Rules for external-metrics
config+:: {
externalRules+: [

View File

@@ -30,7 +30,7 @@
kind: 'ClusterRole',
name: 'ksm-autoscaler',
},
subjects: [{ kind: 'ServiceAccount', name: 'ksm-autoscaler', namespace: $.values.kubeStateMetrics.namespace }],
subjects: [{ kind: 'ServiceAccount', name: 'ksm-autoscaler', namespace: $.values.common.namespace }],
},
roleBinding: {
@@ -38,7 +38,7 @@
kind: 'RoleBinding',
metadata: {
name: 'ksm-autoscaler',
namespace: $.values.kubeStateMetrics.namespace,
namespace: $.values.common.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
@@ -53,7 +53,7 @@
kind: 'Role',
metadata: {
name: 'ksm-autoscaler',
namespace: $.values.kubeStateMetrics.namespace,
namespace: $.values.common.namespace,
},
rules: [
{
@@ -76,19 +76,19 @@
kind: 'ServiceAccount',
metadata: {
name: 'ksm-autoscaler',
namespace: $.values.kubeStateMetrics.namespace,
namespace: $.values.common.namespace,
},
},
deployment:
local podLabels = { 'app.kubernetes.io/name': 'ksm-autoscaler' };
local podLabels = { app: 'ksm-autoscaler' };
local c = {
name: 'ksm-autoscaler',
image: $.values.clusterVerticalAutoscaler.image,
args: [
'/cpvpa',
'--target=deployment/kube-state-metrics',
'--namespace=' + $.values.kubeStateMetrics.namespace,
'--namespace=' + $.values.common.namespace,
'--logtostderr=true',
'--poll-period-seconds=10',
'--default-config={"kube-state-metrics":{"requests":{"cpu":{"base":"' + $.values.clusterVerticalAutoscaler.baseCPU +
@@ -110,7 +110,7 @@
kind: 'Deployment',
metadata: {
name: 'ksm-autoscaler',
namespace: $.values.kubeStateMetrics.namespace,
namespace: $.values.common.namespace,
labels: podLabels,
},
spec: {

View File

@@ -59,7 +59,7 @@ local restrictedPodSecurityPolicy = {
kind: 'Role',
metadata: {
name: 'alertmanager-' + $.values.alertmanager.name,
namespace: $.values.alertmanager.namespace,
namespace: $.values.common.namespace,
},
rules: [{
apiGroups: ['policy'],
@@ -74,7 +74,7 @@ local restrictedPodSecurityPolicy = {
kind: 'RoleBinding',
metadata: {
name: 'alertmanager-' + $.values.alertmanager.name,
namespace: $.values.alertmanager.namespace,
namespace: $.values.common.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
@@ -132,7 +132,7 @@ local restrictedPodSecurityPolicy = {
kind: 'Role',
metadata: {
name: 'grafana',
namespace: $.values.grafana.namespace,
namespace: $.values.common.namespace,
},
rules: [{
apiGroups: ['policy'],
@@ -147,7 +147,7 @@ local restrictedPodSecurityPolicy = {
kind: 'RoleBinding',
metadata: {
name: 'grafana',
namespace: $.values.grafana.namespace,
namespace: $.values.common.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',

View File

@@ -84,7 +84,7 @@
type: 'Opaque',
metadata: {
name: 'kube-etcd-client-certs',
namespace: $.values.prometheus.namespace,
namespace: $.values.common.namespace,
},
data: {
'etcd-client-ca.crt': std.base64($.values.etcd.clientCA),

View File

@@ -2,55 +2,46 @@
// https://github.com/prometheus-operator/kube-prometheus/issues/72
{
//TODO(arthursens): Expand example once kube-rbac-proxy can be managed with a first-class
// object inside node-exporter, kube-state-metrics and prometheus-operator.
// See also: https://github.com/prometheus-operator/kube-prometheus/issues/1500#issuecomment-966727623
values+:: {
alertmanager+: {
resources+: {
limits: {},
local noLimit(c) =
//if std.objectHas(c, 'resources') && c.name != 'kube-state-metrics'
if c.name != 'kube-state-metrics'
then c { resources+: { limits: {} } }
else c,
nodeExporter+: {
daemonset+: {
spec+: {
template+: {
spec+: {
containers: std.map(noLimit, super.containers),
},
},
},
},
blackboxExporter+: {
resources+: {
limits: {},
},
kubeStateMetrics+: {
deployment+: {
spec+: {
template+: {
spec+: {
containers: std.map(noLimit, super.containers),
},
},
},
},
grafana+: {
resources+: {
limits: {},
},
},
kubeStateMetrics+: {
resources+: {
limits: {},
},
},
nodeExporter+: {
resources+: {
limits: {},
},
},
prometheusAdapter+: {
resources+: {
limits: {},
},
},
prometheusOperator+: {
resources+: {
limits: {},
},
},
prometheus+: {
resources+: {
limits: {},
},
prometheusOperator+: {
deployment+: {
spec+: {
template+: {
spec+: {
local addArgs(c) =
if c.name == 'prometheus-operator'
then c { args+: ['--config-reloader-cpu-limit=0', '--config-reloader-memory-limit=0'] }
else c,
containers: std.map(addArgs, super.containers),
},
},
},
},
},

View File

@@ -1,8 +1,6 @@
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
namespace:: error 'must provide namespace',
namespace: error 'must provide namespace',
image: error 'must provide image',
version: error 'must provide version',
resources: {
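// Editorial aside, not part of this file: the public/hidden convention noted above relies on
// Jsonnet's `::` (hidden) fields, which stay readable inside the template but are never
// emitted when the object is manifested — that is what keeps non-CRD settings out of the
// generated YAML. A tiny standalone illustration:
local hiddenFieldDemo = {
  namespace:: 'monitoring',      // hidden (::): usable below, absent from the output
  replicas: 3,                   // public (:): rendered in the manifest
  podNamespace: self.namespace,  // hidden fields remain accessible via self
};
hiddenFieldDemo  // manifests as { "podNamespace": "monitoring", "replicas": 3 }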
@@ -11,7 +9,6 @@ local defaults = {
},
commonLabels:: {
'app.kubernetes.io/name': 'alertmanager',
'app.kubernetes.io/instance': defaults.name,
'app.kubernetes.io/version': defaults.version,
'app.kubernetes.io/component': 'alert-router',
'app.kubernetes.io/part-of': 'kube-prometheus',
@@ -21,19 +18,26 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
name:: error 'must provide name',
reloaderPort:: 8080,
config:: {
name: error 'must provide name',
config: {
global: {
resolve_timeout: '5m',
},
inhibit_rules: [{
source_matchers: ['severity = critical'],
target_matchers: ['severity =~ warning|info'],
source_match: {
severity: 'critical',
},
target_match_re: {
severity: 'warning|info',
},
equal: ['namespace', 'alertname'],
}, {
source_matchers: ['severity = warning'],
target_matchers: ['severity = info'],
source_match: {
severity: 'warning',
},
target_match_re: {
severity: 'info',
},
equal: ['namespace', 'alertname'],
}],
route: {
@@ -43,8 +47,8 @@ local defaults = {
repeat_interval: '12h',
receiver: 'Default',
routes: [
{ receiver: 'Watchdog', matchers: ['alertname = Watchdog'] },
{ receiver: 'Critical', matchers: ['severity = critical'] },
{ receiver: 'Watchdog', match: { alertname: 'Watchdog' } },
{ receiver: 'Critical', match: { severity: 'critical' } },
],
},
receivers: [
@@ -54,7 +58,7 @@ local defaults = {
],
},
replicas: 3,
mixin:: {
mixin: {
ruleLabels: {},
_config: {
alertmanagerName: '{{ $labels.namespace }}/{{ $labels.pod}}',
@@ -72,11 +76,6 @@ function(params) {
// Safety check
assert std.isObject(am._config.resources),
assert std.isObject(am._config.mixin._config),
_metadata:: {
name: 'alertmanager-' + am._config.name,
namespace: am._config.namespace,
labels: am._config.commonLabels,
},
mixin:: (import 'github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') {
@@ -86,9 +85,10 @@ function(params) {
prometheusRule: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: am._metadata {
labels+: am._config.mixin.ruleLabels,
name: am._metadata.name + '-rules',
metadata: {
labels: am._config.commonLabels + am._config.mixin.ruleLabels,
name: 'alertmanager-' + am._config.name + '-rules',
namespace: am._config.namespace,
},
spec: {
local r = if std.objectHasAll(am.mixin, 'prometheusRules') then am.mixin.prometheusRules.groups else [],
@@ -101,7 +101,11 @@ function(params) {
apiVersion: 'v1',
kind: 'Secret',
type: 'Opaque',
metadata: am._metadata,
metadata: {
name: 'alertmanager-' + am._config.name,
namespace: am._config.namespace,
labels: { alertmanager: am._config.name } + am._config.commonLabels,
},
stringData: {
'alertmanager.yaml': if std.type(am._config.config) == 'object'
then
@@ -114,19 +118,29 @@ function(params) {
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: am._metadata,
metadata: {
name: 'alertmanager-' + am._config.name,
namespace: am._config.namespace,
labels: { alertmanager: am._config.name } + am._config.commonLabels,
},
},
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: am._metadata,
metadata: {
name: 'alertmanager-' + am._config.name,
namespace: am._config.namespace,
labels: { alertmanager: am._config.name } + am._config.commonLabels,
},
spec: {
ports: [
{ name: 'web', targetPort: 'web', port: 9093 },
{ name: 'reloader-web', port: am._config.reloaderPort, targetPort: 'reloader-web' },
],
selector: am._config.selectorLabels,
selector: {
app: 'alertmanager',
alertmanager: am._config.name,
} + am._config.selectorLabels,
sessionAffinity: 'ClientIP',
},
},
@@ -134,26 +148,37 @@ function(params) {
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: am._metadata,
metadata: {
name: 'alertmanager',
namespace: am._config.namespace,
labels: am._config.commonLabels,
},
spec: {
selector: {
matchLabels: am._config.selectorLabels,
matchLabels: {
alertmanager: am._config.name,
} + am._config.selectorLabels,
},
endpoints: [
{ port: 'web', interval: '30s' },
{ port: 'reloader-web', interval: '30s' },
],
},
},
[if (defaults + params).replicas > 1 then 'podDisruptionBudget']: {
apiVersion: 'policy/v1',
apiVersion: 'policy/v1beta1',
kind: 'PodDisruptionBudget',
metadata: am._metadata,
metadata: {
name: 'alertmanager-' + am._config.name,
namespace: am._config.namespace,
labels: am._config.commonLabels,
},
spec: {
maxUnavailable: 1,
selector: {
matchLabels: am._config.selectorLabels,
matchLabels: {
alertmanager: am._config.name,
} + am._config.selectorLabels,
},
},
},
@@ -161,19 +186,23 @@ function(params) {
alertmanager: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Alertmanager',
metadata: am._metadata {
metadata: {
name: am._config.name,
namespace: am._config.namespace,
labels: {
alertmanager: am._config.name,
} + am._config.commonLabels,
},
spec: {
replicas: am._config.replicas,
version: am._config.version,
image: am._config.image,
podMetadata: {
labels: am.alertmanager.metadata.labels,
labels: am._config.commonLabels,
},
resources: am._config.resources,
nodeSelector: { 'kubernetes.io/os': 'linux' },
serviceAccountName: am.serviceAccount.metadata.name,
serviceAccountName: 'alertmanager-' + am._config.name,
securityContext: {
runAsUser: 1000,
runAsNonRoot: true,

View File

@@ -2,12 +2,10 @@ local krp = import './kube-rbac-proxy.libsonnet';
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide version',
resources:: {
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide version',
resources: {
requests: { cpu: '10m', memory: '20Mi' },
limits: { cpu: '20m', memory: '40Mi' },
},
@@ -22,13 +20,13 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
configmapReloaderImage:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
configmapReloaderImage: error 'must provide version',
kubeRbacProxyImage: error 'must provide kubeRbacProxyImage',
port:: 9115,
internalPort:: 19115,
replicas:: 1,
modules:: {
port: 9115,
internalPort: 19115,
replicas: 1,
modules: {
http_2xx: {
prober: 'http',
http: {
@@ -83,7 +81,7 @@ local defaults = {
},
},
},
privileged::
privileged:
local icmpModules = [self.modules[m] for m in std.objectFields(self.modules) if self.modules[m].prober == 'icmp'];
std.length(icmpModules) > 0,
};
@@ -94,17 +92,14 @@ function(params) {
_config:: defaults + params,
// Safety check
assert std.isObject(bb._config.resources),
_metadata:: {
name: 'blackbox-exporter',
namespace: bb._config.namespace,
labels: bb._config.commonLabels,
},
configuration: {
apiVersion: 'v1',
kind: 'ConfigMap',
metadata: bb._metadata {
metadata: {
name: 'blackbox-exporter-configuration',
namespace: bb._config.namespace,
labels: bb._config.commonLabels,
},
data: {
'config.yml': std.manifestYamlDoc({ modules: bb._config.modules }),
@@ -114,7 +109,10 @@ function(params) {
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: bb._metadata,
metadata: {
name: 'blackbox-exporter',
namespace: bb._config.namespace,
},
},
clusterRole: {
@@ -140,7 +138,9 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: bb._metadata,
metadata: {
name: 'blackbox-exporter',
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
@@ -212,12 +212,14 @@ function(params) {
{
apiVersion: 'apps/v1',
kind: 'Deployment',
metadata: bb._metadata,
metadata: {
name: 'blackbox-exporter',
namespace: bb._config.namespace,
labels: bb._config.commonLabels,
},
spec: {
replicas: bb._config.replicas,
selector: {
matchLabels: bb._config.selectorLabels,
},
selector: { matchLabels: bb._config.selectorLabels },
template: {
metadata: {
labels: bb._config.commonLabels,
@@ -241,7 +243,11 @@ function(params) {
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: bb._metadata,
metadata: {
name: 'blackbox-exporter',
namespace: bb._config.namespace,
labels: bb._config.commonLabels,
},
spec: {
ports: [{
name: 'https',
@@ -256,24 +262,29 @@ function(params) {
},
},
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: bb._metadata,
spec: {
endpoints: [{
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
interval: '30s',
path: '/metrics',
port: 'https',
scheme: 'https',
tlsConfig: {
insecureSkipVerify: true,
serviceMonitor:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'blackbox-exporter',
namespace: bb._config.namespace,
labels: bb._config.commonLabels,
},
spec: {
endpoints: [{
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
interval: '30s',
path: '/metrics',
port: 'https',
scheme: 'https',
tlsConfig: {
insecureSkipVerify: true,
},
}],
selector: {
matchLabels: bb._config.selectorLabels,
},
}],
selector: {
matchLabels: bb._config.selectorLabels,
},
},
},
}

View File

@@ -1,14 +1,10 @@
local kubernetesGrafana = import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet';
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: 'grafana',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
resources:: {
name: 'grafana',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide image',
resources: {
requests: { cpu: '100m', memory: '100Mi' },
limits: { cpu: '200m', memory: '200Mi' },
},
@@ -23,37 +19,90 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
prometheusName:: error 'must provide prometheus name',
prometheusName: error 'must provide prometheus name',
dashboards: {},
// TODO(paulfantom): expose those to have a stable API. After kubernetes-grafana refactor those could probably be removed.
rawDashboards: {},
folderDashboards: {},
containers: [],
datasources: [],
config: {},
ldap: null,
plugins: [],
env: [],
};
function(params)
local config = defaults + params;
function(params) {
local g = self,
_config:: defaults + params,
// Safety check
assert std.isObject(config.resources);
assert std.isObject(g._config.resources),
kubernetesGrafana(config) {
local g = self,
_config+:: config,
_metadata:: {
local glib = (import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet') + {
_config+:: {
namespace: g._config.namespace,
versions+:: {
grafana: g._config.version,
},
imageRepos+:: {
grafana: std.split(g._config.image, ':')[0],
},
prometheus+:: {
name: g._config.prometheusName,
},
grafana+:: {
labels: g._config.commonLabels,
dashboards: g._config.dashboards,
resources: g._config.resources,
rawDashboards: g._config.rawDashboards,
folderDashboards: g._config.folderDashboards,
containers: g._config.containers,
config+: g._config.config,
ldap: g._config.ldap,
plugins+: g._config.plugins,
env: g._config.env,
} + (
// Conditionally overwrite default setting.
if std.length(g._config.datasources) > 0 then
{ datasources: g._config.datasources }
else {}
),
},
},
// Add object only if user passes config and config is not empty
[if std.objectHas(params, 'config') && std.length(params.config) > 0 then 'config']: glib.grafana.config,
service: glib.grafana.service,
serviceAccount: glib.grafana.serviceAccount,
deployment: glib.grafana.deployment,
dashboardDatasources: glib.grafana.dashboardDatasources,
dashboardSources: glib.grafana.dashboardSources,
dashboardDefinitions: if std.length(g._config.dashboards) > 0 ||
std.length(g._config.rawDashboards) > 0 ||
std.length(g._config.folderDashboards) > 0 then {
apiVersion: 'v1',
kind: 'ConfigMapList',
items: glib.grafana.dashboardDefinitions,
},
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'grafana',
namespace: g._config.namespace,
labels: g._config.commonLabels,
},
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: g._metadata,
spec: {
selector: {
matchLabels: {
'app.kubernetes.io/name': 'grafana',
},
spec: {
selector: {
matchLabels: {
'app.kubernetes.io/name': 'grafana',
},
endpoints: [{
port: 'http',
interval: '15s',
}],
},
endpoints: [{
port: 'http',
interval: '15s',
}],
},
}
},
}

View File

@@ -1,14 +1,12 @@
local relabelings = import '../addons/dropping-deprecated-metrics-relabelings.libsonnet';
local defaults = {
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
namespace:: error 'must provide namespace',
namespace: error 'must provide namespace',
commonLabels:: {
'app.kubernetes.io/name': 'kube-prometheus',
'app.kubernetes.io/part-of': 'kube-prometheus',
},
mixin:: {
mixin: {
ruleLabels: {},
_config: {
cadvisorSelector: 'job="kubelet", metrics_path="/metrics/cadvisor"',
@@ -24,16 +22,12 @@ local defaults = {
hostNetworkInterfaceSelector: 'device!~"veth.+"',
},
},
kubeProxy:: false,
kubeProxy: false,
};
function(params) {
local k8s = self,
_config:: defaults + params,
_metadata:: {
labels: k8s._config.commonLabels,
namespace: k8s._config.namespace,
},
mixin:: (import 'github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet') {
_config+:: k8s._config.mixin._config,
@@ -42,13 +36,14 @@ function(params) {
prometheusRule: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: k8s._metadata {
metadata: {
labels: k8s._config.commonLabels + k8s._config.mixin.ruleLabels,
name: 'kubernetes-monitoring-rules',
labels+: k8s._config.mixin.ruleLabels,
namespace: k8s._config.namespace,
},
spec: {
local r = if std.objectHasAll(k8s.mixin, 'prometheusRules') then k8s.mixin.prometheusRules.groups else [],
local a = if std.objectHasAll(k8s.mixin, 'prometheusAlerts') then k8s.mixin.prometheusAlerts.groups else [],
local r = if std.objectHasAll(k8s.mixin, 'prometheusRules') then k8s.mixin.prometheusRules.groups else {},
local a = if std.objectHasAll(k8s.mixin, 'prometheusAlerts') then k8s.mixin.prometheusAlerts.groups else {},
groups: a + r,
},
},
@@ -56,9 +51,10 @@ function(params) {
serviceMonitorKubeScheduler: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: k8s._metadata {
metadata: {
name: 'kube-scheduler',
labels+: { 'app.kubernetes.io/name': 'kube-scheduler' },
namespace: k8s._config.namespace,
labels: { 'app.kubernetes.io/name': 'kube-scheduler' },
},
spec: {
jobLabel: 'app.kubernetes.io/name',
@@ -81,9 +77,10 @@ function(params) {
serviceMonitorKubelet: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: k8s._metadata {
metadata: {
name: 'kubelet',
labels+: { 'app.kubernetes.io/name': 'kubelet' },
namespace: k8s._config.namespace,
labels: { 'app.kubernetes.io/name': 'kubelet' },
},
spec: {
jobLabel: 'app.kubernetes.io/name',
@@ -175,9 +172,10 @@ function(params) {
serviceMonitorKubeControllerManager: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: k8s._metadata {
metadata: {
name: 'kube-controller-manager',
labels+: { 'app.kubernetes.io/name': 'kube-controller-manager' },
namespace: k8s._config.namespace,
labels: { 'app.kubernetes.io/name': 'kube-controller-manager' },
},
spec: {
jobLabel: 'app.kubernetes.io/name',
@@ -209,9 +207,10 @@ function(params) {
serviceMonitorApiserver: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: k8s._metadata {
metadata: {
name: 'kube-apiserver',
labels+: { 'app.kubernetes.io/name': 'apiserver' },
namespace: k8s._config.namespace,
labels: { 'app.kubernetes.io/name': 'apiserver' },
},
spec: {
jobLabel: 'component',
@@ -262,9 +261,12 @@ function(params) {
[if (defaults + params).kubeProxy then 'podMonitorKubeProxy']: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PodMonitor',
metadata: k8s._metadata {
labels+: { 'k8s-app': 'kube-proxy' },
metadata: {
labels: {
'k8s-app': 'kube-proxy',
},
name: 'kube-proxy',
namespace: k8s._config.namespace,
},
spec: {
jobLabel: 'k8s-app',
@@ -298,9 +300,10 @@ function(params) {
serviceMonitorCoreDNS: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: k8s._metadata {
metadata: {
name: 'coredns',
labels+: { 'app.kubernetes.io/name': 'coredns' },
namespace: k8s._config.namespace,
labels: { 'app.kubernetes.io/name': 'coredns' },
},
spec: {
jobLabel: 'app.kubernetes.io/name',
@@ -310,22 +313,11 @@ function(params) {
namespaceSelector: {
matchNames: ['kube-system'],
},
endpoints: [
{
port: 'metrics',
interval: '15s',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: [
// Drop deprecated metrics
// TODO (pgough) - consolidate how we drop metrics across the project
{
sourceLabels: ['__name__'],
regex: 'coredns_cache_misses_total',
action: 'drop',
},
],
},
],
endpoints: [{
port: 'metrics',
interval: '15s',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
}],
},
},

View File

@@ -1,16 +1,14 @@
local defaults = {
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
namespace:: error 'must provide namespace',
image:: error 'must provide image',
ports:: error 'must provide ports',
secureListenAddress:: error 'must provide secureListenAddress',
upstream:: error 'must provide upstream',
resources:: {
namespace: error 'must provide namespace',
image: error 'must provide image',
ports: error 'must provide ports',
secureListenAddress: error 'must provide secureListenAddress',
upstream: error 'must provide upstream',
resources: {
requests: { cpu: '10m', memory: '20Mi' },
limits: { cpu: '20m', memory: '40Mi' },
},
tlsCipherSuites:: [
tlsCipherSuites: [
'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', // required by h2: http://golang.org/cl/30721
'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', // required by h2: http://golang.org/cl/30721

View File

@@ -2,26 +2,24 @@ local krp = import './kube-rbac-proxy.libsonnet';
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: 'kube-state-metrics',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
resources:: {
name: 'kube-state-metrics',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide version',
kubeRbacProxyImage: error 'must provide kubeRbacProxyImage',
resources: {
requests: { cpu: '10m', memory: '190Mi' },
limits: { cpu: '100m', memory: '250Mi' },
},
kubeRbacProxyMain:: {
kubeRbacProxyMain: {
resources+: {
limits+: { cpu: '40m' },
requests+: { cpu: '20m' },
},
},
scrapeInterval:: '30s',
scrapeTimeout:: '30s',
scrapeInterval: '30s',
scrapeTimeout: '30s',
commonLabels:: {
'app.kubernetes.io/name': defaults.name,
'app.kubernetes.io/version': defaults.version,
@@ -33,7 +31,7 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
mixin:: {
mixin: {
ruleLabels: {},
_config: {
kubeStateMetricsSelector: 'job="' + defaults.name + '"',
@@ -56,12 +54,6 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
commonLabels:: ksm._config.commonLabels,
podLabels:: ksm._config.selectorLabels,
_metadata:: {
labels: ksm._config.commonLabels,
name: ksm._config.name,
namespace: ksm._config.namespace,
},
mixin:: (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') {
_config+:: ksm._config.mixin._config,
@@ -70,9 +62,10 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
prometheusRule: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: ksm._metadata {
labels+: ksm._config.mixin.ruleLabels,
metadata: {
labels: ksm._config.commonLabels + ksm._config.mixin.ruleLabels,
name: ksm._config.name + '-rules',
namespace: ksm._config.namespace,
},
spec: {
local r = if std.objectHasAll(ksm.mixin, 'prometheusRules') then ksm.mixin.prometheusRules.groups else [],
@@ -142,12 +135,14 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: ksm._metadata,
metadata: {
name: ksm.name,
namespace: ksm._config.namespace,
labels: ksm._config.commonLabels,
},
spec: {
jobLabel: 'app.kubernetes.io/name',
selector: {
matchLabels: ksm._config.selectorLabels,
},
selector: { matchLabels: ksm._config.selectorLabels },
endpoints: [
{
port: 'https-main',

View File

@@ -2,20 +2,17 @@ local krp = import './kube-rbac-proxy.libsonnet';
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: 'node-exporter',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
resources:: {
name: 'node-exporter',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide version',
kubeRbacProxyImage: error 'must provide kubeRbacProxyImage',
resources: {
requests: { cpu: '102m', memory: '180Mi' },
limits: { cpu: '250m', memory: '180Mi' },
},
listenAddress:: '127.0.0.1',
filesystemMountPointsExclude:: '^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
port:: 9100,
listenAddress: '127.0.0.1',
port: 9100,
commonLabels:: {
'app.kubernetes.io/name': defaults.name,
'app.kubernetes.io/version': defaults.version,
@@ -27,16 +24,10 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
mixin:: {
mixin: {
ruleLabels: {},
_config: {
nodeExporterSelector: 'job="' + defaults.name + '"',
// Adjust NodeFilesystemSpaceFillingUp warning and critical thresholds according to the following default kubelet
// GC values,
// imageGCLowThresholdPercent: 80
// imageGCHighThresholdPercent: 85
// See https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ for more details.
fsSpaceFillingUpWarningThreshold: 20,
fsSpaceFillingUpCriticalThreshold: 15,
diskDeviceSelector: 'device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"',
runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/node/%s',
@@ -51,11 +42,6 @@ function(params) {
// Safety check
assert std.isObject(ne._config.resources),
assert std.isObject(ne._config.mixin._config),
_metadata:: {
name: ne._config.name,
namespace: ne._config.namespace,
labels: ne._config.commonLabels,
},
mixin:: (import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') {
@@ -65,9 +51,10 @@ function(params) {
prometheusRule: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: ne._metadata {
labels+: ne._config.mixin.ruleLabels,
metadata: {
labels: ne._config.commonLabels + ne._config.mixin.ruleLabels,
name: ne._config.name + '-rules',
namespace: ne._config.namespace,
},
spec: {
local r = if std.objectHasAll(ne.mixin, 'prometheusRules') then ne.mixin.prometheusRules.groups else [],
@@ -79,7 +66,10 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
labels: ne._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
@@ -95,7 +85,10 @@ function(params) {
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
labels: ne._config.commonLabels,
},
rules: [
{
apiGroups: ['authentication.k8s.io'],
@@ -113,13 +106,21 @@ function(params) {
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
namespace: ne._config.namespace,
labels: ne._config.commonLabels,
},
},
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
namespace: ne._config.namespace,
labels: ne._config.commonLabels,
},
spec: {
ports: [
{ name: 'https', targetPort: 'https', port: ne._config.port },
@@ -132,7 +133,11 @@ function(params) {
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
namespace: ne._config.namespace,
labels: ne._config.commonLabels,
},
spec: {
jobLabel: 'app.kubernetes.io/name',
selector: {
@@ -169,7 +174,7 @@ function(params) {
'--path.rootfs=/host/root',
'--no-collector.wifi',
'--no-collector.hwmon',
'--collector.filesystem.mount-points-exclude=' + ne._config.filesystemMountPointsExclude,
'--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
// NOTE: ignore veth network interface associated with containers.
// OVN renames veth.* to <rand-hex>@if<X> where X is /sys/class/net/<if>/ifindex
// thus [a-z0-9] regex below
@@ -209,22 +214,19 @@ function(params) {
{
apiVersion: 'apps/v1',
kind: 'DaemonSet',
metadata: ne._metadata,
metadata: {
name: ne._config.name,
namespace: ne._config.namespace,
labels: ne._config.commonLabels,
},
spec: {
selector: {
matchLabels: ne._config.selectorLabels,
},
selector: { matchLabels: ne._config.selectorLabels },
updateStrategy: {
type: 'RollingUpdate',
rollingUpdate: { maxUnavailable: '10%' },
},
template: {
metadata: {
annotations: {
'kubectl.kubernetes.io/default-container': nodeExporter.name,
},
labels: ne._config.commonLabels,
},
metadata: { labels: ne._config.commonLabels },
spec: {
nodeSelector: { 'kubernetes.io/os': 'linux' },
tolerations: [{
@@ -246,4 +248,6 @@ function(params) {
},
},
},
}

View File

@@ -1,18 +1,16 @@
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: 'prometheus-adapter',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
name: 'prometheus-adapter',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide image',
resources:: {
resources: {
requests: { cpu: '102m', memory: '180Mi' },
limits: { cpu: '250m', memory: '180Mi' },
},
replicas:: 2,
listenAddress:: '127.0.0.1',
port:: 9100,
replicas: 2,
listenAddress: '127.0.0.1',
port: 9100,
commonLabels:: {
'app.kubernetes.io/name': 'prometheus-adapter',
'app.kubernetes.io/version': defaults.version,
@@ -26,14 +24,14 @@ local defaults = {
},
// Default range intervals are equal to 4 times the default scrape interval.
// This is done in order to follow Prometheus rule of thumb with irate().
rangeIntervals:: {
rangeIntervals: {
kubelet: '4m',
nodeExporter: '4m',
windowsExporter: '4m',
},
prometheusURL:: error 'must provide prometheusURL',
config:: {
prometheusURL: error 'must provide prometheusURL',
config: {
resourceRules: {
cpu: {
containerQuery: |||
@@ -97,7 +95,7 @@ local defaults = {
window: '5m',
},
},
tlsCipherSuites:: [
tlsCipherSuites: [
'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305',
'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',
'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',
@@ -122,12 +120,6 @@ function(params) {
// Safety check
assert std.isObject(pa._config.resources),
_metadata:: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
apiService: {
apiVersion: 'apiregistration.k8s.io/v1',
kind: 'APIService',
@@ -151,8 +143,10 @@ function(params) {
configMap: {
apiVersion: 'v1',
kind: 'ConfigMap',
metadata: pa._metadata {
metadata: {
name: 'adapter-config',
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
data: { 'config.yaml': std.manifestYamlDoc(pa._config.config) },
},
@@ -160,7 +154,11 @@ function(params) {
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
spec: {
selector: {
matchLabels: pa._config.selectorLabels,
@@ -174,21 +172,6 @@ function(params) {
insecureSkipVerify: true,
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: [
{
sourceLabels: ['__name__'],
action: 'drop',
regex: '(' + std.join('|',
[
'apiserver_client_certificate_.*', // The only client supposed to connect to the aggregated API is the apiserver so it is not really meaningful to monitor its certificate.
'apiserver_envelope_.*', // Prometheus-adapter isn't using envelope for storage.
'apiserver_flowcontrol_.*', // Prometheus-adapter isn't using flowcontrol.
'apiserver_storage_.*', // Prometheus-adapter isn't using the apiserver storage.
'apiserver_webhooks_.*', // Prometheus-adapter doesn't make use of apiserver webhooks.
'workqueue_.*', // Metrics related to the internal apiserver auth workqueues are not very useful to prometheus-adapter.
]) + ')',
},
],
},
],
},
@@ -197,7 +180,11 @@ function(params) {
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
spec: {
ports: [
{ name: 'https', targetPort: 6443, port: 443 },
@@ -231,12 +218,14 @@ function(params) {
{
apiVersion: 'apps/v1',
kind: 'Deployment',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
spec: {
replicas: pa._config.replicas,
selector: {
matchLabels: pa._config.selectorLabels,
},
selector: { matchLabels: pa._config.selectorLabels },
strategy: {
rollingUpdate: {
maxSurge: 1,
@@ -262,13 +251,20 @@ function(params) {
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
},
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
labels: pa._config.commonLabels,
},
rules: [{
apiGroups: [''],
resources: ['nodes', 'namespaces', 'pods', 'services'],
@@ -279,7 +275,10 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
labels: pa._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
@@ -295,8 +294,9 @@ function(params) {
clusterRoleBindingDelegator: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: pa._metadata {
metadata: {
name: 'resource-metrics:system:auth-delegator',
labels: pa._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
@@ -313,8 +313,9 @@ function(params) {
clusterRoleServerResources: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata {
metadata: {
name: 'resource-metrics-server-resources',
labels: pa._config.commonLabels,
},
rules: [{
apiGroups: ['metrics.k8s.io'],
@@ -326,13 +327,13 @@ function(params) {
clusterRoleAggregatedMetricsReader: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata {
metadata: {
name: 'system:aggregated-metrics-reader',
labels+: {
labels: {
'rbac.authorization.k8s.io/aggregate-to-admin': 'true',
'rbac.authorization.k8s.io/aggregate-to-edit': 'true',
'rbac.authorization.k8s.io/aggregate-to-view': 'true',
},
} + pa._config.commonLabels,
},
rules: [{
apiGroups: ['metrics.k8s.io'],
@@ -344,9 +345,10 @@ function(params) {
roleBindingAuthReader: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: pa._metadata {
metadata: {
name: 'resource-metrics-auth-reader',
namespace: 'kube-system',
labels: pa._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
@@ -361,9 +363,13 @@ function(params) {
},
[if (defaults + params).replicas > 1 then 'podDisruptionBudget']: {
apiVersion: 'policy/v1',
apiVersion: 'policy/v1beta1',
kind: 'PodDisruptionBudget',
metadata: pa._metadata,
metadata: {
name: pa._config.name,
namespace: pa._config.namespace,
labels: pa._config.commonLabels,
},
spec: {
minAvailable: 1,
selector: {

View File

@@ -3,15 +3,13 @@ local prometheusOperator = import 'github.com/prometheus-operator/prometheus-ope
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: 'prometheus-operator',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
configReloaderImage:: error 'must provide config reloader image',
resources:: {
name: 'prometheus-operator',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide image',
kubeRbacProxyImage: error 'must provide kubeRbacProxyImage',
configReloaderImage: error 'must provide config reloader image',
resources: {
limits: { cpu: '200m', memory: '200Mi' },
requests: { cpu: '100m', memory: '100Mi' },
},
@@ -26,7 +24,7 @@ local defaults = {
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
mixin:: {
mixin: {
ruleLabels: {
role: 'alert-rules',
prometheus: defaults.name,
@@ -47,11 +45,6 @@ function(params)
local po = self,
// declare variable as a field to allow overriding options and to have unified API across all components
_config:: config,
_metadata:: {
labels: po._config.commonLabels,
name: po._config.name,
namespace: po._config.namespace,
},
mixin:: (import 'github.com/prometheus-operator/prometheus-operator/jsonnet/mixin/mixin.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') {
_config+:: po._config.mixin._config,

View File

@@ -1,25 +1,20 @@
local defaults = {
local defaults = self,
// Convention: Top-level fields related to CRDs are public, other fields are hidden
// If there is no CRD for the component, everything is hidden in defaults.
name:: error 'must provide name',
namespace:: error 'must provide namespace',
namespace: error 'must provide namespace',
version: error 'must provide version',
image: error 'must provide image',
resources: {
requests: { memory: '400Mi' },
},
//TODO(paulfantom): remove alertmanagerName after release-0.10 and convert to plain 'alerting' object.
alertmanagerName:: '',
alerting: {},
namespaces:: ['default', 'kube-system', defaults.namespace],
name: error 'must provide name',
alertmanagerName: error 'must provide alertmanagerName',
namespaces: ['default', 'kube-system', defaults.namespace],
replicas: 2,
externalLabels: {},
enableFeatures: [],
ruleSelector: {},
commonLabels:: {
'app.kubernetes.io/name': 'prometheus',
'app.kubernetes.io/instance': defaults.name,
'app.kubernetes.io/version': defaults.version,
'app.kubernetes.io/component': 'prometheus',
'app.kubernetes.io/part-of': 'kube-prometheus',
@@ -28,28 +23,18 @@ local defaults = {
[labelName]: defaults.commonLabels[labelName]
for labelName in std.objectFields(defaults.commonLabels)
if !std.setMember(labelName, ['app.kubernetes.io/version'])
},
mixin:: {
} + { prometheus: defaults.name },
ruleSelector: {},
mixin: {
ruleLabels: {},
_config: {
prometheusSelector: 'job="prometheus-' + defaults.name + '",namespace="' + defaults.namespace + '"',
prometheusName: '{{$labels.namespace}}/{{$labels.pod}}',
// TODO: remove `thanosSelector` after 0.10.0 release.
thanosSelector: '',
thanos: {
targetGroups: {
namespace: defaults.namespace,
},
sidecar: {
selector: 'job="thanos-sidecar"',
thanosPrometheusCommonDimensions: 'namespace, pod',
},
},
thanosSelector: 'job="thanos-sidecar"',
runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/prometheus/%s',
},
},
thanos: null,
reloaderPort:: 8080,
};
@@ -59,11 +44,6 @@ function(params) {
// Safety check
assert std.isObject(p._config.resources),
assert std.isObject(p._config.mixin._config),
_metadata:: {
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
mixin::
(import 'github.com/prometheus/prometheus/documentation/prometheus-mixin/mixin.libsonnet') +
@@ -75,17 +55,20 @@ function(params) {
(import 'github.com/thanos-io/thanos/mixin/alerts/sidecar.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') + {
_config+:: p._config.mixin._config,
targetGroups+: p._config.mixin._config.thanos.targetGroups,
// TODO: remove `_config.thanosSelector` after 0.10.0 release.
sidecar+: { selector: p._config.mixin._config.thanosSelector } + p._config.mixin._config.thanos.sidecar,
targetGroups: {},
sidecar: {
selector: p._config.mixin._config.thanosSelector,
dimensions: std.join(', ', ['job', 'instance']),
},
},
prometheusRule: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: p._metadata {
labels+: p._config.mixin.ruleLabels,
name: p._metadata.name + '-prometheus-rules',
metadata: {
labels: p._config.commonLabels + p._config.mixin.ruleLabels,
name: 'prometheus-' + p._config.name + '-prometheus-rules',
namespace: p._config.namespace,
},
spec: {
local r = if std.objectHasAll(p.mixin, 'prometheusRules') then p.mixin.prometheusRules.groups else [],
@@ -97,24 +80,31 @@ function(params) {
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
},
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
labels: { prometheus: p._config.name } + p._config.commonLabels,
},
spec: {
ports: [
{ name: 'web', targetPort: 'web', port: 9090 },
{ name: 'reloader-web', port: p._config.reloaderPort, targetPort: 'reloader-web' },
] +
(
if p._config.thanos != null then
[{ name: 'grpc', port: 10901, targetPort: 10901 }]
else []
),
selector: p._config.selectorLabels,
selector: { app: 'prometheus' } + p._config.selectorLabels,
sessionAffinity: 'ClientIP',
},
},
@@ -123,17 +113,19 @@ function(params) {
local newSpecificRoleBinding(namespace) = {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: p._metadata {
metadata: {
name: 'prometheus-' + p._config.name,
namespace: namespace,
labels: p._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: p._metadata.name,
name: 'prometheus-' + p._config.name,
},
subjects: [{
kind: 'ServiceAccount',
name: p.serviceAccount.metadata.name,
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
}],
};
@@ -146,7 +138,10 @@ function(params) {
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
labels: p._config.commonLabels,
},
rules: [
{
apiGroups: [''],
@@ -163,8 +158,10 @@ function(params) {
roleConfig: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: p._metadata {
name: p._metadata.name + '-config',
metadata: {
name: 'prometheus-' + p._config.name + '-config',
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
rules: [{
apiGroups: [''],
@@ -176,17 +173,19 @@ function(params) {
roleBindingConfig: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: p._metadata {
name: p._metadata.name + '-config',
metadata: {
name: 'prometheus-' + p._config.name + '-config',
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: p._metadata.name + '-config',
name: 'prometheus-' + p._config.name + '-config',
},
subjects: [{
kind: 'ServiceAccount',
name: p._metadata.name,
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
}],
},
@@ -194,15 +193,18 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
labels: p._config.commonLabels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: p._metadata.name,
name: 'prometheus-' + p._config.name,
},
subjects: [{
kind: 'ServiceAccount',
name: p._metadata.name,
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
}],
},
@@ -211,8 +213,10 @@ function(params) {
local newSpecificRole(namespace) = {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: p._metadata {
metadata: {
name: 'prometheus-' + p._config.name,
namespace: namespace,
labels: p._config.commonLabels,
},
rules: [
{
@@ -239,13 +243,19 @@ function(params) {
},
[if (defaults + params).replicas > 1 then 'podDisruptionBudget']: {
apiVersion: 'policy/v1',
apiVersion: 'policy/v1beta1',
kind: 'PodDisruptionBudget',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
spec: {
minAvailable: 1,
selector: {
matchLabels: p._config.selectorLabels,
matchLabels: {
prometheus: p._config.name,
} + p._config.selectorLabels,
},
},
},
@@ -253,19 +263,21 @@ function(params) {
prometheus: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Prometheus',
metadata: p._metadata {
metadata: {
name: p._config.name,
namespace: p._config.namespace,
labels: { prometheus: p._config.name } + p._config.commonLabels,
},
spec: {
replicas: p._config.replicas,
version: p._config.version,
image: p._config.image,
podMetadata: {
labels: p.prometheus.metadata.labels,
labels: p._config.commonLabels,
},
externalLabels: p._config.externalLabels,
enableFeatures: p._config.enableFeatures,
serviceAccountName: p.serviceAccount.metadata.name,
serviceAccountName: 'prometheus-' + p._config.name,
podMonitorSelector: {},
podMonitorNamespaceSelector: {},
probeSelector: {},
@@ -276,7 +288,7 @@ function(params) {
serviceMonitorNamespaceSelector: {},
nodeSelector: { 'kubernetes.io/os': 'linux' },
resources: p._config.resources,
alerting: if p._config.alerting != {} then p._config.alerting else {
alerting: {
alertmanagers: [{
namespace: p._config.namespace,
name: 'alertmanager-' + p._config.alertmanagerName,
@@ -296,15 +308,19 @@ function(params) {
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: p._metadata,
metadata: {
name: 'prometheus-' + p._config.name,
namespace: p._config.namespace,
labels: p._config.commonLabels,
},
spec: {
selector: {
matchLabels: p._config.selectorLabels,
},
endpoints: [
{ port: 'web', interval: '30s' },
{ port: 'reloader-web', interval: '30s' },
],
endpoints: [{
port: 'web',
interval: '30s',
}],
},
},
@@ -312,9 +328,10 @@ function(params) {
[if std.objectHas(params, 'thanos') && params.thanos != null then 'prometheusRuleThanosSidecar']: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: p._metadata {
labels+: p._config.mixin.ruleLabels,
name: p._metadata.name + '-thanos-sidecar-rules',
metadata: {
labels: p._config.commonLabels + p._config.mixin.ruleLabels,
name: 'prometheus-' + p._config.name + '-thanos-sidecar-rules',
namespace: p._config.namespace,
},
spec: {
local r = if std.objectHasAll(p.mixinThanos, 'prometheusRules') then p.mixinThanos.prometheusRules.groups else [],
@@ -327,9 +344,11 @@ function(params) {
[if std.objectHas(params, 'thanos') && params.thanos != null then 'serviceThanosSidecar']: {
apiVersion: 'v1',
kind: 'Service',
metadata+: p._metadata {
name: p._metadata.name + '-thanos-sidecar',
labels+: {
metadata+: {
name: 'prometheus-' + p._config.name + '-thanos-sidecar',
namespace: p._config.namespace,
labels+: p._config.commonLabels {
prometheus: p._config.name,
'app.kubernetes.io/component': 'thanos-sidecar',
},
},
@@ -339,6 +358,7 @@ function(params) {
{ name: 'http', port: 10902, targetPort: 10902 },
],
selector: p._config.selectorLabels {
prometheus: p._config.name,
'app.kubernetes.io/component': 'prometheus',
},
clusterIP: 'None',
@@ -349,9 +369,11 @@ function(params) {
[if std.objectHas(params, 'thanos') && params.thanos != null then 'serviceMonitorThanosSidecar']: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata+: p._metadata {
metadata+: {
name: 'thanos-sidecar',
labels+: {
namespace: p._config.namespace,
labels: p._config.commonLabels {
prometheus: p._config.name,
'app.kubernetes.io/component': 'thanos-sidecar',
},
},
@@ -359,6 +381,7 @@ function(params) {
jobLabel: 'app.kubernetes.io/component',
selector: {
matchLabels: {
prometheus: p._config.name,
'app.kubernetes.io/component': 'thanos-sidecar',
},
},

@@ -8,7 +8,7 @@
"subdir": "grafana"
}
},
"version": "199e363523104ff8b3a12483a4e3eca86372b078"
"version": "90f38916f1f8a310a715d18e36f787f84df4ddf5"
},
{
"source": {
@@ -26,7 +26,7 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "release-0.53"
"version": "release-0.50"
},
{
"source": {
@@ -35,7 +35,7 @@
"subdir": "jsonnet/mixin"
}
},
"version": "release-0.53",
"version": "release-0.50",
"name": "prometheus-operator-mixin"
},
{
@@ -45,7 +45,7 @@
"subdir": ""
}
},
"version": "release-0.10"
"version": "release-0.9"
},
{
"source": {
@@ -54,7 +54,7 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "release-2.3"
"version": "release-2.1"
},
{
"source": {
@@ -63,7 +63,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "release-2.3"
"version": "release-2.1"
},
{
"source": {
@@ -72,7 +72,7 @@
"subdir": "docs/node-mixin"
}
},
"version": "release-1.3"
"version": "832909dd257eb368cf83363ffcae3ab84cb4bcb1"
},
{
"source": {
@@ -81,7 +81,7 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "release-2.32",
"version": "751ca03faddc9c64089c41d0da370a3a0b477742",
"name": "prometheus"
},
{
@@ -91,7 +91,7 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "release-0.23",
"version": "b408b522bc653d014e53035e59fa394cc1edd762",
"name": "alertmanager"
},
{
@@ -101,7 +101,7 @@
"subdir": "mixin"
}
},
"version": "release-0.23",
"version": "release-0.22",
"name": "thanos-mixin"
}
],

@@ -39,7 +39,7 @@ local utils = import './lib/utils.libsonnet';
images: {
alertmanager: 'quay.io/prometheus/alertmanager:v' + $.values.common.versions.alertmanager,
blackboxExporter: 'quay.io/prometheus/blackbox-exporter:v' + $.values.common.versions.blackboxExporter,
grafana: 'grafana/grafana:' + $.values.common.versions.grafana,
grafana: 'grafana/grafana:v' + $.values.common.versions.grafana,
kubeStateMetrics: 'k8s.gcr.io/kube-state-metrics/kube-state-metrics:v' + $.values.common.versions.kubeStateMetrics,
nodeExporter: 'quay.io/prometheus/node-exporter:v' + $.values.common.versions.nodeExporter,
prometheus: 'quay.io/prometheus/prometheus:v' + $.values.common.versions.prometheus,
@@ -91,21 +91,14 @@ local utils = import './lib/utils.libsonnet';
version: $.values.common.versions.prometheus,
image: $.values.common.images.prometheus,
name: 'k8s',
alerting: {
alertmanagers: [{
namespace: $.values.common.namespace,
name: 'alertmanager-' + $.values.alertmanager.name,
port: $.alertmanager.service.spec.ports[0].name,
apiVersion: 'v2',
}],
},
alertmanagerName: $.values.alertmanager.name,
mixin+: { ruleLabels: $.values.common.ruleLabels },
},
prometheusAdapter: {
namespace: $.values.common.namespace,
version: $.values.common.versions.prometheusAdapter,
image: $.values.common.images.prometheusAdapter,
prometheusURL: 'http://prometheus-' + $.values.prometheus.name + '.' + $.values.prometheus.namespace + '.svc:9090/',
prometheusURL: 'http://prometheus-' + $.values.prometheus.name + '.' + $.values.common.namespace + '.svc.cluster.local:9090/',
rangeIntervals+: {
kubelet: utils.rangeInterval($.kubernetesControlPlane.serviceMonitorKubelet.spec.endpoints[0].interval),
nodeExporter: utils.rangeInterval($.nodeExporter.serviceMonitor.spec.endpoints[0].interval),

@@ -1,3 +1,3 @@
# Adding a new platform specific configuration
Adding a new platform specific configuration requires to update the [customization example](../../../docs/customizations/platform-specific.md#running-kube-prometheus-on-specific-platforms) and the [platforms.libsonnet](platforms.libsonnet) file by adding the platform to the list of existing ones. This allow the new platform to be discoverable and easily configurable by the users.
Adding a new platform specific configuration requires to update the [README](../../../README.md#cluster-creation-tools) and the [platforms.jsonnet](./platform.jsonnet) file by adding the platform to the list of existing ones. This allow the new platform to be discoverable and easily configurable by the users.
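For orientation, a minimal jsonnet sketch of what registering an additional platform in a platforms list file of this kind might look like; the file layout and the names `myplatform` / `./myplatform.libsonnet` are illustrative assumptions, not taken from this diff:

// Hypothetical sketch only -- illustrative names, not the actual platforms file contents.
{
  // ...existing platform entries, for example:
  kubeadm: (import './kubeadm.libsonnet'),
  // new, hypothetical entry so the platform becomes discoverable and selectable:
  myplatform: (import './myplatform.libsonnet'),
}

A user would then typically select it from their own jsonnet along the lines of `values+:: { common+: { platform: 'myplatform' } }` (also an assumption about the surrounding API, not confirmed by this diff).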

@@ -1,12 +1,12 @@
{
"alertmanager": "0.23.0",
"alertmanager": "0.22.2",
"blackboxExporter": "0.19.0",
"grafana": "8.3.3",
"kubeStateMetrics": "2.3.0",
"nodeExporter": "1.3.1",
"prometheus": "2.32.1",
"prometheusAdapter": "0.9.1",
"prometheusOperator": "0.53.1",
"grafana": "8.1.1",
"kubeStateMetrics": "2.1.1",
"nodeExporter": "1.2.2",
"prometheus": "2.29.1",
"prometheusAdapter": "0.9.0",
"prometheusOperator": "0.49.0",
"kubeRbacProxy": "0.11.0",
"configmapReload": "0.5.0"
}

@@ -8,8 +8,8 @@
"subdir": "grafana"
}
},
"version": "199e363523104ff8b3a12483a4e3eca86372b078",
"sum": "/jDHzVAjHB4AOLkJHw1GyATX5ogZ1iMdcJXZAgaG3+g="
"version": "90f38916f1f8a310a715d18e36f787f84df4ddf5",
"sum": "0kZ1pnuIirDtbg6F9at5+NQOwKNONIGEPq0eECzvRkI="
},
{
"source": {
@@ -18,7 +18,7 @@
"subdir": "contrib/mixin"
}
},
"version": "73080a716634f45d50d0593e0454ed3206a52f5b",
"version": "e8732fb5f35d4f5229c983fea478ed13b11d729e",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
},
{
@@ -38,8 +38,8 @@
"subdir": "grafana-builder"
}
},
"version": "264a5c2078c5930af57fe2d107eff83ab63553af",
"sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
"version": "2ed138b205717af721af57b572bc7cd63bda62fd",
"sum": "U34Nd1ViO2LZ3D8IzygPPRfUcy6zOgCnTMVHZ+9O/QE="
},
{
"source": {
@@ -48,8 +48,8 @@
"subdir": ""
}
},
"version": "b538a10c89508f8d12885680cca72a134d3127f5",
"sum": "GLt5T2k4RKg36Gfcaf9qlTfVumDitqotVD0ipz/bPJ4="
"version": "1163ea85e45e1f7edf6d4f83758d44c6fef1f2fa",
"sum": "4H2pzHd6A47rQIZcQ3B0o+nFMeNgLE9dGYJv7ZP7m2s="
},
{
"source": {
@@ -58,7 +58,7 @@
"subdir": "lib/promgrafonnet"
}
},
"version": "fd913499e956da06f520c3784c59573ee552b152",
"version": "06d00e40b43e4e618afbebe8e453b5650c659015",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
},
{
@@ -68,8 +68,8 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
"sum": "U1wzIpTAtOvC1yj43Y8PfvT0JfvnAcMfNH12Wi+ab0Y="
"version": "d60e6f7ba1719045edc0f60857faadeb87280421",
"sum": "S5qI+PJUdNeYOv76jH5nxwYS9N6U7CRxvyuB1wI4cTE="
},
{
"source": {
@@ -78,7 +78,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
"version": "d60e6f7ba1719045edc0f60857faadeb87280421",
"sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
},
{
@@ -88,8 +88,8 @@
"subdir": "jsonnet/mixin"
}
},
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
"sum": "qZ4WgiweaE6eeKtFK60QUjLO8sf2L9Q8fgafWvDcyfY=",
"version": "83fe36566f4e0894eb5ffcd2638a0f039a17bdeb",
"sum": "6reUygVmQrLEWQzTKcH8ceDbvM+2ztK3z2VBR2K2l+U=",
"name": "prometheus-operator-mixin"
},
{
@@ -99,8 +99,8 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
"sum": "yjdwZ+5UXL42EavJleAJmd8Ou6MSDfExvlKAxFCxXVE="
"version": "83fe36566f4e0894eb5ffcd2638a0f039a17bdeb",
"sum": "J1G++A8hrtr3+OZQMmcNeb1w/C30bXqqwpwHL/Xhsd4="
},
{
"source": {
@@ -109,7 +109,7 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "16fa045db47d68a09a102c7b80b8899c1f57c153",
"version": "b408b522bc653d014e53035e59fa394cc1edd762",
"sum": "pep+dHzfIjh2SU5pEkwilMCAT/NoL6YYflV4x8cr7vU=",
"name": "alertmanager"
},
@@ -120,8 +120,8 @@
"subdir": "docs/node-mixin"
}
},
"version": "a2321e7b940ddcff26873612bccdf7cd4c42b6b6",
"sum": "MlWDAKGZ+JArozRKdKEvewHeWn8j2DNBzesJfLVd0dk="
"version": "832909dd257eb368cf83363ffcae3ab84cb4bcb1",
"sum": "MmxGhE2PJ1a52mk2x7vDpMT2at4Jglbud/rK74CB5i0="
},
{
"source": {
@@ -130,8 +130,8 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "41f1a8125e664985dd30674e5bdf6b683eff5d32",
"sum": "ZjQoYhvgKwJNkg+h+m9lW3SYjnjv5Yx5btEipLhru88=",
"version": "751ca03faddc9c64089c41d0da370a3a0b477742",
"sum": "AS8WYFi/z10BZSF6DFkKBscjB32XDMM7iIso7CO/FyI=",
"name": "prometheus"
},
{
@@ -141,8 +141,8 @@
"subdir": "mixin"
}
},
"version": "632032712f12eea0015aaef24ee1e14f38ef3e55",
"sum": "X+060DnePPeN/87fgj0SrfxVitywTk8hZA9V4nHxl1g=",
"version": "ff363498fc95cfe17de894d7237bcf38bdd0bc36",
"sum": "cajthvLKDjYgYHCKQU2g/pTMRkxcbuJEvTnCyJOihl8=",
"name": "thanos-mixin"
},
{

@@ -8,14 +8,13 @@ resources:
- ./manifests/alertmanager-service.yaml
- ./manifests/alertmanager-serviceAccount.yaml
- ./manifests/alertmanager-serviceMonitor.yaml
- ./manifests/blackboxExporter-clusterRole.yaml
- ./manifests/blackboxExporter-clusterRoleBinding.yaml
- ./manifests/blackboxExporter-configuration.yaml
- ./manifests/blackboxExporter-deployment.yaml
- ./manifests/blackboxExporter-service.yaml
- ./manifests/blackboxExporter-serviceAccount.yaml
- ./manifests/blackboxExporter-serviceMonitor.yaml
- ./manifests/grafana-config.yaml
- ./manifests/blackbox-exporter-clusterRole.yaml
- ./manifests/blackbox-exporter-clusterRoleBinding.yaml
- ./manifests/blackbox-exporter-configuration.yaml
- ./manifests/blackbox-exporter-deployment.yaml
- ./manifests/blackbox-exporter-service.yaml
- ./manifests/blackbox-exporter-serviceAccount.yaml
- ./manifests/blackbox-exporter-serviceMonitor.yaml
- ./manifests/grafana-dashboardDatasources.yaml
- ./manifests/grafana-dashboardDefinitions.yaml
- ./manifests/grafana-dashboardSources.yaml
@@ -23,29 +22,44 @@ resources:
- ./manifests/grafana-service.yaml
- ./manifests/grafana-serviceAccount.yaml
- ./manifests/grafana-serviceMonitor.yaml
- ./manifests/kubePrometheus-prometheusRule.yaml
- ./manifests/kubeStateMetrics-clusterRole.yaml
- ./manifests/kubeStateMetrics-clusterRoleBinding.yaml
- ./manifests/kubeStateMetrics-deployment.yaml
- ./manifests/kubeStateMetrics-prometheusRule.yaml
- ./manifests/kubeStateMetrics-service.yaml
- ./manifests/kubeStateMetrics-serviceAccount.yaml
- ./manifests/kubeStateMetrics-serviceMonitor.yaml
- ./manifests/kubernetesControlPlane-prometheusRule.yaml
- ./manifests/kubernetesControlPlane-serviceMonitorApiserver.yaml
- ./manifests/kubernetesControlPlane-serviceMonitorCoreDNS.yaml
- ./manifests/kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
- ./manifests/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
- ./manifests/kubernetesControlPlane-serviceMonitorKubelet.yaml
- ./manifests/nodeExporter-clusterRole.yaml
- ./manifests/nodeExporter-clusterRoleBinding.yaml
- ./manifests/nodeExporter-daemonset.yaml
- ./manifests/nodeExporter-prometheusRule.yaml
- ./manifests/nodeExporter-service.yaml
- ./manifests/nodeExporter-serviceAccount.yaml
- ./manifests/nodeExporter-serviceMonitor.yaml
- ./manifests/kube-prometheus-prometheusRule.yaml
- ./manifests/kube-state-metrics-clusterRole.yaml
- ./manifests/kube-state-metrics-clusterRoleBinding.yaml
- ./manifests/kube-state-metrics-deployment.yaml
- ./manifests/kube-state-metrics-prometheusRule.yaml
- ./manifests/kube-state-metrics-service.yaml
- ./manifests/kube-state-metrics-serviceAccount.yaml
- ./manifests/kube-state-metrics-serviceMonitor.yaml
- ./manifests/kubernetes-prometheusRule.yaml
- ./manifests/kubernetes-serviceMonitorApiserver.yaml
- ./manifests/kubernetes-serviceMonitorCoreDNS.yaml
- ./manifests/kubernetes-serviceMonitorKubeControllerManager.yaml
- ./manifests/kubernetes-serviceMonitorKubeScheduler.yaml
- ./manifests/kubernetes-serviceMonitorKubelet.yaml
- ./manifests/node-exporter-clusterRole.yaml
- ./manifests/node-exporter-clusterRoleBinding.yaml
- ./manifests/node-exporter-daemonset.yaml
- ./manifests/node-exporter-prometheusRule.yaml
- ./manifests/node-exporter-service.yaml
- ./manifests/node-exporter-serviceAccount.yaml
- ./manifests/node-exporter-serviceMonitor.yaml
- ./manifests/prometheus-adapter-apiService.yaml
- ./manifests/prometheus-adapter-clusterRole.yaml
- ./manifests/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
- ./manifests/prometheus-adapter-clusterRoleBinding.yaml
- ./manifests/prometheus-adapter-clusterRoleBindingDelegator.yaml
- ./manifests/prometheus-adapter-clusterRoleServerResources.yaml
- ./manifests/prometheus-adapter-configMap.yaml
- ./manifests/prometheus-adapter-deployment.yaml
- ./manifests/prometheus-adapter-podDisruptionBudget.yaml
- ./manifests/prometheus-adapter-roleBindingAuthReader.yaml
- ./manifests/prometheus-adapter-service.yaml
- ./manifests/prometheus-adapter-serviceAccount.yaml
- ./manifests/prometheus-adapter-serviceMonitor.yaml
- ./manifests/prometheus-clusterRole.yaml
- ./manifests/prometheus-clusterRoleBinding.yaml
- ./manifests/prometheus-operator-prometheusRule.yaml
- ./manifests/prometheus-operator-serviceMonitor.yaml
- ./manifests/prometheus-podDisruptionBudget.yaml
- ./manifests/prometheus-prometheus.yaml
- ./manifests/prometheus-prometheusRule.yaml
@@ -56,32 +70,17 @@ resources:
- ./manifests/prometheus-service.yaml
- ./manifests/prometheus-serviceAccount.yaml
- ./manifests/prometheus-serviceMonitor.yaml
- ./manifests/prometheusAdapter-apiService.yaml
- ./manifests/prometheusAdapter-clusterRole.yaml
- ./manifests/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
- ./manifests/prometheusAdapter-clusterRoleBinding.yaml
- ./manifests/prometheusAdapter-clusterRoleBindingDelegator.yaml
- ./manifests/prometheusAdapter-clusterRoleServerResources.yaml
- ./manifests/prometheusAdapter-configMap.yaml
- ./manifests/prometheusAdapter-deployment.yaml
- ./manifests/prometheusAdapter-podDisruptionBudget.yaml
- ./manifests/prometheusAdapter-roleBindingAuthReader.yaml
- ./manifests/prometheusAdapter-service.yaml
- ./manifests/prometheusAdapter-serviceAccount.yaml
- ./manifests/prometheusAdapter-serviceMonitor.yaml
- ./manifests/prometheusOperator-clusterRole.yaml
- ./manifests/prometheusOperator-clusterRoleBinding.yaml
- ./manifests/prometheusOperator-deployment.yaml
- ./manifests/prometheusOperator-prometheusRule.yaml
- ./manifests/prometheusOperator-service.yaml
- ./manifests/prometheusOperator-serviceAccount.yaml
- ./manifests/prometheusOperator-serviceMonitor.yaml
- ./manifests/setup/0alertmanagerConfigCustomResourceDefinition.yaml
- ./manifests/setup/0alertmanagerCustomResourceDefinition.yaml
- ./manifests/setup/0podmonitorCustomResourceDefinition.yaml
- ./manifests/setup/0probeCustomResourceDefinition.yaml
- ./manifests/setup/0prometheusCustomResourceDefinition.yaml
- ./manifests/setup/0prometheusruleCustomResourceDefinition.yaml
- ./manifests/setup/0servicemonitorCustomResourceDefinition.yaml
- ./manifests/setup/0thanosrulerCustomResourceDefinition.yaml
- ./manifests/setup/namespace.yaml
- ./manifests/setup/0namespace-namespace.yaml
- ./manifests/setup/prometheus-operator-0alertmanagerConfigCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0podmonitorCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0probeCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0prometheusCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0thanosrulerCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-clusterRole.yaml
- ./manifests/setup/prometheus-operator-clusterRoleBinding.yaml
- ./manifests/setup/prometheus-operator-deployment.yaml
- ./manifests/setup/prometheus-operator-service.yaml
- ./manifests/setup/prometheus-operator-serviceAccount.yaml

@@ -2,24 +2,23 @@ apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
labels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
name: main
namespace: monitoring
spec:
image: quay.io/prometheus/alertmanager:v0.23.0
image: quay.io/prometheus/alertmanager:v0.22.2
nodeSelector:
kubernetes.io/os: linux
podMetadata:
labels:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
replicas: 3
resources:
limits:
@@ -33,4 +32,4 @@ spec:
runAsNonRoot: true
runAsUser: 1000
serviceAccountName: alertmanager-main
version: 0.23.0
version: 0.22.2

@@ -1,19 +1,18 @@
apiVersion: policy/v1
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
labels:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
name: alertmanager-main
namespace: monitoring
spec:
maxUnavailable: 1
selector:
matchLabels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus

@@ -3,10 +3,9 @@ kind: PrometheusRule
metadata:
labels:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
prometheus: k8s
role: alert-rules
name: alertmanager-main-rules
@@ -17,8 +16,7 @@ spec:
rules:
- alert: AlertmanagerFailedReload
annotations:
description: Configuration has failed to load for {{ $labels.namespace }}/{{
$labels.pod}}.
description: Configuration has failed to load for {{ $labels.namespace }}/{{ $labels.pod}}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedreload
summary: Reloading an Alertmanager configuration has failed.
expr: |
@@ -30,11 +28,9 @@ spec:
severity: critical
- alert: AlertmanagerMembersInconsistent
annotations:
description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only
found {{ $value }} members of the {{$labels.job}} cluster.
description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only found {{ $value }} members of the {{$labels.job}} cluster.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagermembersinconsistent
summary: A member of an Alertmanager cluster has not found all other cluster
members.
summary: A member of an Alertmanager cluster has not found all other cluster members.
expr: |
# Without max_over_time, failed scrapes could create false negatives, see
# https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
@@ -46,9 +42,7 @@ spec:
severity: critical
- alert: AlertmanagerFailedToSendAlerts
annotations:
description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed
to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration
}}.
description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedtosendalerts
summary: An Alertmanager instance failed to send notifications.
expr: |
@@ -63,12 +57,9 @@ spec:
severity: warning
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{ $labels.integration
}} sent from any instance in the {{$labels.job}} cluster is {{ $value |
humanizePercentage }}.
description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
summary: All Alertmanager instances in a cluster failed to send notifications
to a critical integration.
summary: All Alertmanager instances in a cluster failed to send notifications to a critical integration.
expr: |
min by (namespace,service, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
@@ -81,12 +72,9 @@ spec:
severity: critical
- alert: AlertmanagerClusterFailedToSendAlerts
annotations:
description: The minimum notification failure rate to {{ $labels.integration
}} sent from any instance in the {{$labels.job}} cluster is {{ $value |
humanizePercentage }}.
description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
summary: All Alertmanager instances in a cluster failed to send notifications
to a non-critical integration.
summary: All Alertmanager instances in a cluster failed to send notifications to a non-critical integration.
expr: |
min by (namespace,service, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
@@ -99,8 +87,7 @@ spec:
severity: warning
- alert: AlertmanagerConfigInconsistent
annotations:
description: Alertmanager instances within the {{$labels.job}} cluster have
different configurations.
description: Alertmanager instances within the {{$labels.job}} cluster have different configurations.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerconfiginconsistent
summary: Alertmanager instances within the same cluster have different configurations.
expr: |
@@ -113,12 +100,9 @@ spec:
severity: critical
- alert: AlertmanagerClusterDown
annotations:
description: '{{ $value | humanizePercentage }} of Alertmanager instances
within the {{$labels.job}} cluster have been up for less than half of the
last 5m.'
description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have been up for less than half of the last 5m.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterdown
summary: Half or more of the Alertmanager instances within the same cluster
are down.
summary: Half or more of the Alertmanager instances within the same cluster are down.
expr: |
(
count by (namespace,service) (
@@ -135,12 +119,9 @@ spec:
severity: critical
- alert: AlertmanagerClusterCrashlooping
annotations:
description: '{{ $value | humanizePercentage }} of Alertmanager instances
within the {{$labels.job}} cluster have restarted at least 5 times in the
last 10m.'
description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have restarted at least 5 times in the last 10m.'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclustercrashlooping
summary: Half or more of the Alertmanager instances within the same cluster
are crashlooping.
summary: Half or more of the Alertmanager instances within the same cluster are crashlooping.
expr: |
(
count by (namespace,service) (

@@ -2,11 +2,11 @@ apiVersion: v1
kind: Secret
metadata:
labels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
name: alertmanager-main
namespace: monitoring
stringData:
@@ -17,17 +17,17 @@ stringData:
- "equal":
- "namespace"
- "alertname"
"source_matchers":
- "severity = critical"
"target_matchers":
- "severity =~ warning|info"
"source_match":
"severity": "critical"
"target_match_re":
"severity": "warning|info"
- "equal":
- "namespace"
- "alertname"
"source_matchers":
- "severity = warning"
"target_matchers":
- "severity = info"
"source_match":
"severity": "warning"
"target_match_re":
"severity": "info"
"receivers":
- "name": "Default"
- "name": "Watchdog"
@@ -40,10 +40,10 @@ stringData:
"receiver": "Default"
"repeat_interval": "12h"
"routes":
- "matchers":
- "alertname = Watchdog"
- "match":
"alertname": "Watchdog"
"receiver": "Watchdog"
- "matchers":
- "severity = critical"
- "match":
"severity": "critical"
"receiver": "Critical"
type: Opaque

@@ -2,11 +2,11 @@ apiVersion: v1
kind: Service
metadata:
labels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
name: alertmanager-main
namespace: monitoring
spec:
@@ -14,12 +14,10 @@ spec:
- name: web
port: 9093
targetPort: web
- name: reloader-web
port: 8080
targetPort: reloader-web
selector:
alertmanager: main
app: alertmanager
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
sessionAffinity: ClientIP

@@ -2,10 +2,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
app.kubernetes.io/version: 0.22.2
name: alertmanager-main
namespace: monitoring

@@ -3,21 +3,18 @@ kind: ServiceMonitor
metadata:
labels:
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.23.0
name: alertmanager-main
app.kubernetes.io/version: 0.22.2
name: alertmanager
namespace: monitoring
spec:
endpoints:
- interval: 30s
port: web
- interval: 30s
port: reloader-web
selector:
matchLabels:
alertmanager: main
app.kubernetes.io/component: alert-router
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus

@@ -1,13 +1,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.19.0
name: blackbox-exporter
namespace: monitoring
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: blackbox-exporter
namespace: monitoring

@@ -1,10 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.19.0
name: blackbox-exporter
namespace: monitoring

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 8.3.3
name: grafana-config
namespace: monitoring
stringData:
grafana.ini: |
[date_formats]
default_timezone = UTC
type: Opaque

Some files were not shown because too many files have changed in this diff.