Compare commits


1 Commits

Author SHA1 Message Date
Simon Pasquier
45f56819bc chore: fix versions action
Signed-off-by: Simon Pasquier <spasquie@redhat.com>
2023-08-16 15:47:44 +02:00
137 changed files with 33321 additions and 54031 deletions

1
.github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @prometheus-operator/kube-prometheus-reviewers


@@ -3,8 +3,8 @@ on:
- push
- pull_request
env:
golang-version: '1.22'
kind-version: 'v0.24.0'
golang-version: '1.19'
kind-version: 'v0.19.0'
jobs:
generate:
runs-on: ${{ matrix.os }}
@@ -15,10 +15,10 @@ jobs:
- ubuntu-latest
name: Generate
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make --always-make generate validate && git diff --exit-code
@@ -26,10 +26,10 @@ jobs:
runs-on: ubuntu-latest
name: Check Documentation formatting and links
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make check-docs
@@ -37,10 +37,10 @@ jobs:
runs-on: ubuntu-latest
name: Jsonnet linter
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make --always-make lint
@@ -48,10 +48,10 @@ jobs:
runs-on: ubuntu-latest
name: Jsonnet formatter
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make --always-make fmt && git diff --exit-code
@@ -59,10 +59,10 @@ jobs:
runs-on: ubuntu-latest
name: Unit tests
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make --always-make test
@@ -70,10 +70,10 @@ jobs:
runs-on: ubuntu-latest
name: Run security analysis on manifests
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- run: make --always-make kubescape
@@ -83,24 +83,22 @@ jobs:
strategy:
matrix:
kind-image:
- 'kindest/node:v1.31.0'
- 'kindest/node:v1.30.4'
- 'kindest/node:v1.29.8'
- 'kindest/node:v1.27.1'
- 'kindest/node:v1.26.4'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- name: Start kind cluster
uses: helm/kind-action@v1.10.0
- name: Start KinD
uses: engineerd/setup-kind@v0.5.0
with:
version: ${{ env.kind-version }}
node_image: ${{ matrix.kind-image }}
image: ${{ matrix.kind-image }}
wait: 10s # Without default CNI, control-plane doesn't get ready until Cilium is installed
config: .github/workflows/kind/config.yml
cluster_name: e2e
- name: Install kube-router for NetworkPolicy support
run: |
kubectl apply -f .github/workflows/kind/kube-router.yaml


@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
- uses: actions/stale@v8
with:
stale-issue-message: 'This issue has been automatically marked as stale because it has not had any activity in the last 60 days. Thank you for your contributions.'
close-issue-message: 'This issue was closed because it has not had any activity in the last 120 days. Please reopen if you feel this is still valid.'


@@ -5,22 +5,23 @@ on:
schedule:
- cron: '37 7 * * 1'
env:
golang-version: '1.22'
golang-version: '1.19'
jobs:
versions:
runs-on: ubuntu-latest
strategy:
matrix:
branch:
- 'release-0.9'
- 'release-0.10'
- 'release-0.11'
- 'release-0.12'
- 'release-0.13'
- 'main'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
- uses: actions/setup-go@v5
- uses: actions/setup-go@v4
with:
go-version: ${{ env.golang-version }}
- name: Upgrade versions
@@ -51,7 +52,7 @@ jobs:
git checkout -- jsonnetfile.lock.json;
fi
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
uses: peter-evans/create-pull-request@v5
with:
commit-message: "[bot] [${{ matrix.branch }}] Automated version update"
title: "[bot] [${{ matrix.branch }}] Automated version update"


@@ -6,11 +6,4 @@ validators:
type: "ignore"
# Ignore release links.
- regex: 'https:\/\/github\.com\/prometheus-operator\/kube-prometheus\/releases'
type: "ignore"
# Twitter changed their policy and now returns 403 if not authenticated. We can guarantee this link since we own the account.
- regex: 'https:\/\/twitter.com\/PromOperator'
type: "ignore"
# the www.weave.works domain returns 404 for many pages.
# Ignoring for now but we need remove the related content if it persists.
- regex: 'https:\/\/www.weave.works.*'
type: "ignore"
type: "ignore"


@@ -1,32 +1,3 @@
## release-0.14 / 2024-09-12
* [CHANGE] Prefer new form for `kube_node_status_capacity_pods` metric [#2269](https://github.com/prometheus-operator/kube-prometheus/pull/2269)
* [CHANGE] Add runAsGroup to all components [#2424](https://github.com/prometheus-operator/kube-prometheus/pull/2424)
* [FEATURE] Add support for ScrapeConfig [#2232](https://github.com/prometheus-operator/kube-prometheus/pull/2232)
* [FEATURE] Add Kubernetes components SLI metrics [#2496](https://github.com/prometheus-operator/kube-prometheus/pull/2496)
* [FEATURE] Add monitor and rules resources to user-facing roles add-on [#2238](https://github.com/prometheus-operator/kube-prometheus/pull/2238)
* [BUGFIX] Add thanos-sidecar metrics port to Prometheus Service and NetworkPolicy [#2330](https://github.com/prometheus-operator/kube-prometheus/pull/2330)
* [ENHANCEMENT] Add ability to inject Secrets into alertmanager [#2206](https://github.com/prometheus-operator/kube-prometheus/pull/2206)
* [ENHANCEMENT] Add securityContext items and Pod security labels [#2178](https://github.com/prometheus-operator/kube-prometheus/pull/2178)
## release-0.13 / 2023-08-31
* [CHANGE] Added a AKS platform to `platforms.libsonnet` [#1997](https://github.com/prometheus-operator/kube-prometheus/pull/1997)
* [CHANGE] Disable btrfs collector by default [#2074](https://github.com/prometheus-operator/kube-prometheus/pull/2074)
* [CHANGE] Enable Multi Cluster alerts by default [#2099](https://github.com/prometheus-operator/kube-prometheus/pull/2099)
* [FEATURE] Create dedicated Service to expose CoreDNS metric [#2107](https://github.com/prometheus-operator/kube-prometheus/pull/2107)
* [FEATURE] Add Windows support using Hostprocess instead of static_configs [#2048](https://github.com/prometheus-operator/kube-prometheus/pull/2048)
* [BUGFIX] Fix a compilation error when building the custom-metrics addon [#1996](https://github.com/prometheus-operator/kube-prometheus/pull/1996)
* [BUGFIX] Add `prometheus-adapter` in Prometheus's NetworkPolicy [#1982](https://github.com/prometheus-operator/kube-prometheus/pull/1982)
* [BUGFIX] Fix namespace specified in manifest non-namespaced resources [#2158](https://github.com/prometheus-operator/kube-prometheus/pull/2158)
* [BUGFIX] Override ServiceAccount, Role and ClusterRole names in RoleBinding and ClusterRoleBinding [#2135](https://github.com/prometheus-operator/kube-prometheus/pull/2135)
* [BUGFIX] Remove deprecated `--logtostderr` argument of prometheus-adapter [#2185](https://github.com/prometheus-operator/kube-prometheus/pull/2185)
* [BUGFIX] Fix alertmanager external config example [#1891](https://github.com/prometheus-operator/kube-prometheus/pull/1891)
* [ENHANCEMENT] Add startupProbe to prometheus-adapter [#2029](https://github.com/prometheus-operator/kube-prometheus/pull/2029)
* [ENHANCEMENT] Added configurable default values for kube-rbac-proxy in prometheus-operator, node-exporter and blackbox-exporter [#1987](https://github.com/prometheus-operator/kube-prometheus/pull/1987)
* [ENHANCEMENT] Modify control plane ServiceMonitors to be compatible with Argo [#2041](https://github.com/prometheus-operator/kube-prometheus/pull/2041)
* [ENHANCEMENT] Add md5 hash of the ConfigMap in Prometheus Adapter Deployment Annotations to force its recreation [#2195](https://github.com/prometheus-operator/kube-prometheus/pull/2195)
## release-0.12 / 2023-01-19
* [CHANGE] Updates Prometheus Adapater version to 0.10.0 [#1865](https://github.com/prometheus-operator/kube-prometheus/pull/1865)


@@ -20,7 +20,7 @@ Channel used for project developers discussions
**Discussion forum**: [GitHub discussions](https://github.com/prometheus-operator/kube-prometheus/discussions)
**Twitter**: [@PromOperator](https://twitter.com/PromOperator)
**Twitter**: [@PromOperator](https://twitter.com/promoperator)
**GitHub**: To file bugs and feature requests. For questions and discussions use the GitHub discussions. Generally,
the other community channels listed here are best suited to get support or discuss overarching topics.


@@ -54,16 +54,13 @@ update: $(JB_BIN)
$(JB_BIN) update
.PHONY: validate
validate: validate-1.29 validate-1.30 validate-1.31
validate: validate-1.23 validate-1.24
validate-1.29:
KUBE_VERSION=1.29.8 $(MAKE) kubeconform
validate-1.23:
KUBE_VERSION=1.23.6 $(MAKE) kubeconform
validate-1.30:
KUBE_VERSION=1.30.4 $(MAKE) kubeconform
validate-1.31:
KUBE_VERSION=1.31.0 $(MAKE) kubeconform
validate-1.24:
KUBE_VERSION=1.24.1 $(MAKE) kubeconform
.PHONY: kubeconform
kubeconform: crdschemas manifests $(KUBECONFORM_BIN)


@@ -16,7 +16,6 @@ Components included in this package:
* Highly available [Prometheus](https://prometheus.io/)
* Highly available [Alertmanager](https://github.com/prometheus/alertmanager)
* [Prometheus node-exporter](https://github.com/prometheus/node_exporter)
* [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter)
* [Prometheus Adapter for Kubernetes Metrics APIs](https://github.com/kubernetes-sigs/prometheus-adapter)
* [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)
* [Grafana](https://grafana.com/)
@@ -41,16 +40,18 @@ no effect, but is still deployed.
The following Kubernetes versions are supported and work as we test against these versions in their respective branches. But note that other versions might work!
| kube-prometheus stack | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 | Kubernetes 1.28 | Kubernetes 1.29 | Kubernetes 1.30 | Kubernetes 1.31 |
|--------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| [`release-0.11`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.11) | ✔ | ✔ | ✗ | x | x | x | x | x | x |
| [`release-0.12`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.12) | ✗ | ✔ | ✔ | x | x | x | x | x | x |
| [`release-0.13`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.13) | ✗ | ✗ | x | ✔ | ✔ | ✔ | x | x | x |
| [`release-0.14`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.14) | ✗ | ✗ | x | ✔ | ✔ | ✔ | ✔ | ✔ | |
| [`main`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | x | x | ✔ | | | ✔ | ✔ |
| kube-prometheus stack | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 |
|--------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| [`release-0.9`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.9) | ✔ | | | | | x | x |
| [`release-0.10`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.10) | ✗ | ✔ | ✔ | | | x | x |
| [`release-0.11`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.11) | ✗ | ✗ | ✔ | ✔ | | x | x |
| [`release-0.12`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.12) | ✗ | ✗ | | ✔ | ✔ | x | x |
| [`main`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | | | x | ✔ | ✔ |
## Quickstart
> Note: For versions before Kubernetes v1.21.z refer to the [Kubernetes compatibility matrix](#compatibility) in order to choose a compatible branch.
This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).
Though for a quickstart a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository in order to try the content out quickly. To try out the stack un-customized run:


@@ -1,12 +1,10 @@
# Release schedule
kube-prometheus will follow the Kubernetes release schedule.
For every new Kubernetes release, there will be a corresponding minor release of
kube-prometheus, although it may not be immediate.
We do not guarantee backports from the `main` branch to older release branches.
This differs from the previous release schedule, which was driven by OpenShift releases.
Kube-prometheus has a somehow predictable release schedule, releases were
historically cut in sync with OpenShift releases as per downstream needs. So
far there hasn't been any problem with this schedule since it is also in sync
with Kubernetes releases. So for every new Kubernetes release, there is a new
release of kube-prometheus, although it tends to happen later.
# How to cut a new release
@@ -20,9 +18,23 @@ We use [Semantic Versioning](http://semver.org/).
We maintain a separate branch for each minor release, named
`release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`.
The usual flow is to merge new features, changes and bug fixes into the `main` branch.
The decision to backport bugfixes into release branches is made on a case-by-case basis.
Maintaining the release branches for older minor releases is best-effort.
The usual flow is to merge new features and changes into the master branch and
to merge bug fixes into the latest release branch. Bug fixes are then merged
into master from the latest release branch. The master branch should always
contain all commits from the latest release branch.
If a bug fix got accidentally merged into master, cherry-pick commits have to be
created in the latest release branch, which then has to be merged back into
master. Try to avoid that situation.
Maintaining the release branches for older minor releases happens on a best
effort basis.
## Cut a release of kubernetes-mixins
kube-prometheus and kubernetes-mixins releases are tied, so before cutting the
release of kube-prometheus we should make sure that the same release of
kubernetes-mixins exists.
## Update components version
@@ -38,7 +50,7 @@ failed or because the main branch was already up-to-date.
## Update Kubernetes supported versions
The `main` branch of kube-prometheus should support the last 2 versions of
The main branch of kube-prometheus should support the last 2 versions of
Kubernetes. We need to make sure that the CI on the main branch is testing the
kube-prometheus configuration against both of these versions by updating the [CI
workflow](.github/workflows/ci.yaml) to include the latest kind version and the


@@ -1,46 +1,29 @@
---
weight: 300
toc: true
title: Access Dashboards
menu:
docs:
parent: kube
images: []
draft: false
---
# Access UIs
Prometheus, Grafana, and Alertmanager dashboards can be accessed quickly using `kubectl port-forward` after running the quickstart via the commands below.
Prometheus, Grafana, and Alertmanager dashboards can be accessed quickly using `kubectl port-forward` after running the quickstart via the commands below. Kubernetes 1.10 or later is required.
> Kubernetes 1.10 or later is required.
You can also learn how to [expose Prometheus/Alertmanager/Grafana via Ingress](customizations/exposing-prometheus-alertmanager-grafana-ingress.md)
> Note: There are instructions on how to route to these pods behind an ingress controller in the [Exposing Prometheus/Alermanager/Grafana via Ingress](customizations/exposing-prometheus-alertmanager-grafana-ingress.md) section.
## Prometheus
```shell
kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
$ kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
```
Open Prometheus on [http://localhost:9090](http://localhost:9090) in your browser.
Check out the [alerts](http://localhost:9090/alerts) and [rules](http://localhost:9090/rules) pages with the pre-configured rules and alerts!
This Prometheus is supposed to monitor your Kubernetes cluster and make sure to alert you if there's a problem with it.
For your own applications we recommend running one or more other instances.
Then access via [http://localhost:9090](http://localhost:9090)
## Grafana
```shell
kubectl --namespace monitoring port-forward svc/grafana 3000
$ kubectl --namespace monitoring port-forward svc/grafana 3000
```
Open Grafana on [localhost:3000](https://localhost:3000) in your browser.
You can login with the username `admin` and password `admin`.
Then access via [http://localhost:3000](http://localhost:3000) and use the default grafana user:password of `admin:admin`.
## Alertmanager
## Alert Manager
```shell
kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
$ kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
```
Open Alertmanager on [localhost:9093](http://localhost:9093) in your browser.
Then access via [http://localhost:9093](http://localhost:9093)


@@ -30,7 +30,7 @@ The `prometheus-operator` defines a `Probe` resource type that can be used to de
* `_config.namespace`: the namespace where the various generated resources (`ConfigMap`, `Deployment`, `Service`, `ServiceAccount` and `ServiceMonitor`) will reside. This does not affect where you can place `Probe` objects; that is determined by the configuration of the `Prometheus` resource. This option is shared with other `kube-prometheus` components; defaults to `default`.
* `_config.imageRepos.blackboxExporter`: the name of the blackbox exporter image to deploy. Defaults to `quay.io/prometheus/blackbox-exporter`.
* `_config.versions.blackboxExporter`: the tag of the blackbox exporter image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `ghcr.io/jimmidyson/configmap-reload`.
* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `jimmidyson/configmap-reload`.
* `_config.versions.configmapReloader`: the tag of the ConfigMap reloader image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.resources.blackbox-exporter.requests`: the requested resources; this is used for each container. Defaults to `10m` CPU and `20Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
* `_config.resources.blackbox-exporter.limits`: the resource limits; this is used for each container. Defaults to `20m` CPU and `40Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
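The hunk above documents the blackbox-exporter add-on's tunables in the older `_config` style. As an editorial illustration only (not part of this commit), a minimal overlay setting those documented fields might look like the sketch below; the entry point and field layout vary between kube-prometheus releases (this diff shows both the `_config` and the newer `values` conventions), so verify the exact names against the release you use.

```jsonnet
// Illustrative sketch only: override the blackbox-exporter options documented
// above, using the `_config` style that documentation describes. Newer
// releases layer equivalent settings under `values+::` instead.
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
      imageRepos+: {
        blackboxExporter: 'quay.io/prometheus/blackbox-exporter',
      },
      resources+: {
        'blackbox-exporter'+: {
          requests: { cpu: '10m', memory: '20Mi' },
          limits: { cpu: '20m', memory: '40Mi' },
        },
      },
    },
  };

// Emit only the blackbox-exporter manifests.
{
  ['blackbox-exporter-' + name]: kp.blackboxExporter[name]
  for name in std.objectFields(kp.blackboxExporter)
}
```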


@@ -39,12 +39,10 @@ Also, the applications provide external links to themselves in alerts and variou
```jsonnet
local kp =
(import 'kube-prometheus/main.libsonnet') +
(import 'kube-prometheus/kube-prometheus.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
_config+:: {
namespace: 'monitoring',
},
prometheus+:: {
prometheus+: {
@@ -97,7 +95,6 @@ local kp =
},
};
// Output a kubernetes List object with both ingresses (k8s-libsonnet)
k.core.v1.list.new([
kp.ingress['prometheus-k8s'],
kp.ingress['basic-auth-secret'],


@@ -155,7 +155,7 @@ $ kubectl apply --server-side -f manifests/setup
$ kubectl apply -f manifests/
```
> Note that due to some CRD size we are using kubectl server-side apply feature which is generally available since
> Note that due to some CRD size we are using kubeclt server-side apply feature which is generally available since
> kubernetes 1.22. If you are using previous kubernetes versions this feature may not be available and you would need to
> use `kubectl create` instead.


@@ -13,7 +13,7 @@ description: This guide will help you deploying kube-prometheus on Kubernetes ku
The [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) tool is linked by Kubernetes as the official way to deploy and manage self-hosted clusters. kubeadm does a lot of heavy lifting by automatically configuring your Kubernetes cluster with some common options. This guide is intended to show you how to deploy Prometheus, Prometheus Operator and Kube Prometheus to get you started monitoring your cluster that was deployed with kubeadm.
This guide assumes you have a basic understanding of how to use the functionality the Prometheus Operator implements. If you haven't yet, we recommend reading through the [getting started guide](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md) as well as the [alerting guide](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md).
This guide assumes you have a basic understanding of how to use the functionality the Prometheus Operator implements. If you haven't yet, we recommend reading through the [getting started guide](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md) as well as the [alerting guide](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/alerting.md).
## kubeadm Pre-requisites
@@ -78,12 +78,12 @@ Once you complete this guide you will monitor the following:
## Getting Up and Running Fast with Kube-Prometheus
To help get started more quickly with monitoring Kubernetes clusters, [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) was created. It is a collection of manifests including dashboards and alerting rules that can easily be deployed. It utilizes the Prometheus Operator and all the manifests demonstrated in this guide.
To help get started more quickly with monitoring Kubernetes clusters, [kube-prometheus](https://github.com/coreos/kube-prometheus) was created. It is a collection of manifests including dashboards and alerting rules that can easily be deployed. It utilizes the Prometheus Operator and all the manifests demonstrated in this guide.
This section represents a quick installation and is not intended to teach you about all the components. The easiest way to get started is to clone this repository and use the `kube-prometheus` section of the code.
```
git clone https://github.com/prometheus-operator/kube-prometheus
git clone https://github.com/coreos/kube-prometheus
cd kube-prometheus/
```
@@ -133,7 +133,7 @@ kubectl apply -f manifests/prometheus/prometheus-k8s-roles.yaml
kubectl apply -f manifests/prometheus/prometheus-k8s-role-bindings.yaml
```
Finally, install the [Alertmanager](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)
Finally, install the [Alertmanager](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)
```
kubectl --namespace="$NAMESPACE" apply -f manifests/alertmanager
@@ -145,4 +145,4 @@ Now you should have a working cluster. After all the pods are ready, you should
* Alertmanager UI on node port `30903`
* Grafana on node port `30902`
These can of course be changed via the Service definitions. It is recommended to look at the [Exposing Prometheus and Alert Manager](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md) documentation for more detailed information on how to expose these services.
These can of course be changed via the Service definitions. It is recommended to look at the [Exposing Prometheus and Alert Manager](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md) documentation for more detailed information on how to expose these services.
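The guide above exposes the UIs through NodePort Services defined directly in the manifests. When the stack is built from the jsonnet library instead, the same effect is usually achieved by layering the node-ports add-on that is referenced elsewhere in this diff; the sketch below is an editorial illustration under that assumption, not part of this commit.

```jsonnet
// Illustrative sketch only: expose the Prometheus, Alertmanager and Grafana
// Services as NodePorts by layering the node-ports add-on (the add-on path is
// the one referenced in the ArgoCD example later in this diff).
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/node-ports.libsonnet') +
  {
    values+:: {
      common+: { namespace: 'monitoring' },
    },
  };

// Render the Prometheus manifests; the Service picks up the NodePort type.
{
  ['prometheus-' + name]: kp.prometheus[name]
  for name in std.objectFields(kp.prometheus)
}
```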


@@ -15,9 +15,6 @@ local kp =
common+: {
namespace: 'monitoring',
},
alertmanager+: {
config: importstr 'alertmanager-config.yaml',
},
},
alertmanager+:: {
alertmanager+: {
@@ -29,9 +26,9 @@ local kp =
},
configmap+:: {
'alert-templates': configmap(
'alert-templates',
'alertmanager-alert-template.tmpl',
$.values.common.namespace, // could be $._config.namespace to assign namespace once
{ 'alertmanager-alert-template.tmpl': importstr 'alertmanager-alert-template.tmpl' },
{ data: importstr 'alertmanager-alert-template.tmpl' },
),
},
};


@@ -24,4 +24,4 @@ slack_configs:
text: '{{ template "slack.text" . }}
templates:
- '/etc/alertmanager/configmaps/alert-templates/*.tmpl'
- '/etc/alertmanager/configmaps/alertmanager-alert-template.tmpl'


@@ -1,11 +1,9 @@
## ArgoCD Example
This is the simplest, working example of an argocd app, the JSON object built is now an array of objects as that is the preferred format for ArgoCD. And ArgoCD specific annotations are added to manifests.
This is the simplest, working example of an argocd app, the JSON object built is now an array of objects as that is the preferred format for ArgoCD.
Requirements:
- **ArgoCD 1.7+**
**ArgoCD 1.7+**
- Follow the vendor generation steps at the root of this repository and generate a `vendored` folder (referenced in `application.yaml`).
- Make sure that argocd-cm has `application.instanceLabelKey` set to something else than `app.kubernetes.io/instance`, otherwise it will cause problems with prometheus target discovery. (see also [Why Is My App Out Of Sync Even After Syncing?](https://argo-cd.readthedocs.io/en/stable/faq/#why-is-my-app-out-of-sync-even-after-syncing))
Follow the vendor generation steps at the root of this repository and generate a `vendored` folder (referenced in `application.yaml`).


@@ -1,66 +1,14 @@
// NB! Make sure that argocd-cm has `application.instanceLabelKey` set to something else than `app.kubernetes.io/instance`,
// otherwise it will cause problems with prometheus target discovery.
// See also https://argo-cd.readthedocs.io/en/stable/faq/#why-is-my-app-out-of-sync-even-after-syncing
local kp =
(import 'kube-prometheus/main.libsonnet') +
// Uncomment the following imports to enable its patches
// (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
// (import 'kube-prometheus/addons/managed-cluster.libsonnet') +
// (import 'kube-prometheus/addons/node-ports.libsonnet') +
// (import 'kube-prometheus/addons/static-etcd.libsonnet') +
// (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
// (import 'kube-prometheus/addons/external-metrics.libsonnet') +
// (import 'kube-prometheus/addons/pyrra.libsonnet') +
{
values+:: {
common+: {
namespace: 'monitoring',
},
local kp = (import 'kube-prometheus/main.libsonnet') + {
values+:: {
common+: {
namespace: 'monitoring',
},
};
},
};
// Unlike in kube-prometheus/example.jsonnet where a map of file-names to manifests is returned,
// for ArgoCD we need to return just a regular list with all the manifests.
local manifests =
[kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus)] +
[kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator)] +
[kp.alertmanager[name] for name in std.objectFields(kp.alertmanager)] +
[kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter)] +
[kp.grafana[name] for name in std.objectFields(kp.grafana)] +
// [ kp.pyrra[name] for name in std.objectFields(kp.pyrra)] +
[kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics)] +
[kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane)] +
[kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter)] +
[kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
[kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)];
local argoAnnotations(manifest) =
manifest {
metadata+: {
annotations+: {
'argocd.argoproj.io/sync-wave':
// Make sure to sync the Namespace & CRDs before anything else (to avoid sync failures)
if std.member(['CustomResourceDefinition', 'Namespace'], manifest.kind)
then '-5'
// And sync all the roles outside of the main & kube-system last (in case some of the namespaces don't exist yet)
else if std.objectHas(manifest, 'metadata')
&& std.objectHas(manifest.metadata, 'namespace')
&& !std.member([kp.values.common.namespace, 'kube-system'], manifest.metadata.namespace)
then '10'
else '5',
'argocd.argoproj.io/sync-options':
// Use replace strategy for CRDs, as they're too big fit into the last-applied-configuration annotation that kubectl apply wants to use
if manifest.kind == 'CustomResourceDefinition' then 'Replace=true'
else '',
},
},
};
// Add argo-cd annotations to all the manifests
[
if std.endsWith(manifest.kind, 'List') && std.objectHas(manifest, 'items')
then manifest { items: [argoAnnotations(item) for item in manifest.items] }
else argoAnnotations(manifest)
for manifest in manifests
]
[kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus)] +
[kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator)] +
[kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter)] +
[kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics)] +
[kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
[kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)]

62
go.mod

@@ -1,30 +1,27 @@
module github.com/prometheus-operator/kube-prometheus
go 1.22.0
toolchain go1.22.5
go 1.19
require (
github.com/Jeffail/gabs v1.4.0
github.com/prometheus/client_golang v1.20.3
k8s.io/apimachinery v0.31.0
k8s.io/client-go v0.31.0
github.com/prometheus/client_golang v1.16.0
k8s.io/apimachinery v0.27.4
k8s.io/client-go v0.27.4
)
require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -32,25 +29,24 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.22.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.5.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.31.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
k8s.io/api v0.27.4 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

221
go.sum

@@ -1,64 +1,81 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -67,106 +84,142 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.27.4 h1:0pCo/AN9hONazBKlNUdhQymmnfLRbSZjd5H5H3f0bSs=
k8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y=
k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs=
k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
k8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk=
k8s.io/client-go v0.27.4/go.mod h1:ragcly7lUlN0SRPk5/ZkGnDjPknzb37TICq07WhI6Xc=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -14,7 +14,7 @@
// Drop all apiserver metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers|storage_db_total_size_in_bytes|flowcontrol_request_concurrency_limit|flowcontrol_request_concurrency_in_use)',
regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)',
action: 'drop',
},
// Drop all docker metrics which are deprecated in kubernetes.

View File

@@ -37,7 +37,7 @@
_config:: defaults + params,
crd: (
import 'github.com/pyrra-dev/pyrra/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json'
import 'github.com/pyrra-dev/pyrra/config/crd/bases/pyrra.dev_servicelevelobjectives.json'
),
@@ -80,9 +80,6 @@
securityContext: {
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
runAsNonRoot: true,
capabilities: { drop: ['ALL'] },
seccompProfile: { type: 'RuntimeDefault' },
},
};

View File

@@ -1,67 +0,0 @@
// user facing roles for monitors, probe, and rules
// ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
{
prometheusOperator+: {
local po = self,
clusterRoleView: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: po._metadata {
name: 'monitoring-view',
namespace:: null,
labels+: {
'rbac.authorization.k8s.io/aggregate-to-view': 'true',
},
},
rules: [
{
apiGroups: [
'monitoring.coreos.com',
],
resources: [
'podmonitors',
'probes',
'prometheusrules',
'servicemonitors',
],
verbs: [
'get',
'list',
'watch',
],
},
],
},
clusterRoleEdit: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: po._metadata {
name: 'monitoring-edit',
namespace:: null,
labels+: {
'rbac.authorization.k8s.io/aggregate-to-edit': 'true',
},
},
rules: [
{
apiGroups: [
'monitoring.coreos.com',
],
resources: [
'podmonitors',
'probes',
'prometheusrules',
'servicemonitors',
],
verbs: [
'create',
'delete',
'deletecollection',
'patch',
'update',
],
},
],
},
},
}

View File

@@ -3135,7 +3135,7 @@
],
"targets": [
{
"expr": "sort_desc(floor(label_replace(max by(node) (max by(instance) (kubelet_running_pod_count{job=\"kubelet\",metrics_path=\"/metrics\"}) * on(instance) group_left(node) kubelet_node_name{job=\"kubelet\",metrics_path=\"/metrics\"}) / max by(node) (kube_node_status_capacity{resource=\"pods\",unit=\"integer\",job=\"kube-state-metrics\"}) , \"node_ip\", \"$1.$2.$3.$4\", \"node\", \"^ip-([0-9]+)-([0-9]+)-([0-9]+)-([0-9]+).*$\") * 100))",
"expr": "sort_desc(floor(label_replace(max by(node) (max by(instance) (kubelet_running_pod_count{job=\"kubelet\",metrics_path=\"/metrics\"}) * on(instance) group_left(node) kubelet_node_name{job=\"kubelet\",metrics_path=\"/metrics\"}) / max by(node) (kube_node_status_capacity_pods{job=\"kube-state-metrics\"}) , \"node_ip\", \"$1.$2.$3.$4\", \"node\", \"^ip-([0-9]+)-([0-9]+)-([0-9]+)-([0-9]+).*$\") * 100))",
"format": "time_series",
"hide": false,
"instant": true,

View File

@@ -8,7 +8,7 @@ local defaults = {
name:: 'windows-exporter',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
image:: error 'must provide version',
resources:: {
requests: { cpu: '300m', memory: '200Mi' },
limits: { memory: '200Mi' },

View File

@@ -60,7 +60,6 @@ local defaults = {
],
},
replicas: 3,
secrets: [],
mixin:: {
ruleLabels: {},
_config: {
@@ -226,7 +225,6 @@ function(params) {
},
resources: am._config.resources,
nodeSelector: { 'kubernetes.io/os': 'linux' },
secrets: am._config.secrets,
serviceAccountName: am.serviceAccount.metadata.name,
securityContext: {
runAsUser: 1000,

View File

@@ -6,7 +6,7 @@ local defaults = {
// If there is no CRD for the component, everything is hidden in defaults.
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
image:: error 'must provide version',
resources:: {
requests: { cpu: '10m', memory: '20Mi' },
limits: { cpu: '20m', memory: '40Mi' },
@@ -183,7 +183,6 @@ function(params) {
} else {
runAsNonRoot: true,
runAsUser: 65534,
runAsGroup: 65534,
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
capabilities: { drop: ['ALL'] },
@@ -206,7 +205,6 @@ function(params) {
securityContext: {
runAsNonRoot: true,
runAsUser: 65534,
runAsGroup: 65534,
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
capabilities: { drop: ['ALL'] },

View File

@@ -116,9 +116,6 @@ function(params)
template+: {
spec+: {
automountServiceAccountToken: false,
securityContext+: {
runAsGroup: 65534,
},
},
},
},

View File

@@ -71,30 +71,13 @@ function(params) {
},
spec: {
jobLabel: 'app.kubernetes.io/name',
endpoints: [
{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: { insecureSkipVerify: true },
},
{
port: 'https-metrics',
interval: '5s',
scheme: 'https',
path: '/metrics/slis',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: { insecureSkipVerify: true },
metricRelabelings: [
{
sourceLabels: ['__name__'],
regex: 'process_start_time_seconds',
action: 'drop',
},
],
},
],
endpoints: [{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: { insecureSkipVerify: true },
}],
selector: {
matchLabels: { 'app.kubernetes.io/name': 'kube-scheduler' },
},
@@ -191,27 +174,6 @@ function(params) {
targetLabel: 'metrics_path',
}],
},
{
port: 'https-metrics',
scheme: 'https',
path: '/metrics/slis',
interval: '5s',
honorLabels: true,
tlsConfig: { insecureSkipVerify: true },
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
relabelings: [
{
action: 'replace',
sourceLabels: ['__metrics_path__'],
targetLabel: 'metrics_path',
},
{
sourceLabels: ['__name__'],
regex: 'process_start_time_seconds',
action: 'drop',
},
],
},
],
selector: {
matchLabels: { 'app.kubernetes.io/name': 'kubelet' },
@@ -231,41 +193,22 @@ function(params) {
},
spec: {
jobLabel: 'app.kubernetes.io/name',
endpoints: [
{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: {
insecureSkipVerify: true,
},
metricRelabelings: relabelings + [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|request|server).*',
action: 'drop',
},
],
endpoints: [{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: {
insecureSkipVerify: true,
},
{
port: 'https-metrics',
interval: '5s',
scheme: 'https',
path: '/metrics/slis',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: {
insecureSkipVerify: true,
metricRelabelings: relabelings + [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|request|server).*',
action: 'drop',
},
metricRelabelings: [
{
sourceLabels: ['__name__'],
regex: 'process_start_time_seconds',
action: 'drop',
},
],
},
],
],
}],
selector: {
matchLabels: { 'app.kubernetes.io/name': 'kube-controller-manager' },
},
@@ -293,58 +236,38 @@ function(params) {
namespaceSelector: {
matchNames: ['default'],
},
endpoints: [
{
port: 'https',
interval: '30s',
scheme: 'https',
tlsConfig: {
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
serverName: 'kubernetes',
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: relabelings + [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|server).*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__', 'le'],
regex: 'apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50)',
action: 'drop',
},
],
endpoints: [{
port: 'https',
interval: '30s',
scheme: 'https',
tlsConfig: {
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
serverName: 'kubernetes',
},
{
port: 'https',
interval: '5s',
scheme: 'https',
path: '/metrics/slis',
tlsConfig: {
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
serverName: 'kubernetes',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: relabelings + [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|server).*',
action: 'drop',
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: [
{
sourceLabels: ['__name__'],
regex: 'process_start_time_seconds',
action: 'drop',
},
],
},
],
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__', 'le'],
regex: 'apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50)',
action: 'drop',
},
],
}],
},
},

View File

@@ -50,6 +50,7 @@ function(params) {
name: krp._config.name,
image: krp._config.image,
args: [
'--logtostderr',
'--secure-listen-address=' + krp._config.secureListenAddress,
'--tls-cipher-suites=' + std.join(',', krp._config.tlsCipherSuites),
'--upstream=' + krp._config.upstream,
@@ -63,6 +64,5 @@ function(params) {
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
capabilities: { drop: ['ALL'] },
seccompProfile: { type: 'RuntimeDefault' },
},
}

View File

@@ -7,7 +7,7 @@ local defaults = {
name:: 'kube-state-metrics',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
image:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
resources:: {
requests: { cpu: '10m', memory: '190Mi' },
@@ -164,9 +164,6 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
ports:: null,
livenessProbe:: null,
readinessProbe:: null,
securityContext+: {
runAsGroup: 65534,
},
args: ['--host=127.0.0.1', '--port=8081', '--telemetry-host=127.0.0.1', '--telemetry-port=8082'],
resources: ksm._config.resources,
}, super.containers) + [kubeRbacProxyMain, kubeRbacProxySelf],

View File

@@ -7,7 +7,7 @@ local defaults = {
name:: 'node-exporter',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
image:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
resources:: {
requests: { cpu: '102m', memory: '180Mi' },
@@ -295,7 +295,6 @@ function(params) {
serviceAccountName: ne._config.name,
priorityClassName: 'system-cluster-critical',
securityContext: {
runAsGroup: 65534,
runAsUser: 65534,
runAsNonRoot: true,
},

View File

@@ -280,9 +280,7 @@ function(params) {
securityContext: {
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
runAsNonRoot: true,
capabilities: { drop: ['ALL'] },
seccompProfile: { type: 'RuntimeDefault' },
},
};
@@ -302,12 +300,7 @@ function(params) {
},
},
template: {
metadata: {
annotations: {
'checksum.config/md5': std.md5(std.manifestYamlDoc(pa._config.config)),
},
labels: pa._config.commonLabels,
},
metadata: { labels: pa._config.commonLabels },
spec: {
containers: [c],
serviceAccountName: $.serviceAccount.metadata.name,

View File

@@ -163,9 +163,6 @@ function(params)
template+: {
spec+: {
automountServiceAccountToken: true,
securityContext+: {
runAsGroup: 65534,
},
containers+: [kubeRbacProxy],
},
},

View File

@@ -175,10 +175,7 @@ function(params) {
] +
(
if p._config.thanos != null then
[
{ name: 'grpc', port: 10901, targetPort: 10901 },
{ name: 'http', port: 10902, targetPort: 10902 },
]
[{ name: 'grpc', port: 10901, targetPort: 10901 }]
else []
),
selector: p._config.selectorLabels,
@@ -223,7 +220,7 @@ function(params) {
verbs: ['get'],
},
{
nonResourceURLs: ['/metrics', '/metrics/slis'],
nonResourceURLs: ['/metrics'],
verbs: ['get'],
},
],
@@ -343,8 +340,6 @@ function(params) {
probeNamespaceSelector: {},
ruleNamespaceSelector: {},
ruleSelector: p._config.ruleSelector,
scrapeConfigSelector: {},
scrapeConfigNamespaceSelector: {},
serviceMonitorSelector: {},
serviceMonitorNamespaceSelector: {},
nodeSelector: { 'kubernetes.io/os': 'linux' },

View File

@@ -8,7 +8,7 @@
"subdir": "grafana"
}
},
"version": "5698c8940b6dadca3f42107b7839557bc041761f"
"version": "master"
},
{
"source": {
@@ -17,7 +17,7 @@
"subdir": "grafana-mixin"
}
},
"version": "release-11.2.0",
"version": "main",
"name": "grafana-mixin"
},
{
@@ -27,7 +27,7 @@
"subdir": "contrib/mixin"
}
},
"version": "release-3.5"
"version": "main"
},
{
"source": {
@@ -36,7 +36,7 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "release-0.76"
"version": "main"
},
{
"source": {
@@ -45,7 +45,7 @@
"subdir": "jsonnet/mixin"
}
},
"version": "release-0.76",
"version": "main",
"name": "prometheus-operator-mixin"
},
{
@@ -55,7 +55,7 @@
"subdir": ""
}
},
"version": "50150c585ebee6e4d9cb72218182da8f3c616515"
"version": "master"
},
{
"source": {
@@ -64,7 +64,7 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "release-2.13"
"version": "main"
},
{
"source": {
@@ -73,7 +73,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "release-2.13"
"version": "main"
},
{
"source": {
@@ -82,7 +82,7 @@
"subdir": "docs/node-mixin"
}
},
"version": "release-1.8"
"version": "master"
},
{
"source": {
@@ -91,7 +91,7 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "release-2.54",
"version": "main",
"name": "prometheus"
},
{
@@ -101,18 +101,17 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "release-0.27",
"version": "main",
"name": "alertmanager"
},
{
"source": {
"git": {
"remote": "https://github.com/pyrra-dev/pyrra.git",
"subdir": "jsonnet/controller-gen"
"subdir": "config/crd/bases"
}
},
"version": "v0.7.7",
"name": "pyrra"
"version": "main"
},
{
"source": {
@@ -121,7 +120,7 @@
"subdir": "mixin"
}
},
"version": "release-0.36",
"version": "main",
"name": "thanos-mixin"
}
],

View File

@@ -47,7 +47,7 @@ local utils = import './lib/utils.libsonnet';
prometheusOperator: 'quay.io/prometheus-operator/prometheus-operator:v' + $.values.common.versions.prometheusOperator,
prometheusOperatorReloader: 'quay.io/prometheus-operator/prometheus-config-reloader:v' + $.values.common.versions.prometheusOperator,
kubeRbacProxy: 'quay.io/brancz/kube-rbac-proxy:v' + $.values.common.versions.kubeRbacProxy,
configmapReload: 'ghcr.io/jimmidyson/configmap-reload:v' + $.values.common.versions.configmapReload,
configmapReload: 'jimmidyson/configmap-reload:v' + $.values.common.versions.configmapReload,
},
},
alertmanager: {
@@ -150,10 +150,6 @@ local utils = import './lib/utils.libsonnet';
kind: 'Namespace',
metadata: {
name: $.values.common.namespace,
labels: {
'pod-security.kubernetes.io/warn': 'privileged',
'pod-security.kubernetes.io/warn-version': 'latest',
},
},
},
},

View File

@@ -1,13 +1,13 @@
{
"alertmanager": "0.27.0",
"blackboxExporter": "0.25.0",
"grafana": "11.2.0",
"kubeStateMetrics": "2.13.0",
"nodeExporter": "1.8.2",
"prometheus": "2.54.1",
"prometheusAdapter": "0.12.0",
"prometheusOperator": "0.76.2",
"kubeRbacProxy": "0.18.1",
"configmapReload": "0.13.1",
"alertmanager": "0.25.0",
"blackboxExporter": "0.24.0",
"grafana": "9.5.3",
"kubeStateMetrics": "2.9.2",
"nodeExporter": "1.6.1",
"prometheus": "2.45.0",
"prometheusAdapter": "0.10.0",
"prometheusOperator": "0.66.0",
"kubeRbacProxy": "0.14.2",
"configmapReload": "0.5.0",
"pyrra": "0.6.4"
}

View File

@@ -1,15 +1,6 @@
{
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "mixin-utils"
}
},
"version": "master"
},
{
"source": {
"local": {

View File

@@ -18,8 +18,8 @@
"subdir": "contrib/mixin"
}
},
"version": "f20bbadd404b57c776d1e8876cefd1ac29b03fb5",
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
"version": "26b3ecf5aa25ec085c7bd0f99a871865742c078d",
"sum": "GdePvMDfLQcVhwzk/Ephi/jC27ywGObLB5t0eC0lXd4="
},
{
"source": {
@@ -28,8 +28,8 @@
"subdir": "grafana-mixin"
}
},
"version": "c57667e4481563f5e6cf945b03bc0626caa4dbeb",
"sum": "S8mRTRH4w62kMCa2je3iCtvscYrwQmkyJ7Y/aM14KbE="
"version": "1120f9e255760a3c104b57871fcb91801e934382",
"sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs="
},
{
"source": {
@@ -51,26 +51,6 @@
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
"sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-latest"
}
},
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
"sum": "eyuJ0jOXeA4MrobbNgU4/v5a7ASDHslHZ0eS6hDdWoI="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet.git",
"subdir": "gen/grafonnet-v11.0.0"
}
},
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
"sum": "0BvzR0i4bS4hc2O3xDv6i9m52z7mPrjvqxtcPrGhynA="
},
{
"source": {
"git": {
@@ -78,38 +58,8 @@
"subdir": "grafana-builder"
}
},
"version": "474b02b7c297f3923ab040eef95161b310cd2c96",
"sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "mixin-utils"
}
},
"version": "474b02b7c297f3923ab040eef95161b310cd2c96",
"sum": "LoYq5QxJmUXEtqkEG8CFUBLBhhzDDaNANHc7Gz36ZdM="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/docsonnet.git",
"subdir": "doc-util"
}
},
"version": "6ac6c69685b8c29c54515448eaca583da2d88150",
"sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U="
},
{
"source": {
"git": {
"remote": "https://github.com/jsonnet-libs/xtd.git",
"subdir": ""
}
},
"version": "63d430b69a95741061c2f7fc9d84b1a778511d9c",
"sum": "qiZi3axUSXCVzKUF83zSAxklwrnitMmrDK4XAfjPMdE="
"version": "a3e4fa30494f4ee94f1cab506df85090a12545d9",
"sum": "xEFMv4+ObwP5L1Wu0XK5agWci4AJzNApys6iKAQxLlQ="
},
{
"source": {
@@ -118,8 +68,8 @@
"subdir": ""
}
},
"version": "50150c585ebee6e4d9cb72218182da8f3c616515",
"sum": "0g1pn3gGq2yZyeUTx+zniK/D7jMKbAnqJ83Lke+uJ6o="
"version": "46fc905d5b2981642043088ac7902ea50db2903e",
"sum": "8FAie1MXww5Ip9F8hQWkU9Fio1Af+hO4weQuuexioIQ="
},
{
"source": {
@@ -128,8 +78,8 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "76c5888e3402c946abd6f31876f3aada4c0c84fc",
"sum": "pvInhJNQVDOcC3NGWRMKRIP954mAvLXCQpTlafIg7fA="
"version": "3fb1e86c189d40f6807508424b273b463464b372",
"sum": "+dOzAK+fwsFf97uZpjcjTcEJEC1H8hh/j8f5uIQK/5g="
},
{
"source": {
@@ -138,7 +88,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "76c5888e3402c946abd6f31876f3aada4c0c84fc",
"version": "3fb1e86c189d40f6807508424b273b463464b372",
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
},
{
@@ -148,8 +98,8 @@
"subdir": "jsonnet/mixin"
}
},
"version": "8ba73758bd40233fce49b68ae191692a12c6fdbf",
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
"version": "ec8188c48b186becae6041bcd439fa640086a1e4",
"sum": "n3flMIzlADeyygb0uipZ4KPp2uNSjdtkrwgHjTC7Ca4=",
"name": "prometheus-operator-mixin"
},
{
@@ -159,8 +109,8 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "8ba73758bd40233fce49b68ae191692a12c6fdbf",
"sum": "Qs56OWJ2PLCEGRPlJ2Xd1LukXKj8KBzqMYncwjYTEwo="
"version": "ec8188c48b186becae6041bcd439fa640086a1e4",
"sum": "e+BTsEsyOI6FFW464Xn8Tp1xixtArpLqGHW8qIczNjs="
},
{
"source": {
@@ -169,8 +119,8 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "0aa3c2aad14cff039931923ab16b26b7481783b5",
"sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
"version": "487db1383b8cc5c2867c77f110431605bb8ce247",
"sum": "PsK+V7oETCPKu2gLoPfqY0wwPKH9TzhNj6o2xezjjXc=",
"name": "alertmanager"
},
{
@@ -180,8 +130,8 @@
"subdir": "docs/node-mixin"
}
},
"version": "f1e0e8360aa60b6cb5e5cc1560bed348fc2c1895",
"sum": "R9ROsvpjZLgQJ78WAyD4HzrIq976Bpr4V2P2Fo2Kfns="
"version": "8fb4f78ce541f404144f86b3202cc53d2a0f387c",
"sum": "By6n6U10hYDogUsyhsaKZehbhzxBZZobJloiKyKadgM="
},
{
"source": {
@@ -190,20 +140,19 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "c5e015d29534f06bd1d238c64a06b7ac41abdd7f",
"sum": "dYLcLzGH4yF3qB7OGC/7z4nqeTNjv42L7Q3BENU8XJI=",
"version": "94edd088595b3f872fd770418c8face76a853628",
"sum": "8OngT76gVXOUROOOeP9yTe6E/dn+2D2J34Dn690QCG0=",
"name": "prometheus"
},
{
"source": {
"git": {
"remote": "https://github.com/pyrra-dev/pyrra.git",
"subdir": "jsonnet/controller-gen"
"subdir": "config/crd/bases"
}
},
"version": "d723f4d1a066dd657e9d09c46a158519dda0faa8",
"sum": "cxAPQovFkM16zNB5/94O+sk/n3SETk6ao6Oas2Sa6RE=",
"name": "pyrra"
"version": "1f288e97738e6869b07d3363094f55146ce1d5eb",
"sum": "hF23mXrMFOMwB0zGHaHdL5aw3KOx5j1lyOwWT3IaOXY="
},
{
"source": {
@@ -212,8 +161,8 @@
"subdir": "mixin"
}
},
"version": "99a5742a15f107d4607d280c825eca5b7f09a253",
"sum": "HhSSbGGCNHCMy1ee5jElYDm0yS9Vesa7QB2/SHKdjsY=",
"version": "cdba35b2c3779804adb69fca2162a528a10e682e",
"sum": "WhheqsiX0maUXByZFsb9xhCEsGXK2955bPmPPf1x+Cs=",
"name": "thanos-mixin"
},
{

View File

@@ -6,11 +6,11 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: main
namespace: monitoring
spec:
image: quay.io/prometheus/alertmanager:v0.27.0
image: quay.io/prometheus/alertmanager:v0.25.0
nodeSelector:
kubernetes.io/os: linux
podMetadata:
@@ -19,7 +19,7 @@ spec:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
replicas: 3
resources:
limits:
@@ -28,10 +28,9 @@ spec:
requests:
cpu: 4m
memory: 100Mi
secrets: []
securityContext:
fsGroup: 2000
runAsNonRoot: true
runAsUser: 1000
serviceAccountName: alertmanager-main
version: 0.27.0
version: 0.25.0

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring
spec:

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring
spec:

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
prometheus: k8s
role: alert-rules
name: alertmanager-main-rules
@@ -50,7 +50,7 @@ spec:
(
rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring"}[5m])
/
ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring"}[5m])
rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring"}[5m])
)
> 0.01
for: 5m
@@ -65,7 +65,7 @@ spec:
min by (namespace,service, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
/
ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
)
> 0.01
for: 5m
@@ -80,7 +80,7 @@ spec:
min by (namespace,service, integration) (
rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
/
ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
)
> 0.01
for: 5m

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring
stringData:

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring
spec:

View File

@@ -7,6 +7,6 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring

View File

@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: main
app.kubernetes.io/name: alertmanager
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.27.0
app.kubernetes.io/version: 0.25.0
name: alertmanager-main
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@@ -46,6 +46,6 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter-configuration
namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
namespace: monitoring
spec:
@@ -23,14 +23,14 @@ spec:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
spec:
automountServiceAccountToken: true
containers:
- args:
- --config.file=/etc/blackbox_exporter/config.yml
- --web.listen-address=:19115
image: quay.io/prometheus/blackbox-exporter:v0.25.0
image: quay.io/prometheus/blackbox-exporter:v0.24.0
name: blackbox-exporter
ports:
- containerPort: 19115
@@ -48,7 +48,6 @@ spec:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
volumeMounts:
@@ -58,7 +57,7 @@ spec:
- args:
- --webhook-url=http://localhost:19115/-/reload
- --volume-dir=/etc/blackbox_exporter/
image: ghcr.io/jimmidyson/configmap-reload:v0.13.1
image: jimmidyson/configmap-reload:v0.5.0
name: module-configmap-reloader
resources:
limits:
@@ -73,7 +72,6 @@ spec:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
terminationMessagePath: /dev/termination-log
@@ -83,10 +81,11 @@ spec:
name: config
readOnly: true
- args:
- --logtostderr
- --secure-listen-address=:9115
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --upstream=http://127.0.0.1:19115/
image: quay.io/brancz/kube-rbac-proxy:v0.18.1
image: quay.io/brancz/kube-rbac-proxy:v0.14.2
name: kube-rbac-proxy
ports:
- containerPort: 9115
@@ -107,8 +106,6 @@ spec:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: blackbox-exporter

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
namespace: monitoring
spec:

View File

@@ -6,6 +6,6 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: blackbox-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 0.25.0
app.kubernetes.io/version: 0.24.0
name: blackbox-exporter
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana-config
namespace: monitoring
stringData:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana-datasources
namespace: monitoring
stringData:

File diff suppressed because it is too large

View File

@@ -22,6 +22,6 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana-dashboards
namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana
namespace: monitoring
spec:
@@ -18,19 +18,19 @@ spec:
template:
metadata:
annotations:
checksum/grafana-config: c4d088078bb55176e3910a42b41ecc08
checksum/grafana-dashboardproviders: b66e063b0e9d7b9e152e066f0ab965ee
checksum/grafana-datasources: 495c78a90b81354c8feeece92f6f5466
checksum/grafana-config: 5c598ba58d9b65011bdbb3864138399a
checksum/grafana-dashboardproviders: c9c1743868aa1c3dab60d2c402e2dcf0
checksum/grafana-datasources: 5ef0e6acaa5b4e8603740fbad440717d
labels:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
spec:
automountServiceAccountToken: false
containers:
- env: []
image: grafana/grafana:11.2.0
image: grafana/grafana:9.5.3
name: grafana
ports:
- containerPort: 3000
@@ -152,7 +152,6 @@ spec:
kubernetes.io/os: linux
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: grafana

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
prometheus: k8s
role: alert-rules
name: grafana-rules
@@ -19,8 +19,8 @@ spec:
message: '{{ $labels.namespace }}/{{ $labels.job }}/{{ $labels.handler }} is experiencing {{ $value | humanize }}% errors'
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/grafana/grafanarequestsfailing
expr: |
100 * sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."})
/
100 * namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."}
/ ignoring (status_code)
sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query"})
> 50
for: 5m

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana
namespace: monitoring
spec:

View File

@@ -6,6 +6,6 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana
namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: grafana
app.kubernetes.io/name: grafana
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 11.2.0
app.kubernetes.io/version: 9.5.3
name: grafana
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
rules:
- apiGroups:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
namespace: monitoring
spec:
@@ -23,7 +23,7 @@ spec:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
spec:
automountServiceAccountToken: true
containers:
@@ -32,7 +32,7 @@ spec:
- --port=8081
- --telemetry-host=127.0.0.1
- --telemetry-port=8082
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0
image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2
name: kube-state-metrics
resources:
limits:
@@ -47,16 +47,16 @@ spec:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
- args:
- --logtostderr
- --secure-listen-address=:8443
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --upstream=http://127.0.0.1:8081/
image: quay.io/brancz/kube-rbac-proxy:v0.18.1
image: quay.io/brancz/kube-rbac-proxy:v0.14.2
name: kube-rbac-proxy-main
ports:
- containerPort: 8443
@@ -77,13 +77,12 @@ spec:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
- args:
- --logtostderr
- --secure-listen-address=:9443
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --upstream=http://127.0.0.1:8082/
image: quay.io/brancz/kube-rbac-proxy:v0.18.1
image: quay.io/brancz/kube-rbac-proxy:v0.14.2
name: kube-rbac-proxy-self
ports:
- containerPort: 9443
@@ -104,8 +103,6 @@ spec:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: kube-state-metrics

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
namespace: monitoring
spec:

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
prometheus: k8s
role: alert-rules
name: kube-state-metrics-rules

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
namespace: monitoring
spec:

View File

@@ -6,6 +6,6 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
namespace: monitoring

View File

@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: kube-state-metrics
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.13.0
app.kubernetes.io/version: 2.9.2
name: kube-state-metrics
namespace: monitoring
spec:

View File

@@ -116,7 +116,7 @@ spec:
summary: StatefulSet update has not been rolled out.
expr: |
(
max by(namespace, statefulset, job, cluster) (
max without (revision) (
kube_statefulset_status_current_revision{job="kube-state-metrics"}
unless
kube_statefulset_status_update_revision{job="kube-state-metrics"}
@@ -262,7 +262,7 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubecpuovercommit
summary: Cluster has overcommitted CPU resource requests.
expr: |
sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
sum(namespace_cpu:kube_pod_container_resource_requests:sum{job="kube-state-metrics",}) by (cluster) - (sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
and
(sum(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster) - max(kube_node_status_allocatable{job="kube-state-metrics",resource="cpu"}) by (cluster)) > 0
for: 10m
@@ -351,9 +351,9 @@ spec:
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/cputhrottlinghigh
summary: Processes experience elevated CPU throttling.
expr: |
sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (cluster, container, pod, namespace)
sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (container, pod, namespace)
/
sum(increase(container_cpu_cfs_periods_total{}[5m])) by (cluster, container, pod, namespace)
sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container, pod, namespace)
> ( 25 / 100 )
for: 15m
labels:
@@ -362,7 +362,7 @@ spec:
rules:
- alert: KubePersistentVolumeFillingUp
annotations:
description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is only {{ $value | humanizePercentage }} free.
description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
summary: PersistentVolume is filling up.
expr: |
@@ -373,16 +373,16 @@ spec:
) < 0.03
and
kubelet_volume_stats_used_bytes{job="kubelet", metrics_path="/metrics"} > 0
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
for: 1m
labels:
severity: critical
- alert: KubePersistentVolumeFillingUp
annotations:
description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available.
description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumefillingup
summary: PersistentVolume is filling up.
expr: |
@@ -395,16 +395,16 @@ spec:
kubelet_volume_stats_used_bytes{job="kubelet", metrics_path="/metrics"} > 0
and
predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
for: 1h
labels:
severity: warning
- alert: KubePersistentVolumeInodesFillingUp
annotations:
description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} only has {{ $value | humanizePercentage }} free inodes.
description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} only has {{ $value | humanizePercentage }} free inodes.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
summary: PersistentVolumeInodes are filling up.
expr: |
@@ -415,16 +415,16 @@ spec:
) < 0.03
and
kubelet_volume_stats_inodes_used{job="kubelet", metrics_path="/metrics"} > 0
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
for: 1m
labels:
severity: critical
- alert: KubePersistentVolumeInodesFillingUp
annotations:
description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} is expected to run out of inodes within four days. Currently {{ $value | humanizePercentage }} of its inodes are free.
description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to run out of inodes within four days. Currently {{ $value | humanizePercentage }} of its inodes are free.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeinodesfillingup
summary: PersistentVolumeInodes are filling up.
expr: |
@@ -437,16 +437,16 @@ spec:
kubelet_volume_stats_inodes_used{job="kubelet", metrics_path="/metrics"} > 0
and
predict_linear(kubelet_volume_stats_inodes_free{job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_access_mode{ access_mode="ReadOnlyMany"} == 1
unless on(cluster, namespace, persistentvolumeclaim)
unless on(namespace, persistentvolumeclaim)
kube_persistentvolumeclaim_labels{label_excluded_from_alerts="true"} == 1
for: 1h
labels:
severity: warning
- alert: KubePersistentVolumeErrors
annotations:
description: The persistent volume {{ $labels.persistentvolume }} {{ with $labels.cluster -}} on Cluster {{ . }} {{- end }} has status {{ $labels.phase }}.
description: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubepersistentvolumeerrors
summary: PersistentVolume is having issues with provisioning.
expr: |
@@ -756,6 +756,323 @@ spec:
for: 15m
labels:
severity: critical
- name: kube-apiserver-burnrate.rules
rules:
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1d]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1d]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1d]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d]))
labels:
verb: read
record: apiserver_request:burnrate1d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1h]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h]))
labels:
verb: read
record: apiserver_request:burnrate1h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[2h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[2h]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[2h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h]))
labels:
verb: read
record: apiserver_request:burnrate2h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[30m]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[30m]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[30m]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m]))
labels:
verb: read
record: apiserver_request:burnrate30m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[3d]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[3d]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[3d]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d]))
labels:
verb: read
record: apiserver_request:burnrate3d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[5m]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[5m]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[5m]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
labels:
verb: read
record: apiserver_request:burnrate5m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
-
(
(
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[6h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[6h]))
+
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[6h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h]))
labels:
verb: read
record: apiserver_request:burnrate6h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1d]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
labels:
verb: write
record: apiserver_request:burnrate1d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
labels:
verb: write
record: apiserver_request:burnrate1h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[2h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
labels:
verb: write
record: apiserver_request:burnrate2h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[30m]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
labels:
verb: write
record: apiserver_request:burnrate30m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[3d]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
labels:
verb: write
record: apiserver_request:burnrate3d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[5m]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
labels:
verb: write
record: apiserver_request:burnrate5m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
-
sum by (cluster) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[6h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
labels:
verb: write
record: apiserver_request:burnrate6h
- name: kube-apiserver-histogram.rules
rules:
- expr: |
histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
labels:
quantile: "0.99"
verb: read
record: cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile
- expr: |
histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_slo_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
labels:
quantile: "0.99"
verb: write
record: cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile
- interval: 3m
name: kube-apiserver-availability.rules
rules:
@@ -773,39 +1090,39 @@ spec:
verb: write
record: code:apiserver_request_total:increase30d
- expr: |
sum by (cluster, verb, scope) (increase(apiserver_request_sli_duration_seconds_count{job="apiserver"}[1h]))
record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase1h
sum by (cluster, verb, scope) (increase(apiserver_request_slo_duration_seconds_count{job="apiserver"}[1h]))
record: cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h
- expr: |
sum by (cluster, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase1h[30d]) * 24 * 30)
record: cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d
sum by (cluster, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h[30d]) * 24 * 30)
record: cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d
- expr: |
sum by (cluster, verb, scope, le) (increase(apiserver_request_sli_duration_seconds_bucket[1h]))
record: cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h
sum by (cluster, verb, scope, le) (increase(apiserver_request_slo_duration_seconds_bucket[1h]))
record: cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h
- expr: |
sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase1h[30d]) * 24 * 30)
record: cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d
sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h[30d]) * 24 * 30)
record: cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d
- expr: |
1 - (
(
# write too slow
sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
-
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
) +
(
# read too slow
sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"})
sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~"LIST|GET"})
-
(
(
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
or
vector(0)
)
+
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
+
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
)
) +
# errors
@@ -818,19 +1135,19 @@ spec:
record: apiserver_request:availability30d
- expr: |
1 - (
sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"LIST|GET"})
sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~"LIST|GET"})
-
(
# too slow
(
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"})
or
vector(0)
)
+
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"})
+
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"})
)
+
# errors
@@ -845,9 +1162,9 @@ spec:
1 - (
(
# too slow
sum by (cluster) (cluster_verb_scope:apiserver_request_sli_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
sum by (cluster) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"})
-
sum by (cluster) (cluster_verb_scope_le:apiserver_request_sli_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
sum by (cluster) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"})
)
+
# errors
@@ -880,324 +1197,7 @@ spec:
- expr: |
sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
record: code_verb:apiserver_request_total:increase1h
- name: kube-apiserver-burnrate.rules
rules:
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1d]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1d]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1d]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d]))
labels:
verb: read
record: apiserver_request:burnrate1d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[1h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[1h]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[1h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h]))
labels:
verb: read
record: apiserver_request:burnrate1h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[2h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[2h]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[2h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h]))
labels:
verb: read
record: apiserver_request:burnrate2h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[30m]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[30m]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[30m]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m]))
labels:
verb: read
record: apiserver_request:burnrate30m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[3d]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[3d]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[3d]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d]))
labels:
verb: read
record: apiserver_request:burnrate3d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[5m]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[5m]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[5m]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m]))
labels:
verb: read
record: apiserver_request:burnrate5m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
-
(
(
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope=~"resource|",le="1"}[6h]))
or
vector(0)
)
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="namespace",le="5"}[6h]))
+
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward",scope="cluster",le="30"}[6h]))
)
)
+
# errors
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h]))
labels:
verb: read
record: apiserver_request:burnrate6h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1d]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1d]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d]))
labels:
verb: write
record: apiserver_request:burnrate1d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[1h]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[1h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h]))
labels:
verb: write
record: apiserver_request:burnrate1h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[2h]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[2h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h]))
labels:
verb: write
record: apiserver_request:burnrate2h
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[30m]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[30m]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m]))
labels:
verb: write
record: apiserver_request:burnrate30m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[3d]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[3d]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d]))
labels:
verb: write
record: apiserver_request:burnrate3d
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[5m]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))
labels:
verb: write
record: apiserver_request:burnrate5m
- expr: |
(
(
# too slow
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[6h]))
-
sum by (cluster) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward",le="1"}[6h]))
)
+
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h]))
)
/
sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h]))
labels:
verb: write
record: apiserver_request:burnrate6h
- name: kube-apiserver-histogram.rules
rules:
- expr: |
histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
labels:
quantile: "0.99"
verb: read
record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile
- expr: |
histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_sli_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",subresource!~"proxy|attach|log|exec|portforward"}[5m]))) > 0
labels:
quantile: "0.99"
verb: write
record: cluster_quantile:apiserver_request_sli_duration_seconds:histogram_quantile
- name: k8s.rules.container_cpu_usage_seconds_total
- name: k8s.rules
rules:
- expr: |
sum by (cluster, namespace, pod, container) (
@@ -1206,40 +1206,30 @@ spec:
1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
- name: k8s.rules.container_memory_working_set_bytes
rules:
- expr: |
container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,
max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
record: node_namespace_pod_container:container_memory_working_set_bytes
- name: k8s.rules.container_memory_rss
rules:
- expr: |
container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,
max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
record: node_namespace_pod_container:container_memory_rss
- name: k8s.rules.container_memory_cache
rules:
- expr: |
container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,
max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
record: node_namespace_pod_container:container_memory_cache
- name: k8s.rules.container_memory_swap
rules:
- expr: |
container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}
* on (cluster, namespace, pod) group_left(node) topk by(cluster, namespace, pod) (1,
max by(cluster, namespace, pod, node) (kube_pod_info{node!=""})
)
record: node_namespace_pod_container:container_memory_swap
- name: k8s.rules.container_memory_requests
rules:
- expr: |
kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster)
group_left() max by (namespace, pod, cluster) (
@@ -1257,8 +1247,6 @@ spec:
)
)
record: namespace_memory:kube_pod_container_resource_requests:sum
- name: k8s.rules.container_cpu_requests
rules:
- expr: |
kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster)
group_left() max by (namespace, pod, cluster) (
@@ -1276,8 +1264,6 @@ spec:
)
)
record: namespace_cpu:kube_pod_container_resource_requests:sum
- name: k8s.rules.container_memory_limits
rules:
- expr: |
kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster)
group_left() max by (namespace, pod, cluster) (
@@ -1295,8 +1281,6 @@ spec:
)
)
record: namespace_memory:kube_pod_container_resource_limits:sum
- name: k8s.rules.container_cpu_limits
rules:
- expr: |
kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster)
group_left() max by (namespace, pod, cluster) (
@@ -1314,8 +1298,6 @@ spec:
)
)
record: namespace_cpu:kube_pod_container_resource_limits:sum
- name: k8s.rules.pod_owner
rules:
- expr: |
max by (cluster, namespace, workload, pod) (
label_replace(
@@ -1421,8 +1403,8 @@ spec:
- expr: |
count by (cluster, node) (
node_cpu_seconds_total{mode="idle",job="node-exporter"}
* on (cluster, namespace, pod) group_left(node)
topk by(cluster, namespace, pod) (1, node_namespace_pod:kube_pod_info:)
* on (namespace, pod) group_left(node)
topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:)
)
record: node:node_num_cpu:sum
- expr: |


@@ -20,7 +20,7 @@ spec:
sourceLabels:
- __name__
- action: drop
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers|storage_db_total_size_in_bytes|flowcontrol_request_concurrency_limit|flowcontrol_request_concurrency_in_use)
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)
sourceLabels:
- __name__
- action: drop
@@ -65,19 +65,6 @@ spec:
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 5s
metricRelabelings:
- action: drop
regex: process_start_time_seconds
sourceLabels:
- __name__
path: /metrics/slis
port: https
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
jobLabel: component
namespaceSelector:
matchNames:


@@ -20,7 +20,7 @@ spec:
sourceLabels:
- __name__
- action: drop
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers|storage_db_total_size_in_bytes|flowcontrol_request_concurrency_limit|flowcontrol_request_concurrency_in_use)
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)
sourceLabels:
- __name__
- action: drop
@@ -51,18 +51,6 @@ spec:
scheme: https
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 5s
metricRelabelings:
- action: drop
regex: process_start_time_seconds
sourceLabels:
- __name__
path: /metrics/slis
port: https-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: app.kubernetes.io/name
namespaceSelector:
matchNames:


@@ -14,18 +14,6 @@ spec:
scheme: https
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
interval: 5s
metricRelabelings:
- action: drop
regex: process_start_time_seconds
sourceLabels:
- __name__
path: /metrics/slis
port: https-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: app.kubernetes.io/name
namespaceSelector:
matchNames:


@@ -21,7 +21,7 @@ spec:
sourceLabels:
- __name__
- action: drop
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers|storage_db_total_size_in_bytes|flowcontrol_request_concurrency_limit|flowcontrol_request_concurrency_in_use)
regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)
sourceLabels:
- __name__
- action: drop
@@ -96,23 +96,6 @@ spec:
scheme: https
tlsConfig:
insecureSkipVerify: true
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
interval: 5s
path: /metrics/slis
port: https-metrics
relabelings:
- action: replace
sourceLabels:
- __metrics_path__
targetLabel: metrics_path
- action: drop
regex: process_start_time_seconds
sourceLabels:
- __name__
scheme: https
tlsConfig:
insecureSkipVerify: true
jobLabel: app.kubernetes.io/name
namespaceSelector:
matchNames:


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
rules:
- apiGroups:


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
roleRef:
apiGroup: rbac.authorization.k8s.io


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
namespace: monitoring
spec:
@@ -22,7 +22,7 @@ spec:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
spec:
automountServiceAccountToken: true
containers:
@@ -37,7 +37,7 @@ spec:
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
- --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$
- --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$
image: quay.io/prometheus/node-exporter:v1.8.2
image: quay.io/prometheus/node-exporter:v1.6.1
name: node-exporter
resources:
limits:
@@ -64,6 +64,7 @@ spec:
name: root
readOnly: true
- args:
- --logtostderr
- --secure-listen-address=[$(IP)]:9100
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- --upstream=http://127.0.0.1:9100/
@@ -72,7 +73,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
image: quay.io/brancz/kube-rbac-proxy:v0.18.1
image: quay.io/brancz/kube-rbac-proxy:v0.14.2
name: kube-rbac-proxy
ports:
- containerPort: 9100
@@ -94,15 +95,12 @@ spec:
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
hostNetwork: true
hostPID: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
securityContext:
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
serviceAccountName: node-exporter


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
namespace: monitoring
spec:


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
prometheus: k8s
role: alert-rules
name: node-exporter-rules
@@ -297,7 +297,7 @@ spec:
- alert: NodeDiskIOSaturation
annotations:
description: |
Disk IO queue (aqu-sq) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above 10 for the last 30 minutes, is currently at {{ printf "%.2f" $value }}.
Disk IO queue (aqu-sq) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above 10 for the last 15 minutes, is currently at {{ printf "%.2f" $value }}.
This symptom might indicate disk saturation.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodediskiosaturation
summary: Disk IO queue is high.
@@ -316,16 +316,6 @@ spec:
for: 5m
labels:
severity: warning
- alert: NodeBondingDegraded
annotations:
description: Bonding interface {{ $labels.master }} on {{ $labels.instance }} is in degraded state due to one or more slave failures.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/node/nodebondingdegraded
summary: Bonding interface is degraded
expr: |
(node_bonding_slaves - node_bonding_active) != 0
for: 5m
labels:
severity: warning
- name: node-exporter.rules
rules:
- expr: |


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
namespace: monitoring
spec:


@@ -6,6 +6,6 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
namespace: monitoring


@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: exporter
app.kubernetes.io/name: node-exporter
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 1.8.2
app.kubernetes.io/version: 1.6.1
name: node-exporter
namespace: monitoring
spec:


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
rules:
- apiGroups:
@@ -17,6 +17,5 @@ rules:
- get
- nonResourceURLs:
- /metrics
- /metrics/slis
verbs:
- get


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
roleRef:
apiGroup: rbac.authorization.k8s.io


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
spec:


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
spec:


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: k8s
namespace: monitoring
spec:
@@ -18,7 +18,7 @@ spec:
port: web
enableFeatures: []
externalLabels: {}
image: quay.io/prometheus/prometheus:v2.54.1
image: quay.io/prometheus/prometheus:v2.45.0
nodeSelector:
kubernetes.io/os: linux
podMetadata:
@@ -27,7 +27,7 @@ spec:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
podMonitorNamespaceSelector: {}
podMonitorSelector: {}
probeNamespaceSelector: {}
@@ -38,8 +38,6 @@ spec:
memory: 400Mi
ruleNamespaceSelector: {}
ruleSelector: {}
scrapeConfigNamespaceSelector: {}
scrapeConfigSelector: {}
securityContext:
fsGroup: 2000
runAsNonRoot: true
@@ -47,4 +45,4 @@ spec:
serviceAccountName: prometheus-k8s
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector: {}
version: 2.54.1
version: 2.45.0


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
prometheus: k8s
role: alert-rules
name: prometheus-k8s-prometheus-rules
@@ -37,16 +37,6 @@ spec:
for: 20m
labels:
severity: warning
- alert: PrometheusKubernetesListWatchFailures
annotations:
description: Kubernetes service discovery of Prometheus {{$labels.namespace}}/{{$labels.pod}} is experiencing {{ printf "%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.
runbook_url: https://runbooks.prometheus-operator.dev/runbooks/prometheus/prometheuskuberneteslistwatchfailures
summary: Requests in Kubernetes SD are failing.
expr: |
increase(prometheus_sd_kubernetes_failures_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0
for: 15m
labels:
severity: warning
- alert: PrometheusNotificationQueueRunningFull
annotations:
description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full.
@@ -118,7 +108,7 @@ spec:
summary: Prometheus is not ingesting samples.
expr: |
(
sum without(type) (rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-k8s",namespace="monitoring"}[5m])) <= 0
rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-k8s",namespace="monitoring"}[5m]) <= 0
and
(
sum without(scrape_job) (prometheus_target_metadata_cache_entries{job="prometheus-k8s",namespace="monitoring"}) > 0


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s-config
namespace: monitoring
roleRef:


@@ -8,7 +8,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: default
roleRef:
@@ -27,7 +27,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: kube-system
roleRef:
@@ -46,7 +46,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
roleRef:


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s-config
namespace: monitoring
rules:


@@ -8,7 +8,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: default
rules:
@@ -46,7 +46,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: kube-system
rules:
@@ -84,7 +84,7 @@ items:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
rules:


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
spec:


@@ -7,6 +7,6 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring


@@ -6,7 +6,7 @@ metadata:
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 2.54.1
app.kubernetes.io/version: 2.45.0
name: prometheus-k8s
namespace: monitoring
spec:

Some files were not shown because too many files have changed in this diff.