Compare commits

4 commits · comparing `automated-...` with `v0.10.0`

| Author | SHA1 | Date |
|---|---|---|
|  | 42ea595d60 |  |
|  | 8ae302328f |  |
|  | b7b9971715 |  |
|  | fb9021cf9a |  |
**.github/CODEOWNERS** (vendored) · 1 change

```diff
@@ -1 +0,0 @@
-* @prometheus-operator/kube-prometheus-reviewers
```
**.github/ISSUE_TEMPLATE/support.md** (vendored) · 2 changes

```diff
@@ -5,7 +5,7 @@ labels: kind/support
 ---
 
 This repository now has the new GitHub Discussions enabled:
-https://github.com/prometheus-operator/kube-prometheus/discussions
+https://github.com/coreos/kube-prometheus/discussions
 
 Please create a new discussion to ask for any kind of support, which is not a Bug or Feature Request.
```
**.github/dependabot.yml** (vendored) · 16 changes

```diff
@@ -1,16 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: gomod
-    directory: /
-    schedule:
-      interval: daily
-
-  - package-ecosystem: gomod
-    directory: /scripts/
-    schedule:
-      interval: daily
-
-  - package-ecosystem: github-actions
-    directory: /
-    schedule:
-      interval: daily
```
**.github/env** (vendored, new file) · 2 changes

```diff
@@ -0,0 +1,2 @@
+golang-version=1.16
+kind-version=v0.11.1
```
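The `key=value` layout of `.github/env` matches the format GitHub Actions accepts for its `$GITHUB_ENV` file, which suggests the workflows source it rather than hard-coding tool versions. A minimal sketch of such a step (the step itself is an assumption, not shown in this compare):

```sh
# Hypothetical workflow step: promote the pinned versions into the job environment.
cat .github/env >> "$GITHUB_ENV"
# Later steps can then reference them in workflow YAML as
# ${{ env.golang-version }} and ${{ env.kind-version }}.
```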
**.github/workflows/ci.yaml** (vendored) · 61 changes

```diff
@@ -3,8 +3,8 @@ on:
   - push
   - pull_request
 env:
-  golang-version: '1.22'
-  kind-version: 'v0.24.0'
+  golang-version: '1.15'
+  kind-version: 'v0.11.1'
 jobs:
   generate:
     runs-on: ${{ matrix.os }}
@@ -15,10 +15,10 @@ jobs:
         - ubuntu-latest
     name: Generate
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v2
         with:
           go-version: ${{ env.golang-version }}
       - run: make --always-make generate validate && git diff --exit-code
@@ -26,10 +26,10 @@ jobs:
     runs-on: ubuntu-latest
     name: Check Documentation formatting and links
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v2
         with:
           go-version: ${{ env.golang-version }}
       - run: make check-docs
@@ -37,73 +37,44 @@ jobs:
     runs-on: ubuntu-latest
     name: Jsonnet linter
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.golang-version }}
       - run: make --always-make lint
   fmt:
     runs-on: ubuntu-latest
     name: Jsonnet formatter
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.golang-version }}
       - run: make --always-make fmt && git diff --exit-code
   unit-tests:
     runs-on: ubuntu-latest
     name: Unit tests
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.golang-version }}
      - run: make --always-make test
-  security-audit:
-    runs-on: ubuntu-latest
-    name: Run security analysis on manifests
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.golang-version }}
-      - run: make --always-make kubescape
   e2e-tests:
     name: E2E tests
     runs-on: ubuntu-latest
     strategy:
       matrix:
         kind-image:
-          - 'kindest/node:v1.31.0'
-          - 'kindest/node:v1.30.4'
-          - 'kindest/node:v1.29.8'
+          - 'kindest/node:v1.23.0'
+          - 'kindest/node:v1.22.4'
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           persist-credentials: false
-      - uses: actions/setup-go@v5
-        with:
-          go-version: ${{ env.golang-version }}
-      - name: Start kind cluster
-        uses: helm/kind-action@v1.10.0
+      - name: Start KinD
+        uses: engineerd/setup-kind@v0.5.0
         with:
           version: ${{ env.kind-version }}
-          node_image: ${{ matrix.kind-image }}
-          wait: 10s # Without default CNI, control-plane doesn't get ready until Cilium is installed
-          config: .github/workflows/kind/config.yml
-          cluster_name: e2e
-      - name: Install kube-router for NetworkPolicy support
-        run: |
-          kubectl apply -f .github/workflows/kind/kube-router.yaml
+          image: ${{ matrix.kind-image }}
+          wait: 300s
+      - name: Wait for cluster to finish bootstraping
+        run: kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=300s
       - name: Create kube-prometheus stack
```
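On the removed (`-`) side, the e2e job boots a kind cluster from a checked-in config and layers kube-router on top for NetworkPolicy support. That flow can be approximated locally; a rough sketch, assuming `kind` and `kubectl` are installed and the repo is checked out (cluster name and node image here are illustrative):

```sh
#!/usr/bin/env bash
set -euo pipefail

# Create the cluster from the checked-in kind config, as the CI job does.
kind create cluster --name e2e \
  --image 'kindest/node:v1.31.0' \
  --config .github/workflows/kind/config.yml \
  --wait 10s

# Install kube-router so NetworkPolicy objects are actually enforced.
kubectl apply -f .github/workflows/kind/kube-router.yaml

# Wait for the control plane and CNI pods to settle before running tests.
kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=300s
```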
**.github/workflows/kind/config.yml** (vendored) · 15 changes

```diff
@@ -1,15 +0,0 @@
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-networking:
-  podSubnet: "10.10.0.0/16"
-  serviceSubnet: "10.11.0.0/16"
-nodes:
-  - role: control-plane
-    extraMounts:
-      - hostPath: /home/runner/work/kube-prometheus/kube-prometheus/.github/workflows/kind/patches
-        containerPath: /patches
-    kubeadmConfigPatches:
-      - |
-        kind: InitConfiguration
-        patches:
-          directory: /patches
```
**.github/workflows/kind/kube-router.yaml** (vendored) · 186 changes

```diff
@@ -1,186 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kube-router-cfg
-  namespace: kube-system
-  labels:
-    tier: node
-    k8s-app: kube-router
-data:
-  cni-conf.json: |
-    {
-      "cniVersion":"0.3.0",
-      "name":"mynet",
-      "plugins":[
-        {
-          "name":"kubernetes",
-          "type":"bridge",
-          "bridge":"kube-bridge",
-          "isDefaultGateway":true,
-          "ipam":{
-            "type":"host-local"
-          }
-        }
-      ]
-    }
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  labels:
-    k8s-app: kube-router
-    tier: node
-  name: kube-router
-  namespace: kube-system
-spec:
-  selector:
-    matchLabels:
-      k8s-app: kube-router
-      tier: node
-  template:
-    metadata:
-      labels:
-        k8s-app: kube-router
-        tier: node
-    spec:
-      priorityClassName: system-node-critical
-      serviceAccountName: kube-router
-      containers:
-      - name: kube-router
-        image: docker.io/cloudnativelabs/kube-router
-        imagePullPolicy: Always
-        args:
-        - --run-router=true
-        - --run-firewall=true
-        - --run-service-proxy=false
-        - --bgp-graceful-restart=true
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        - name: KUBE_ROUTER_CNI_CONF_FILE
-          value: /etc/cni/net.d/10-kuberouter.conflist
-        livenessProbe:
-          httpGet:
-            path: /healthz
-            port: 20244
-          initialDelaySeconds: 10
-          periodSeconds: 3
-        resources:
-          requests:
-            memory: 250Mi
-        securityContext:
-          privileged: true
-        volumeMounts:
-        - name: lib-modules
-          mountPath: /lib/modules
-          readOnly: true
-        - name: cni-conf-dir
-          mountPath: /etc/cni/net.d
-        - name: kubeconfig
-          mountPath: /var/lib/kube-router/kubeconfig
-          readOnly: true
-        - name: xtables-lock
-          mountPath: /run/xtables.lock
-          readOnly: false
-      initContainers:
-      - name: install-cni
-        image: docker.io/cloudnativelabs/kube-router
-        imagePullPolicy: Always
-        command:
-        - /bin/sh
-        - -c
-        - set -e -x;
-          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
-            if [ -f /etc/cni/net.d/*.conf ]; then
-              rm -f /etc/cni/net.d/*.conf;
-            fi;
-            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
-            cp /etc/kube-router/cni-conf.json ${TMP};
-            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
-          fi
-        volumeMounts:
-        - mountPath: /etc/cni/net.d
-          name: cni-conf-dir
-        - mountPath: /etc/kube-router
-          name: kube-router-cfg
-      hostNetwork: true
-      tolerations:
-      - effect: NoSchedule
-        operator: Exists
-      - key: CriticalAddonsOnly
-        operator: Exists
-      - effect: NoExecute
-        operator: Exists
-      volumes:
-      - name: lib-modules
-        hostPath:
-          path: /lib/modules
-      - name: cni-conf-dir
-        hostPath:
-          path: /etc/cni/net.d
-      - name: kube-router-cfg
-        configMap:
-          name: kube-router-cfg
-      - name: kubeconfig
-        hostPath:
-          path: /var/lib/kube-router/kubeconfig
-      - name: xtables-lock
-        hostPath:
-          path: /run/xtables.lock
-          type: FileOrCreate
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kube-router
-  namespace: kube-system
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: kube-router
-  namespace: kube-system
-rules:
-  - apiGroups:
-    - ""
-    resources:
-      - namespaces
-      - pods
-      - services
-      - nodes
-      - endpoints
-    verbs:
-      - list
-      - get
-      - watch
-  - apiGroups:
-    - "networking.k8s.io"
-    resources:
-      - networkpolicies
-    verbs:
-      - list
-      - get
-      - watch
-  - apiGroups:
-    - extensions
-    resources:
-      - networkpolicies
-    verbs:
-      - get
-      - list
-      - watch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: kube-router
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: kube-router
-subjects:
-- kind: ServiceAccount
-  name: kube-router
-  namespace: kube-system
```

A second deleted manifest follows in the listing (its file name is not shown); it matches the kubeadm patch directory mounted by the kind config above:

```diff
@@ -1,11 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kube-controller-manager
-  namespace: kube-system
-spec:
-  containers:
-    - name: kube-controller-manager
-      resources:
-        requests:
-          cpu: 1m
```
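Because the DaemonSet runs kube-router with `--run-firewall=true`, NetworkPolicy enforcement in the e2e cluster depends on it being healthy. A quick post-apply check along these lines (the commands are illustrative, not part of the workflow):

```sh
# Every node should end up with a Ready kube-router pod.
kubectl -n kube-system rollout status daemonset/kube-router --timeout=120s
kubectl -n kube-system get pods -l k8s-app=kube-router -o wide

# The liveness probe above polls this endpoint on each node:
#   http://<node-ip>:20244/healthz
```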
**.github/workflows/stale.yaml** (vendored) · 5 changes

```diff
@@ -1,13 +1,13 @@
 name: 'Close stale issues and PRs'
 on:
   schedule:
-    - cron: '30 3 * * *'
+    - cron: '30 1 * * *'
 
 jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v9
+      - uses: actions/stale@v4
         with:
           stale-issue-message: 'This issue has been automatically marked as stale because it has not had any activity in the last 60 days. Thank you for your contributions.'
           close-issue-message: 'This issue was closed because it has not had any activity in the last 120 days. Please reopen if you feel this is still valid.'
@@ -18,4 +18,3 @@ jobs:
           stale-issue-label: 'stale'
           stale-pr-label: 'stale'
-          exempt-draft-pr: true
           operations-per-run: 500
```
**.github/workflows/versions.yaml** (vendored) · 27 changes

````diff
@@ -4,25 +4,24 @@ on:
   workflow_dispatch:
   schedule:
     - cron: '37 7 * * 1'
-env:
-  golang-version: '1.22'
 jobs:
   versions:
     runs-on: ubuntu-latest
     strategy:
       matrix:
         branch:
-          - 'release-0.11'
-          - 'release-0.12'
-          - 'release-0.13'
+          - 'release-0.6'
+          - 'release-0.7'
+          - 'release-0.8'
+          - 'release-0.9'
           - 'main'
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
         with:
           ref: ${{ matrix.branch }}
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v2
         with:
-          go-version: ${{ env.golang-version }}
+          go-version: 1.16
       - name: Upgrade versions
         id: versions
         run: |
@@ -30,15 +29,9 @@ jobs:
           # Write to temporary file to make update atomic
           scripts/generate-versions.sh > /tmp/versions.json
           mv /tmp/versions.json jsonnet/kube-prometheus/versions.json
-          # Display the raw diff between versions.
-          git diff
           # Get the links to the changelogs of the updated versions and make them
           # available to the reviewers
-          {
-            echo 'new_changelogs<<EOF'
-            echo $(scripts/get-new-changelogs.sh)
-            echo EOF
-          } >> $GITHUB_OUTPUT
+          echo ::set-output name=new_changelogs::$(scripts/get-new-changelogs.sh)
         if: matrix.branch == 'main'
       - name: Update jsonnet dependencies
         run: |
@@ -51,7 +44,7 @@ jobs:
             git checkout -- jsonnetfile.lock.json;
           fi
       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v7
+        uses: peter-evans/create-pull-request@v3
         with:
           commit-message: "[bot] [${{ matrix.branch }}] Automated version update"
           title: "[bot] [${{ matrix.branch }}] Automated version update"
@@ -72,7 +65,7 @@ jobs:
           - [x] `NONE` (if none of the other choices apply. Example, tooling, build system, CI, docs, etc.)
 
           ## Changelog entry
 
           ```release-note
           ```
````
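The middle hunk records the migration away from the deprecated `::set-output` workflow command: because the changelog list can span multiple lines, the newer side writes a `name<<EOF ... EOF` group into `$GITHUB_OUTPUT`. The two patterns side by side, with values taken from the diff:

```sh
# Deprecated: emit a workflow command on stdout (single-line values only).
echo "::set-output name=new_changelogs::$(scripts/get-new-changelogs.sh)"

# Replacement: append a delimited block to the $GITHUB_OUTPUT file,
# which carries multi-line values safely.
{
  echo 'new_changelogs<<EOF'
  echo $(scripts/get-new-changelogs.sh)
  echo EOF
} >> "$GITHUB_OUTPUT"
```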
**.gitignore** (vendored) · 3 changes

```diff
@@ -4,7 +4,6 @@ vendor/
 ./auth
 .swp
 crdschemas/
 .mdoxcache
 
-developer-workspace/gitpod/_output
-developer-workspace/codespaces/kind
+kind
```

The listing continues with a hunk from a link-validation config (file name not shown) that drops several ignore rules:

```diff
@@ -6,11 +6,4 @@ validators:
     type: "ignore"
   # Ignore release links.
   - regex: 'https:\/\/github\.com\/prometheus-operator\/kube-prometheus\/releases'
-    type: "ignore"
-  # Twitter changed their policy and now returns 403 if not authenticated. We can guarantee this link since we own the account.
-  - regex: 'https:\/\/twitter.com\/PromOperator'
-    type: "ignore"
-  # the www.weave.works domain returns 404 for many pages.
-  # Ignoring for now but we need remove the related content if it persists.
-  - regex: 'https:\/\/www.weave.works.*'
-    type: "ignore"
+    type: "ignore"
```
**CHANGELOG.md** · 78 changes

```diff
@@ -1,80 +1,3 @@
-## release-0.14 / 2024-09-12
-
-* [CHANGE] Prefer new form for `kube_node_status_capacity_pods` metric [#2269](https://github.com/prometheus-operator/kube-prometheus/pull/2269)
-* [CHANGE] Add runAsGroup to all components [#2424](https://github.com/prometheus-operator/kube-prometheus/pull/2424)
-* [FEATURE] Add support for ScrapeConfig [#2232](https://github.com/prometheus-operator/kube-prometheus/pull/2232)
-* [FEATURE] Add Kubernetes components SLI metrics [#2496](https://github.com/prometheus-operator/kube-prometheus/pull/2496)
-* [FEATURE] Add monitor and rules resources to user-facing roles add-on [#2238](https://github.com/prometheus-operator/kube-prometheus/pull/2238)
-* [BUGFIX] Add thanos-sidecar metrics port to Prometheus Service and NetworkPolicy [#2330](https://github.com/prometheus-operator/kube-prometheus/pull/2330)
-* [ENHANCEMENT] Add ability to inject Secrets into alertmanager [#2206](https://github.com/prometheus-operator/kube-prometheus/pull/2206)
-* [ENHANCEMENT] Add securityContext items and Pod security labels [#2178](https://github.com/prometheus-operator/kube-prometheus/pull/2178)
-
-## release-0.13 / 2023-08-31
-
-* [CHANGE] Added a AKS platform to `platforms.libsonnet` [#1997](https://github.com/prometheus-operator/kube-prometheus/pull/1997)
-* [CHANGE] Disable btrfs collector by default [#2074](https://github.com/prometheus-operator/kube-prometheus/pull/2074)
-* [CHANGE] Enable Multi Cluster alerts by default [#2099](https://github.com/prometheus-operator/kube-prometheus/pull/2099)
-* [FEATURE] Create dedicated Service to expose CoreDNS metric [#2107](https://github.com/prometheus-operator/kube-prometheus/pull/2107)
-* [FEATURE] Add Windows support using Hostprocess instead of static_configs [#2048](https://github.com/prometheus-operator/kube-prometheus/pull/2048)
-* [BUGFIX] Fix a compilation error when building the custom-metrics addon [#1996](https://github.com/prometheus-operator/kube-prometheus/pull/1996)
-* [BUGFIX] Add `prometheus-adapter` in Prometheus's NetworkPolicy [#1982](https://github.com/prometheus-operator/kube-prometheus/pull/1982)
-* [BUGFIX] Fix namespace specified in manifest non-namespaced resources [#2158](https://github.com/prometheus-operator/kube-prometheus/pull/2158)
-* [BUGFIX] Override ServiceAccount, Role and ClusterRole names in RoleBinding and ClusterRoleBinding [#2135](https://github.com/prometheus-operator/kube-prometheus/pull/2135)
-* [BUGFIX] Remove deprecated `--logtostderr` argument of prometheus-adapter [#2185](https://github.com/prometheus-operator/kube-prometheus/pull/2185)
-* [BUGFIX] Fix alertmanager external config example [#1891](https://github.com/prometheus-operator/kube-prometheus/pull/1891)
-* [ENHANCEMENT] Add startupProbe to prometheus-adapter [#2029](https://github.com/prometheus-operator/kube-prometheus/pull/2029)
-* [ENHANCEMENT] Added configurable default values for kube-rbac-proxy in prometheus-operator, node-exporter and blackbox-exporter [#1987](https://github.com/prometheus-operator/kube-prometheus/pull/1987)
-* [ENHANCEMENT] Modify control plane ServiceMonitors to be compatible with Argo [#2041](https://github.com/prometheus-operator/kube-prometheus/pull/2041)
-* [ENHANCEMENT] Add md5 hash of the ConfigMap in Prometheus Adapter Deployment Annotations to force its recreation [#2195](https://github.com/prometheus-operator/kube-prometheus/pull/2195)
-
-## release-0.12 / 2023-01-19
-
-* [CHANGE] Updates Prometheus Adapater version to 0.10.0 [#1865](https://github.com/prometheus-operator/kube-prometheus/pull/1865)
-* [FEATURE] Added a AKS platform [#1869](https://github.com/prometheus-operator/kube-prometheus/pull/1869)
-* [BUGFIX] Update Pyrra to 0.4.2 [#1800](https://github.com/prometheus-operator/kube-prometheus/pull/1800)
-* [BUGFIX] Jsonnet: enable automountServiceAccountToken for prometheus service account [#1808](https://github.com/prometheus-operator/kube-prometheus/pull/1808)
-* [BUGFIX] Fix diskDeviceSelector regex for aks and eks [#1810](https://github.com/prometheus-operator/kube-prometheus/pull/1810)
-* [BUGFIX] Set path.udev.data Argument of Node Exporter [#1913](https://github.com/prometheus-operator/kube-prometheus/pull/1913)
-* [BUGFIX] Include RAID device md.* in disk seletor [#1945](https://github.com/prometheus-operator/kube-prometheus/pull/1945)
-* [ENHANCEMENT] Prometheus-adapter: add prefix option to config for container metrics [#1844](https://github.com/prometheus-operator/kube-prometheus/pull/1844)
-* [ENHANCEMENT] Switch kube-state-metrics registry to registry.k8s.io [#1914](https://github.com/prometheus-operator/kube-prometheus/pull/1914)
-* [ENHANCEMENT] Node Exporter: add parameter for ignored network devices [#1887](https://github.com/prometheus-operator/kube-prometheus/pull/1887)
-
-## release-0.11 / 2022-06-15
-
-* [CHANGE] Disable injecting unnecessary variables allowing access to k8s API [#1591](https://github.com/prometheus-operator/kube-prometheus/pull/1591)
-* [FEATURE] Add grafana-mixin [#1458](https://github.com/prometheus-operator/kube-prometheus/pull/1458)
-* [FEATURE] Add example usage of prometheus-agent [#1472](https://github.com/prometheus-operator/kube-prometheus/pull/1472)
-* [FEATURE] Add Pyrra as (optional) component [#1667](https://github.com/prometheus-operator/kube-prometheus/pull/1667)
-* [ENHANCEMENT] Adds NetworkPolicies to all components of Kube-prometheus [#1650](https://github.com/prometheus-operator/kube-prometheus/pull/1650)
-* [ENHANCEMENT] Scan generated manifests with kubescape in CI [#1584](https://github.com/prometheus-operator/kube-prometheus/pull/1584)
-* [ENHANCEMENT] Explicitly declare allowPrivilegeEscalation to false in all components [#1593](https://github.com/prometheus-operator/kube-prometheus/pull/1593)
-* [ENHANCEMENT] Forbid write access to root filesystem [#1600](https://github.com/prometheus-operator/kube-prometheus/pull/1600)
-* [ENHANCEMENT] Drop Linux capabilities, , just keeping CAP_SYS_TIME for node-exporter [#1610](https://github.com/prometheus-operator/kube-prometheus/pull/1610)
-* [ENHANCEMENT] Remove hostPort from node-export daemonset [#1612](https://github.com/prometheus-operator/kube-prometheus/pull/1612)
-* [ENHANCEMENT] Add priorityClassName as system-cluster-critical for node_exporter [#1649](https://github.com/prometheus-operator/kube-prometheus/pull/1649)
-* [ENHANCEMENT] Added custom overrides for kube-rbac-proxy-self [#1637](https://github.com/prometheus-operator/kube-prometheus/pull/1637)
-* [ENHANCEMENT] Adds readinessProbe and livenessProbe to prometheus-adapter jsonnet [#1696](https://github.com/prometheus-operator/kube-prometheus/pull/1696)
-* [BUGFIX] Update kubeadm integration of kube-prometheus [#1569](https://github.com/prometheus-operator/kube-prometheus/pull/1569)
-* [BUGFIX] Add projected volumes permission to addon/podsecuritypolicie [#1572](https://github.com/prometheus-operator/kube-prometheus/pull/1572)
-* [BUGFIX] Hide namespace for prometheus clusterRole and clusterRolebinding [#1566](https://github.com/prometheus-operator/kube-prometheus/pull/1566)
-* [BUGFIX] Fix accidentally broken thanosSelector after #1543 [#1556](https://github.com/prometheus-operator/kube-prometheus/pull/1556)
-* [BUGFIX] Jsonnet: filter out kube-proxy alerts when kube-proxy is disabled [#1609](https://github.com/prometheus-operator/kube-prometheus/pull/1609)
-* [BUGFIX] Sanitize regex denylist in ksm-lite addon [#1613](https://github.com/prometheus-operator/kube-prometheus/pull/1613)
-* [BUGFIX] Sanitize all regex denylist in ksm-lite addon [#1614](https://github.com/prometheus-operator/kube-prometheus/pull/1614)
-* [BUGFIX] Add extra-volume mount for plugins downloads [#1624](https://github.com/prometheus-operator/kube-prometheus/pull/1624)
-* [BUGFIX] Added allowedCapabilities to node-exporter psp [#1642](https://github.com/prometheus-operator/kube-prometheus/pull/1642)
-* [BUGFIX] Fix cluster:node_cpu:ratio query [#1628](https://github.com/prometheus-operator/kube-prometheus/pull/1628)
-* [BUGFIX] Removed CAP_ from node-exporter daemonset [#1647](https://github.com/prometheus-operator/kube-prometheus/pull/1647)
-* [BUGFIX] Update PodMonitor for kube-proxy [#1630](https://github.com/prometheus-operator/kube-prometheus/pull/1630)
-* [BUGFIX] Adds port name to prometheus-adapter [#1701](https://github.com/prometheus-operator/kube-prometheus/pull/1701)
-* [BUGFIX] Fix grafana network access [#1721](https://github.com/prometheus-operator/kube-prometheus/pull/1721)
-* [BUGFIX] Fix networkpolicies-disabled addon [#1724](https://github.com/prometheus-operator/kube-prometheus/pull/1724)
-* [BUGFIX] Adjust NodeFilesystemSpaceFillingUp thresholds according default kubelet GC behavior [#1729](https://github.com/prometheus-operator/kube-prometheus/pull/1729)
-* [BUGFIX] Fix problems when enabling eks platform patch [#1675](https://github.com/prometheus-operator/kube-prometheus/pull/1675)
-* [BUGFIX] Access requests to sidecar from thanos-query [#1730](https://github.com/prometheus-operator/kube-prometheus/pull/1730)
-* [BUGFIX] Fix prometheus namespace connection for addons/pyrra [#1734](https://github.com/prometheus-operator/kube-prometheus/pull/1734)
-
 ## release-0.10 / 2021-12-17
 
 * [CHANGE] Adjust node filesystem space filling up warning threshold to 20% [#1357](https://github.com/prometheus-operator/kube-prometheus/pull/1357)
@@ -95,7 +18,6 @@
 * [ENHANCEMENT] Exclude k3s containerd mountpoints [#1497](https://github.com/prometheus-operator/kube-prometheus/pull/1497)
 * [ENHANCEMENT] Alertmanager now uses the new `matcher` syntax in the routing tree and inhibition rules [#1508](https://github.com/prometheus-operator/kube-prometheus/pull/1508)
 * [ENHANCEMENT] Deprecate `thanosSelector` and expose `mixin._config.thanos` config variable for thanos sidecar [#1543](https://github.com/prometheus-operator/kube-prometheus/pull/1543)
-* [ENHANCEMENT] Added configurable default values for sidecar container kube-rbac-proxy-self in deployment kube-statate-metrics. [#1637](https://github.com/prometheus-operator/kube-prometheus/pull/1637)
 * [FEATURE] Support scraping config-reloader sidecar for Prometheus and AlertManager StatefulSets [#1344](https://github.com/prometheus-operator/kube-prometheus/pull/1344)
 * [FEATURE] Expose prometheus alerting configuration in $.values.prometheus configuration [#1476](https://github.com/prometheus-operator/kube-prometheus/pull/1476)
 * [BUGFIX] Remove deprecated policy/v1beta1 Kubernetes API [#1433](https://github.com/prometheus-operator/kube-prometheus/pull/1433)
```
The next hunks in the listing (file name not shown) come from the contributor documentation:

```diff
@@ -8,6 +8,13 @@ and other resources to make it easier to get your contribution accepted.
 To maintain a safe and welcoming community, all participants must adhere to the
 project's [Code of Conduct](code-of-conduct.md).
 
+## Certificate of Origin
+
+By contributing to this project you agree to the Developer Certificate of
+Origin (DCO). This document was created by the Linux Kernel community and is a
+simple statement that you, as a contributor, have the legal right to make the
+contribution. See the [DCO](DCO) file for details.
+
 ## Community
 
 The project is developed in the open. Here are some of the channels we use to communicate and contribute:
@@ -20,7 +27,7 @@ Channel used for project developers discussions
 
 **Discussion forum**: [GitHub discussions](https://github.com/prometheus-operator/kube-prometheus/discussions)
 
-**Twitter**: [@PromOperator](https://twitter.com/PromOperator)
+**Twitter**: [@PromOperator](https://twitter.com/promoperator)
 
 **GitHub**: To file bugs and feature requests. For questions and discussions use the GitHub discussions. Generally,
 the other community channels listed here are best suited to get support or discuss overarching topics.
```
**DCO** (new file) · 36 changes

```diff
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
```
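Adding the DCO file means contributions are expected to carry the `Signed-off-by` trailer that clause (d) refers to. With stock git this is just the `-s` flag (general git usage, not something this diff configures):

```sh
# Adds "Signed-off-by: Your Name <you@example.com>" from your git config.
git commit -s -m "docs: fix typo in README"

# Add a missing sign-off to the previous commit.
git commit --amend -s --no-edit
```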
**Makefile** · 22 changes

```diff
@@ -9,16 +9,13 @@ JSONNET_BIN=$(BIN_DIR)/jsonnet
 JSONNETLINT_BIN=$(BIN_DIR)/jsonnet-lint
 JSONNETFMT_BIN=$(BIN_DIR)/jsonnetfmt
 KUBECONFORM_BIN=$(BIN_DIR)/kubeconform
-KUBESCAPE_BIN=$(BIN_DIR)/kubescape
-TOOLING=$(JB_BIN) $(GOJSONTOYAML_BIN) $(JSONNET_BIN) $(JSONNETLINT_BIN) $(JSONNETFMT_BIN) $(KUBECONFORM_BIN) $(MDOX_BIN) $(KUBESCAPE_BIN)
+TOOLING=$(JB_BIN) $(GOJSONTOYAML_BIN) $(JSONNET_BIN) $(JSONNETLINT_BIN) $(JSONNETFMT_BIN) $(KUBECONFORM_BIN) $(MDOX_BIN)
 
 JSONNETFMT_ARGS=-n 2 --max-blank-lines 2 --string-style s --comment-style s
 
 MDOX_VALIDATE_CONFIG?=.mdox.validate.yaml
 MD_FILES_TO_FORMAT=$(shell find docs developer-workspace examples experimental jsonnet manifests -name "*.md") $(shell ls *.md)
-
-KUBESCAPE_THRESHOLD=1
 
 all: generate fmt test docs
 
 .PHONY: clean
@@ -54,25 +51,18 @@ update: $(JB_BIN)
 	$(JB_BIN) update
 
 .PHONY: validate
-validate: validate-1.29 validate-1.30 validate-1.31
+validate: validate-1.21 validate-1.22
 
-validate-1.29:
-	KUBE_VERSION=1.29.8 $(MAKE) kubeconform
+validate-1.21:
+	KUBE_VERSION=1.21.1 $(MAKE) kubeconform
 
-validate-1.30:
-	KUBE_VERSION=1.30.4 $(MAKE) kubeconform
-
-validate-1.31:
-	KUBE_VERSION=1.31.0 $(MAKE) kubeconform
+validate-1.22:
+	KUBE_VERSION=1.22.0 $(MAKE) kubeconform
 
 .PHONY: kubeconform
 kubeconform: crdschemas manifests $(KUBECONFORM_BIN)
 	$(KUBECONFORM_BIN) -kubernetes-version $(KUBE_VERSION) -schema-location 'default' -schema-location 'crdschemas/{{ .ResourceKind }}.json' -skip CustomResourceDefinition manifests/
 
-.PHONY: kubescape
-kubescape: $(KUBESCAPE_BIN) ## Runs a security analysis on generated manifests - failing if risk score is above threshold percentage 't'
-	$(KUBESCAPE_BIN) scan -s framework -t $(KUBESCAPE_THRESHOLD) nsa manifests/*.yaml --exceptions 'kubescape-exceptions.json'
-
 .PHONY: fmt
 fmt: $(JSONNETFMT_BIN)
 	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
```
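The `validate-*` targets are thin wrappers that re-invoke make with a pinned `KUBE_VERSION`, so the schema checks can also be run by hand. A sketch using the v0.10.0-side versions (the direct `kubeconform` call assumes the project's `tmp/bin` tool directory, as used by `build.sh`):

```sh
# What `make validate` fans out to on the v0.10.0 side:
KUBE_VERSION=1.21.1 make kubeconform
KUBE_VERSION=1.22.0 make kubeconform

# Roughly the underlying invocation from the kubeconform target:
tmp/bin/kubeconform -kubernetes-version 1.22.0 \
  -schema-location 'default' \
  -schema-location 'crdschemas/{{ .ResourceKind }}.json' \
  -skip CustomResourceDefinition manifests/
```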
**README.md** · 426 changes

```diff
@@ -16,13 +16,49 @@ Components included in this package:
 * Highly available [Prometheus](https://prometheus.io/)
 * Highly available [Alertmanager](https://github.com/prometheus/alertmanager)
 * [Prometheus node-exporter](https://github.com/prometheus/node_exporter)
 * [Prometheus blackbox-exporter](https://github.com/prometheus/blackbox_exporter)
-* [Prometheus Adapter for Kubernetes Metrics APIs](https://github.com/kubernetes-sigs/prometheus-adapter)
+* [Prometheus Adapter for Kubernetes Metrics APIs](https://github.com/DirectXMan12/k8s-prometheus-adapter)
 * [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)
 * [Grafana](https://grafana.com/)
 
 This stack is meant for cluster monitoring, so it is pre-configured to collect metrics from all Kubernetes components. In addition to that it delivers a default set of dashboards and alerting rules. Many of the useful dashboards and alerts come from the [kubernetes-mixin project](https://github.com/kubernetes-monitoring/kubernetes-mixin), similar to this project it provides composable jsonnet as a library for users to customize to their needs.
 
 ## Warning
 
 If you are migrating from `release-0.7` branch or earlier please read [what changed and how to migrate in our guide](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/migration-guide.md).
 
+## Table of contents
+
+- [kube-prometheus](#kube-prometheus)
+  - [Warning](#warning)
+  - [Table of contents](#table-of-contents)
+  - [Prerequisites](#prerequisites)
+    - [minikube](#minikube)
+  - [Compatibility](#compatibility)
+    - [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix)
+  - [Quickstart](#quickstart)
+    - [Access the dashboards](#access-the-dashboards)
+  - [Customizing Kube-Prometheus](#customizing-kube-prometheus)
+    - [Installing](#installing)
+    - [Compiling](#compiling)
+    - [Apply the kube-prometheus stack](#apply-the-kube-prometheus-stack)
+    - [Containerized Installing and Compiling](#containerized-installing-and-compiling)
+  - [Update from upstream project](#update-from-upstream-project)
+    - [Update jb](#update-jb)
+    - [Update kube-prometheus](#update-kube-prometheus)
+    - [Compile the manifests and apply](#compile-the-manifests-and-apply)
+  - [Configuration](#configuration)
+  - [Customization Examples](#customization-examples)
+    - [Minikube Example](#minikube-example)
+  - [Continuous Delivery](#continuous-delivery)
+  - [Troubleshooting](#troubleshooting)
+    - [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
+      - [Authentication problem](#authentication-problem)
+      - [Authorization problem](#authorization-problem)
+    - [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
+    - [Error retrieving kube-proxy metrics](#error-retrieving-kube-proxy-metrics)
+  - [Contributing](CONTRIBUTING.md)
+  - [License](#license)
+
 ## Prerequisites
 
 You will need a Kubernetes cluster, that's it! By default it is assumed, that the kubelet uses token authentication and authorization, as otherwise Prometheus needs a client certificate, which gives it full access to the kubelet, rather than just the metrics. Token authentication and authorization allows more fine grained and easier access control.
```
````diff
@@ -32,59 +68,15 @@ This means the kubelet configuration must contain these flags:
 * `--authentication-token-webhook=true` This flag enables, that a `ServiceAccount` token can be used to authenticate against the kubelet(s). This can also be enabled by setting the kubelet configuration value `authentication.webhook.enabled` to `true`.
 * `--authorization-mode=Webhook` This flag enables, that the kubelet will perform an RBAC request with the API to determine, whether the requesting entity (Prometheus in this case) is allowed to access a resource, in specific for this project the `/metrics` endpoint. This can also be enabled by setting the kubelet configuration value `authorization.mode` to `Webhook`.
 
-This stack provides [resource metrics](https://github.com/kubernetes/metrics#resource-metrics-api) by deploying
-the [Prometheus Adapter](https://github.com/kubernetes-sigs/prometheus-adapter).
-This adapter is an Extension API Server and Kubernetes needs to be have this feature enabled, otherwise the adapter has
-no effect, but is still deployed.
-
-## Compatibility
-
-The following Kubernetes versions are supported and work as we test against these versions in their respective branches. But note that other versions might work!
-
-| kube-prometheus stack | Kubernetes 1.23 | Kubernetes 1.24 | Kubernetes 1.25 | Kubernetes 1.26 | Kubernetes 1.27 | Kubernetes 1.28 | Kubernetes 1.29 | Kubernetes 1.30 | Kubernetes 1.31 |
-|--------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
-| [`release-0.11`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.11) | ✔ | ✔ | ✗ | x | x | x | x | x | x |
-| [`release-0.12`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.12) | ✗ | ✔ | ✔ | x | x | x | x | x | x |
-| [`release-0.13`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.13) | ✗ | ✗ | x | ✔ | ✔ | ✔ | x | x | x |
-| [`release-0.14`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.14) | ✗ | ✗ | x | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [`main`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | x | x | ✔ | ✔ | ✔ | ✔ | ✔ |
-
-## Quickstart
-
-This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).
-
-Though for a quickstart a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository in order to try the content out quickly. To try out the stack un-customized run:
-* Create the monitoring stack using the config in the `manifests` directory:
-
-```shell
-# Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources
-# Note that due to some CRD size we are using kubectl server-side apply feature which is generally available since kubernetes 1.22.
-# If you are using previous kubernetes versions this feature may not be available and you would need to use kubectl create instead.
-kubectl apply --server-side -f manifests/setup
-kubectl wait \
-    --for condition=Established \
-    --all CustomResourceDefinition \
-    --namespace=monitoring
-kubectl apply -f manifests/
-```
-
-We create the namespace and CustomResourceDefinitions first to avoid race conditions when deploying the monitoring components.
-Alternatively, the resources in both folders can be applied with a single command
-`kubectl apply --server-side -f manifests/setup -f manifests`, but it may be necessary to run the command multiple times for all components to
-be created successfully.
-
-* And to teardown the stack:
-
-```shell
-kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
-```
+This stack provides [resource metrics](https://github.com/kubernetes/metrics#resource-metrics-api) by deploying the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/).
+This adapter is an Extension API Server and Kubernetes needs to be have this feature enabled, otherwise the adapter has no effect, but is still deployed.
 
 ### minikube
 
 To try out this stack, start [minikube](https://github.com/kubernetes/minikube) with the following command:
 
 ```shell
-$ minikube delete && minikube start --kubernetes-version=v1.23.0 --memory=6g --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.bind-address=0.0.0.0 --extra-config=controller-manager.bind-address=0.0.0.0
+$ minikube delete && minikube start --kubernetes-version=v1.20.0 --memory=6g --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.bind-address=0.0.0.0 --extra-config=controller-manager.bind-address=0.0.0.0
 ```
 
 The kube-prometheus stack includes a resource metrics API server, so the metrics-server addon is not necessary. Ensure the metrics-server addon is disabled on minikube:
````
````diff
@@ -93,28 +85,336 @@ The kube-prometheus stack includes a resource metrics API server, so the metrics
 $ minikube addons disable metrics-server
 ```
 
-## Getting started
+## Compatibility
 
-Before deploying kube-prometheus in a production environment, read:
+### Kubernetes compatibility matrix
 
-1. [Customizing kube-prometheus](docs/customizing.md)
-2. [Customization examples](docs/customizations)
-3. [Accessing Graphical User Interfaces](docs/access-ui.md)
-4. [Troubleshooting kube-prometheus](docs/troubleshooting.md)
+The following versions are supported and work as we test against these versions in their respective branches. But note that other versions might work!
 
-## Documentation
+| kube-prometheus stack | Kubernetes 1.19 | Kubernetes 1.20 | Kubernetes 1.21 | Kubernetes 1.22 | Kubernetes 1.23 |
+|--------------------------------------------------------------------------------------------|-----------------|-----------------|-----------------|-----------------|-----------------|
+| [`release-0.7`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.7) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [`release-0.8`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.8) | ✗ | ✔ | ✔ | ✗ | ✗ |
+| [`release-0.9`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.9) | ✗ | ✗ | ✔ | ✔ | ✗ |
+| [`release-0.10`](https://github.com/prometheus-operator/kube-prometheus/tree/release-0.10) | ✗ | ✗ | ✗ | ✔ | ✔ |
+| [`main`](https://github.com/prometheus-operator/kube-prometheus/tree/main) | ✗ | ✗ | ✗ | ✔ | ✔ |
 
-1. [Continuous Delivery](examples/continuous-delivery)
-2. [Update to new version](docs/update.md)
-3. For more documentation on the project refer to `docs/` directory.
+## Quickstart
 
-## Contributing
+> Note: For versions before Kubernetes v1.21.z refer to the [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix) in order to choose a compatible branch.
 
-To contribute to kube-prometheus, refer to [Contributing](CONTRIBUTING.md).
+This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).
 
-## Join the discussion
+Though for a quickstart a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository in order to try the content out quickly. To try out the stack un-customized run:
+* Create the monitoring stack using the config in the `manifests` directory:
 
-If you have any questions or feedback regarding kube-prometheus, join the [kube-prometheus discussion](https://github.com/prometheus-operator/kube-prometheus/discussions). Alternatively, consider joining [the kubernetes slack #prometheus-operator channel](http://slack.k8s.io/) or project's bi-weekly [Contributor Office Hours](https://docs.google.com/document/d/1-fjJmzrwRpKmSPHtXN5u6VZnn39M28KqyQGBEJsqUOk/edit#).
+```shell
+# Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources
+kubectl apply --server-side -f manifests/setup
+until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
+kubectl apply -f manifests/
+```
+
+We create the namespace and CustomResourceDefinitions first to avoid race conditions when deploying the monitoring components.
+Alternatively, the resources in both folders can be applied with a single command
+`kubectl apply --server-side -f manifests/setup -f manifests`, but it may be necessary to run the command multiple times for all components to
+be created successfullly.
+
+* And to teardown the stack:
+
+```shell
+kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
+```
+
+### Access the dashboards
+
+Prometheus, Grafana, and Alertmanager dashboards can be accessed quickly using `kubectl port-forward` after running the quickstart via the commands below. Kubernetes 1.10 or later is required.
+
+> Note: There are instructions on how to route to these pods behind an ingress controller in the [Exposing Prometheus/Alermanager/Grafana via Ingress](docs/customizations/exposing-prometheus-alertmanager-grafana-ingress.md) section.
+
+Prometheus
+
+```shell
+$ kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
+```
+
+Then access via [http://localhost:9090](http://localhost:9090)
+
+Grafana
+
+```shell
+$ kubectl --namespace monitoring port-forward svc/grafana 3000
+```
+
+Then access via [http://localhost:3000](http://localhost:3000) and use the default grafana user:password of `admin:admin`.
+
+Alert Manager
+
+```shell
+$ kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
+```
+
+Then access via [http://localhost:9093](http://localhost:9093)
````
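Before leaning on the port-forwards above, it can help to confirm the stack actually converged; a small check along these lines (not part of the quoted README):

```sh
# All kube-prometheus pods live in the monitoring namespace by default.
kubectl -n monitoring get pods
kubectl -n monitoring wait --for=condition=Ready pods --all --timeout=300s
```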
````diff
+## Customizing Kube-Prometheus
+
+This section:
+* describes how to customize the kube-prometheus library via compiling the kube-prometheus manifests yourself (as an alternative to the [Quickstart section](#quickstart)).
+* still doesn't require you to make a copy of this entire repository, but rather only a copy of a few select files.
+
+### Installing
+
+The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.
+
+Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install) (the jsonnet package manager):
+
+```shell
+$ mkdir my-kube-prometheus; cd my-kube-prometheus
+$ jb init  # Creates the initial/empty `jsonnetfile.json`
+# Install the kube-prometheus dependency
+$ jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
+
+$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/example.jsonnet -O example.jsonnet
+$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/build.sh -O build.sh
+```
+
+> `jb` can be installed with `go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest`
+
+> An e.g. of how to install a given version of this library: `jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main`
+
+In order to update the kube-prometheus dependency, simply use the jsonnet-bundler update functionality:
+
+```shell
+$ jb update
+```
+
+### Compiling
+
+e.g. of how to compile the manifests: `./build.sh example.jsonnet`
+
+> before compiling, install `gojsontoyaml` tool with `go install github.com/brancz/gojsontoyaml@latest` and `jsonnet` with `go install github.com/google/go-jsonnet/cmd/jsonnet@latest`
+
+Here's [example.jsonnet](example.jsonnet):
+
+> Note: some of the following components must be configured beforehand. See [configuration](#configuration) and [customization-examples](#customization-examples).
+
+```jsonnet mdox-exec="cat example.jsonnet"
+local kp =
+  (import 'kube-prometheus/main.libsonnet') +
+  // Uncomment the following imports to enable its patches
+  // (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
+  // (import 'kube-prometheus/addons/managed-cluster.libsonnet') +
+  // (import 'kube-prometheus/addons/node-ports.libsonnet') +
+  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
+  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
+  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
+  {
+    values+:: {
+      common+: {
+        namespace: 'monitoring',
+      },
+    },
+  };
+
+{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
+{
+  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
+  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
+} +
+// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
+{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
+{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
+{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
+{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
+{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
+{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
+{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
+{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
+{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
+{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
+{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
+```
+
+And here's the [build.sh](build.sh) script (which uses `vendor/` to render all manifests in a json structure of `{filename: manifest-content}`):
+
+```sh mdox-exec="cat build.sh"
+#!/usr/bin/env bash
+
+# This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files.
+
+set -e
+set -x
+# only exit with zero if all commands of the pipeline exit successfully
+set -o pipefail
+
+# Make sure to use project tooling
+PATH="$(pwd)/tmp/bin:${PATH}"
+
+# Make sure to start with a clean 'manifests' dir
+rm -rf manifests
+mkdir -p manifests/setup
+
+# Calling gojsontoyaml is optional, but we would like to generate yaml, not json
+jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml' -- {}
+
+# Make sure to remove json files
+find manifests -type f ! -name '*.yaml' -delete
+rm -f kustomization
+```
````
|
||||
|
||||
> Note you need `jsonnet` (`go get github.com/google/go-jsonnet/cmd/jsonnet`) and `gojsontoyaml` (`go get github.com/brancz/gojsontoyaml`) installed to run `build.sh`. If you just want json output, not yaml, then you can skip the pipe and everything afterwards.
|
||||
|
||||
This script runs the jsonnet code, then reads each key of the generated json and uses that as the file name, and writes the value of that key to that file, and converts each json manifest to yaml.
|
||||
|
||||
### Apply the kube-prometheus stack
|
||||
|
||||
The previous steps (compilation) has created a bunch of manifest files in the manifest/ folder.
|
||||
Now simply use `kubectl` to install Prometheus and Grafana as per your configuration:
|
||||
|
||||
```shell
|
||||
# Update the namespace and CRDs, and then wait for them to be available before creating the remaining resources
|
||||
$ kubectl apply --server-side -f manifests/setup
|
||||
$ kubectl apply -f manifests/
|
||||
```
|
||||
|
||||
> Note that due to some CRD size we are using kubeclt server-side apply feature which is generally available since
|
||||
> kubernetes 1.22. If you are using previous kubernetes versions this feature may not be available and you would need to
|
||||
> use `kubectl create` instead.
|
||||
|
||||
Alternatively, the resources in both folders can be applied with a single command
|
||||
`kubectl apply --server-side -Rf manifests`, but it may be necessary to run the command multiple times for all components to
|
||||
be created successfullly.
|
||||
|
||||
Check the monitoring namespace (or the namespace you have specific in `namespace: `) and make sure the pods are running. Prometheus and Grafana should be up and running soon.
|
||||
|
||||
### Containerized Installing and Compiling
|
||||
|
||||
If you don't care to have `jb` nor `jsonnet` nor `gojsontoyaml` installed, then use `quay.io/coreos/jsonnet-ci` container image. Do the following from this `kube-prometheus` directory:
|
||||
|
||||
```shell
|
||||
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci jb update
|
||||
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci ./build.sh example.jsonnet
|
||||
```
|
||||
|
||||
## Update from upstream project
|
||||
|
||||
You may wish to fetch changes made on this project so they are available to you.
|
||||
|
||||
### Update jb
|
||||
|
||||
`jb` may have been updated so it's a good idea to get the latest version of this binary:
|
||||
|
||||
```shell
|
||||
$ go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
|
||||
```
|
||||
|
||||
### Update kube-prometheus
|
||||
|
||||
The command below will sync with upstream project:
|
||||
|
||||
```shell
|
||||
$ jb update
|
||||
```
|
||||
|
||||
### Compile the manifests and apply
|
||||
|
||||
Once updated, just follow the instructions under "Compiling" and "Apply the kube-prometheus stack" to apply the changes to your cluster.
|
||||
|
||||
## Configuration
|
||||
|
||||
Jsonnet has the concept of hidden fields. These are fields, that are not going to be rendered in a result. This is used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Customizing Kube-Prometheus section](#customizing-kube-prometheus), you can see an example of this, where the `namespace` is being configured to be `monitoring`. In order to not override the whole object, use the `+::` construct of jsonnet, to merge objects, this way you can override individual settings, but retain all other settings and defaults.
|
||||
|
||||
The available fields and their default values can be seen in [main.libsonnet](jsonnet/kube-prometheus/main.libsonnet). Note that many of the fields get their default values from variables, and for example the version numbers are imported from [versions.json](jsonnet/kube-prometheus/versions.json).
|
||||
|
||||
Configuration is mainly done in the `values` map. You can see this being used in the `example.jsonnet` to set the namespace to `monitoring`. This is done in the `common` field, which all other components take their default value from. See for example how Alertmanager is configured in `main.libsonnet`:
|
||||
|
||||
```
|
||||
alertmanager: {
|
||||
name: 'main',
|
||||
// Use the namespace specified under values.common by default.
|
||||
namespace: $.values.common.namespace,
|
||||
version: $.values.common.versions.alertmanager,
|
||||
image: $.values.common.images.alertmanager,
|
||||
mixin+: { ruleLabels: $.values.common.ruleLabels },
|
||||
},
|
||||
```
|
||||
|
||||
The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana ), but needed configuration can be customized from the same top level `values` field. For example to allow anonymous access to grafana, add the following `values` section:
|
||||
|
||||
```
|
||||
grafana+:: {
|
||||
config: { // http://docs.grafana.org/installation/configuration/
|
||||
sections: {
|
||||
"auth.anonymous": {enabled: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
## Customization Examples
|
||||
|
||||
Jsonnet is a turing complete language, any logic can be reflected in it. It also has powerful merge functionalities, allowing sophisticated customizations of any kind simply by merging it into the object the library provides.
|
||||
|
||||
To get started, we provide several customization examples in the [docs/customizations/](docs/customizations) section.

## Minikube Example

To use an easy-to-reproduce example, see [minikube.jsonnet](examples/minikube.jsonnet), which uses the minikube setup as demonstrated in [Prerequisites](#prerequisites). Because we would like easy access to our Prometheus, Alertmanager and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort type services.

## Continuous Delivery

Working examples of use with continuous delivery tools are found in [examples/continuous-delivery](examples/continuous-delivery).

## Troubleshooting

See the general [guidelines](docs/community-support.md) for getting support from the community.

### Error retrieving kubelet metrics

Should the Prometheus `/targets` page show kubelet targets but fail to scrape their metrics successfully, then most likely it is a problem with the authentication and authorization setup of the kubelets.

As described in the [Prerequisites](#prerequisites) section, in order to retrieve metrics from the kubelet, token authentication and authorization must be enabled. Some Kubernetes setup tools do not enable this by default.

- If you are using Google's GKE product, see [cAdvisor support](docs/GKE-cadvisor-support.md).
- If you are using AWS EKS, see [AWS EKS CNI support](docs/EKS-cni-support.md).
- If you are using Weave Net, see [Weave Net support](docs/weave-net-support.md).

#### Authentication problem

The Prometheus `/targets` page will show the kubelet job with the error `403 Unauthorized`, when token authentication is not enabled. Ensure that the `--authentication-token-webhook=true` flag is enabled on all kubelet configurations.

#### Authorization problem

The Prometheus `/targets` page will show the kubelet job with the error `401 Unauthorized`, when token authorization is not enabled. Ensure that the `--authorization-mode=Webhook` flag is enabled on all kubelet configurations.
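One quick way to check what a kubelet is actually running with (a sketch; where the flags live varies by distribution and by whether they are set via config file instead of command line):

```shell
# Inspect the kubelet command line on a node for the two relevant flags
$ ps aux | grep kubelet | tr ' ' '\n' | grep -E -- '--authentication-token-webhook|--authorization-mode'
```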

### kube-state-metrics resource usage

In some environments, kube-state-metrics may need additional
resources. One driver of higher resource needs is a high number of
namespaces; there may be others.

kube-state-metrics resource allocation is managed by
[addon-resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer/nanny).
You can control its parameters by setting variables in the
config. They default to:

```jsonnet
kubeStateMetrics+:: {
  baseCPU: '100m',
  cpuPerNode: '2m',
  baseMemory: '150Mi',
  memoryPerNode: '30Mi',
}
```
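For example, to give kube-state-metrics more headroom in a cluster with a large number of namespaces, one might raise the baseline and per-node memory increments. A sketch, following the same shape as the defaults above (the numbers are illustrative, not recommendations):

```jsonnet
local kp = (import 'kube-prometheus/main.libsonnet') + {
  kubeStateMetrics+:: {
    baseMemory: '300Mi',    // illustrative: double the default baseline
    memoryPerNode: '60Mi',  // illustrative: double the default per-node increment
  },
};
```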

### Error retrieving kube-proxy metrics

By default, kubeadm will configure kube-proxy to listen on 127.0.0.1 for metrics. Because of this, Prometheus is not able to scrape these metrics. The address has to be changed to 0.0.0.0 in one of the following two places:

1. Before cluster initialization, the config file passed to `kubeadm init` should contain a `KubeProxyConfiguration` manifest with the field `metricsBindAddress` set to `0.0.0.0:10249` (see the sketch after this list).
2. If the k8s cluster is already up and running, modify the configmap `kube-proxy` in the namespace `kube-system` and set the `metricsBindAddress` field. After this, the kube-proxy daemonset has to be restarted with
   `kubectl -n kube-system rollout restart daemonset kube-proxy`
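A minimal sketch of the first option — a `KubeProxyConfiguration` document added to the file passed to `kubeadm init` (the file name is an assumption for illustration):

```yaml
# kubeadm-config.yaml -- passed to `kubeadm init --config kubeadm-config.yaml`
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 0.0.0.0:10249
```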

## License
38
RELEASE.md
38
RELEASE.md
@@ -1,12 +1,10 @@
# Release schedule

kube-prometheus will follow the Kubernetes release schedule.
For every new Kubernetes release, there will be a corresponding minor release of
kube-prometheus, although it may not be immediate.

We do not guarantee backports from the `main` branch to older release branches.

This differs from the previous release schedule, which was driven by OpenShift releases.
Kube-prometheus has a somewhat predictable release schedule; releases were
historically cut in sync with OpenShift releases as per downstream needs. So
far there hasn't been any problem with this schedule since it is also in sync
with Kubernetes releases. So for every new Kubernetes release, there is a new
release of kube-prometheus, although it tends to happen later.

# How to cut a new release

@@ -20,9 +18,23 @@ We use [Semantic Versioning](http://semver.org/).
We maintain a separate branch for each minor release, named
`release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`.

The usual flow is to merge new features, changes and bug fixes into the `main` branch.
The decision to backport bugfixes into release branches is made on a case-by-case basis.
Maintaining the release branches for older minor releases is best-effort.
The usual flow is to merge new features and changes into the master branch and
to merge bug fixes into the latest release branch. Bug fixes are then merged
into master from the latest release branch. The master branch should always
contain all commits from the latest release branch.

If a bug fix got accidentally merged into master, cherry-pick commits have to be
created in the latest release branch, which then has to be merged back into
master. Try to avoid that situation.

Maintaining the release branches for older minor releases happens on a best
effort basis.

## Cut a release of kubernetes-mixins

kube-prometheus and kubernetes-mixins releases are tied, so before cutting the
release of kube-prometheus we should make sure that the same release of
kubernetes-mixins exists.

## Update components version

@@ -38,12 +50,12 @@ failed or because the main branch was already up-to-date.

## Update Kubernetes supported versions

The `main` branch of kube-prometheus should support the last 2 versions of
The main branch of kube-prometheus should support the last 2 versions of
Kubernetes. We need to make sure that the CI on the main branch is testing the
kube-prometheus configuration against both of these versions by updating the [CI
workflow](.github/workflows/ci.yaml) to include the latest kind version and the
2 latest image versions that are attached to the kind release. Once that is
done, the [compatibility matrix](README.md#compatibility) in
done, the [compatibility matrix](README.md#kubernetes-compatibility-matrix) in
the README should also be updated to reflect the CI changes.

## Create pull request to cut the release

@@ -75,7 +87,7 @@ make generate

### Update the compatibility matrix

Update the [compatibility matrix](README.md#compatibility) in
Update the [compatibility matrix](README.md#kubernetes-compatibility-matrix) in
the README, by adding the new release based on the `main` branch compatibility
and removing the oldest release branch to only keep the latest 5 branches in the
matrix.

@@ -9,27 +9,12 @@ if [[ $? != 0 ]]; then
    | cut -d : -f 2,3 \
    | tr -d \" \
    | wget -qi -
  mv kind-linux-amd64 developer-workspace/codespaces/kind && chmod +x developer-workspace/codespaces/kind
  export PATH=$PATH:$PWD/developer-workspace/codespaces
  mv kind-linux-amd64 kind && chmod +x kind
fi

cluster_created=$($PWD/developer-workspace/codespaces/kind get clusters 2>&1)
cluster_created=$($PWD/kind get clusters 2>&1)
if [[ "$cluster_created" == "No kind clusters found." ]]; then
  $PWD/developer-workspace/codespaces/kind create cluster --config $PWD/.github/workflows/kind/config.yml
  $PWD/kind create cluster
else
  echo "Cluster '$cluster_created' already present"
fi

helm repo add --force-update cilium https://helm.cilium.io/
helm install cilium cilium/cilium --version 1.9.13 \
  --namespace kube-system \
  --set nodeinit.enabled=true \
  --set kubeProxyReplacement=partial \
  --set hostServices.enabled=false \
  --set externalIPs.enabled=true \
  --set nodePort.enabled=true \
  --set hostPort.enabled=true \
  --set bpf.masquerade=false \
  --set image.pullPolicy=IfNotPresent \
  --set ipam.mode=kubernetes \
  --set operator.replicas=1
fi

@@ -1,46 +0,0 @@
---
weight: 300
toc: true
title: Access Dashboards
menu:
  docs:
    parent: kube
images: []
draft: false
---

Prometheus, Grafana, and Alertmanager dashboards can be accessed quickly using `kubectl port-forward` after running the quickstart via the commands below.

> Kubernetes 1.10 or later is required.

You can also learn how to [expose Prometheus/Alertmanager/Grafana via Ingress](customizations/exposing-prometheus-alertmanager-grafana-ingress.md).

## Prometheus

```shell
kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
```

Open Prometheus on [http://localhost:9090](http://localhost:9090) in your browser.

Check out the [alerts](http://localhost:9090/alerts) and [rules](http://localhost:9090/rules) pages with the pre-configured rules and alerts!
This Prometheus instance is meant to monitor your Kubernetes cluster and alert you if there's a problem with it.

For your own applications we recommend running one or more other instances.

## Grafana

```shell
kubectl --namespace monitoring port-forward svc/grafana 3000
```

Open Grafana on [http://localhost:3000](http://localhost:3000) in your browser.
You can login with the username `admin` and password `admin`.

## Alertmanager

```shell
kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
```

Open Alertmanager on [http://localhost:9093](http://localhost:9093) in your browser.

@@ -1,14 +1,15 @@
---
weight: 304
weight: 630
toc: true
title: Blackbox Exporter
menu:
  docs:
    parent: kube
lead: This guide will help you deploy the blackbox-exporter with the Probe custom resource definition.
lead: This document documents the types introduced by the Prometheus Operator to be consumed by users.
lastmod: "2021-03-08T08:49:31+00:00"
images: []
draft: false
description: This guide will help you deploy the blackbox-exporter with the Probe custom resource definition.
description: Generated API docs for the Prometheus Operator
date: "2021-03-08T08:49:31+00:00"
---
@@ -30,7 +31,7 @@ The `prometheus-operator` defines a `Probe` resource type that can be used to de

* `_config.namespace`: the namespace where the various generated resources (`ConfigMap`, `Deployment`, `Service`, `ServiceAccount` and `ServiceMonitor`) will reside. This does not affect where you can place `Probe` objects; that is determined by the configuration of the `Prometheus` resource. This option is shared with other `kube-prometheus` components; defaults to `default`.
* `_config.imageRepos.blackboxExporter`: the name of the blackbox exporter image to deploy. Defaults to `quay.io/prometheus/blackbox-exporter`.
* `_config.versions.blackboxExporter`: the tag of the blackbox exporter image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `ghcr.io/jimmidyson/configmap-reload`.
* `_config.imageRepos.configmapReloader`: the name of the ConfigMap reloader image to deploy. Defaults to `jimmidyson/configmap-reload`.
* `_config.versions.configmapReloader`: the tag of the ConfigMap reloader image to deploy. Defaults to the version `kube-prometheus` was tested with.
* `_config.resources.blackbox-exporter.requests`: the requested resources; this is used for each container. Defaults to `10m` CPU and `20Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
* `_config.resources.blackbox-exporter.limits`: the resource limits; this is used for each container. Defaults to `20m` CPU and `40Mi` RAM. See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for details.
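For orientation, a minimal `Probe` object that the deployed blackbox-exporter could serve might look like the following sketch — the target URL, module name, and prober address/port are illustrative assumptions, not values prescribed by this document:

```yaml
apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: example-probe
  namespace: default
spec:
  prober:
    # assumed address of the blackbox-exporter service deployed by kube-prometheus
    url: blackbox-exporter.default.svc:19115
  module: http_2xx  # module assumed to exist in the blackbox-exporter configuration
  targets:
    staticConfig:
      static:
        - http://example.com
```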

@@ -36,7 +36,7 @@ There is also a #prometheus channel on the [CNCF Slack](https://slack.cncf.io/).

## kube-state-metrics

For documentation, see the project's [docs directory](https://github.com/kubernetes/kube-state-metrics/tree/main/docs).
For documentation, see the project's [docs directory](https://github.com/kubernetes/kube-state-metrics/tree/master/docs).

For questions, use the #kube-state-metrics channel on the [Kubernetes Slack](https://slack.k8s.io/).
@@ -1,25 +1,24 @@
---
weight: 307
weight: 650
toc: true
title: Prometheus Rules and Grafana Dashboards
menu:
  docs:
    parent: kube
lead: This guide will help you add Prometheus rules and Grafana dashboards on top of kube-prometheus.
lead: Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus
images: []
draft: false
description: This guide will help you add Prometheus rules and Grafana dashboards on top of kube-prometheus.
description: Create Prometheus Rules and Grafana Dashboards on top of kube-prometheus
date: "2021-03-08T23:04:32+01:00"
---

`kube-prometheus` ships with a set of default [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) and [Grafana](http://grafana.com/) dashboards. At some point one might like to extend them; the purpose of this document is to explain how to do this.

All manifests of kube-prometheus are generated using [jsonnet](https://jsonnet.org/).
Prometheus rules and Grafana dashboards specifically follow the
[Prometheus Monitoring Mixins proposal](https://github.com/monitoring-mixins/docs/blob/master/design.pdf).
All manifests of kube-prometheus are generated using [jsonnet](https://jsonnet.org/) and Prometheus rules and Grafana dashboards in specific follow the [Prometheus Monitoring Mixins proposal](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/).

For both the Prometheus rules and the Grafana dashboards, Kubernetes `ConfigMap`s are generated within kube-prometheus. In order to add additional rules and dashboards, simply merge them onto the existing json objects. This document illustrates examples for rules as well as dashboards.

As a basis, all examples in this guide are based on the base example of the kube-prometheus [readme](https://github.com/prometheus-operator/kube-prometheus/blob/main/README.md):
As a basis, all examples in this guide are based on the base example of the kube-prometheus [readme](../../README.md):

```jsonnet mdox-exec="cat example.jsonnet"
local kp =
@@ -31,7 +30,6 @@ local kp =
  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/pyrra.libsonnet') +
  {
    values+:: {
      common+: {
@@ -45,7 +43,6 @@ local kp =
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// { 'setup/pyrra-slo-CustomResourceDefinition': kp.pyrra.crd } +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
@@ -53,7 +50,6 @@ local kp =
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
// { ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) if name != 'crd' } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
@@ -65,14 +61,11 @@ local kp =

### Alerting rules

As per the [Prometheus Monitoring Mixins proposal](https://github.com/monitoring-mixins/docs/blob/master/design.pdf)
Prometheus alerting rules are under the key `prometheusAlerts` in the top level object.
Additional alerting rules can be added by merging into the existing object.
According to the [Prometheus Monitoring Mixins proposal](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/) Prometheus alerting rules are under the key `prometheusAlerts` in the top level object, so in order to add an additional alerting rule, we can simply merge an extra rule into the existing object.

The format is exactly the Prometheus format, so there should be no changes necessary should you have existing rules that you want to include.

> Note that alerts can also be included into this file, using the jsonnet `import` function.
> In this example it is just inlined in order to demonstrate their use in a single file.
> Note that alerts can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.

```jsonnet mdox-exec="cat examples/prometheus-additional-alert-rule-example.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
@@ -127,8 +120,7 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {

In order to add a recording rule, simply do the same with the `prometheusRules` field.

> Note that rules can just as well be included into this file, using the jsonnet `import` function.
> In this example it is just inlined in order to demonstrate their use in a single file.
> Note that rules can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.

```jsonnet mdox-exec="cat examples/prometheus-additional-recording-rule-example.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
@@ -224,21 +216,13 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {

### Changing default rules

Along with adding additional rules, we give the user the option to filter or adjust the existing rules imported by `kube-prometheus/main.libsonnet`.
The recording rules can be found in [kube-prometheus/components/mixin/rules](https://github.com/prometheus-operator/kube-prometheus/tree/main/jsonnet/kube-prometheus/components/mixin/rules)
and [kubernetes-mixin/rules](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules).
The alerting rules can be found in [kube-prometheus/components/mixin/alerts](https://github.com/prometheus-operator/kube-prometheus/tree/main/jsonnet/kube-prometheus/components/mixin/alerts)
and [kubernetes-mixin/alerts](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/alerts).
Along with adding additional rules, we give the user the option to filter or adjust the existing rules imported by `kube-prometheus/main.libsonnet`. The recording rules can be found in [kube-prometheus/components/mixin/rules](../../jsonnet/kube-prometheus/components/mixin/rules) and [kubernetes-mixin/rules](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules) while the alerting rules can be found in [kube-prometheus/components/mixin/alerts](../../jsonnet/kube-prometheus/components/mixin/alerts) and [kubernetes-mixin/alerts](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/alerts).

Knowing which rules to change, the user can now use functions from the [Jsonnet standard library](https://jsonnet.org/ref/stdlib.html) to make these changes.
Below are examples of both a filter and an adjustment being made to the default rules.
These changes can be assigned to a local variable and then added to the `local kp` object as seen in the examples above.
Knowing which rules to change, the user can now use functions from the [Jsonnet standard library](https://jsonnet.org/ref/stdlib.html) to make these changes. Below are examples of both a filter and an adjustment being made to the default rules. These changes can be assigned to a local variable and then added to the `local kp` object as seen in the examples above.

#### Filter

Here the alert `KubeStatefulSetReplicasMismatch` is being filtered out of the group `kubernetes-apps`.
The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet).
You first need to find out in which component the rule is defined (here it is `kubernetesControlPlane`).
Here the alert `KubeStatefulSetReplicasMismatch` is being filtered out of the group `kubernetes-apps`. The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet). You first need to find out in which component the rule is defined (here it is `kubernetesControlPlane`).

```jsonnet
local filter = {
@@ -267,8 +251,7 @@ local filter = {

#### Adjustment

Here the expression for another alert in the same component is updated from its previous value.
The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet).
Here the expression for another alert in the same component is updated from its previous value. The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet).

```jsonnet
local update = {
@@ -353,14 +336,9 @@ Dashboards can either be added using jsonnet or simply a pre-rendered json dashb

### Jsonnet dashboard

We recommend using the [grafonnet](https://github.com/grafana/grafonnet-lib/) library for jsonnet,
which gives you a simple DSL to generate Grafana dashboards.
Following the [Prometheus Monitoring Mixins proposal](https://github.com/monitoring-mixins/docs/blob/master/design.pdf)
additional dashboards are added to the `grafanaDashboards` key, located in the top level object.
To add new jsonnet dashboards, simply add one.
We recommend using the [grafonnet](https://github.com/grafana/grafonnet-lib/) library for jsonnet, which gives you a simple DSL to generate Grafana dashboards. Following the [Prometheus Monitoring Mixins proposal](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/) additional dashboards are added to the `grafanaDashboards` key, located in the top level object. To add new jsonnet dashboards, simply add one.

> Note that dashboards can just as well be included into this file, using the jsonnet `import` function.
> In this example it is just inlined in order to demonstrate their use in a single file.
> Note that dashboards can just as well be included into this file, using the jsonnet `import` function. In this example it is just inlined in order to demonstrate their use in a single file.

```jsonnet mdox-exec="cat examples/grafana-additional-jsonnet-dashboard-example.jsonnet"
local grafana = import 'grafonnet/grafana.libsonnet';
@@ -416,8 +394,7 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {

### Pre-rendered Grafana dashboards

As jsonnet is a superset of json, the jsonnet `import` function can be used to include Grafana dashboard json blobs.
In this example we are importing a [provided example dashboard](https://github.com/prometheus-operator/kube-prometheus/tree/main/examples/example-grafana-dashboard.json).
As jsonnet is a superset of json, the jsonnet `import` function can be used to include Grafana dashboard json blobs. In this example we are importing a [provided example dashboard](../../examples/example-grafana-dashboard.json).

```jsonnet mdox-exec="cat examples/grafana-additional-rendered-dashboard-example.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
@@ -442,8 +419,7 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

In case you have lots of json dashboards exported from the Grafana UI, the above approach is going to take lots of time.
To improve performance, we can use the `rawDashboards` field and provide its value as a json string by using `importstr`:
In case you have lots of json dashboard exported out from grafana UI the above approach is going to take lots of time to improve performance we can use `rawDashboards` field and provide it's value as json string by using `importstr`

```jsonnet mdox-exec="cat examples/grafana-additional-rendered-dashboard-example-2.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') + {
@@ -470,10 +446,7 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {

### Mixins

Kube-prometheus comes with a couple of default mixins, such as the Kubernetes mixin and the node-exporter mixin;
however, there [are many more mixins](https://monitoring.mixins.dev/).
To use other mixins, kube-prometheus has a jsonnet library for creating a PrometheusRule CRD and Grafana dashboards from a mixin.
Below is an example of creating a mixin object that has Prometheus rules and Grafana dashboards:
Kube-prometheus comes with a couple of default mixins as the Kubernetes-mixin and the Node-exporter mixin, however there [are many more mixins](https://monitoring.mixins.dev/). To use other mixins Kube-prometheus has a jsonnet library for creating a Kubernetes PrometheusRule CRD and Grafana dashboards from a mixin. Below is an example of creating a mixin object that has Prometheus rules and Grafana dashboards:

```jsonnet
// Import the library function for adding mixins
@@ -494,10 +467,7 @@ values+:: {
dashboards+:: myMixin.grafanaDashboards
```
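The diff above elides the middle of the example; for orientation, the overall shape of an `addMixin` call looks roughly like the following sketch (the mixin import path is a placeholder, and the library path is an assumption based on how this document imports the rest of kube-prometheus):

```jsonnet
// assumed location of the helper within the kube-prometheus library
local addMixin = (import 'kube-prometheus/lib/mixin.libsonnet');

local myMixin = addMixin({
  name: 'myMixin',
  mixin: import 'my-mixin/mixin.libsonnet',  // hypothetical mixin to pull in
});
// myMixin.prometheusRules and myMixin.grafanaDashboards are then merged as shown above
```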

The `prometheusRules` object is a PrometheusRule CRD. It should be defined as its own jsonnet object.
If you define multiple mixins in a single jsonnet object, there is a possibility that they will overwrite each others'
configuration and there will be unintended effects.
Therefore, use the `prometheusRules` object as its own jsonnet object:
The `prometheusRules` object is a PrometheusRule Kubernetes CRD and it should be defined as its own jsonnet object. If you define multiple mixins in a single jsonnet object there is a possibility that they will overwrite each others' configuration and there will be unintended effects. Therefore, use the `prometheusRules` object as its own jsonnet object:

```jsonnet
...

@@ -521,8 +491,7 @@ local myMixin = addMixin({
});
```

The library also has two optional parameters - the namespace for the `PrometheusRule` CRD and the dashboard folder for the Grafana dashboards.
The below example shows how to use both:
The library has also two optional parameters - the namespace for the `PrometheusRule` CRD and the dashboard folder for the Grafana dashboards. The below example shows how to use both:

```jsonnet
local myMixin = addMixin({
@@ -538,8 +507,7 @@ local myMixin = addMixin({
});
```

The created `prometheusRules` object will have the metadata field `namespace` added and the usage will remain the same.
However, the `grafanaDashboards` will be added to the `folderDashboards` field instead of the `dashboards` field, as shown in the example below:
The created `prometheusRules` object will have the metadata field `namespace` added and the usage will remain the same. However, the `grafanaDashboards` will be added to the `folderDashboards` field instead of the `dashboards` field as shown in the example below:

```jsonnet
values+:: {

@@ -1,41 +0,0 @@
### Dropping unwanted dashboards

When deploying kube-prometheus, your Grafana instance is deployed with a lot of dashboards by default. All those dashboards are coming from upstream projects like [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin), [prometheus-mixin](https://github.com/prometheus/prometheus/tree/main/documentation/prometheus-mixin) and [node-exporter-mixin](https://github.com/prometheus/node_exporter/tree/master/docs/node-mixin), among others.

In case you find out that you don't need some of them, you can choose to remove those dashboards as in the example below, which removes the [`alertmanager-overview.json`](https://github.com/prometheus/alertmanager/blob/main/doc/alertmanager-mixin/dashboards/overview.libsonnet) dashboard.

```jsonnet mdox-exec="cat examples/drop-dashboards.jsonnet"
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      grafana+: {
        dashboards: std.mergePatch(super.dashboards, {
          // Add more unwanted dashboards here
          'alertmanager-overview.json': null,
        }),
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
```

@@ -1,14 +1,15 @@
---
weight: 303
weight: 500
toc: true
title: Expose via Ingress
menu:
  docs:
    parent: kube
lead: This guide will help you deploy a Kubernetes Ingress to expose Prometheus, Alertmanager and Grafana.
lead: How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana.
images: []
draft: false
description: This guide will help you deploy a Kubernetes Ingress to expose Prometheus, Alertmanager and Grafana.
description: How to setup a Kubernetes Ingress to expose the Prometheus, Alertmanager and Grafana.
date: "2021-03-08T23:04:32+01:00"
---

In order to access the web interfaces via the Internet, [Kubernetes Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) is a popular option. This guide explains how a Kubernetes Ingress can be set up to expose the Prometheus, Alertmanager and Grafana UIs that are included in the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project.

@@ -39,12 +40,10 @@ Also, the applications provide external links to themselves in alerts and variou

```jsonnet
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
    _config+:: {
      namespace: 'monitoring',
    },
    prometheus+:: {
      prometheus+: {
@@ -97,18 +96,17 @@
  },
};

// Output a kubernetes List object with both ingresses (k8s-libsonnet)
k.core.v1.list.new([
  kp.ingress['prometheus-k8s'],
  kp.ingress['basic-auth-secret'],
])
```

In order to expose Alertmanager and Grafana, create additional fields containing an ingress object, but pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively: for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL. Note that the external URL for Grafana is set in a different way than the external URL for Prometheus or Alertmanager. See [ingress.jsonnet](https://github.com/prometheus-operator/kube-prometheus/tree/main/examples/ingress.jsonnet) for how to set the Grafana external URL.
In order to expose Alertmanager and Grafana, create additional fields containing an ingress object, but pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively: for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL. Note that the external URL for Grafana is set in a different way than the external URL for Prometheus or Alertmanager. See [ingress.jsonnet](../../examples/ingress.jsonnet) for how to set the Grafana external URL.

In order to render the ingress objects similarly to the other objects, use the pattern demonstrated in the [main readme](https://github.com/prometheus-operator/kube-prometheus/tree/main/README.md):
In order to render the ingress objects similarly to the other objects, use the pattern demonstrated in the [main readme](../../README.md):

```jsonnet
```
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
@@ -121,36 +119,4 @@ In order to render the ingress objects similar to the other objects use as demon

Note that, in comparison, only the last line was added; the rest is identical to the original.

See [ingress.jsonnet](https://github.com/prometheus-operator/kube-prometheus/tree/main/examples/ingress.jsonnet) for an example implementation.

## Adding Ingress namespace to NetworkPolicies

NetworkPolicies restricting access to the components are added by default. These can either be removed as in
[networkpolicies-disabled.jsonnet](https://github.com/prometheus-operator/kube-prometheus/tree/main/examples/networkpolicies-disabled.jsonnet) or modified as
described here.

This is an example for alertmanager, but the same can be applied to grafana and prometheus.

```jsonnet
{
  alertmanager+:: {
    networkPolicy+: {
      spec+: {
        ingress: [
          super.ingress[0] + {
            from+: [
              {
                namespaceSelector: {
                  matchLabels: {
                    'app.kubernetes.io/name': 'ingress-nginx',
                  },
                },
              },
            ],
          },
        ] + super.ingress[1:],
      },
    },
  },
}
```
See [ingress.jsonnet](../../examples/ingress.jsonnet) for an example implementation.

@@ -1,7 +1,7 @@
### Pod Anti-Affinity

To prevent `Prometheus` and `Alertmanager` instances from being deployed onto the same node when
possible, one can include the [kube-prometheus-anti-affinity.libsonnet](https://github.com/prometheus-operator/kube-prometheus/tree/main/jsonnet/kube-prometheus/addons/anti-affinity.libsonnet) mixin:
possible, one can include the [kube-prometheus-anti-affinity.libsonnet](../../jsonnet/kube-prometheus/addons/anti-affinity.libsonnet) mixin:

```jsonnet mdox-exec="cat examples/anti-affinity.jsonnet"
local kp = (import 'kube-prometheus/main.libsonnet') +

@@ -1,61 +0,0 @@
### Prometheus-agent mode

***ATTENTION***: Although it is possible to run Prometheus in Agent mode with Prometheus-Operator, it requires strategic merge patches. This practice is not recommended and we do not provide support if Prometheus doesn't work as you expect. **Try it at your own risk!**

```jsonnet mdox-exec="cat examples/prometheus-agent.jsonnet"
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      prometheus+: {
        resources: {
          requests: { memory: '100Mi' },
        },
        enableFeatures: ['agent'],
      },
    },
    prometheus+: {
      prometheus+: {
        spec+: {
          replicas: 1,
          alerting:: {},
          ruleSelector:: {},
          remoteWrite: [{
            url: 'http://remote-write-url.com',
          }],
          containers+: [
            {
              name: 'prometheus',
              args+: [
                '--config.file=/etc/prometheus/config_out/prometheus.env.yaml',
                '--storage.agent.path=/prometheus',
                '--enable-feature=agent',
                '--web.enable-lifecycle',
              ],
            },
          ],
        },
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
```

@@ -1,6 +1,6 @@
### Static etcd configuration

In order to configure a static etcd cluster to scrape, there is a simple [static-etcd.libsonnet](https://github.com/prometheus-operator/kube-prometheus/tree/main/jsonnet/kube-prometheus/addons/static-etcd.libsonnet) mixin prepared.
In order to configure a static etcd cluster to scrape, there is a simple [static-etcd.libsonnet](../../jsonnet/kube-prometheus/addons/static-etcd.libsonnet) mixin prepared.

An example of how to use it can be seen below:

@@ -5,7 +5,7 @@ Some Kubernetes installations source all their images from an internal registry.

To produce the `docker pull/tag/push` commands that will synchronize upstream images to `internal-registry.com/organization` (after having run the `jb` command to populate the vendor directory):

```shell
$ jsonnet -J vendor -S --tla-str repository=internal-registry.com/organization examples/sync-to-internal-registry.jsonnet
$ jsonnet -J vendor -S --tla-str repository=internal-registry.com/organization sync-to-internal-registry.jsonnet
$ docker pull k8s.gcr.io/addon-resizer:1.8.4
$ docker tag k8s.gcr.io/addon-resizer:1.8.4 internal-registry.com/organization/addon-resizer:1.8.4
$ docker push internal-registry.com/organization/addon-resizer:1.8.4

@@ -1,170 +0,0 @@
# Customizing Kube-Prometheus

This section:
* describes how to customize the kube-prometheus library via compiling the kube-prometheus manifests yourself (as an alternative to the [README.md quickstart section](../README.md#quickstart)).
* still doesn't require you to make a copy of this entire repository, but rather only a copy of a few select files.

## Installing

The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.

Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install) (the jsonnet package manager):

```shell
$ mkdir my-kube-prometheus; cd my-kube-prometheus
$ jb init  # Creates the initial/empty `jsonnetfile.json`
# Install the kube-prometheus dependency
$ jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`

$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/example.jsonnet -O example.jsonnet
$ wget https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/main/build.sh -O build.sh
$ chmod +x build.sh
```

> `jb` can be installed with `go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest`

> An example of how to install a given version of this library: `jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@main`

In order to update the kube-prometheus dependency, simply use the jsonnet-bundler update functionality:

```shell
$ jb update
```

## Generating

Example of how to compile the manifests: `./build.sh example.jsonnet`

> Before compiling, install the `gojsontoyaml` tool with `go install github.com/brancz/gojsontoyaml@latest` and `jsonnet` with `go install github.com/google/go-jsonnet/cmd/jsonnet@latest`.

Here's [example.jsonnet](../example.jsonnet):

> Note: some of the following components must be configured beforehand. See [configuration](#configuring) and [customization examples](customizations).

```jsonnet mdox-exec="cat example.jsonnet"
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
  // (import 'kube-prometheus/addons/managed-cluster.libsonnet') +
  // (import 'kube-prometheus/addons/node-ports.libsonnet') +
  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/pyrra.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// { 'setup/pyrra-slo-CustomResourceDefinition': kp.pyrra.crd } +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
// { ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) if name != 'crd' } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
```

And here's the [build.sh](../build.sh) script (which uses `vendor/` to render all manifests in a json structure of `{filename: manifest-content}`):

```sh mdox-exec="cat ./build.sh"
#!/usr/bin/env bash

# This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files.

set -e
set -x
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail

# Make sure to use project tooling
PATH="$(pwd)/tmp/bin:${PATH}"

# Make sure to start with a clean 'manifests' dir
rm -rf manifests
mkdir -p manifests/setup

# Calling gojsontoyaml is optional, but we would like to generate yaml, not json
jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml' -- {}

# Make sure to remove json files
find manifests -type f ! -name '*.yaml' -delete
rm -f kustomization

```

> Note you need `jsonnet` (`go install github.com/google/go-jsonnet/cmd/jsonnet@latest`) and `gojsontoyaml` (`go install github.com/brancz/gojsontoyaml@latest`) installed to run `build.sh`. If you just want json output, not yaml, then you can skip the pipe and everything afterwards.

This script runs the jsonnet code, then reads each key of the generated json, uses that as the file name, writes the value of that key to that file, and converts each json manifest to yaml.
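For example, the json-only variant mentioned in the note above reduces to the render step alone (a sketch):

```shell
# Writes one file per key of the generated object; contents are json
$ jsonnet -J vendor -m manifests example.jsonnet
```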

## Configuring

Jsonnet has the concept of hidden fields: fields that are not rendered in the result. They are used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Generating section](#generating), you can see an example of this, where the `namespace` is configured to be `monitoring`. In order to not override the whole object, use the `+::` construct of jsonnet to merge objects; this way you can override individual settings while retaining all other settings and defaults.

The available fields and their default values can be seen in [main.libsonnet](../jsonnet/kube-prometheus/main.libsonnet). Note that many of the fields get their default values from variables; for example, the version numbers are imported from [versions.json](../jsonnet/kube-prometheus/versions.json).

Configuration is mainly done in the `values` map. You can see this being used in the `example.jsonnet` to set the namespace to `monitoring`. This is done in the `common` field, from which all other components take their default value. See for example how Alertmanager is configured in `main.libsonnet`:

```jsonnet
alertmanager: {
  name: 'main',
  // Use the namespace specified under values.common by default.
  namespace: $.values.common.namespace,
  version: $.values.common.versions.alertmanager,
  image: $.values.common.images.alertmanager,
  mixin+: { ruleLabels: $.values.common.ruleLabels },
},
```

The grafana definition is located in a different project (https://github.com/brancz/kubernetes-grafana), but the needed configuration can be customized from the same top-level `values` field. For example, to allow anonymous access to Grafana, add the following `values` section:

```jsonnet
grafana+:: {
  config: {  // http://docs.grafana.org/installation/configuration/
    sections: {
      'auth.anonymous': { enabled: true },
    },
  },
},
```

## Apply the kube-prometheus stack

The previous generation step has created a bunch of manifest files in the `manifests/` folder.
Now simply use `kubectl` to install Prometheus and Grafana as per your configuration:

```shell
# Update the namespace and CRDs, and then wait for them to be available before creating the remaining resources
$ kubectl apply --server-side -f manifests/setup
$ kubectl apply -f manifests/
```

> Note that due to some CRD size we are using the kubectl server-side apply feature, which is generally available since
> Kubernetes 1.22. If you are using previous Kubernetes versions, this feature may not be available and you would need to
> use `kubectl create` instead.

Alternatively, the resources in both folders can be applied with a single command
`kubectl apply --server-side -Rf manifests`, but it may be necessary to run the command multiple times for all components to
be created successfully.

Check the monitoring namespace (or the namespace you have specified in `namespace:`) and make sure the pods are running. Prometheus and Grafana should be up and running soon.
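A quick way to verify, assuming the default `monitoring` namespace from the examples above:

```shell
# Wait until all pods report Running / Ready before port-forwarding to the UIs
$ kubectl --namespace monitoring get pods
```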

## Minikube Example

To use an easy-to-reproduce example, see [minikube.jsonnet](../examples/minikube.jsonnet), which uses the minikube setup as demonstrated in [Prerequisites](../README.md#prerequisites). Because we would like easy access to our Prometheus, Alertmanager and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort type services.

@@ -1,14 +1,17 @@
---
weight: 301
weight: 500
toc: true
title: Deploy to kind
menu:
  docs:
    parent: kube
lead: This guide will help you deploy kube-prometheus on Kubernetes kind.
lead: Deploy kube-prometheus to Kubernetes kind.
images: []
draft: false
description: This guide will help you deploy kube-prometheus on Kubernetes kind.
description: Deploy kube-prometheus to Kubernetes kind.
date: "2021-03-08T23:04:32+01:00"
---

---

Time to explain how!

@@ -1,19 +1,20 @@
---
weight: 302
weight: 500
toc: true
title: Deploy to kubeadm
menu:
  docs:
    parent: kube
lead: This guide will help you deploy kube-prometheus on Kubernetes kubeadm.
lead: Deploy kube-prometheus to Kubernetes kubeadm.
images: []
draft: false
description: This guide will help you deploy kube-prometheus on Kubernetes kubeadm.
description: Deploy kube-prometheus to Kubernetes kubeadm.
date: "2021-03-08T23:04:32+01:00"
---

The [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) tool is linked by Kubernetes as the official way to deploy and manage self-hosted clusters. kubeadm does a lot of heavy lifting by automatically configuring your Kubernetes cluster with some common options. This guide is intended to show you how to deploy Prometheus, Prometheus Operator and kube-prometheus to get you started monitoring your cluster that was deployed with kubeadm.

This guide assumes you have a basic understanding of how to use the functionality the Prometheus Operator implements. If you haven't yet, we recommend reading through the [getting started guide](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md) as well as the [alerting guide](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md).
This guide assumes you have a basic understanding of how to use the functionality the Prometheus Operator implements. If you haven't yet, we recommend reading through the [getting started guide](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md) as well as the [alerting guide](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/alerting.md).

## kubeadm Pre-requisites

@@ -22,39 +23,55 @@ This guide assumes you have some familiarity with `kubeadm` or at least have dep

By default, `kubeadm` runs these pods on your master and bound to `127.0.0.1`. There are a couple of ways to change this. The recommended way to change these features is to use the [kubeadm config file](https://kubernetes.io/docs/reference/generated/kubeadm/#config-file). An example configuration file can be used:

```yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
controlPlaneEndpoint: "192.168.1.173:6443"
apiServer:
  extraArgs:
    authorization-mode: "Node,RBAC"
controllerManager:
  extraArgs:
    bind-address: "0.0.0.0"
scheduler:
  extraArgs:
    bind-address: "0.0.0.0"
certificatesDir: "/etc/kubernetes/pki"
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: 192.168.1.173
  bindPort: 6443
authorizationModes:
- Node
- RBAC
certificatesDir: /etc/kubernetes/pki
cloudProvider:
etcd:
  # one of local or external
  local:
    dataDir: "/var/lib/etcd"
kubernetesVersion: "v1.23.1"
    dataDir: /var/lib/etcd
  endpoints: null
imageRepository: gcr.io/google_containers
kubernetesVersion: v1.8.3
networking:
  dnsDomain: "cluster.local"
  serviceSubnet: "10.96.0.0/12"
imageRepository: "registry.k8s.io"
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
nodeName: your-dev
tokenTTL: 24h0m0s
controllerManagerExtraArgs:
  address: 0.0.0.0
schedulerExtraArgs:
  address: 0.0.0.0
```
|
||||
|
||||
Notice the `.scheduler.extraArgs` and `.controllerManager.extraArgs`. This exposes the `kube-controller-manager` and `kube-scheduler` services to the rest of the cluster. If you have Kubernetes core components as pods in the kube-system namespace, ensure that the `kube-prometheus-exporter-kube-scheduler` and `kube-prometheus-exporter-kube-controller-manager` services' `spec.selector` values match those of the pods.
Notice the `schedulerExtraArgs` and `controllerManagerExtraArgs`. This exposes the `kube-controller-manager` and `kube-scheduler` services to the rest of the cluster. If you have Kubernetes core components as pods in the kube-system namespace, ensure that the `kube-prometheus-exporter-kube-scheduler` and `kube-prometheus-exporter-kube-controller-manager` services' `spec.selector` values match those of the pods.
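
If in doubt, you can compare the two directly (a quick sketch; the exporter service names are the ones used by this guide, and `component=kube-scheduler` is the label kubeadm puts on its control-plane pods):

```bash
# The selector of the service must match the labels of the pod it fronts.
kubectl -n kube-system get service kube-prometheus-exporter-kube-scheduler -o jsonpath='{.spec.selector}'
kubectl -n kube-system get pods -l component=kube-scheduler --show-labels
```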

In previous versions of Kubernetes, we had to make a change to the `kubelet` setting with regard to `cAdvisor` monitoring on the control-plane as well as all the nodes. But this is **no longer required due to [a change in Kubernetes](https://github.com/kubernetes/kubernetes/issues/56523)**.
In addition, we will be using `node-exporter` to monitor the `cAdvisor` service on all the nodes. This, however, requires a change to the `kubelet` service on the master as well as on all the nodes. According to the Kubernetes documentation:

> The kubeadm deb package ships with configuration for how the kubelet should be run. Note that the `kubeadm` CLI command will never touch this drop-in file. This drop-in file belongs to the kubeadm deb/rpm package.

Again, we need to expose the `cadvisor` that is installed and managed by the `kubelet` daemon and allow webhook token authentication. To do so, we do the following on all the masters and nodes:

```bash
KUBEADM_SYSTEMD_CONF=/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sed -e "/cadvisor-port=0/d" -i "$KUBEADM_SYSTEMD_CONF"
if ! grep -q "authentication-token-webhook=true" "$KUBEADM_SYSTEMD_CONF"; then
  sed -e "s/--authorization-mode=Webhook/--authentication-token-webhook=true --authorization-mode=Webhook/" -i "$KUBEADM_SYSTEMD_CONF"
fi
systemctl daemon-reload
systemctl restart kubelet
```

In case you already have a Kubernetes cluster deployed with kubeadm, change the address that kube-controller-manager and kube-scheduler listen on, in addition to the previous kubelet change:

```
sed -e "s/- --bind-address=127.0.0.1/- --bind-address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -e "s/- --bind-address=127.0.0.1/- --bind-address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-scheduler.yaml
sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -e "s/- --address=127.0.0.1/- --address=0.0.0.0/" -i /etc/kubernetes/manifests/kube-scheduler.yaml
```

With these changes, your Kubernetes cluster is ready.
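
To verify that the change took effect, you can check on the master that both components now bind to all interfaces instead of loopback (a sketch; 10257 and 10259 are the default secure ports of kube-controller-manager and kube-scheduler on recent Kubernetes versions):

```bash
# Expect 0.0.0.0 (or *) rather than 127.0.0.1 in the local address column.
ss -tlnp | grep -E ':10257|:10259'
```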

@@ -78,12 +95,12 @@ Once you complete this guide you will monitor the following:

## Getting Up and Running Fast with Kube-Prometheus

To help get started more quickly with monitoring Kubernetes clusters, [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) was created. It is a collection of manifests including dashboards and alerting rules that can easily be deployed. It utilizes the Prometheus Operator and all the manifests demonstrated in this guide.
To help get started more quickly with monitoring Kubernetes clusters, [kube-prometheus](https://github.com/coreos/kube-prometheus) was created. It is a collection of manifests including dashboards and alerting rules that can easily be deployed. It utilizes the Prometheus Operator and all the manifests demonstrated in this guide.

This section represents a quick installation and is not intended to teach you about all the components. The easiest way to get started is to clone this repository and use the `kube-prometheus` section of the code.

```
git clone https://github.com/prometheus-operator/kube-prometheus
git clone https://github.com/coreos/kube-prometheus
cd kube-prometheus/
```

@@ -133,7 +150,7 @@ kubectl apply -f manifests/prometheus/prometheus-k8s-roles.yaml
kubectl apply -f manifests/prometheus/prometheus-k8s-role-bindings.yaml
```

Finally, install the [Alertmanager](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)
Finally, install the [Alertmanager](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/alerting.md)

```
kubectl --namespace="$NAMESPACE" apply -f manifests/alertmanager
@@ -145,4 +162,4 @@ Now you should have a working cluster. After all the pods are ready, you should

* Alertmanager UI on node port `30903`
* Grafana on node port `30902`

These can of course be changed via the Service definitions. It is recommended to look at the [Exposing Prometheus and Alert Manager](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md) documentation for more detailed information on how to expose these services.
These can of course be changed via the Service definitions. It is recommended to look at the [Exposing Prometheus and Alert Manager](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/exposing-prometheus-and-alertmanager.md) documentation for more detailed information on how to expose these services.

@@ -1,14 +1,15 @@
---
weight: 305
weight: 640
toc: true
title: Monitoring external etcd
menu:
  docs:
    parent: kube
lead: This guide will help you monitoring an external etcd cluster.
lead: This guide will help you monitor an external etcd cluster.
images: []
draft: false
description: This guide will help you monitoring an external etcd cluster.
description: This guide will help you monitor an external etcd cluster.
date: "2021-03-08T23:04:32+01:00"
---

When the etcd cluster is not hosted inside Kubernetes.

@@ -1,55 +1,43 @@
---
weight: 306
weight: 640
toc: true
title: Monitoring other Namespaces
menu:
  docs:
    parent: kube
lead: This guide will help you monitoring applications in other namespaces.
lead: This guide will help you monitor applications in other Namespaces.
images: []
draft: false
description: This guide will help you monitoring applications in other namespaces.
description: This guide will help you monitor applications in other Namespaces.
date: "2021-03-08T23:04:32+01:00"
---

By default the RBAC rules are only enabled for the `Default` and `kube-system` namespaces.
This guide will help you monitor applications in other Namespaces. By default the RBAC rules are only enabled for the `Default` and `kube-system` Namespace during Install.

# Setup

You have to give the list of the namespaces that you want to be able to monitor.
You have to give the list of the Namespaces that you want to be able to monitor.
This is done in the variable `prometheus.roleSpecificNamespaces`. You usually set this in your `.jsonnet` file when building the manifests.

Example to create the needed `Role` and `RoleBinding` for the Namespace `foo`:

```
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      prometheus+:: {
        namespaces: ["default", "kube-system", "monitoring", "foo"],
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) }
```
```
local kp = (import 'kube-prometheus/main.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    prometheus+:: {
      namespaces: ["default", "kube-system", "foo"],
    },
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
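
As a hypothetical follow-up, once the `Role` and `RoleBinding` for `foo` exist, a `ServiceMonitor` created in that Namespace is discovered as usual (the application name, label and port below are placeholders):

```bash
cat <<'EOF' | kubectl apply -f -
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: my-app
  namespace: foo
spec:
  selector:
    matchLabels:
      app: my-app
  endpoints:
  - port: web
EOF
```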

@@ -1,24 +0,0 @@
## Security

The manifests generated in this repository are subject to a security audit in CI via [kubescape](https://github.com/armosec/kubescape).
The scan can be run locally via `make kubescape`.

While we aim for best practices in terms of security by default, due to the nature of the project, we are required to make exceptions in the following components:

#### node-exporter
* Host Port is set. [Kubernetes already sets a Host Port by default when Host Network is enabled](https://github.com/kubernetes/kubernetes/blob/1945829906546caf867992669a0bfa588edf8be6/pkg/apis/core/v1/defaults.go#L402-L411). Since nothing can be done here, we configure it to our preferred port.
* Host PID is set to `true`, since node-exporter requires direct access to the host namespace to gather statistics.
* Host Network is set to `true`, since node-exporter requires direct access to the host network to gather statistics.
* `automountServiceAccountToken` is set to `true` on the Pod level, as the kube-rbac-proxy sidecar requires a connection to the Kubernetes API server.

#### prometheus-adapter
* `automountServiceAccountToken` is set to `true` on the Pod level, as the application requires a connection to the Kubernetes API server.

#### blackbox-exporter
* `automountServiceAccountToken` is set to `true` on the Pod level, as the kube-rbac-proxy sidecar requires a connection to the Kubernetes API server.

#### kube-state-metrics
* `automountServiceAccountToken` is set to `true` on the Pod level, as the kube-rbac-proxy sidecars require a connection to the Kubernetes API server.

#### prometheus-operator
* `automountServiceAccountToken` is set to `true` on the Pod level, as the kube-rbac-proxy sidecars require a connection to the Kubernetes API server.

@@ -1,49 +0,0 @@
# Troubleshooting

See the general [guidelines](community-support.md) for getting support from the community.

## Error retrieving kubelet metrics

Should the Prometheus `/targets` page show kubelet targets but fail to successfully scrape the metrics, it is most likely a problem with the authentication and authorization setup of the kubelets.

As described in the [README.md Prerequisites](../README.md#prerequisites) section, in order to retrieve metrics from the kubelet, token authentication and authorization must be enabled. Some Kubernetes setup tools do not enable this by default.

- If you are using Google's GKE product, see [cAdvisor support](GKE-cadvisor-support.md).
- If you are using AWS EKS, see [AWS EKS CNI support](EKS-cni-support.md).
- If you are using Weave Net, see [Weave Net support](weave-net-support.md).

### Authentication problem

The Prometheus `/targets` page will show the kubelet job with the error `403 Unauthorized` when token authentication is not enabled. Ensure that the `--authentication-token-webhook=true` flag is enabled on all kubelet configurations.

### Authorization problem

The Prometheus `/targets` page will show the kubelet job with the error `401 Unauthorized` when token authorization is not enabled. Ensure that the `--authorization-mode=Webhook` flag is enabled on all kubelet configurations.
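
A direct way to reproduce what Prometheus sees is to scrape a kubelet by hand (a sketch: `kubectl create token` requires kubectl 1.24+, and `prometheus-k8s` in the `monitoring` namespace is the service account used by kube-prometheus):

```bash
TOKEN=$(kubectl -n monitoring create token prometheus-k8s)
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
# A 200 with metrics output means authn/authz are fine; 401/403 reproduce the errors above.
curl -sk -H "Authorization: Bearer ${TOKEN}" "https://${NODE_IP}:10250/metrics" | head
```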

## kube-state-metrics resource usage

In some environments, kube-state-metrics may need additional resources. One driver for more resource needs is a high number of namespaces. There may be others.

kube-state-metrics resource allocation is managed by [addon-resizer](https://github.com/kubernetes/autoscaler/tree/master/addon-resizer/nanny). You can control its parameters by setting variables in the config. They default to:

```jsonnet
kubeStateMetrics+:: {
  baseCPU: '100m',
  cpuPerNode: '2m',
  baseMemory: '150Mi',
  memoryPerNode: '30Mi',
}
```

## Error retrieving kube-proxy metrics

By default, kubeadm will configure kube-proxy to listen on 127.0.0.1 for metrics. Because of this, Prometheus is not able to scrape them. The address has to be changed to 0.0.0.0 in one of the following two places:

1. Before cluster initialization, the config file passed to `kubeadm init` should have a KubeProxyConfiguration manifest with the field `metricsBindAddress` set to `0.0.0.0:10249`.
2. If the k8s cluster is already up and running, modify the configmap `kube-proxy` in the namespace `kube-system` and set the `metricsBindAddress` field, as shown in the sketch after this list. After this, the kube-proxy daemonset has to be restarted with
`kubectl -n kube-system rollout restart daemonset kube-proxy`
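
A sketch of the second option, assuming the kubeadm-generated configmap in which `metricsBindAddress` is initially empty:

```bash
kubectl -n kube-system get configmap kube-proxy -o yaml | \
  sed -e 's/metricsBindAddress: ""/metricsBindAddress: "0.0.0.0:10249"/' | \
  kubectl apply -f -
kubectl -n kube-system rollout restart daemonset kube-proxy
```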

@@ -1,29 +0,0 @@
# Update kube-prometheus

You may wish to fetch changes made to this project so they are available to you.

## Update jb

`jb` may have been updated, so it's a good idea to get the latest version of this binary:

```shell
$ go install -a github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
```

## Update kube-prometheus

The command below will sync with the upstream project:

```shell
$ jb update
```

## Compile the manifests and apply

Once updated, just follow the instructions under [Generating](customizing.md#generating) and [Apply the kube-prometheus stack](customizing.md#apply-the-kube-prometheus-stack) from the [customizing.md doc](customizing.md) to apply the changes to your cluster.
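
In short, the flow usually looks like this (a sketch; `example.jsonnet` and the `manifests/` layout are the ones described in [customizing.md](customizing.md)):

```shell
$ ./build.sh example.jsonnet
$ kubectl apply --server-side -f manifests/setup
$ kubectl apply -f manifests/
```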

## Migration from previous versions

If you are migrating from the `release-0.7` branch or earlier, please read [what changed and how to migrate in our guide](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/migration-guide.md).

Refer to the [migration document](migration-example) for more information about migration from the 0.3 and 0.8 versions of kube-prometheus.

@@ -8,7 +8,7 @@ Following this document, you can setup Weave Net monitoring for your cluster usi

Using kube-prometheus and kubectl you will be able to install the following for monitoring Weave Net in your cluster:

1. [Service for Weave Net](https://gist.github.com/alok87/379c6234b582f555c141f6fddea9fbce) The service which the ServiceMonitor scrapes.
1. [Service for Weave Net](https://gist.github.com/alok87/379c6234b582f555c141f6fddea9fbce) The service which the [service monitor](https://coreos.com/operators/prometheus/docs/latest/user-guides/cluster-monitoring.html) scrapes.
2. [ServiceMonitor for Weave Net](https://gist.github.com/alok87/e46a7f9a79ef6d1da6964a035be2cfb9) Service monitor to scrape the Weave Net metrics and bring them to Prometheus.
3. [Prometheus Alerts for Weave Net](https://stackoverflow.com/a/60447864) This will set up all the important Weave Net metrics you should be alerted on.
4. [Grafana Dashboard for Weave Net](https://grafana.com/grafana/dashboards/11789) This will set up per-pod monitoring for Weave Net.

@@ -1,27 +1,8 @@
# Windows

The [Windows hostprocess addon](../examples/windows-hostprocess.jsonnet) adds the dashboards and rules from [kubernetes-monitoring/kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin#dashboards-for-windows-nodes).
The [Windows addon](../examples/windows.jsonnet) adds the dashboards and rules from [kubernetes-monitoring/kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin#dashboards-for-windows-nodes).

It also deploys [windows_exporter](https://github.com/prometheus-community/windows_exporter) as a [hostprocess pod](https://github.com/prometheus-community/windows_exporter/blob/master/kubernetes/kubernetes.md), as Kubernetes now supports HostProcess containers on Windows nodes (as of [v1.22](https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/)). The cluster should be using the containerd runtime.

```
local kp = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/windows-hostprocess.libsonnet') +
  {
    values+:: {
      windowsExporter+:: {
        image: "ghcr.io/prometheus-community/windows-exporter",
        version: "0.21.0",
      },
    },
  };

{ ['windows-exporter-' + name]: kp.windowsExporter[name] for name in std.objectFields(kp.windowsExporter) }
```

See the [full example](../examples/windows-hostprocess.jsonnet) for setup.

If the cluster is running the docker runtime, use the other [Windows addon](../examples/windows.jsonnet). That addon does not deploy windows_exporter: Docker-based Windows does not support running [windows_exporter](https://github.com/prometheus-community/windows_exporter) in a pod, so the addon uses [additional scrape configuration](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/additional-scrape-config.md) to set up a static config to scrape the node ports where windows_exporter is configured.
Currently, Windows does not support running [windows_exporter](https://github.com/prometheus-community/windows_exporter) in a pod, so this addon uses [additional scrape configuration](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/additional-scrape-config.md) to set up a static config to scrape the node ports where windows_exporter is configured.

The addon requires you to specify the node IPs and ports where it can find windows_exporter. See the [full example](../examples/windows.jsonnet) for setup.

@@ -7,7 +7,6 @@ local kp =
  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/pyrra.libsonnet') +
  {
    values+:: {
      common+: {
@@ -21,7 +20,6 @@ local kp =
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// { 'setup/pyrra-slo-CustomResourceDefinition': kp.pyrra.crd } +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
@@ -29,7 +27,6 @@ local kp =
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
// { ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) if name != 'crd' } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +

@@ -15,9 +15,6 @@ local kp =
    common+: {
      namespace: 'monitoring',
    },
    alertmanager+: {
      config: importstr 'alertmanager-config.yaml',
    },
  },
  alertmanager+:: {
    alertmanager+: {
@@ -29,9 +26,9 @@ local kp =
  },
  configmap+:: {
    'alert-templates': configmap(
      'alert-templates',
      'alertmanager-alert-template.tmpl',
      $.values.common.namespace,  // could be $._config.namespace to assign namespace once
      { 'alertmanager-alert-template.tmpl': importstr 'alertmanager-alert-template.tmpl' },
      { data: importstr 'alertmanager-alert-template.tmpl' },
    ),
  },
};

@@ -24,4 +24,4 @@ slack_configs:
  text: '{{ template "slack.text" . }}

templates:
- '/etc/alertmanager/configmaps/alert-templates/*.tmpl'
- '/etc/alertmanager/configmaps/alertmanager-alert-template.tmpl'

@@ -1,11 +1,9 @@
## ArgoCD Example

This is the simplest working example of an ArgoCD app; the JSON object built is now an array of objects, as that is the preferred format for ArgoCD, and ArgoCD-specific annotations are added to the manifests.
This is the simplest working example of an ArgoCD app; the JSON object built is now an array of objects, as that is the preferred format for ArgoCD.

Requirements:

- **ArgoCD 1.7+**
**ArgoCD 1.7+**

- Follow the vendor generation steps at the root of this repository and generate a `vendored` folder (referenced in `application.yaml`).

- Make sure that argocd-cm has `application.instanceLabelKey` set to something other than `app.kubernetes.io/instance`, otherwise it will cause problems with prometheus target discovery. (See also [Why Is My App Out Of Sync Even After Syncing?](https://argo-cd.readthedocs.io/en/stable/faq/#why-is-my-app-out-of-sync-even-after-syncing))
Follow the vendor generation steps at the root of this repository and generate a `vendored` folder (referenced in `application.yaml`).

@@ -1,66 +1,14 @@
// NB! Make sure that argocd-cm has `application.instanceLabelKey` set to something other than `app.kubernetes.io/instance`,
// otherwise it will cause problems with prometheus target discovery.
// See also https://argo-cd.readthedocs.io/en/stable/faq/#why-is-my-app-out-of-sync-even-after-syncing

local kp =
  (import 'kube-prometheus/main.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
  // (import 'kube-prometheus/addons/managed-cluster.libsonnet') +
  // (import 'kube-prometheus/addons/node-ports.libsonnet') +
  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/pyrra.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
    },
  };
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: {
      namespace: 'monitoring',
    },
  },
};

// Unlike in kube-prometheus/example.jsonnet where a map of file-names to manifests is returned,
// for ArgoCD we need to return just a regular list with all the manifests.
local manifests =
  [kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus)] +
  [kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator)] +
  [kp.alertmanager[name] for name in std.objectFields(kp.alertmanager)] +
  [kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter)] +
  [kp.grafana[name] for name in std.objectFields(kp.grafana)] +
  // [ kp.pyrra[name] for name in std.objectFields(kp.pyrra)] +
  [kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics)] +
  [kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane)] +
  [kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter)] +
  [kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
  [kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)];

local argoAnnotations(manifest) =
  manifest {
    metadata+: {
      annotations+: {
        'argocd.argoproj.io/sync-wave':
          // Make sure to sync the Namespace & CRDs before anything else (to avoid sync failures)
          if std.member(['CustomResourceDefinition', 'Namespace'], manifest.kind)
          then '-5'
          // And sync all the roles outside of the main & kube-system last (in case some of the namespaces don't exist yet)
          else if std.objectHas(manifest, 'metadata')
                  && std.objectHas(manifest.metadata, 'namespace')
                  && !std.member([kp.values.common.namespace, 'kube-system'], manifest.metadata.namespace)
          then '10'
          else '5',
        'argocd.argoproj.io/sync-options':
          // Use replace strategy for CRDs, as they're too big to fit into the last-applied-configuration annotation that kubectl apply wants to use
          if manifest.kind == 'CustomResourceDefinition' then 'Replace=true'
          else '',
      },
    },
  };

// Add argo-cd annotations to all the manifests
[
  if std.endsWith(manifest.kind, 'List') && std.objectHas(manifest, 'items')
  then manifest { items: [argoAnnotations(item) for item in manifest.items] }
  else argoAnnotations(manifest)
  for manifest in manifests
]

[kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus)] +
[kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator)] +
[kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter)] +
[kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics)] +
[kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
[kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)]

@@ -1,33 +0,0 @@
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      grafana+: {
        dashboards: std.mergePatch(super.dashboards, {
          // Add more unwanted dashboards here
          'alertmanager-overview.json': null,
        }),
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: prometheus-frontend

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: alertmanager-discovery

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: prometheus-frontend

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: prometheus-frontend

@@ -1,25 +0,0 @@
local kp = (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/networkpolicies-disabled.libsonnet') + {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
    },
  };

{
  ['setup/' + resource]: kp[component][resource]
  for component in std.objectFields(kp)
  for resource in std.filter(
    function(resource)
      kp[component][resource].kind == 'CustomResourceDefinition' || kp[component][resource].kind == 'Namespace', std.objectFields(kp[component])
  )
} +
{
  [component + '-' + resource]: kp[component][resource]
  for component in std.objectFields(kp)
  for resource in std.filter(
    function(resource)
      kp[component][resource].kind != 'CustomResourceDefinition' && kp[component][resource].kind != 'Namespace', std.objectFields(kp[component])
  )
}

@@ -1,55 +0,0 @@
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      prometheus+: {
        resources: {
          requests: { memory: '100Mi' },
        },
        enableFeatures: ['agent'],
      },
    },
    prometheus+: {
      prometheus+: {
        spec+: {
          replicas: 1,
          alerting:: {},
          ruleSelector:: {},
          remoteWrite: [{
            url: 'http://remote-write-url.com',
          }],
          containers+: [
            {
              name: 'prometheus',
              args+: [
                '--config.file=/etc/prometheus/config_out/prometheus.env.yaml',
                '--storage.agent.path=/prometheus',
                '--enable-feature=agent',
                '--web.enable-lifecycle',
              ],
            },
          ],
        },
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }

@@ -1,47 +0,0 @@
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/addons/anti-affinity.libsonnet') +
  // (import 'kube-prometheus/addons/managed-cluster.libsonnet') +
  // (import 'kube-prometheus/addons/node-ports.libsonnet') +
  // (import 'kube-prometheus/addons/static-etcd.libsonnet') +
  // (import 'kube-prometheus/addons/custom-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/external-metrics.libsonnet') +
  // (import 'kube-prometheus/addons/pyrra.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      prometheus+: {
        thanos: {
          baseImage: 'quay.io/thanos/thanos',
          version: 'v0.8.1',
          objectStorageConfig: {
            key: 'thanos.yaml',
            name: 'thanos-objstore-config',
          },
        },
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// { 'setup/pyrra-slo-CustomResourceDefinition': kp.pyrra.crd } +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
// { ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) if name != 'crd' } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }

@@ -1,31 +0,0 @@
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
    },
  };

local rulePatches = import 'rule-patches.libsonnet';

local sanitizePrometheusRules = (import 'kube-prometheus/lib/rule-sanitizer.libsonnet')(rulePatches).sanitizePrometheusRules;

sanitizePrometheusRules({ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
  {
    ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
    for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
  } +
  // serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
  { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
  { 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
  { 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
  { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
  { ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
  { ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
  { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
  { ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
  { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
  { ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
  { ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) })

@@ -1,26 +0,0 @@
{
  excludedRuleGroups: [
    'alertmanager.rules',
  ],
  excludedRules: [
    {
      name: 'prometheus-operator',
      rules: [
        { alert: 'PrometheusOperatorListErrors' },
      ],
    },
  ],
  patchedRules: [
    {
      name: 'prometheus-operator',
      rules: [
        {
          alert: 'PrometheusOperatorWatchErrors',
          labels: {
            severity: 'info',
          },
        },
      ],
    },
  ],
}

@@ -1,33 +0,0 @@
local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/windows-hostprocess.libsonnet') +
  {
    values+:: {
      common+: {
        namespace: 'monitoring',
      },
      windowsExporter+:: {
        image: 'ghcr.io/prometheus-community/windows-exporter',
        version: '0.21.0',
      },
    },
  };

{ 'setup/0namespace-namespace': kp.kubePrometheus.namespace } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor' && name != 'prometheusRule'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor and prometheusRule are separated so that they can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ 'prometheus-operator-prometheusRule': kp.prometheusOperator.prometheusRule } +
{ 'kube-prometheus-prometheusRule': kp.kubePrometheus.prometheusRule } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['blackbox-exporter-' + name]: kp.blackboxExporter[name] for name in std.objectFields(kp.blackboxExporter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['kubernetes-' + name]: kp.kubernetesControlPlane[name] for name in std.objectFields(kp.kubernetesControlPlane) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['windows-exporter-' + name]: kp.windowsExporter[name] for name in std.objectFields(kp.windowsExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator

@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader

55
go.mod

@@ -1,56 +1,11 @@
module github.com/prometheus-operator/kube-prometheus

go 1.22.0

toolchain go1.22.5
go 1.15

require (
	github.com/Jeffail/gabs v1.4.0
	github.com/prometheus/client_golang v1.20.4
	k8s.io/apimachinery v0.31.1
	k8s.io/client-go v0.31.1
)

require (
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/prometheus/client_model v0.6.1 // indirect
	github.com/prometheus/common v0.55.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	golang.org/x/net v0.26.0 // indirect
	golang.org/x/oauth2 v0.21.0 // indirect
	golang.org/x/sys v0.22.0 // indirect
	golang.org/x/term v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/api v0.31.1 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.8.0
	k8s.io/apimachinery v0.19.3
	k8s.io/client-go v0.19.3
)
702
go.sum
702
go.sum
@@ -1,172 +1,606 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
|
||||
github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
|
||||
k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
|
||||
k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
|
||||
k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
|
||||
k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU=
|
||||
k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=
|
||||
k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=
|
||||
k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
|
||||
k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg=
|
||||
k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
||||
@@ -77,13 +77,9 @@
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'PrometheusRule',
      metadata: {
        labels: {
          'app.kubernetes.io/name': 'prometheus-vpc-cni-rules',
          'app.kubernetes.io/component': 'prometheus',
          'app.kubernetes.io/part-of': 'kube-prometheus',
        },
        labels: $.prometheus._config.commonLabels + $.prometheus._config.mixin.ruleLabels,
        name: 'aws-vpc-cni-rules',
        namespace: $.values.prometheus.namespace,
        namespace: $.prometheus._config.namespace,
      },
      spec: {
        groups: [

@@ -4,6 +4,7 @@
{
  values+:: {
    prometheusAdapter+: {
      namespace: $.values.prometheusAdapter.namespace,
      // Rules for custom-metrics
      config+:: {
        rules+: [

@@ -14,7 +14,7 @@
        // Drop all apiserver metrics which are deprecated in kubernetes.
        {
          sourceLabels: ['__name__'],
          regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers|storage_db_total_size_in_bytes|flowcontrol_request_concurrency_limit|flowcontrol_request_concurrency_in_use)',
          regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs|longrunning_gauge|registered_watchers)',
          action: 'drop',
        },
        // Drop all docker metrics which are deprecated in kubernetes.

@@ -16,17 +16,17 @@ local addArgs(args, name, containers) = std.map(
    containers: addArgs(
      [|||
        --metric-denylist=
        ^kube_.+_created$,
        ^kube_.+_metadata_resource_version$,
        ^kube_replicaset_metadata_generation$,
        ^kube_replicaset_status_observed_generation$,
        ^kube_pod_restart_policy$,
        ^kube_pod_init_container_status_terminated$,
        ^kube_pod_init_container_status_running$,
        ^kube_pod_container_status_terminated$,
        ^kube_pod_container_status_running$,
        ^kube_pod_completion_time$,
        ^kube_pod_status_scheduled$
        kube_.+_created,
        kube_.+_metadata_resource_version,
        kube_replicaset_metadata_generation,
        kube_replicaset_status_observed_generation,
        kube_pod_restart_policy,
        kube_pod_init_container_status_terminated,
        kube_pod_init_container_status_running,
        kube_pod_container_status_terminated,
        kube_pod_container_status_running,
        kube_pod_completion_time,
        kube_pod_status_scheduled
      |||],
      'kube-state-metrics',
      super.containers

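The hunk header above references a local addArgs helper. As a hedged sketch of what that helper plausibly looks like (inferred from the call shape here, not copied from this diff), it maps over the pod's containers and appends the extra arguments only to the container whose name matches:

local addArgs(args, name, containers) = std.map(
  function(c)
    if c.name == name then
      // Append the extra flags to the matching container only.
      c { args+: args }
    else c,
  containers
);

With that shape, the call above rewrites only the kube-state-metrics container and leaves sidecars such as kube-rbac-proxy untouched.
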
@@ -1,35 +0,0 @@
// Disables creation of NetworkPolicies

{
  blackboxExporter+: {
    networkPolicy:: {},
  },

  kubeStateMetrics+: {
    networkPolicy:: {},
  },

  nodeExporter+: {
    networkPolicy:: {},
  },

  prometheusAdapter+: {
    networkPolicy:: {},
  },

  alertmanager+: {
    networkPolicy:: {},
  },

  grafana+: {
    networkPolicy:: {},
  },

  prometheus+: {
    networkPolicy:: {},
  },

  prometheusOperator+: {
    networkPolicy:: {},
  },
}
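
The trick in this addon is jsonnet field hiding: re-declaring networkPolicy with :: turns it into a hidden field, so manifest generation (which iterates visible fields) emits no NetworkPolicy objects. A minimal usage sketch, assuming a conventional kube-prometheus layout (the import paths and namespace below are assumptions, not taken from this diff):

local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/networkpolicies-disabled.libsonnet') +
  { values+:: { common+: { namespace: 'monitoring' } } };

// networkPolicy is now hidden, so std.objectFields skips it and no
// NetworkPolicy manifest is rendered for any component.
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) }
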
@@ -16,7 +16,6 @@ local restrictedPodSecurityPolicy = {
      'configMap',
      'emptyDir',
      'secret',
      'projected',
      // Assume that persistentVolumes set up by the cluster admin are safe to use.
      'persistentVolumeClaim',
    ],
@@ -200,9 +199,6 @@ local restrictedPodSecurityPolicy = {
      name: 'node-exporter-psp',
    },
    spec+: {
      allowedCapabilities: [
        'SYS_TIME',
      ],
      allowedHostPaths+: [
        {
          pathPrefix: '/proc',

@@ -1,626 +0,0 @@
{
  values+:: {
    common+: {
      versions+: {
        pyrra: error 'must provide version',
      } + (import '../versions.json'),
      images+: {
        pyrra+: 'ghcr.io/pyrra-dev/pyrra:v' + $.values.common.versions.pyrra,
      },
    },
    pyrra+: {
      namespace: $.values.common.namespace,
      version: $.values.common.versions.pyrra,
      image: $.values.common.images.pyrra,
    },
  },

  local defaults = {
    local defaults = self,

    name:: 'pyrra',
    namespace:: error 'must provide namespace',
    version:: error 'must provide version',
    image: error 'must provide image',
    replicas:: 1,
    port:: 9099,

    commonLabels:: {
      'app.kubernetes.io/name': 'pyrra',
      'app.kubernetes.io/version': defaults.version,
      'app.kubernetes.io/part-of': 'kube-prometheus',
    },
  },

  local pyrra = function(params) {
    local pyrra = self,
    _config:: defaults + params,

    crd: (
      import 'github.com/pyrra-dev/pyrra/jsonnet/controller-gen/pyrra.dev_servicelevelobjectives.json'
    ),


    _apiMetadata:: {
      name: pyrra._config.name + '-api',
      namespace: pyrra._config.namespace,
      labels: pyrra._config.commonLabels {
        'app.kubernetes.io/component': 'api',
      },
    },
    apiSelectorLabels:: {
      [labelName]: pyrra._apiMetadata.labels[labelName]
      for labelName in std.objectFields(pyrra._apiMetadata.labels)
      if !std.setMember(labelName, ['app.kubernetes.io/version'])
    },

    apiService: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: pyrra._apiMetadata,
      spec: {
        ports: [
          { name: 'http', targetPort: pyrra._config.port, port: pyrra._config.port },
        ],
        selector: pyrra.apiSelectorLabels,
      },
    },

    apiDeployment:
      local c = {
        name: pyrra._config.name,
        image: pyrra._config.image,
        args: [
          'api',
          '--api-url=http://%s.%s.svc.cluster.local:9444' % [pyrra.kubernetesService.metadata.name, pyrra.kubernetesService.metadata.namespace],
          '--prometheus-url=http://prometheus-k8s.%s.svc.cluster.local:9090' % pyrra._config.namespace,
        ],
        // resources: pyrra._config.resources,
        ports: [{ containerPort: pyrra._config.port }],
        securityContext: {
          allowPrivilegeEscalation: false,
          readOnlyRootFilesystem: true,
          runAsNonRoot: true,
          capabilities: { drop: ['ALL'] },
          seccompProfile: { type: 'RuntimeDefault' },
        },
      };

      {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata: pyrra._apiMetadata,
        spec: {
          replicas: pyrra._config.replicas,
          selector: {
            matchLabels: pyrra.apiSelectorLabels,
          },
          strategy: {
            rollingUpdate: {
              maxSurge: 1,
              maxUnavailable: 1,
            },
          },
          template: {
            metadata: { labels: pyrra._apiMetadata.labels },
            spec: {
              containers: [c],
              // serviceAccountName: $.serviceAccount.metadata.name,
              nodeSelector: { 'kubernetes.io/os': 'linux' },
            },
          },
        },
      },

    _kubernetesMetadata:: {
      name: pyrra._config.name + '-kubernetes',
      namespace: pyrra._config.namespace,
      labels: pyrra._config.commonLabels {
        'app.kubernetes.io/component': 'kubernetes',
      },
    },
    kubernetesSelectorLabels:: {
      [labelName]: pyrra._kubernetesMetadata.labels[labelName]
      for labelName in std.objectFields(pyrra._kubernetesMetadata.labels)
      if !std.setMember(labelName, ['app.kubernetes.io/version'])
    },

    kubernetesServiceAccount: {
      apiVersion: 'v1',
      kind: 'ServiceAccount',
      metadata: pyrra._kubernetesMetadata,
    },

    kubernetesClusterRole: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: pyrra._kubernetesMetadata,
      rules: [{
        apiGroups: ['monitoring.coreos.com'],
        resources: ['prometheusrules'],
        verbs: ['create', 'delete', 'get', 'list', 'patch', 'update', 'watch'],
      }, {
        apiGroups: ['monitoring.coreos.com'],
        resources: ['prometheusrules/status'],
        verbs: ['get'],
      }, {
        apiGroups: ['pyrra.dev'],
        resources: ['servicelevelobjectives'],
        verbs: ['create', 'delete', 'get', 'list', 'patch', 'update', 'watch'],
      }, {
        apiGroups: ['pyrra.dev'],
        resources: ['servicelevelobjectives/status'],
        verbs: ['get', 'patch', 'update'],
      }],
    },

    kubernetesClusterRoleBinding: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: pyrra._kubernetesMetadata,
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: pyrra.kubernetesClusterRole.metadata.name,
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: pyrra.kubernetesServiceAccount.metadata.name,
        namespace: pyrra._config.namespace,
      }],
    },

    kubernetesService: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: pyrra._kubernetesMetadata,
      spec: {
        ports: [
          { name: 'http', targetPort: 9444, port: 9444 },
        ],
        selector: pyrra.kubernetesSelectorLabels,
      },
    },

    kubernetesDeployment:
      local c = {
        name: pyrra._config.name,
        image: pyrra._config.image,
        args: [
          'kubernetes',
        ],
        // resources: pyrra._config.resources,
        ports: [{ containerPort: pyrra._config.port }],
        securityContext: {
          allowPrivilegeEscalation: false,
          readOnlyRootFilesystem: true,
        },
      };

      {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata: pyrra._kubernetesMetadata {
          name: pyrra._config.name + '-kubernetes',
        },
        spec: {
          replicas: pyrra._config.replicas,
          selector: {
            matchLabels: pyrra.kubernetesSelectorLabels,
          },
          strategy: {
            rollingUpdate: {
              maxSurge: 1,
              maxUnavailable: 1,
            },
          },
          template: {
            metadata: { labels: pyrra._kubernetesMetadata.labels },
            spec: {
              containers: [c],
              serviceAccountName: pyrra.kubernetesServiceAccount.metadata.name,
              nodeSelector: { 'kubernetes.io/os': 'linux' },
            },
          },
        },
      },

    // Most of these should eventually be moved to the components themselves.
    // For now, this is a good start to have everything in one place.
    'slo-apiserver-read-response-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'apiserver-read-response-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'apiserver_request_total{component="apiserver",verb=~"LIST|GET",code=~"5.."}',
            },
            total: {
              metric: 'apiserver_request_total{component="apiserver",verb=~"LIST|GET"}',
            },
          },
        },
      },
    },

    'slo-apiserver-write-response-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'apiserver-write-response-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'apiserver_request_total{component="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}',
            },
            total: {
              metric: 'apiserver_request_total{component="apiserver",verb=~"POST|PUT|PATCH|DELETE"}',
            },
          },
        },
      },
    },

    'slo-apiserver-read-resource-latency': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'apiserver-read-resource-latency',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          latency: {
            success: {
              metric: 'apiserver_request_duration_seconds_bucket{component="apiserver",scope=~"resource|",verb=~"LIST|GET",le="0.1"}',
            },
            total: {
              metric: 'apiserver_request_duration_seconds_count{component="apiserver",scope=~"resource|",verb=~"LIST|GET"}',
            },
          },
        },
      },
    },

    'slo-apiserver-read-namespace-latency': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'apiserver-read-namespace-latency',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          latency: {
            success: {
              metric: 'apiserver_request_duration_seconds_bucket{component="apiserver",scope=~"namespace|",verb=~"LIST|GET",le="5"}',
            },
            total: {
              metric: 'apiserver_request_duration_seconds_count{component="apiserver",scope=~"namespace|",verb=~"LIST|GET"}',
            },
          },
        },
      },
    },

    'slo-apiserver-read-cluster-latency': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'apiserver-read-cluster-latency',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          latency: {
            success: {
              metric: 'apiserver_request_duration_seconds_bucket{component="apiserver",scope=~"cluster|",verb=~"LIST|GET",le="5"}',
            },
            total: {
              metric: 'apiserver_request_duration_seconds_count{component="apiserver",scope=~"cluster|",verb=~"LIST|GET"}',
            },
          },
        },
      },
    },

    'slo-kubelet-request-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'kubelet-request-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'rest_client_requests_total{job="kubelet",code=~"5.."}',
            },
            total: {
              metric: 'rest_client_requests_total{job="kubelet"}',
            },
          },
        },
      },
    },

    'slo-kubelet-runtime-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'kubelet-runtime-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'kubelet_runtime_operations_errors_total{job="kubelet"}',
            },
            total: {
              metric: 'kubelet_runtime_operations_total{job="kubelet"}',
            },
          },
        },
      },
    },

    'slo-coredns-response-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'coredns-response-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99.99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'coredns_dns_responses_total{job="kube-dns",rcode="SERVFAIL"}',
            },
            total: {
              metric: 'coredns_dns_responses_total{job="kube-dns"}',
            },
          },
        },
      },
    },

    'slo-prometheus-operator-reconcile-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-operator-reconcile-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '95',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'prometheus_operator_reconcile_errors_total{job="prometheus-operator"}',
            },
            total: {
              metric: 'prometheus_operator_reconcile_operations_total{job="prometheus-operator"}',
            },
            grouping: ['controller'],
          },
        },
      },
    },

    'slo-prometheus-operator-http-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-operator-http-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99.5',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'prometheus_operator_kubernetes_client_http_requests_total{job="prometheus-operator",status_code=~"5.."}',
            },
            total: {
              metric: 'prometheus_operator_kubernetes_client_http_requests_total{job="prometheus-operator"}',
            },
          },
        },
      },
    },

    'slo-prometheus-rule-evaluation-failures': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-rule-evaluation-failures',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99.99',
        window: '2w',
        description: 'Rule and alerting rules are being evaluated every few seconds. This needs to work for recording rules to be created and most importantly for alerts to be evaluated.',
        indicator: {
          ratio: {
            errors: {
              metric: 'prometheus_rule_evaluation_failures_total{job="prometheus-k8s"}',
            },
            total: {
              metric: 'prometheus_rule_evaluations_total{job="prometheus-k8s"}',
            },
          },
        },
      },
    },

    'slo-prometheus-sd-kubernetes-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-sd-kubernetes-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: 'If there are too many errors Prometheus is having a bad time discovering new Kubernetes services.',
        indicator: {
          ratio: {
            errors: {
              metric: 'prometheus_sd_kubernetes_http_request_total{job="prometheus-k8s",status_code=~"5..|<error>"}',
            },
            total: {
              metric: 'prometheus_sd_kubernetes_http_request_total{job="prometheus-k8s"}',
            },
          },
        },
      },
    },

    'slo-prometheus-query-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-query-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            grouping: ['handler'],
            errors: {
              metric: 'prometheus_http_requests_total{job="prometheus-k8s",handler=~"/api/v1/query.*",code=~"5.."}',
            },
            total: {
              metric: 'prometheus_http_requests_total{job="prometheus-k8s",handler=~"/api/v1/query.*"}',
            },
          },
        },
      },
    },

    'slo-prometheus-notification-errors': {
      apiVersion: 'pyrra.dev/v1alpha1',
      kind: 'ServiceLevelObjective',
      metadata: {
        name: 'prometheus-notification-errors',
        namespace: pyrra._config.namespace,
        labels: {
          prometheus: 'k8s',
          role: 'alert-rules',
        },
      },
      spec: {
        target: '99',
        window: '2w',
        description: '',
        indicator: {
          ratio: {
            errors: {
              metric: 'prometheus_notifications_errors_total{job="prometheus-k8s"}',
            },
            total: {
              metric: 'prometheus_notifications_sent_total{job="prometheus-k8s"}',
            },
          },
        },
      },
    },
  },

  pyrra: pyrra($.values.pyrra),
}
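
A hypothetical rendering sketch for the component above; the import paths follow kube-prometheus conventions but are assumptions, not taken from this diff:

local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/pyrra.libsonnet');

// Emit one manifest per visible field of the pyrra component
// (CRD, deployments, services, RBAC, and the SLO objects above).
{ ['pyrra-' + name]: kp.pyrra[name] for name in std.objectFields(kp.pyrra) }
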
@@ -1,67 +0,0 @@
// user facing roles for monitors, probe, and rules
// ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
{
  prometheusOperator+: {
    local po = self,
    clusterRoleView: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: po._metadata {
        name: 'monitoring-view',
        namespace:: null,
        labels+: {
          'rbac.authorization.k8s.io/aggregate-to-view': 'true',
        },
      },
      rules: [
        {
          apiGroups: [
            'monitoring.coreos.com',
          ],
          resources: [
            'podmonitors',
            'probes',
            'prometheusrules',
            'servicemonitors',
          ],
          verbs: [
            'get',
            'list',
            'watch',
          ],
        },
      ],
    },
    clusterRoleEdit: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: po._metadata {
        name: 'monitoring-edit',
        namespace:: null,
        labels+: {
          'rbac.authorization.k8s.io/aggregate-to-edit': 'true',
        },
      },
      rules: [
        {
          apiGroups: [
            'monitoring.coreos.com',
          ],
          resources: [
            'podmonitors',
            'probes',
            'prometheusrules',
            'servicemonitors',
          ],
          verbs: [
            'create',
            'delete',
            'deletecollection',
            'patch',
            'update',
          ],
        },
      ],
    },
  },
}
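
Because these ClusterRoles carry the aggregate-to-view and aggregate-to-edit labels, their rules fold into Kubernetes' built-in view and edit roles automatically; they can also be bound directly. A hedged example of a direct binding (the group and namespace are made-up placeholders, not from this diff):

{
  apiVersion: 'rbac.authorization.k8s.io/v1',
  kind: 'RoleBinding',
  metadata: { name: 'team-frontend-monitoring-edit', namespace: 'frontend' },
  roleRef: {
    apiGroup: 'rbac.authorization.k8s.io',
    kind: 'ClusterRole',
    name: 'monitoring-edit',
  },
  // Members of this group may manage ServiceMonitors, PodMonitors,
  // Probes, and PrometheusRules in the 'frontend' namespace only.
  subjects: [{ apiGroup: 'rbac.authorization.k8s.io', kind: 'Group', name: 'team-frontend' }],
}
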
@@ -3135,7 +3135,7 @@
      ],
      "targets": [
        {
          "expr": "sort_desc(floor(label_replace(max by(node) (max by(instance) (kubelet_running_pod_count{job=\"kubelet\",metrics_path=\"/metrics\"}) * on(instance) group_left(node) kubelet_node_name{job=\"kubelet\",metrics_path=\"/metrics\"}) / max by(node) (kube_node_status_capacity{resource=\"pods\",unit=\"integer\",job=\"kube-state-metrics\"}) , \"node_ip\", \"$1.$2.$3.$4\", \"node\", \"^ip-([0-9]+)-([0-9]+)-([0-9]+)-([0-9]+).*$\") * 100))",
          "expr": "sort_desc(floor(label_replace(max by(node) (max by(instance) (kubelet_running_pod_count{job=\"kubelet\",metrics_path=\"/metrics\"}) * on(instance) group_left(node) kubelet_node_name{job=\"kubelet\",metrics_path=\"/metrics\"}) / max by(node) (kube_node_status_capacity_pods{job=\"kube-state-metrics\"}) , \"node_ip\", \"$1.$2.$3.$4\", \"node\", \"^ip-([0-9]+)-([0-9]+)-([0-9]+)-([0-9]+).*$\") * 100))",
          "format": "time_series",
          "hide": false,
          "instant": true,

@@ -1,197 +0,0 @@
local windowsdashboards = import 'github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/windows.libsonnet';
local windowsrules = import 'github.com/kubernetes-monitoring/kubernetes-mixin/rules/windows.libsonnet';

local defaults = {
  local defaults = self,
  // Convention: Top-level fields related to CRDs are public, other fields are hidden
  // If there is no CRD for the component, everything is hidden in defaults.
  name:: 'windows-exporter',
  namespace:: error 'must provide namespace',
  version:: error 'must provide version',
  image:: error 'must provide image',
  resources:: {
    requests: { cpu: '300m', memory: '200Mi' },
    limits: { memory: '200Mi' },
  },
  collectorsEnabled:: 'cpu,logical_disk,net,os,system,container,memory',
  scrapeTimeout:: '15s',
  interval:: '30s',
  listenAddress:: '127.0.0.1',
  port:: 9182,
  commonLabels:: {
    'app.kubernetes.io/name': defaults.name,
    'app.kubernetes.io/version': defaults.version,
    'app.kubernetes.io/component': 'windows-exporter',
    'app.kubernetes.io/part-of': 'kube-prometheus',
  },
  selectorLabels:: {
    [labelName]: defaults.commonLabels[labelName]
    for labelName in std.objectFields(defaults.commonLabels)
    if !std.setMember(labelName, ['app.kubernetes.io/version'])
  },
};

local windowsExporter = function(params) {
  local we = self,
  _config:: defaults + params,
  // Safety check
  assert std.isObject(we._config.resources),
  _metadata:: {
    name: we._config.name,
    namespace: we._config.namespace,
    labels: we._config.commonLabels,
  },

  daemonset: {
    apiVersion: 'apps/v1',
    kind: 'DaemonSet',
    metadata: we._metadata,
    spec: {
      selector: {
        matchLabels: we._config.selectorLabels,
      },
      updateStrategy: {
        type: 'RollingUpdate',
        rollingUpdate: { maxUnavailable: '10%' },
      },
      template: {
        metadata: we._metadata,
        spec: {
          securityContext: {
            windowsOptions: {
              hostProcess: true,
              runAsUserName: 'NT AUTHORITY\\system',
            },
          },
          hostNetwork: true,
          initContainers: [
            {
              name: 'configure-firewall',
              image: 'mcr.microsoft.com/windows/nanoserver:1809',
              resources: we._config.resources,
              command: [
                'powershell',
              ],
              args: [
                'New-NetFirewallRule',
                '-DisplayName',
                "'windows-exporter'",
                '-Direction',
                'inbound',
                '-Profile',
                'Any',
                '-Action',
                'Allow',
                '-LocalPort',
                std.toString(we._config.port),
                '-Protocol',
                'TCP',
              ],
            },
          ],
          containers: [
            {
              args: [
                '--config.file=%CONTAINER_SANDBOX_MOUNT_POINT%/config.yml',
                '--collector.textfile.directory=%CONTAINER_SANDBOX_MOUNT_POINT%',
              ],
              name: we._config.name,
              image: we._config.image + ':' + we._config.version,
              imagePullPolicy: 'Always',
              resources: we._config.resources,
              ports: [
                {
                  containerPort: we._config.port,
                  hostPort: we._config.port,
                  name: 'http',
                },
              ],
              volumeMounts: [
                {
                  name: 'windows-exporter-config',
                  mountPath: '/config.yml',
                  subPath: 'config.yml',
                },
              ],
            },
          ],
          nodeSelector: {
            'kubernetes.io/os': 'windows',
          },
          volumes: [
            {
              name: 'windows-exporter-config',
              configMap: {
                name: we._config.name,
              },
            },
          ],
        },
      },
    },
  },
  configmap: {
    kind: 'ConfigMap',
    apiVersion: 'v1',
    metadata: we._metadata,
    data: {
      'config.yml': "collectors:\n enabled: '" + we._config.collectorsEnabled + "'",
    },
  },
  podmonitor: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'PodMonitor',
    metadata: we._metadata,
    spec: {
      jobLabel: 'app.kubernetes.io/name',
      selector: {
        matchLabels: we._config.selectorLabels,
      },
      podMetricsEndpoints: [
        {
          port: 'http',
          scheme: 'http',
          scrapeTimeout: we._config.scrapeTimeout,
          interval: we._config.interval,
          relabelings: [
            {
              action: 'replace',
              regex: '(.*)',
              replacement: '$1',
              sourceLabels: ['__meta_kubernetes_pod_node_name'],
              targetLabel: 'instance',
            },
          ],
        },
      ],
    },
  },
};

{
  values+:: {
    windowsExporter+: {
      name: defaults.name,
      namespace: $.values.common.namespace,
    },
    grafana+:: {
      dashboards+:: windowsdashboards {
        _config: $.kubernetesControlPlane.mixin._config {
          windowsExporterSelector: 'job="' + $.values.windowsExporter.name + '"',
        },
      }.grafanaDashboards,
    },
  },
  kubernetesControlPlane+: {
    mixin+:: {
      prometheusRules+:: {
        groups+: windowsrules {
          _config: $.kubernetesControlPlane.mixin._config {
            windowsExporterSelector: 'job="' + $.values.windowsExporter.name + '"',
          },
        }.prometheusRules.groups,
      },
    },
  },
  windowsExporter: windowsExporter($.values.windowsExporter),
}
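
A hypothetical usage sketch for the addon above; the import path and image coordinates are assumptions, not taken from this diff:

local kp =
  (import 'kube-prometheus/main.libsonnet') +
  (import 'kube-prometheus/addons/windows.libsonnet') +
  {
    values+:: {
      windowsExporter+: {
        // Placeholder image/version; set these to whatever your cluster should run.
        image: 'ghcr.io/prometheus-community/windows-exporter',
        version: '0.22.0',
      },
    },
  };

{ ['windows-exporter-' + name]: kp.windowsExporter[name] for name in std.objectFields(kp.windowsExporter) }
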
@@ -35,10 +35,6 @@ local defaults = {
      source_matchers: ['severity = warning'],
      target_matchers: ['severity = info'],
      equal: ['namespace', 'alertname'],
    }, {
      source_matchers: ['alertname = InfoInhibitor'],
      target_matchers: ['severity = info'],
      equal: ['namespace'],
    }],
    route: {
      group_by: ['namespace'],
@@ -48,7 +44,6 @@ local defaults = {
      receiver: 'Default',
      routes: [
        { receiver: 'Watchdog', matchers: ['alertname = Watchdog'] },
        { receiver: 'null', matchers: ['alertname = InfoInhibitor'] },
        { receiver: 'Critical', matchers: ['severity = critical'] },
      ],
    },
@@ -56,11 +51,9 @@ local defaults = {
      { name: 'Default' },
      { name: 'Watchdog' },
      { name: 'Critical' },
      { name: 'null' },
    ],
  },
  replicas: 3,
  secrets: [],
  mixin:: {
    ruleLabels: {},
    _config: {
@@ -104,51 +97,6 @@ function(params) {
    },
  },

  networkPolicy: {
    apiVersion: 'networking.k8s.io/v1',
    kind: 'NetworkPolicy',
    metadata: am.service.metadata,
    spec: {
      podSelector: {
        matchLabels: am._config.selectorLabels,
      },
      policyTypes: ['Egress', 'Ingress'],
      egress: [{}],
      ingress: [
        {
          from: [{
            podSelector: {
              matchLabels: {
                'app.kubernetes.io/name': 'prometheus',
              },
            },
          }],
          ports: std.map(function(o) {
            port: o.port,
            protocol: 'TCP',
          }, am.service.spec.ports),
        },
        // Alertmanager cluster peer-to-peer communication
        {
          from: [{
            podSelector: {
              matchLabels: {
                'app.kubernetes.io/name': 'alertmanager',
              },
            },
          }],
          ports: [{
            port: 9094,
            protocol: 'TCP',
          }, {
            port: 9094,
            protocol: 'UDP',
          }],
        },
      ],
    },
  },

  secret: {
    apiVersion: 'v1',
    kind: 'Secret',
@@ -167,7 +115,6 @@ function(params) {
    apiVersion: 'v1',
    kind: 'ServiceAccount',
    metadata: am._metadata,
    automountServiceAccountToken: false,
  },

  service: {
@@ -226,7 +173,6 @@ function(params) {
      },
      resources: am._config.resources,
      nodeSelector: { 'kubernetes.io/os': 'linux' },
      secrets: am._config.secrets,
      serviceAccountName: am.serviceAccount.metadata.name,
      securityContext: {
        runAsUser: 1000,

@@ -6,17 +6,11 @@ local defaults = {
  // If there is no CRD for the component, everything is hidden in defaults.
  namespace:: error 'must provide namespace',
  version:: error 'must provide version',
  image:: error 'must provide image',
  image:: error 'must provide version',
  resources:: {
    requests: { cpu: '10m', memory: '20Mi' },
    limits: { cpu: '20m', memory: '40Mi' },
  },
  kubeRbacProxy:: {
    resources+: {
      requests: { cpu: '10m', memory: '20Mi' },
      limits: { cpu: '20m', memory: '40Mi' },
    },
  },
  commonLabels:: {
    'app.kubernetes.io/name': 'blackbox-exporter',
    'app.kubernetes.io/version': defaults.version,
@@ -121,7 +115,6 @@ function(params) {
    apiVersion: 'v1',
    kind: 'ServiceAccount',
    metadata: bb._metadata,
    automountServiceAccountToken: false,
  },

  clusterRole: {
@@ -147,10 +140,7 @@ function(params) {
  clusterRoleBinding: {
    apiVersion: 'rbac.authorization.k8s.io/v1',
    kind: 'ClusterRoleBinding',
    metadata: {
      name: 'blackbox-exporter',
      labels: bb._config.commonLabels,
    },
    metadata: bb._metadata,
    roleRef: {
      apiGroup: 'rbac.authorization.k8s.io',
      kind: 'ClusterRole',
@@ -179,14 +169,9 @@ function(params) {
      securityContext: if bb._config.privileged then {
        runAsNonRoot: false,
        capabilities: { drop: ['ALL'], add: ['NET_RAW'] },
        readOnlyRootFilesystem: true,
      } else {
        runAsNonRoot: true,
        runAsUser: 65534,
        runAsGroup: 65534,
        allowPrivilegeEscalation: false,
        readOnlyRootFilesystem: true,
        capabilities: { drop: ['ALL'] },
      },
      volumeMounts: [{
        mountPath: '/etc/blackbox_exporter/',
@@ -203,14 +188,7 @@ function(params) {
        '--volume-dir=/etc/blackbox_exporter/',
      ],
      resources: bb._config.resources,
      securityContext: {
        runAsNonRoot: true,
        runAsUser: 65534,
        runAsGroup: 65534,
        allowPrivilegeEscalation: false,
        readOnlyRootFilesystem: true,
        capabilities: { drop: ['ALL'] },
      },
      securityContext: { runAsNonRoot: true, runAsUser: 65534 },
      terminationMessagePath: '/dev/termination-log',
      terminationMessagePolicy: 'FallbackToLogsOnError',
      volumeMounts: [{
@@ -220,7 +198,7 @@ function(params) {
    }],
  };

  local kubeRbacProxy = krp(bb._config.kubeRbacProxy {
  local kubeRbacProxy = krp({
    name: 'kube-rbac-proxy',
    upstream: 'http://127.0.0.1:' + bb._config.internalPort + '/',
    resources: bb._config.resources,
@@ -250,7 +228,6 @@ function(params) {
    spec: {
      containers: [blackboxExporter, reloader, kubeRbacProxy],
      nodeSelector: { 'kubernetes.io/os': 'linux' },
      automountServiceAccountToken: true,
      serviceAccountName: 'blackbox-exporter',
      volumes: [{
        name: 'config',
@@ -261,32 +238,6 @@ function(params) {
    },
  },

  networkPolicy: {
    apiVersion: 'networking.k8s.io/v1',
    kind: 'NetworkPolicy',
    metadata: bb.service.metadata,
    spec: {
      podSelector: {
        matchLabels: bb._config.selectorLabels,
      },
      policyTypes: ['Egress', 'Ingress'],
      egress: [{}],
      ingress: [{
        from: [{
          podSelector: {
            matchLabels: {
              'app.kubernetes.io/name': 'prometheus',
            },
          },
        }],
        ports: std.map(function(o) {
          port: o.port,
          protocol: 'TCP',
        }, bb.service.spec.ports),
      }],
    },
  },

  service: {
    apiVersion: 'v1',
    kind: 'Service',

@@ -24,12 +24,6 @@ local defaults = {
    if !std.setMember(labelName, ['app.kubernetes.io/version'])
  },
  prometheusName:: error 'must provide prometheus name',
  mixin: {
    ruleLabels: {},
    _config: {
      runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/grafana/%s',
    },
  },
};

function(params)
@@ -46,27 +40,6 @@ function(params)
    labels: g._config.commonLabels,
  },

  mixin::
    (import 'github.com/grafana/grafana/grafana-mixin/mixin.libsonnet') +
    (import 'github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet') + {
      _config+:: g._config.mixin._config,
    },

  prometheusRule: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'PrometheusRule',
    metadata: {
      labels: g._config.commonLabels + g._config.mixin.ruleLabels,
      name: g._config.name + '-rules',
      namespace: g._config.namespace,
    },
    spec: {
      local r = if std.objectHasAll(g.mixin, 'prometheusRules') then g.mixin.prometheusRules.groups else [],
      local a = if std.objectHasAll(g.mixin, 'prometheusAlerts') then g.mixin.prometheusAlerts.groups else [],
      groups: a + r,
    },
  },

  serviceMonitor: {
    apiVersion: 'monitoring.coreos.com/v1',
    kind: 'ServiceMonitor',
@@ -83,44 +56,4 @@ function(params)
      }],
    },
  },

  networkPolicy: {
    apiVersion: 'networking.k8s.io/v1',
    kind: 'NetworkPolicy',
    metadata: g.service.metadata,
    spec: {
      podSelector: {
        matchLabels: g._config.selectorLabels,
      },
      policyTypes: ['Egress', 'Ingress'],
      egress: [{}],
      ingress: [{
        from: [{
          podSelector: {
            matchLabels: {
              'app.kubernetes.io/name': 'prometheus',
            },
          },
        }],
        ports: std.map(function(o) {
          port: o.port,
          protocol: 'TCP',
        }, g.service.spec.ports),
      }],
    },
  },

  // FIXME(paulfantom): `automountServiceAccountToken` can be removed after porting to brancz/kubernetes-grafana
  deployment+: {
    spec+: {
      template+: {
        spec+: {
          automountServiceAccountToken: false,
          securityContext+: {
            runAsGroup: 65534,
          },
        },
      },
    },
  },
}

@@ -11,7 +11,6 @@ local defaults = {
  mixin:: {
    ruleLabels: {},
    _config: {
      showMultiCluster: true,
      cadvisorSelector: 'job="kubelet", metrics_path="/metrics/cadvisor"',
      kubeletSelector: 'job="kubelet", metrics_path="/metrics"',
      kubeStateMetricsSelector: 'job="kube-state-metrics"',
@@ -21,7 +20,7 @@ local defaults = {
      kubeApiserverSelector: 'job="apiserver"',
      podLabel: 'pod',
      runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/kubernetes/%s',
      diskDeviceSelector: 'device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"',
      diskDeviceSelector: 'device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"',
      hostNetworkInterfaceSelector: 'device!~"veth.+"',
    },
  },
@@ -38,14 +37,6 @@ function(params) {

  mixin:: (import 'github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet') {
    _config+:: k8s._config.mixin._config,
  } + {
    // Filter-out alerts related to kube-proxy when `kubeProxy: false`
    [if !(defaults + params).kubeProxy then 'prometheusAlerts']+:: {
      groups: std.filter(
        function(g) !std.member(['kubernetes-system-kube-proxy'], g.name),
        super.groups
      ),
    },
  },

  prometheusRule: {
@@ -71,30 +62,13 @@ function(params) {
    },
    spec: {
      jobLabel: 'app.kubernetes.io/name',
      endpoints: [
        {
          port: 'https-metrics',
          interval: '30s',
          scheme: 'https',
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          tlsConfig: { insecureSkipVerify: true },
        },
        {
          port: 'https-metrics',
          interval: '5s',
          scheme: 'https',
          path: '/metrics/slis',
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          tlsConfig: { insecureSkipVerify: true },
          metricRelabelings: [
            {
              sourceLabels: ['__name__'],
              regex: 'process_start_time_seconds',
              action: 'drop',
            },
          ],
        },
      ],
      endpoints: [{
        port: 'https-metrics',
        interval: '30s',
        scheme: 'https',
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        tlsConfig: { insecureSkipVerify: true },
      }],
      selector: {
        matchLabels: { 'app.kubernetes.io/name': 'kube-scheduler' },
      },
@@ -123,7 +97,6 @@ function(params) {
      bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
      metricRelabelings: relabelings,
      relabelings: [{
        action: 'replace',
        sourceLabels: ['__metrics_path__'],
        targetLabel: 'metrics_path',
      }],
@@ -140,7 +113,6 @@ function(params) {
      },
      bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
      relabelings: [{
        action: 'replace',
        sourceLabels: ['__metrics_path__'],
        targetLabel: 'metrics_path',
      }],
@@ -186,32 +158,10 @@ function(params) {
        tlsConfig: { insecureSkipVerify: true },
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        relabelings: [{
          action: 'replace',
          sourceLabels: ['__metrics_path__'],
          targetLabel: 'metrics_path',
        }],
      },
      {
        port: 'https-metrics',
        scheme: 'https',
        path: '/metrics/slis',
        interval: '5s',
        honorLabels: true,
        tlsConfig: { insecureSkipVerify: true },
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        relabelings: [
          {
            action: 'replace',
            sourceLabels: ['__metrics_path__'],
            targetLabel: 'metrics_path',
          },
          {
            sourceLabels: ['__name__'],
            regex: 'process_start_time_seconds',
            action: 'drop',
          },
        ],
      },
    ],
    selector: {
      matchLabels: { 'app.kubernetes.io/name': 'kubelet' },
@@ -231,41 +181,22 @@ function(params) {
    },
    spec: {
      jobLabel: 'app.kubernetes.io/name',
      endpoints: [
        {
          port: 'https-metrics',
          interval: '30s',
          scheme: 'https',
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          tlsConfig: {
            insecureSkipVerify: true,
          },
          metricRelabelings: relabelings + [
            {
              sourceLabels: ['__name__'],
              regex: 'etcd_(debugging|disk|request|server).*',
              action: 'drop',
            },
          ],
      endpoints: [{
        port: 'https-metrics',
        interval: '30s',
        scheme: 'https',
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        tlsConfig: {
          insecureSkipVerify: true,
        },
        {
          port: 'https-metrics',
          interval: '5s',
          scheme: 'https',
          path: '/metrics/slis',
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          tlsConfig: {
            insecureSkipVerify: true,
        metricRelabelings: relabelings + [
          {
            sourceLabels: ['__name__'],
            regex: 'etcd_(debugging|disk|request|server).*',
            action: 'drop',
          },
          metricRelabelings: [
            {
              sourceLabels: ['__name__'],
              regex: 'process_start_time_seconds',
              action: 'drop',
            },
          ],
        },
      ],
      ],
      }],
      selector: {
        matchLabels: { 'app.kubernetes.io/name': 'kube-controller-manager' },
      },
@@ -293,58 +224,38 @@ function(params) {
      namespaceSelector: {
        matchNames: ['default'],
      },
      endpoints: [
        {
          port: 'https',
          interval: '30s',
          scheme: 'https',
          tlsConfig: {
            caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
            serverName: 'kubernetes',
          },
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          metricRelabelings: relabelings + [
            {
              sourceLabels: ['__name__'],
              regex: 'etcd_(debugging|disk|server).*',
              action: 'drop',
            },
            {
              sourceLabels: ['__name__'],
              regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
              action: 'drop',
            },
            {
              sourceLabels: ['__name__'],
              regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
              action: 'drop',
            },
            {
              sourceLabels: ['__name__', 'le'],
              regex: 'apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50)',
              action: 'drop',
            },
          ],
      endpoints: [{
        port: 'https',
        interval: '30s',
        scheme: 'https',
        tlsConfig: {
          caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
          serverName: 'kubernetes',
        },
        {
          port: 'https',
          interval: '5s',
          scheme: 'https',
          path: '/metrics/slis',
          tlsConfig: {
            caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
            serverName: 'kubernetes',
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        metricRelabelings: relabelings + [
          {
            sourceLabels: ['__name__'],
            regex: 'etcd_(debugging|disk|server).*',
            action: 'drop',
          },
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          metricRelabelings: [
            {
              sourceLabels: ['__name__'],
              regex: 'process_start_time_seconds',
              action: 'drop',
            },
          ],
        },
      ],
          {
            sourceLabels: ['__name__'],
            regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
            action: 'drop',
          },
          {
            sourceLabels: ['__name__'],
            regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
            action: 'drop',
          },
          {
            sourceLabels: ['__name__', 'le'],
            regex: 'apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50)',
            action: 'drop',
          },
        ],
      }],
    },
  },

@@ -369,6 +280,7 @@ function(params) {
    },
    podMetricsEndpoints: [{
      honorLabels: true,
      targetPort: 10249,
      relabelings: [
        {
          action: 'replace',
@@ -377,13 +289,6 @@ function(params) {
          sourceLabels: ['__meta_kubernetes_pod_node_name'],
          targetLabel: 'instance',
        },
        {
          action: 'replace',
          regex: '(.*)',
          replacement: '$1:10249',
          targetLabel: '__address__',
          sourceLabels: ['__meta_kubernetes_pod_ip'],
        },
      ],
    }],
  },

@@ -50,6 +50,7 @@ function(params) {
  name: krp._config.name,
  image: krp._config.image,
  args: [
    '--logtostderr',
    '--secure-listen-address=' + krp._config.secureListenAddress,
    '--tls-cipher-suites=' + std.join(',', krp._config.tlsCipherSuites),
    '--upstream=' + krp._config.upstream,
@@ -60,9 +61,5 @@ function(params) {
    runAsUser: 65532,
    runAsGroup: 65532,
    runAsNonRoot: true,
    allowPrivilegeEscalation: false,
    readOnlyRootFilesystem: true,
    capabilities: { drop: ['ALL'] },
    seccompProfile: { type: 'RuntimeDefault' },
  },
}

@@ -7,7 +7,7 @@ local defaults = {
|
||||
name:: 'kube-state-metrics',
|
||||
namespace:: error 'must provide namespace',
|
||||
version:: error 'must provide version',
|
||||
image:: error 'must provide image',
|
||||
image:: error 'must provide version',
|
||||
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
|
||||
resources:: {
|
||||
requests: { cpu: '10m', memory: '190Mi' },
|
||||
@@ -20,12 +20,6 @@ local defaults = {
|
||||
requests+: { cpu: '20m' },
|
||||
},
|
||||
},
|
||||
kubeRbacProxySelf:: {
|
||||
resources+: {
|
||||
limits+: { cpu: '20m' },
|
||||
requests+: { cpu: '10m' },
|
||||
},
|
||||
},
|
||||
scrapeInterval:: '30s',
|
||||
scrapeTimeout:: '30s',
|
||||
commonLabels:: {
|
||||
@@ -114,7 +108,7 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
|
||||
image: ksm._config.kubeRbacProxyImage,
|
||||
}),
|
||||
|
||||
local kubeRbacProxySelf = krp(ksm._config.kubeRbacProxySelf {
|
||||
local kubeRbacProxySelf = krp({
|
||||
name: 'kube-rbac-proxy-self',
|
||||
upstream: 'http://127.0.0.1:8082/',
|
||||
secureListenAddress: ':9443',
|
||||
@@ -124,32 +118,6 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
|
||||
image: ksm._config.kubeRbacProxyImage,
|
||||
}),
|
||||
|
||||
networkPolicy: {
|
||||
apiVersion: 'networking.k8s.io/v1',
|
||||
kind: 'NetworkPolicy',
|
||||
metadata: ksm.service.metadata,
|
||||
spec: {
|
||||
podSelector: {
|
||||
matchLabels: ksm._config.selectorLabels,
|
||||
},
|
||||
policyTypes: ['Egress', 'Ingress'],
|
||||
egress: [{}],
|
||||
ingress: [{
|
||||
from: [{
|
||||
podSelector: {
|
||||
matchLabels: {
|
||||
'app.kubernetes.io/name': 'prometheus',
|
||||
},
|
||||
},
|
||||
}],
|
||||
ports: std.map(function(o) {
|
||||
port: o.port,
|
||||
protocol: 'TCP',
|
||||
}, ksm.service.spec.ports),
|
||||
}],
|
||||
},
|
||||
},
|
||||
|
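The networkPolicy above derives its allowed ingress ports from the Service spec with std.map, so the policy cannot drift from the Service it guards. A self-contained illustration of that pattern (port values illustrative):

// Evaluate with `jsonnet` to see the derived NetworkPolicy ports.
local service = {
  spec: { ports: [
    { name: 'https-main', port: 8443, targetPort: 'https-main' },
    { name: 'https-self', port: 9443, targetPort: 'https-self' },
  ] },
};

std.map(function(o) { port: o.port, protocol: 'TCP' }, service.spec.ports)
// => [{ port: 8443, protocol: 'TCP' }, { port: 9443, protocol: 'TCP' }]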
deployment+: {
spec+: {
template+: {
@@ -159,14 +127,10 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
},
},
spec+: {
automountServiceAccountToken: true,
containers: std.map(function(c) c {
ports:: null,
livenessProbe:: null,
readinessProbe:: null,
securityContext+: {
runAsGroup: 65534,
},
args: ['--host=127.0.0.1', '--port=8081', '--telemetry-host=127.0.0.1', '--telemetry-port=8082'],
resources: ksm._config.resources,
}, super.containers) + [kubeRbacProxyMain, kubeRbacProxySelf],
@@ -198,14 +162,6 @@ function(params) (import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-
action: 'labeldrop',
},
],
metricRelabelings: [
{
// Drop metrics deprecated since kube-state-metrics 2.6.0.
sourceLabels: ['__name__'],
regex: 'kube_endpoint_address_not_ready|kube_endpoint_address_available',
action: 'drop',
},
],
tlsConfig: {
insecureSkipVerify: true,
},

@@ -10,7 +10,7 @@
summary: 'One or more targets are unreachable.',
description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.',
},
expr: '100 * (count(up == 0) BY (cluster, job, namespace, service) / count(up) BY (cluster, job, namespace, service)) > 10',
expr: '100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10',
'for': '10m',
labels: {
severity: 'warning',
@@ -33,24 +33,6 @@
severity: 'none',
},
},
{
alert: 'InfoInhibitor',
annotations: {
summary: 'Info-level alert inhibition.',
description: |||
This is an alert that is used to inhibit info alerts.
By themselves, the info-level alerts are sometimes very noisy, but they are relevant when combined with
other alerts.
This alert fires whenever there's a severity="info" alert, and stops firing when another alert with a
severity of 'warning' or 'critical' starts firing on the same namespace.
This alert should be routed to a null receiver and configured to inhibit alerts with severity="info".
|||,
},
expr: 'ALERTS{severity = "info"} == 1 unless on(namespace) ALERTS{alertname != "InfoInhibitor", severity =~ "warning|critical", alertstate="firing"} == 1',
labels: {
severity: 'none',
},
},
],
},
],

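The InfoInhibitor description above says the alert should be routed to a null receiver and then used to silence severity="info" alerts in the same namespace. A hedged sketch of the Alertmanager side, expressed as a jsonnet fragment; the receiver name and field layout are illustrative, not mandated by this diff:

// Sketch: route InfoInhibitor to a no-op receiver and inhibit info alerts
// per namespace while it fires.
{
  route+: {
    routes+: [{ receiver: 'null', matchers: ['alertname = "InfoInhibitor"'] }],
  },
  receivers+: [{ name: 'null' }],
  inhibit_rules+: [{
    source_matchers: ['alertname = "InfoInhibitor"'],
    target_matchers: ['severity = "info"'],
    equal: ['namespace'],
  }],
}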
@@ -25,7 +25,7 @@
record: 'cluster:node_cpu:sum_rate5m',
},
{
expr: 'cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu))',
expr: 'cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu))',
record: 'cluster:node_cpu:ratio',
},
],

@@ -7,24 +7,14 @@ local defaults = {
name:: 'node-exporter',
namespace:: error 'must provide namespace',
version:: error 'must provide version',
image:: error 'must provide image',
image:: error 'must provide version',
kubeRbacProxyImage:: error 'must provide kubeRbacProxyImage',
resources:: {
requests: { cpu: '102m', memory: '180Mi' },
limits: { cpu: '250m', memory: '180Mi' },
},
kubeRbacProxy:: {
resources+: {
requests: { cpu: '10m', memory: '20Mi' },
limits: { cpu: '20m', memory: '40Mi' },
},
},
listenAddress:: '127.0.0.1',
filesystemMountPointsExclude:: '^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
// NOTE: ignore veth network interfaces associated with containers.
// OVN renames veth.* to <rand-hex>@if<X> where X is /sys/class/net/<if>/ifindex,
// thus the [a-f0-9]{15} regex below.
ignoredNetworkDevices:: '^(veth.*|[a-f0-9]{15})$',
port:: 9100,
commonLabels:: {
'app.kubernetes.io/name': defaults.name,
@@ -45,13 +35,10 @@ local defaults = {
// GC values,
// imageGCLowThresholdPercent: 80
// imageGCHighThresholdPercent: 85
// GC kicks in when imageGCHighThresholdPercent is hit and attempts to free up to imageGCLowThresholdPercent.
// See https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ for more details.
// Warn only after imageGCHighThresholdPercent is hit, but the filesystem is not freed up for a prolonged duration.
fsSpaceFillingUpWarningThreshold: 15,
// Send a critical alert only after (imageGCHighThresholdPercent + 5) is hit, but the filesystem is not freed up for a prolonged duration.
fsSpaceFillingUpCriticalThreshold: 10,
diskDeviceSelector: 'device=~"(/dev/)?(mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|md.+|dasd.+)"',
fsSpaceFillingUpWarningThreshold: 20,
fsSpaceFillingUpCriticalThreshold: 15,
diskDeviceSelector: 'device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"',
runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/node/%s',
},
},
@@ -92,10 +79,7 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: {
name: ne._config.name,
labels: ne._config.commonLabels,
},
metadata: ne._metadata,
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
@@ -111,10 +95,7 @@ function(params) {
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: {
name: ne._config.name,
labels: ne._config.commonLabels,
},
metadata: ne._metadata,
rules: [
{
apiGroups: ['authentication.k8s.io'],
@@ -133,7 +114,6 @@ function(params) {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: ne._metadata,
automountServiceAccountToken: false,
},

service: {
@@ -179,32 +159,6 @@ function(params) {
},
},

networkPolicy: {
apiVersion: 'networking.k8s.io/v1',
kind: 'NetworkPolicy',
metadata: ne.service.metadata,
spec: {
podSelector: {
matchLabels: ne._config.selectorLabels,
},
policyTypes: ['Egress', 'Ingress'],
egress: [{}],
ingress: [{
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'prometheus',
},
},
}],
ports: std.map(function(o) {
port: o.port,
protocol: 'TCP',
}, ne.service.spec.ports),
}],
},
},

daemonset:
local nodeExporter = {
name: ne._config.name,
@@ -213,27 +167,23 @@ function(params) {
'--web.listen-address=' + std.join(':', [ne._config.listenAddress, std.toString(ne._config.port)]),
'--path.sysfs=/host/sys',
'--path.rootfs=/host/root',
'--path.udev.data=/host/root/run/udev/data',
'--no-collector.wifi',
'--no-collector.hwmon',
'--no-collector.btrfs',
'--collector.filesystem.mount-points-exclude=' + ne._config.filesystemMountPointsExclude,
'--collector.netclass.ignored-devices=' + ne._config.ignoredNetworkDevices,
'--collector.netdev.device-exclude=' + ne._config.ignoredNetworkDevices,
// NOTE: ignore veth network interfaces associated with containers.
// OVN renames veth.* to <rand-hex>@if<X> where X is /sys/class/net/<if>/ifindex,
// thus the [a-f0-9]{15} regex below.
'--collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$',
'--collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$',
],
volumeMounts: [
{ name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true },
{ name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true },
],
resources: ne._config.resources,
securityContext: {
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
capabilities: { drop: ['ALL'], add: ['SYS_TIME'] },
},
};

local kubeRbacProxy = krp(ne._config.kubeRbacProxy {
local kubeRbacProxy = krp({
name: 'kube-rbac-proxy',
//image: krpImage,
upstream: 'http://127.0.0.1:' + ne._config.port + '/',
@@ -246,12 +196,6 @@ function(params) {
// used by the service is tied to the proxy container. We *could*
// forgo declaring the host port, however it is important to declare
// it so that the scheduler can decide if the pod is schedulable.
//
// Although hostPort might not seem necessary, kubernetes adds it anyway
// when running with 'hostNetwork'. We might as well make sure it works
// the way we want.
//
// See also: https://github.com/kubernetes/kubernetes/blob/1945829906546caf867992669a0bfa588edf8be6/pkg/apis/core/v1/defaults.go#L402-L411
ports: [
{ name: 'https', containerPort: ne._config.port, hostPort: ne._config.port },
],
@@ -291,11 +235,8 @@ function(params) {
{ name: 'sys', hostPath: { path: '/sys' } },
{ name: 'root', hostPath: { path: '/' } },
],
automountServiceAccountToken: true,
serviceAccountName: ne._config.name,
priorityClassName: 'system-cluster-critical',
securityContext: {
runAsGroup: 65534,
runAsUser: 65534,
runAsNonRoot: true,
},

@@ -31,27 +31,22 @@ local defaults = {
nodeExporter: '4m',
windowsExporter: '4m',
},
containerMetricsPrefix:: '',

prometheusURL:: error 'must provide prometheusURL',
containerQuerySelector:: '',
nodeQuerySelector:: '',
config:: {
local containerSelector = if $.containerQuerySelector != '' then ',' + $.containerQuerySelector else '',
local nodeSelector = if $.nodeQuerySelector != '' then ',' + $.nodeQuerySelector else '',
resourceRules: {
cpu: {
containerQuery: |||
sum by (<<.GroupBy>>) (
irate (
%(containerMetricsPrefix)scontainer_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""%(addtionalSelector)s}[%(kubelet)s]
container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""}[%(kubelet)s]
)
)
||| % { kubelet: $.rangeIntervals.kubelet, containerMetricsPrefix: $.containerMetricsPrefix, addtionalSelector: containerSelector },
||| % $.rangeIntervals,
nodeQuery: |||
sum by (<<.GroupBy>>) (
1 - irate(
node_cpu_seconds_total{mode="idle"%(addtionalSelector)s}[%(nodeExporter)s]
node_cpu_seconds_total{mode="idle"}[%(nodeExporter)s]
)
* on(namespace, pod) group_left(node) (
node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}
@@ -59,10 +54,10 @@ local defaults = {
)
or sum by (<<.GroupBy>>) (
1 - irate(
windows_cpu_time_total{mode="idle", job="windows-exporter",<<.LabelMatchers>>%(addtionalSelector)s}[%(windowsExporter)s]
windows_cpu_time_total{mode="idle", job="windows-exporter",<<.LabelMatchers>>}[%(windowsExporter)s]
)
)
||| % { nodeExporter: $.rangeIntervals.nodeExporter, windowsExporter: $.rangeIntervals.windowsExporter, containerMetricsPrefix: $.containerMetricsPrefix, addtionalSelector: nodeSelector },
||| % $.rangeIntervals,
resources: {
overrides: {
node: { resource: 'node' },
@@ -75,21 +70,21 @@ local defaults = {
memory: {
containerQuery: |||
sum by (<<.GroupBy>>) (
%(containerMetricsPrefix)scontainer_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""%(addtionalSelector)s}
container_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""}
)
||| % { containerMetricsPrefix: $.containerMetricsPrefix, addtionalSelector: containerSelector },
|||,
nodeQuery: |||
sum by (<<.GroupBy>>) (
node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>%(addtionalSelector)s}
node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>}
-
node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>%(addtionalSelector)s}
node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}
)
or sum by (<<.GroupBy>>) (
windows_cs_physical_memory_bytes{job="windows-exporter",<<.LabelMatchers>>%(addtionalSelector)s}
windows_cs_physical_memory_bytes{job="windows-exporter",<<.LabelMatchers>>}
-
windows_memory_available_bytes{job="windows-exporter",<<.LabelMatchers>>%(addtionalSelector)s}
windows_memory_available_bytes{job="windows-exporter",<<.LabelMatchers>>}
)
||| % { containerMetricsPrefix: $.containerMetricsPrefix, addtionalSelector: nodeSelector },
|||,
resources: {
overrides: {
instance: { resource: 'node' },
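The adapter queries above rely on jsonnet's ||| text blocks combined with the % formatting operator: named %(key)s placeholders are filled from an object, which is how the newer side injects rangeIntervals and the optional extra selector. A self-contained illustration (values illustrative; `addtionalSelector` is spelled as in the source):

// Evaluate with `jsonnet` to see the substituted query string.
local params = { kubelet: '4m', addtionalSelector: ',cluster="eu-1"' };

|||
  sum (
    irate(container_cpu_usage_seconds_total{container!=""%(addtionalSelector)s}[%(kubelet)s])
  )
||| % params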
@@ -133,11 +128,6 @@ function(params) {
labels: pa._config.commonLabels,
},

_metadata_no_ns:: {
name: pa._config.name,
labels: pa._config.commonLabels,
},

apiService: {
apiVersion: 'apiregistration.k8s.io/v1',
kind: 'APIService',
@@ -216,21 +206,6 @@ function(params) {
},
},

networkPolicy: {
apiVersion: 'networking.k8s.io/v1',
kind: 'NetworkPolicy',
metadata: pa.service.metadata,
spec: {
podSelector: {
matchLabels: pa._config.selectorLabels,
},
policyTypes: ['Egress', 'Ingress'],
egress: [{}],
// Prometheus-adapter needs ingress allowed so HPAs can request metrics from it.
ingress: [{}],
},
},

deployment:
local c = {
name: pa._config.name,
@@ -238,52 +213,19 @@ function(params) {
args: [
'--cert-dir=/var/run/serving-cert',
'--config=/etc/adapter/config.yaml',
'--logtostderr=true',
'--metrics-relist-interval=1m',
'--prometheus-url=' + pa._config.prometheusURL,
'--secure-port=6443',
'--tls-cipher-suites=' + std.join(',', pa._config.tlsCipherSuites),
],
resources: pa._config.resources,
startupProbe: {
httpGet: {
path: '/livez',
port: 'https',
scheme: 'HTTPS',
},
periodSeconds: 10,
failureThreshold: 18,
},
readinessProbe: {
httpGet: {
path: '/readyz',
port: 'https',
scheme: 'HTTPS',
},
periodSeconds: 5,
failureThreshold: 5,
},
livenessProbe: {
httpGet: {
path: '/livez',
port: 'https',
scheme: 'HTTPS',
},
periodSeconds: 5,
failureThreshold: 5,
},
ports: [{ containerPort: 6443, name: 'https' }],
ports: [{ containerPort: 6443 }],
volumeMounts: [
{ name: 'tmpfs', mountPath: '/tmp', readOnly: false },
{ name: 'volume-serving-cert', mountPath: '/var/run/serving-cert', readOnly: false },
{ name: 'config', mountPath: '/etc/adapter', readOnly: false },
],
securityContext: {
allowPrivilegeEscalation: false,
readOnlyRootFilesystem: true,
runAsNonRoot: true,
capabilities: { drop: ['ALL'] },
seccompProfile: { type: 'RuntimeDefault' },
},
};

{
@@ -302,16 +244,10 @@ function(params) {
},
},
template: {
metadata: {
annotations: {
'checksum.config/md5': std.md5(std.manifestYamlDoc(pa._config.config)),
},
labels: pa._config.commonLabels,
},
metadata: { labels: pa._config.commonLabels },
spec: {
containers: [c],
serviceAccountName: $.serviceAccount.metadata.name,
automountServiceAccountToken: true,
nodeSelector: { 'kubernetes.io/os': 'linux' },
volumes: [
{ name: 'tmpfs', emptyDir: {} },
@@ -327,13 +263,12 @@ function(params) {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: pa._metadata,
automountServiceAccountToken: false,
},

clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata_no_ns,
metadata: pa._metadata,
rules: [{
apiGroups: [''],
resources: ['nodes', 'namespaces', 'pods', 'services'],
@@ -344,7 +279,7 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: pa._metadata_no_ns,
metadata: pa._metadata,
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
@@ -360,7 +295,7 @@ function(params) {
clusterRoleBindingDelegator: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: pa._metadata_no_ns {
metadata: pa._metadata {
name: 'resource-metrics:system:auth-delegator',
},
roleRef: {
@@ -378,7 +313,7 @@ function(params) {
clusterRoleServerResources: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata_no_ns {
metadata: pa._metadata {
name: 'resource-metrics-server-resources',
},
rules: [{
@@ -391,7 +326,7 @@ function(params) {
clusterRoleAggregatedMetricsReader: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: pa._metadata_no_ns {
metadata: pa._metadata {
name: 'system:aggregated-metrics-reader',
labels+: {
'rbac.authorization.k8s.io/aggregate-to-admin': 'true',

@@ -15,12 +15,6 @@ local defaults = {
limits: { cpu: '200m', memory: '200Mi' },
requests: { cpu: '100m', memory: '100Mi' },
},
kubeRbacProxy:: {
resources+: {
requests: { cpu: '10m', memory: '20Mi' },
limits: { cpu: '20m', memory: '40Mi' },
},
},
commonLabels:: {
'app.kubernetes.io/name': defaults.name,
'app.kubernetes.io/version': defaults.version,
@@ -38,7 +32,6 @@ local defaults = {
prometheus: defaults.name,
},
_config: {
groupLabels: 'cluster,controller,namespace',
prometheusOperatorSelector: 'job="prometheus-operator",namespace="' + defaults.namespace + '"',
runbookURLPattern: 'https://runbooks.prometheus-operator.dev/runbooks/prometheus-operator/%s',
},
@@ -79,32 +72,6 @@ function(params)
},
},

networkPolicy: {
apiVersion: 'networking.k8s.io/v1',
kind: 'NetworkPolicy',
metadata: po.service.metadata,
spec: {
podSelector: {
matchLabels: po._config.selectorLabels,
},
policyTypes: ['Egress', 'Ingress'],
egress: [{}],
ingress: [{
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'prometheus',
},
},
}],
ports: std.map(function(o) {
port: o.port,
protocol: 'TCP',
}, po.service.spec.ports),
}],
},
},

service+: {
spec+: {
ports: [
@@ -148,7 +115,7 @@ function(params)
],
},

local kubeRbacProxy = krp(po._config.kubeRbacProxy {
local kubeRbacProxy = krp({
name: 'kube-rbac-proxy',
upstream: 'http://127.0.0.1:8080/',
secureListenAddress: ':8443',
@@ -162,10 +129,6 @@ function(params)
spec+: {
template+: {
spec+: {
automountServiceAccountToken: true,
securityContext+: {
runAsGroup: 65534,
},
containers+: [kubeRbacProxy],
},
},

@@ -35,13 +35,13 @@ local defaults = {
prometheusSelector: 'job="prometheus-' + defaults.name + '",namespace="' + defaults.namespace + '"',
prometheusName: '{{$labels.namespace}}/{{$labels.pod}}',
// TODO: remove `thanosSelector` after 0.10.0 release.
thanosSelector: 'job="thanos-sidecar"',
thanosSelector: '',
thanos: {
targetGroups: {
namespace: defaults.namespace,
},
sidecar: {
selector: defaults.mixin._config.thanosSelector,
selector: 'job="thanos-sidecar"',
thanosPrometheusCommonDimensions: 'namespace, pod',
},
},
@@ -94,74 +94,10 @@ function(params) {
},
},

networkPolicy: {
apiVersion: 'networking.k8s.io/v1',
kind: 'NetworkPolicy',
metadata: p.service.metadata,
spec: {
podSelector: {
matchLabels: p._config.selectorLabels,
},
policyTypes: ['Egress', 'Ingress'],
egress: [{}],
ingress: [{
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'prometheus',
},
},
}],
ports: std.map(function(o) {
port: o.port,
protocol: 'TCP',
}, p.service.spec.ports),
}, {
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'prometheus-adapter',
},
},
}],
ports: [{
port: 9090,
protocol: 'TCP',
}],
}, {
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'grafana',
},
},
}],
ports: [{
port: 9090,
protocol: 'TCP',
}],
}] + (if p._config.thanos != null then
[{
from: [{
podSelector: {
matchLabels: {
'app.kubernetes.io/name': 'thanos-query',
},
},
}],
ports: [{
port: 10901,
protocol: 'TCP',
}],
}] else []),
},
},

serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: p._metadata,
automountServiceAccountToken: true,
},

service: {
@@ -175,10 +111,7 @@ function(params) {
] +
(
if p._config.thanos != null then
[
{ name: 'grpc', port: 10901, targetPort: 10901 },
{ name: 'http', port: 10902, targetPort: 10902 },
]
[{ name: 'grpc', port: 10901, targetPort: 10901 }]
else []
),
selector: p._config.selectorLabels,
@@ -213,9 +146,7 @@ function(params) {
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: p._metadata {
namespace:: null,
},
metadata: p._metadata,
rules: [
{
apiGroups: [''],
@@ -223,7 +154,7 @@ function(params) {
verbs: ['get'],
},
{
nonResourceURLs: ['/metrics', '/metrics/slis'],
nonResourceURLs: ['/metrics'],
verbs: ['get'],
},
],
@@ -251,11 +182,11 @@ function(params) {
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: p.roleConfig.metadata.name,
name: p._metadata.name + '-config',
},
subjects: [{
kind: 'ServiceAccount',
name: p.serviceAccount.metadata.name,
name: p._metadata.name,
namespace: p._config.namespace,
}],
},
@@ -263,17 +194,15 @@ function(params) {
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: p._metadata {
namespace:: null,
},
metadata: p._metadata,
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: p.clusterRole.metadata.name,
name: p._metadata.name,
},
subjects: [{
kind: 'ServiceAccount',
name: p.serviceAccount.metadata.name,
name: p._metadata.name,
namespace: p._config.namespace,
}],
},
@@ -343,8 +272,6 @@ function(params) {
probeNamespaceSelector: {},
ruleNamespaceSelector: {},
ruleSelector: p._config.ruleSelector,
scrapeConfigSelector: {},
scrapeConfigNamespaceSelector: {},
serviceMonitorSelector: {},
serviceMonitorNamespaceSelector: {},
nodeSelector: { 'kubernetes.io/os': 'linux' },

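The service and networkPolicy hunks above both branch on `p._config.thanos != null`, so setting that field to null is what strips the sidecar ports and the thanos-query ingress rule. A hedged sketch of the corresponding override; the `values` path follows the kube-prometheus customization convention and may differ between releases:

// Sketch only: disable the Thanos sidecar wiring.
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    prometheus+: {
      thanos: null,  // grpc/http ports and thanos-query ingress are then omitted
    },
  },
};
kp.prometheus.service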
@@ -8,17 +8,7 @@
"subdir": "grafana"
}
},
"version": "master"
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafana",
"subdir": "grafana-mixin"
}
},
"version": "main",
"name": "grafana-mixin"
"version": "199e363523104ff8b3a12483a4e3eca86372b078"
},
{
"source": {
@@ -27,7 +17,7 @@
"subdir": "contrib/mixin"
}
},
"version": "main"
"version": "release-3.5"
},
{
"source": {
@@ -36,7 +26,7 @@
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "main"
"version": "release-0.53"
},
{
"source": {
@@ -45,7 +35,7 @@
"subdir": "jsonnet/mixin"
}
},
"version": "main",
"version": "release-0.53",
"name": "prometheus-operator-mixin"
},
{
@@ -55,7 +45,7 @@
"subdir": ""
}
},
"version": "master"
"version": "release-0.10"
},
{
"source": {
@@ -64,7 +54,7 @@
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "main"
"version": "release-2.3"
},
{
"source": {
@@ -73,7 +63,7 @@
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "main"
"version": "release-2.3"
},
{
"source": {
@@ -82,7 +72,7 @@
"subdir": "docs/node-mixin"
}
},
"version": "master"
"version": "release-1.3"
},
{
"source": {
@@ -91,7 +81,7 @@
"subdir": "documentation/prometheus-mixin"
}
},
"version": "main",
"version": "release-2.32",
"name": "prometheus"
},
{
@@ -101,19 +91,9 @@
"subdir": "doc/alertmanager-mixin"
}
},
"version": "main",
"version": "release-0.23",
"name": "alertmanager"
},
{
"source": {
"git": {
"remote": "https://github.com/pyrra-dev/pyrra.git",
"subdir": "jsonnet/controller-gen"
}
},
"version": "release-0.7",
"name": "pyrra"
},
{
"source": {
"git": {
@@ -121,7 +101,7 @@
"subdir": "mixin"
}
},
"version": "main",
"version": "release-0.23",
"name": "thanos-mixin"
}
],

@@ -1,156 +0,0 @@
local defaults = {
/* name of rule groups to exclude */
excludedRuleGroups: [],
/* Rule match is based on the "alert" or "record" field for excludedRules and patchedRules.
* When multiple matches are found, we can use an "index" field to distinguish each rule,
* which represents their order of appearance. For example, if we have two rules:
* [
* {
* name: 'alertmanager.rules',
* rules: [
* {
* alert: 'A',
* field: 'A0 rule',
* labels: {
* severity: 'warning',
* },
* },
* {
* alert: 'A',
* field: 'A1 rule',
* labels: {
* severity: 'warning',
* },
* },
* ],
* },
* ]
* We can use index 1 to choose "A1 rule" for patching, as shown in the example below:
* [
* {
* name: 'alertmanager.rules',
* rules: [
* {
* alert: 'A',
* index: 1,
* patch: 'A1',
* labels: {
* severity: 'warning',
* },
* },
* ],
* },
* ]
*/
excludedRules: [],
patchedRules: [],
};

local deleteIndex(rule) = {
[k]: rule[k]
for k in std.objectFields(rule)
if k != 'index'
};

local patchOrExcludeRule(rule, ruleSet, operation) =
if std.length(ruleSet) == 0 then
[deleteIndex(rule)]
/* 2 rules match when the name of the patch is a prefix of the name of the rule to patch. */
else if ((('alert' in rule && 'alert' in ruleSet[0]) && std.startsWith(rule.alert, ruleSet[0].alert)) ||
(('record' in rule && 'record' in ruleSet[0]) && std.startsWith(rule.record, ruleSet[0].record))) &&
(!('index' in ruleSet[0]) || (('index' in ruleSet[0]) && (ruleSet[0].index == rule.index))) then
if operation == 'patch' then
local patch = {
[k]: ruleSet[0][k]
for k in std.objectFields(ruleSet[0])
if k != 'alert' && k != 'record' && k != 'index'
};
[deleteIndex(std.mergePatch(rule, patch))]
else // equivalent to operation == 'exclude'
[]

else
[] + patchOrExcludeRule(rule, ruleSet[1:], operation);

local sameRuleName(rule1, rule2) =
if ('alert' in rule1 && 'alert' in rule2) then
rule1.alert == rule2.alert
else if ('record' in rule1 && 'record' in rule2) then
rule1.record == rule2.record
else
false;

local indexRules(lastRule, ruleSet) =
if std.length(ruleSet) == 0 then
[]
else if (lastRule != null) && sameRuleName(lastRule, ruleSet[0]) then
local updatedRule = std.mergePatch(ruleSet[0], { index: lastRule.index + 1 });
[updatedRule] + indexRules(updatedRule, ruleSet[1:])
else
local updatedRule = std.mergePatch(ruleSet[0], { index: 0 });
[updatedRule] + indexRules(updatedRule, ruleSet[1:]);

local ruleName(rule) =
if ('alert' in rule) then
rule.alert
else if ('record' in rule) then
rule.record
else
assert false : 'rule should have either "alert" or "record" field' + std.toString(rule);
'';

local patchOrExcludeRuleGroup(group, groupSet, operation) =
if std.length(groupSet) == 0 then
[group.rules]
else if (group.name == groupSet[0].name) then
local indexedRules = indexRules(null, std.sort(
group.rules, keyF=ruleName
));
[patchOrExcludeRule(rule, groupSet[0].rules, operation) for rule in indexedRules]
else
[] + patchOrExcludeRuleGroup(group, groupSet[1:], operation);

function(params) {
local ruleModifications = defaults + params,
assert std.isArray(ruleModifications.excludedRuleGroups) : 'rule-patcher: excludedRuleGroups should be an array',
assert std.isArray(ruleModifications.excludedRules) : 'rule-patcher: excludedRules should be an array',
assert std.isArray(ruleModifications.patchedRules) : 'rule-patcher: patchedRules should be an array',

local excludeRule(o) = o {
[if (o.kind == 'PrometheusRule') then 'spec']+: {
groups: std.filterMap(
function(group) !std.member(ruleModifications.excludedRuleGroups, group.name),
function(group)
group {
rules: std.flattenArrays(
patchOrExcludeRuleGroup(group, ruleModifications.excludedRules, 'exclude')
),
},
super.groups,
),
},
},

local patchRule(o) = o {
[if (o.kind == 'PrometheusRule') then 'spec']+: {
groups: std.map(
function(group)
group {
rules: std.flattenArrays(
patchOrExcludeRuleGroup(group, ruleModifications.patchedRules, 'patch')
),
},
super.groups,
),
},
},

// shorthand for rule patching, rule excluding
sanitizePrometheusRules(o): {
[k]: patchRule(excludeRule(o[k]))
for k in std.objectFields(o)
},
}
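A hedged usage sketch for the rule-patcher removed above, following the excludedRules/patchedRules shapes documented in its header comment; the import path and rule names are illustrative:

// Sketch: exclude one alert outright and patch the second (index 1) of two
// same-named alerts in the 'alertmanager.rules' group.
local sanitize = (import './rule-patcher.libsonnet')({
  excludedRuleGroups: [],
  excludedRules: [
    { name: 'alertmanager.rules', rules: [{ alert: 'AlertmanagerClusterDown' }] },
  ],
  patchedRules: [
    { name: 'alertmanager.rules', rules: [{ alert: 'A', index: 1, labels: { severity: 'warning' } }] },
  ],
});

// `sanitizePrometheusRules` maps over a manifest set; only objects with
// kind 'PrometheusRule' are rewritten.
sanitize.sanitizePrometheusRules({ exampleRule: { kind: 'PrometheusRule', spec: { groups: [] } } })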
@@ -40,14 +40,14 @@ local utils = import './lib/utils.libsonnet';
alertmanager: 'quay.io/prometheus/alertmanager:v' + $.values.common.versions.alertmanager,
blackboxExporter: 'quay.io/prometheus/blackbox-exporter:v' + $.values.common.versions.blackboxExporter,
grafana: 'grafana/grafana:' + $.values.common.versions.grafana,
kubeStateMetrics: 'registry.k8s.io/kube-state-metrics/kube-state-metrics:v' + $.values.common.versions.kubeStateMetrics,
kubeStateMetrics: 'k8s.gcr.io/kube-state-metrics/kube-state-metrics:v' + $.values.common.versions.kubeStateMetrics,
nodeExporter: 'quay.io/prometheus/node-exporter:v' + $.values.common.versions.nodeExporter,
prometheus: 'quay.io/prometheus/prometheus:v' + $.values.common.versions.prometheus,
prometheusAdapter: 'registry.k8s.io/prometheus-adapter/prometheus-adapter:v' + $.values.common.versions.prometheusAdapter,
prometheusAdapter: 'k8s.gcr.io/prometheus-adapter/prometheus-adapter:v' + $.values.common.versions.prometheusAdapter,
prometheusOperator: 'quay.io/prometheus-operator/prometheus-operator:v' + $.values.common.versions.prometheusOperator,
prometheusOperatorReloader: 'quay.io/prometheus-operator/prometheus-config-reloader:v' + $.values.common.versions.prometheusOperator,
kubeRbacProxy: 'quay.io/brancz/kube-rbac-proxy:v' + $.values.common.versions.kubeRbacProxy,
configmapReload: 'ghcr.io/jimmidyson/configmap-reload:v' + $.values.common.versions.configmapReload,
configmapReload: 'jimmidyson/configmap-reload:v' + $.values.common.versions.configmapReload,
},
},
alertmanager: {
@@ -70,12 +70,7 @@ local utils = import './lib/utils.libsonnet';
image: $.values.common.images.grafana,
prometheusName: $.values.prometheus.name,
// TODO(paulfantom) This should be done by iterating over all objects and looking for object.mixin.grafanaDashboards
dashboards: $.nodeExporter.mixin.grafanaDashboards +
$.prometheus.mixin.grafanaDashboards +
$.kubernetesControlPlane.mixin.grafanaDashboards +
$.alertmanager.mixin.grafanaDashboards +
$.grafana.mixin.grafanaDashboards,
mixin+: { ruleLabels: $.values.common.ruleLabels },
dashboards: $.nodeExporter.mixin.grafanaDashboards + $.prometheus.mixin.grafanaDashboards + $.kubernetesControlPlane.mixin.grafanaDashboards + $.alertmanager.mixin.grafanaDashboards,
},
kubeStateMetrics: {
namespace: $.values.common.namespace,
@@ -150,10 +145,6 @@ local utils = import './lib/utils.libsonnet';
kind: 'Namespace',
metadata: {
name: $.values.common.namespace,
labels: {
'pod-security.kubernetes.io/warn': 'privileged',
'pod-security.kubernetes.io/warn-version': 'latest',
},
},
},
},

@@ -1,3 +1,3 @@
# Adding a new platform-specific configuration

Adding a new platform-specific configuration requires updating the [customization example](https://github.com/prometheus-operator/kube-prometheus/tree/main/../docs/customizations/platform-specific.md#running-kube-prometheus-on-specific-platforms) and the [platforms.libsonnet](platforms.libsonnet) file by adding the platform to the list of existing ones. This allows the new platform to be discoverable and easily configurable by users.
Adding a new platform-specific configuration requires updating the [customization example](../../../docs/customizations/platform-specific.md#running-kube-prometheus-on-specific-platforms) and the [platforms.libsonnet](platforms.libsonnet) file by adding the platform to the list of existing ones. This allows the new platform to be discoverable and easily configurable by users.

@@ -1,32 +0,0 @@
(import '../addons/managed-cluster.libsonnet') + {
values+:: {
prometheusAdapter+: {
config+: {
resourceRules:: null,
},
},
},

prometheusAdapter+:: {
apiService:: null,
},

kubernetesControlPlane+: {
kubeDnsPrometheusStackService: {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: 'kube-prometheus-stack-coredns',
namespace: 'kube-system',
labels: { 'k8s-app': 'kube-dns' },
},
spec: {
ports: [
{ name: 'metrics', port: 9153, targetPort: 9153 },
],
selector: { 'k8s-app': 'kube-dns' },
clusterIP: 'None',
},
},
},
}
@@ -1,5 +1,4 @@
local platforms = {
aks: import './aks.libsonnet',
aws: import './aws.libsonnet',
bootkube: import './bootkube.libsonnet',
gke: import './gke.libsonnet',
@@ -17,18 +16,16 @@ local platformPatch(p) = if p != null && std.objectHas(platforms, p) then platfo
{
// initialize the object to prevent "Indexed object has no field" lint errors
local p = {
values+:: $.values,
alertmanager: {},
blackboxExporter: {},
grafana: {},
kubePrometheus: {},
kubernetesControlPlane: {},
kubeStateMetrics: {},
nodeExporter: {},
prometheus: {},
prometheusAdapter: {},
prometheusOperator: {},
pyrra: {},
kubernetesControlPlane: {},
kubePrometheus: {},
} + platformPatch($.values.common.platform),

alertmanager+: p.alertmanager,
@@ -41,5 +38,4 @@ local platformPatch(p) = if p != null && std.objectHas(platforms, p) then platfo
prometheusOperator+: p.prometheusOperator,
kubernetesControlPlane+: p.kubernetesControlPlane,
kubePrometheus+: p.kubePrometheus,
pyrra+: p.pyrra,
}
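The platform mechanism above looks up `$.values.common.platform` in the `platforms` object and merges the resulting patch over each component, which is visible in the `platformPatch` call in the hunk. A short sketch of selecting a platform (the import path is illustrative; the `values.common.platform` key is taken from the hunk itself):

// Sketch: pick the 'aws' patch from the platforms map.
local kp = (import 'kube-prometheus/main.libsonnet') + {
  values+:: {
    common+: { platform: 'aws' },  // any key defined in `platforms`
  },
};
kp.kubernetesControlPlane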
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
{
|
||||
"alertmanager": "0.27.0",
|
||||
"blackboxExporter": "0.25.0",
|
||||
"grafana": "11.2.0",
|
||||
"kubeStateMetrics": "2.13.0",
|
||||
"nodeExporter": "1.8.2",
|
||||
"prometheus": "2.54.1",
|
||||
"prometheusAdapter": "0.12.0",
|
||||
"prometheusOperator": "0.77.0",
|
||||
"kubeRbacProxy": "0.18.1",
|
||||
"configmapReload": "0.13.1",
|
||||
"pyrra": "0.6.4"
|
||||
"alertmanager": "0.23.0",
|
||||
"blackboxExporter": "0.19.0",
|
||||
"grafana": "8.3.3",
|
||||
"kubeStateMetrics": "2.3.0",
|
||||
"nodeExporter": "1.3.1",
|
||||
"prometheus": "2.32.1",
|
||||
"prometheusAdapter": "0.9.1",
|
||||
"prometheusOperator": "0.53.1",
|
||||
"kubeRbacProxy": "0.11.0",
|
||||
"configmapReload": "0.5.0"
|
||||
}
|
||||
|
||||
@@ -1,15 +1,6 @@
|
||||
{
|
||||
"version": 1,
|
||||
"dependencies": [
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/jsonnet-libs.git",
|
||||
"subdir": "mixin-utils"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"local": {
|
||||
|
||||
@@ -8,8 +8,8 @@
|
||||
"subdir": "grafana"
|
||||
}
|
||||
},
|
||||
"version": "5698c8940b6dadca3f42107b7839557bc041761f",
|
||||
"sum": "l6fPvh3tW6fWot308w71QY/amrYsFPeitvz1IgJxqQA="
|
||||
"version": "199e363523104ff8b3a12483a4e3eca86372b078",
|
||||
"sum": "/jDHzVAjHB4AOLkJHw1GyATX5ogZ1iMdcJXZAgaG3+g="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -18,18 +18,8 @@
|
||||
"subdir": "contrib/mixin"
|
||||
}
|
||||
},
|
||||
"version": "bd93a0060ab4d177e28e03c8765cb904adc0fae2",
|
||||
"sum": "IXI3LQIT9NmTPJAk8WLUJd5+qZfcGpeNCyWIK7oEpws="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafana.git",
|
||||
"subdir": "grafana-mixin"
|
||||
}
|
||||
},
|
||||
"version": "1120f9e255760a3c104b57871fcb91801e934382",
|
||||
"sum": "MkjR7zCgq6MUZgjDzop574tFKoTX2OBr7DTwm1K+Ofs="
|
||||
"version": "73080a716634f45d50d0593e0454ed3206a52f5b",
|
||||
"sum": "W/Azptf1PoqjyMwJON96UY69MFugDA4IAYiKURscryc="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -38,48 +28,8 @@
|
||||
"subdir": "grafonnet"
|
||||
}
|
||||
},
|
||||
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
|
||||
"sum": "342u++/7rViR/zj2jeJOjshzglkZ1SY+hFNuyCBFMdc="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet-lib.git",
|
||||
"subdir": "grafonnet-7.0"
|
||||
}
|
||||
},
|
||||
"version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
|
||||
"sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet.git",
|
||||
"subdir": "gen/grafonnet-latest"
|
||||
}
|
||||
},
|
||||
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
|
||||
"sum": "eyuJ0jOXeA4MrobbNgU4/v5a7ASDHslHZ0eS6hDdWoI="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet.git",
|
||||
"subdir": "gen/grafonnet-v10.0.0"
|
||||
}
|
||||
},
|
||||
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
|
||||
"sum": "xdcrJPJlpkq4+5LpGwN4tPAuheNNLXZjE6tDcyvFjr0="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet.git",
|
||||
"subdir": "gen/grafonnet-v11.0.0"
|
||||
}
|
||||
},
|
||||
"version": "733beadbc8dab55c5fe1bcdcf0d8a2d215759a55",
|
||||
"sum": "0BvzR0i4bS4hc2O3xDv6i9m52z7mPrjvqxtcPrGhynA="
|
||||
"version": "3626fc4dc2326931c530861ac5bebe39444f6cbf",
|
||||
"sum": "gF8foHByYcB25jcUOBqP6jxk0OPifQMjPvKY0HaCk6w="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -88,38 +38,8 @@
|
||||
"subdir": "grafana-builder"
|
||||
}
|
||||
},
|
||||
"version": "4ad199dab450b829274234b1014ca577649b4557",
|
||||
"sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/jsonnet-libs.git",
|
||||
"subdir": "mixin-utils"
|
||||
}
|
||||
},
|
||||
"version": "4ad199dab450b829274234b1014ca577649b4557",
|
||||
"sum": "LoYq5QxJmUXEtqkEG8CFUBLBhhzDDaNANHc7Gz36ZdM="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/jsonnet-libs/docsonnet.git",
|
||||
"subdir": "doc-util"
|
||||
}
|
||||
},
|
||||
"version": "6ac6c69685b8c29c54515448eaca583da2d88150",
|
||||
"sum": "BrAL/k23jq+xy9oA7TWIhUx07dsA/QLm3g7ktCwe//U="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/jsonnet-libs/xtd.git",
|
||||
"subdir": ""
|
||||
}
|
||||
},
|
||||
"version": "63d430b69a95741061c2f7fc9d84b1a778511d9c",
|
||||
"sum": "qiZi3axUSXCVzKUF83zSAxklwrnitMmrDK4XAfjPMdE="
|
||||
"version": "264a5c2078c5930af57fe2d107eff83ab63553af",
|
||||
"sum": "0KkygBQd/AFzUvVzezE4qF/uDYgrwUXVpZfINBti0oc="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -128,8 +48,18 @@
|
||||
"subdir": ""
|
||||
}
|
||||
},
|
||||
"version": "3cb7958a56688386e8f6cb0f1258bdb1234797d6",
|
||||
"sum": "f+GOrDpxTRmyYkaZKy6CCwqGoCs9MMCmEGT1cTJ0m6k="
|
||||
"version": "b538a10c89508f8d12885680cca72a134d3127f5",
|
||||
"sum": "GLt5T2k4RKg36Gfcaf9qlTfVumDitqotVD0ipz/bPJ4="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
|
||||
"subdir": "lib/promgrafonnet"
|
||||
}
|
||||
},
|
||||
"version": "fd913499e956da06f520c3784c59573ee552b152",
|
||||
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -138,8 +68,8 @@
|
||||
"subdir": "jsonnet/kube-state-metrics"
|
||||
}
|
||||
},
|
||||
"version": "3d969c5ce9fec2e9e7cd59e2429cc4cc5a287ead",
|
||||
"sum": "lO7jUSzAIy8Yk9pOWJIWgPRhubkWzVh56W6wtYfbVH4="
|
||||
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
|
||||
"sum": "U1wzIpTAtOvC1yj43Y8PfvT0JfvnAcMfNH12Wi+ab0Y="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -148,8 +78,8 @@
|
||||
"subdir": "jsonnet/kube-state-metrics-mixin"
|
||||
}
|
||||
},
|
||||
"version": "3d969c5ce9fec2e9e7cd59e2429cc4cc5a287ead",
|
||||
"sum": "qclI7LwucTjBef3PkGBkKxF0mfZPbHnn4rlNWKGtR4c="
|
||||
"version": "e080c3ce73ad514254e38dccb37c93bec6b257ae",
|
||||
"sum": "u8gaydJoxEjzizQ8jY8xSjYgWooPmxw+wIWdDxifMAk="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -158,8 +88,8 @@
|
||||
"subdir": "jsonnet/mixin"
|
||||
}
|
||||
},
|
||||
"version": "23cbc111619cbe089a04ae81463584e88a183af9",
|
||||
"sum": "gi+knjdxs2T715iIQIntrimbHRgHnpM8IFBJDD1gYfs=",
|
||||
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
|
||||
"sum": "qZ4WgiweaE6eeKtFK60QUjLO8sf2L9Q8fgafWvDcyfY=",
|
||||
"name": "prometheus-operator-mixin"
|
||||
},
|
||||
{
|
||||
@@ -169,8 +99,8 @@
|
||||
"subdir": "jsonnet/prometheus-operator"
|
||||
}
|
||||
},
|
||||
"version": "23cbc111619cbe089a04ae81463584e88a183af9",
|
||||
"sum": "/bnXn93GATeKKUfv5o3BoURZ/f/uOx6IqPqiUjSM4Z4="
|
||||
"version": "d8ba1c766a141cb35072ae2f2578ec8588c9efcd",
|
||||
"sum": "yjdwZ+5UXL42EavJleAJmd8Ou6MSDfExvlKAxFCxXVE="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -179,8 +109,8 @@
|
||||
"subdir": "doc/alertmanager-mixin"
|
||||
}
|
||||
},
|
||||
"version": "e1492602209b86e0ca6d7671c7353b62a31b897b",
|
||||
"sum": "IpF46ZXsm+0wJJAPtAre8+yxTNZA57mBqGpBP/r7/kw=",
|
||||
"version": "16fa045db47d68a09a102c7b80b8899c1f57c153",
|
||||
"sum": "pep+dHzfIjh2SU5pEkwilMCAT/NoL6YYflV4x8cr7vU=",
|
||||
"name": "alertmanager"
|
||||
},
|
||||
{
|
||||
@@ -190,8 +120,8 @@
|
||||
"subdir": "docs/node-mixin"
|
||||
}
|
||||
},
|
||||
"version": "a37174eccfa67882184acc7f44e338aa5f5163b8",
|
||||
"sum": "rhUvbqviGjQ2mwsRhHKMN0TiS3YvnYpUXHew3XlQ+Wg="
|
||||
"version": "a2321e7b940ddcff26873612bccdf7cd4c42b6b6",
|
||||
"sum": "MlWDAKGZ+JArozRKdKEvewHeWn8j2DNBzesJfLVd0dk="
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
@@ -200,21 +130,10 @@
|
||||
"subdir": "documentation/prometheus-mixin"
|
||||
}
|
||||
},
|
||||
"version": "919648cafc2c07ed5c1d5dd657b8080bee331aaf",
|
||||
"sum": "dYLcLzGH4yF3qB7OGC/7z4nqeTNjv42L7Q3BENU8XJI=",
|
||||
"version": "41f1a8125e664985dd30674e5bdf6b683eff5d32",
|
||||
"sum": "ZjQoYhvgKwJNkg+h+m9lW3SYjnjv5Yx5btEipLhru88=",
|
||||
"name": "prometheus"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/pyrra-dev/pyrra.git",
|
||||
"subdir": "jsonnet/controller-gen"
|
||||
}
|
||||
},
|
||||
"version": "d723f4d1a066dd657e9d09c46a158519dda0faa8",
|
||||
"sum": "cxAPQovFkM16zNB5/94O+sk/n3SETk6ao6Oas2Sa6RE=",
|
||||
"name": "pyrra"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
@@ -222,8 +141,8 @@
|
||||
"subdir": "mixin"
|
||||
}
|
||||
},
|
||||
"version": "a2113fd81cba3926f5f9fcf807e24920e6e81fe3",
|
||||
"sum": "ieCD4eMgGbOlrI8GmckGPHBGQDcLasE1rULYq56W/bs=",
|
||||
"version": "632032712f12eea0015aaef24ee1e14f38ef3e55",
|
||||
"sum": "X+060DnePPeN/87fgj0SrfxVitywTk8hZA9V4nHxl1g=",
|
||||
"name": "thanos-mixin"
|
||||
},
|
||||
{
|
||||
|
||||
@@ -1,85 +0,0 @@
|
||||
[
|
||||
{
|
||||
"name": "exclude-automountServiceAccountToken-checks",
|
||||
"policyType": "postureExceptionPolicy",
|
||||
"actions": [
|
||||
"alertOnly"
|
||||
],
|
||||
"resources": [
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "DaemonSet",
|
||||
"name": "node-exporter"
|
||||
}
|
||||
},
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "Deployment",
|
||||
"name": "blackbox-exporter"
|
||||
}
|
||||
},
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "Deployment",
|
||||
"name": "kube-state-metrics"
|
||||
}
|
||||
},
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "Deployment",
|
||||
"name": "prometheus-adapter"
|
||||
}
|
||||
},
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "Deployment",
|
||||
"name": "prometheus-operator"
|
||||
}
|
||||
},
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "ServiceAccount",
|
||||
"name": "prometheus-k8s"
|
||||
}
|
||||
}
|
||||
],
|
||||
"posturePolicies": [
|
||||
{
|
||||
"controlName": "Automatic mapping of service account"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "exclude-node-exporter-host-access-checks",
|
||||
"policyType": "postureExceptionPolicy",
|
||||
"actions": [
|
||||
"alertOnly"
|
||||
],
|
||||
"resources": [
|
||||
{
|
||||
"designatorType": "Attributes",
|
||||
"attributes": {
|
||||
"kind": "DaemonSet",
|
||||
"name": "node-exporter"
|
||||
}
|
||||
}
|
||||
],
|
||||
"posturePolicies": [
|
||||
{
|
||||
"controlName": "Container hostPort"
|
||||
},
|
||||
{
|
||||
"controlName": "Host PID/IPC privileges"
|
||||
},
|
||||
{
|
||||
"controlName": "HostNetwork access"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
@@ -2,7 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
resources:
|
||||
- ./manifests/alertmanager-alertmanager.yaml
|
||||
- ./manifests/alertmanager-networkPolicy.yaml
|
||||
- ./manifests/alertmanager-podDisruptionBudget.yaml
|
||||
- ./manifests/alertmanager-prometheusRule.yaml
|
||||
- ./manifests/alertmanager-secret.yaml
|
||||
@@ -13,7 +12,6 @@ resources:
|
||||
- ./manifests/blackboxExporter-clusterRoleBinding.yaml
|
||||
- ./manifests/blackboxExporter-configuration.yaml
|
||||
- ./manifests/blackboxExporter-deployment.yaml
|
||||
- ./manifests/blackboxExporter-networkPolicy.yaml
|
||||
- ./manifests/blackboxExporter-service.yaml
|
||||
- ./manifests/blackboxExporter-serviceAccount.yaml
|
||||
- ./manifests/blackboxExporter-serviceMonitor.yaml
|
||||
@@ -22,8 +20,6 @@ resources:
|
||||
- ./manifests/grafana-dashboardDefinitions.yaml
|
||||
- ./manifests/grafana-dashboardSources.yaml
|
||||
- ./manifests/grafana-deployment.yaml
|
||||
- ./manifests/grafana-networkPolicy.yaml
|
||||
- ./manifests/grafana-prometheusRule.yaml
|
||||
- ./manifests/grafana-service.yaml
|
||||
- ./manifests/grafana-serviceAccount.yaml
|
||||
- ./manifests/grafana-serviceMonitor.yaml
|
||||
@@ -31,7 +27,6 @@ resources:
|
||||
- ./manifests/kubeStateMetrics-clusterRole.yaml
|
||||
- ./manifests/kubeStateMetrics-clusterRoleBinding.yaml
|
||||
- ./manifests/kubeStateMetrics-deployment.yaml
|
||||
- ./manifests/kubeStateMetrics-networkPolicy.yaml
|
||||
- ./manifests/kubeStateMetrics-prometheusRule.yaml
|
||||
- ./manifests/kubeStateMetrics-service.yaml
|
||||
- ./manifests/kubeStateMetrics-serviceAccount.yaml
|
||||
@@ -45,14 +40,12 @@ resources:
|
||||
- ./manifests/nodeExporter-clusterRole.yaml
|
||||
- ./manifests/nodeExporter-clusterRoleBinding.yaml
|
||||
- ./manifests/nodeExporter-daemonset.yaml
|
||||
- ./manifests/nodeExporter-networkPolicy.yaml
|
||||
- ./manifests/nodeExporter-prometheusRule.yaml
|
||||
- ./manifests/nodeExporter-service.yaml
|
||||
- ./manifests/nodeExporter-serviceAccount.yaml
|
||||
- ./manifests/nodeExporter-serviceMonitor.yaml
|
||||
- ./manifests/prometheus-clusterRole.yaml
|
||||
- ./manifests/prometheus-clusterRoleBinding.yaml
|
||||
- ./manifests/prometheus-networkPolicy.yaml
|
||||
- ./manifests/prometheus-podDisruptionBudget.yaml
|
||||
- ./manifests/prometheus-prometheus.yaml
|
||||
- ./manifests/prometheus-prometheusRule.yaml
|
||||
@@ -71,7 +64,6 @@ resources:
|
||||
- ./manifests/prometheusAdapter-clusterRoleServerResources.yaml
|
||||
- ./manifests/prometheusAdapter-configMap.yaml
|
||||
- ./manifests/prometheusAdapter-deployment.yaml
|
||||
- ./manifests/prometheusAdapter-networkPolicy.yaml
|
||||
- ./manifests/prometheusAdapter-podDisruptionBudget.yaml
|
||||
- ./manifests/prometheusAdapter-roleBindingAuthReader.yaml
|
||||
- ./manifests/prometheusAdapter-service.yaml
|
||||
@@ -80,7 +72,6 @@ resources:
|
||||
- ./manifests/prometheusOperator-clusterRole.yaml
|
||||
- ./manifests/prometheusOperator-clusterRoleBinding.yaml
|
||||
- ./manifests/prometheusOperator-deployment.yaml
|
||||
- ./manifests/prometheusOperator-networkPolicy.yaml
|
||||
- ./manifests/prometheusOperator-prometheusRule.yaml
|
||||
- ./manifests/prometheusOperator-service.yaml
|
||||
- ./manifests/prometheusOperator-serviceAccount.yaml
|
||||
@@ -90,9 +81,7 @@ resources:
|
||||
- ./manifests/setup/0podmonitorCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0probeCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0prometheusCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0prometheusagentCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0prometheusruleCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0scrapeconfigCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0servicemonitorCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/0thanosrulerCustomResourceDefinition.yaml
|
||||
- ./manifests/setup/namespace.yaml
|
||||
|
||||
@@ -6,11 +6,11 @@ metadata:
|
||||
app.kubernetes.io/instance: main
|
||||
app.kubernetes.io/name: alertmanager
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 0.27.0
|
||||
app.kubernetes.io/version: 0.23.0
|
||||
name: main
|
||||
namespace: monitoring
|
||||
spec:
|
||||
image: quay.io/prometheus/alertmanager:v0.27.0
|
||||
image: quay.io/prometheus/alertmanager:v0.23.0
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
podMetadata:
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
app.kubernetes.io/instance: main
|
||||
app.kubernetes.io/name: alertmanager
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 0.27.0
|
||||
app.kubernetes.io/version: 0.23.0
|
||||
replicas: 3
|
||||
resources:
|
||||
limits:
|
||||
@@ -28,10 +28,9 @@ spec:
|
||||
requests:
|
||||
cpu: 4m
|
||||
memory: 100Mi
|
||||
secrets: []
|
||||
securityContext:
|
||||
fsGroup: 2000
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
serviceAccountName: alertmanager-main
|
||||
version: 0.27.0
|
||||
version: 0.23.0
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: alert-router
|
||||
app.kubernetes.io/instance: main
|
||||
app.kubernetes.io/name: alertmanager
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 0.27.0
|
||||
name: alertmanager-main
|
||||
namespace: monitoring
|
||||
spec:
|
||||
egress:
|
||||
- {}
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: prometheus
|
||||
ports:
|
||||
- port: 9093
|
||||
protocol: TCP
|
||||
- port: 8080
|
||||
protocol: TCP
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: alertmanager
|
||||
ports:
|
||||
- port: 9094
|
||||
protocol: TCP
|
||||
- port: 9094
|
||||
protocol: UDP
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/component: alert-router
|
||||
app.kubernetes.io/instance: main
|
||||
app.kubernetes.io/name: alertmanager
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
policyTypes:
|
||||
- Egress
|
||||
- Ingress
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
app.kubernetes.io/instance: main
|
||||
app.kubernetes.io/name: alertmanager
|
||||
app.kubernetes.io/part-of: kube-prometheus
|
||||
app.kubernetes.io/version: 0.27.0
|
||||
app.kubernetes.io/version: 0.23.0
|
||||
name: alertmanager-main
|
||||
namespace: monitoring
|
||||
spec:
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
    app.kubernetes.io/instance: main
    app.kubernetes.io/name: alertmanager
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 0.27.0
    app.kubernetes.io/version: 0.23.0
    prometheus: k8s
    role: alert-rules
  name: alertmanager-main-rules
@@ -17,7 +17,8 @@ spec:
    rules:
    - alert: AlertmanagerFailedReload
      annotations:
        description: Configuration has failed to load for {{ $labels.namespace }}/{{ $labels.pod}}.
        description: Configuration has failed to load for {{ $labels.namespace }}/{{
          $labels.pod}}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedreload
        summary: Reloading an Alertmanager configuration has failed.
      expr: |
@@ -29,9 +30,11 @@ spec:
        severity: critical
    - alert: AlertmanagerMembersInconsistent
      annotations:
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only found {{ $value }} members of the {{$labels.job}} cluster.
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} has only
          found {{ $value }} members of the {{$labels.job}} cluster.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagermembersinconsistent
        summary: A member of an Alertmanager cluster has not found all other cluster members.
        summary: A member of an Alertmanager cluster has not found all other cluster
          members.
      expr: |
        # Without max_over_time, failed scrapes could create false negatives, see
        # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
@@ -43,14 +46,16 @@ spec:
        severity: critical
    - alert: AlertmanagerFailedToSendAlerts
      annotations:
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}.
        description: Alertmanager {{ $labels.namespace }}/{{ $labels.pod}} failed
          to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration
          }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerfailedtosendalerts
        summary: An Alertmanager instance failed to send notifications.
      expr: |
        (
          rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring"}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring"}[5m])
          rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring"}[5m])
        )
        > 0.01
      for: 5m
@@ -58,14 +63,17 @@ spec:
        severity: warning
    - alert: AlertmanagerClusterFailedToSendAlerts
      annotations:
        description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
        description: The minimum notification failure rate to {{ $labels.integration
          }} sent from any instance in the {{$labels.job}} cluster is {{ $value |
          humanizePercentage }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
        summary: All Alertmanager instances in a cluster failed to send notifications to a critical integration.
        summary: All Alertmanager instances in a cluster failed to send notifications
          to a critical integration.
      expr: |
        min by (namespace,service, integration) (
          rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
          rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration=~`.*`}[5m])
        )
        > 0.01
      for: 5m
@@ -73,14 +81,17 @@ spec:
        severity: critical
    - alert: AlertmanagerClusterFailedToSendAlerts
      annotations:
        description: The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the {{$labels.job}} cluster is {{ $value | humanizePercentage }}.
        description: The minimum notification failure rate to {{ $labels.integration
          }} sent from any instance in the {{$labels.job}} cluster is {{ $value |
          humanizePercentage }}.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterfailedtosendalerts
        summary: All Alertmanager instances in a cluster failed to send notifications to a non-critical integration.
        summary: All Alertmanager instances in a cluster failed to send notifications
          to a non-critical integration.
      expr: |
        min by (namespace,service, integration) (
          rate(alertmanager_notifications_failed_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
        /
          ignoring (reason) group_left rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
          rate(alertmanager_notifications_total{job="alertmanager-main",namespace="monitoring", integration!~`.*`}[5m])
        )
        > 0.01
      for: 5m
@@ -88,7 +99,8 @@ spec:
        severity: warning
    - alert: AlertmanagerConfigInconsistent
      annotations:
        description: Alertmanager instances within the {{$labels.job}} cluster have different configurations.
        description: Alertmanager instances within the {{$labels.job}} cluster have
          different configurations.
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerconfiginconsistent
        summary: Alertmanager instances within the same cluster have different configurations.
      expr: |
@@ -101,9 +113,12 @@ spec:
        severity: critical
    - alert: AlertmanagerClusterDown
      annotations:
        description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have been up for less than half of the last 5m.'
        description: '{{ $value | humanizePercentage }} of Alertmanager instances
          within the {{$labels.job}} cluster have been up for less than half of the
          last 5m.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclusterdown
        summary: Half or more of the Alertmanager instances within the same cluster are down.
        summary: Half or more of the Alertmanager instances within the same cluster
          are down.
      expr: |
        (
          count by (namespace,service) (
@@ -120,9 +135,12 @@ spec:
        severity: critical
    - alert: AlertmanagerClusterCrashlooping
      annotations:
        description: '{{ $value | humanizePercentage }} of Alertmanager instances within the {{$labels.job}} cluster have restarted at least 5 times in the last 10m.'
        description: '{{ $value | humanizePercentage }} of Alertmanager instances
          within the {{$labels.job}} cluster have restarted at least 5 times in the
          last 10m.'
        runbook_url: https://runbooks.prometheus-operator.dev/runbooks/alertmanager/alertmanagerclustercrashlooping
        summary: Half or more of the Alertmanager instances within the same cluster are crashlooping.
        summary: Half or more of the Alertmanager instances within the same cluster
          are crashlooping.
      expr: |
        (
          count by (namespace,service) (
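Note on the expression changes in this PrometheusRule: newer Alertmanager releases add a reason label to alertmanager_notifications_failed_total, so the rules on the newer side divide with ignoring (reason) group_left to keep the failed and total series matched, while on the v0.10.0 side both counters still carry identical label sets and a plain division suffices. Separately, the integration=~`.*` versus integration!~`.*` pair reflects kube-prometheus's default of treating every integration as critical: `!~".*"` matches no series at all, so the warning-level variant appears to be inert with default settings. A hypothetical minimal rule fragment (selectors omitted) illustrating the matching change:

# Sketch: why the newer side needs vector matching. With a `reason`
# label on the failed counter, e.g.
#   alertmanager_notifications_failed_total{integration="slack",reason="clientError"}
#   alertmanager_notifications_total{integration="slack"}
# a plain division finds no matching pairs; ignoring `reason` (many
# failure reasons per integration, hence group_left) restores the match.
expr: |
  rate(alertmanager_notifications_failed_total[5m])
  / ignoring (reason) group_left
  rate(alertmanager_notifications_total[5m])
  > 0.01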
@@ -6,7 +6,7 @@ metadata:
    app.kubernetes.io/instance: main
    app.kubernetes.io/name: alertmanager
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 0.27.0
    app.kubernetes.io/version: 0.23.0
  name: alertmanager-main
  namespace: monitoring
stringData:
@@ -28,17 +28,10 @@ stringData:
      - "severity = warning"
      "target_matchers":
      - "severity = info"
    - "equal":
      - "namespace"
      "source_matchers":
      - "alertname = InfoInhibitor"
      "target_matchers":
      - "severity = info"
    "receivers":
    - "name": "Default"
    - "name": "Watchdog"
    - "name": "Critical"
    - "name": "null"
    "route":
      "group_by":
      - "namespace"
@@ -50,9 +43,6 @@ stringData:
      - "matchers":
        - "alertname = Watchdog"
        "receiver": "Watchdog"
      - "matchers":
        - "alertname = InfoInhibitor"
        "receiver": "null"
      - "matchers":
        - "severity = critical"
        "receiver": "Critical"
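Note: the configuration removed from this Secret is the InfoInhibitor machinery that kube-prometheus added after v0.10.0: an inhibition rule that mutes severity = info alerts while an InfoInhibitor alert fires in the same namespace, plus a route that sends InfoInhibitor itself to the "null" receiver so it never notifies anyone. For reference, a generic sketch of Alertmanager inhibition semantics (illustrative values, not taken from this diff):

# While any alert matching source_matchers is firing, alerts matching
# target_matchers are suppressed, but only where their "namespace"
# label values are equal.
inhibit_rules:
- equal:
  - namespace
  source_matchers:
  - severity = critical
  target_matchers:
  - severity = warning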
@@ -6,7 +6,7 @@ metadata:
    app.kubernetes.io/instance: main
    app.kubernetes.io/name: alertmanager
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 0.27.0
    app.kubernetes.io/version: 0.23.0
  name: alertmanager-main
  namespace: monitoring
spec:
@@ -1,5 +1,4 @@
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
  labels:
@@ -7,6 +6,6 @@ metadata:
    app.kubernetes.io/instance: main
    app.kubernetes.io/name: alertmanager
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 0.27.0
    app.kubernetes.io/version: 0.23.0
  name: alertmanager-main
  namespace: monitoring
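Note: automountServiceAccountToken: false is a later hardening addition; Alertmanager does not talk to the Kubernetes API, so its pods need no service-account token mounted. On the v0.10.0 side the field is absent, so the cluster default (historically true) applies. The relevant shape, as a standalone sketch:

# Sketch: opting a ServiceAccount's pods out of API-token automounting
# (the line removed on the v0.10.0 side of this diff).
apiVersion: v1
automountServiceAccountToken: false
kind: ServiceAccount
metadata:
  name: alertmanager-main
  namespace: monitoring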
@@ -6,7 +6,7 @@ metadata:
    app.kubernetes.io/instance: main
    app.kubernetes.io/name: alertmanager
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 0.27.0
    app.kubernetes.io/version: 0.23.0
  name: alertmanager-main
  namespace: monitoring
spec:
Some files were not shown because too many files have changed in this diff.