Merge pull request #2131 from metalmatze/custom-metrics-adapter-update

contrib/kube-prometheus: Update experimental Custom Metrics adapter example
Frederic Branczyk
2018-11-15 10:45:13 +01:00
committed by GitHub
14 changed files with 193 additions and 161 deletions

@@ -1,11 +1,21 @@
# Custom Metrics API
The custom metrics API allows the HPA v2 to scale on arbitrary metrics.
The custom metrics API allows the HPA v2 to scale based on arbitrary metrics.
This directory contains an example deployment of the custom metrics API adapter using Prometheus as the backing monitoring system.
This directory contains an example deployment which extends the Prometheus Adapter, deployed with kube-prometheus, to serve the [Custom Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md) by talking to Prometheus running inside the cluster.
In order to deploy the custom metrics adapter for Prometheus you need to generate TLS certificates used to serve the API. An example of how these could be generated can be found in `./gencerts.sh`; note that this is _not_ recommended for production use. You need to employ a secure PKI strategy; this is merely an example to get started and try it out quickly.
Make sure you have the Prometheus Adapter up and running in the `monitoring` namespace.
Once the generated `Secret` with the certificates is in place, you can deploy everything in the `monitoring` namespace using `./deploy.sh`.
You can deploy everything in the `monitoring` namespace using `./deploy.sh`.
When you're done, you can tear everything down using the `./teardown.sh` script.
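
Before trying the sample app, it can help to confirm the adapter is actually serving the API. The following is a minimal sketch; it assumes the kube-prometheus adapter Deployment is named `prometheus-adapter` (matching the ServiceAccount and Service referenced by the manifests in this directory), and `jq` is only used for readability:

```bash
# Check that the adapter shipped with kube-prometheus is up in the monitoring namespace.
kubectl -n monitoring get deployment prometheus-adapter

# List the custom metrics the adapter currently discovers from Prometheus.
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | jq '.resources[].name'
```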
### Sample App
Additionally, this directory contains a sample app that uses the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale the Deployment's Pod replicas up and down as needed.
Deploy this app by running `kubectl apply -f sample-app.yaml`.
Make the app accessible on your system, for example by using `kubectl -n monitoring port-forward svc/sample-app 8080`. Next you need to put some load on its HTTP endpoints.
A tool like [hey](https://github.com/rakyll/hey) is helpful for doing so: `hey -c 20 -n 100000000 http://localhost:8080/metrics`.
There is even more detailed information on this sample app at [luxas/kubeadm-workshop](https://github.com/luxas/kubeadm-workshop#deploying-the-prometheus-operator-for-monitoring-services-in-the-cluster).
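
While `hey` is running, you can watch both the custom metric and the autoscaler react. A minimal sketch, assuming the sample app lives in the `monitoring` namespace as in the port-forward command above:

```bash
# Read the per-pod metric the HPA consumes; values are per-second rates,
# since the adapter wraps the underlying counter in rate(...[1m]).
kubectl get --raw \
  "/apis/custom.metrics.k8s.io/v1beta1/namespaces/monitoring/pods/*/http_requests"

# Watch the HPA scale the sample-app Deployment up (and back down once the load stops).
kubectl -n monitoring get hpa sample-app --watch
```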

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: custom-metrics:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: custom-metrics-apiserver
namespace: monitoring

@@ -1,13 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: custom-metrics-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: custom-metrics-apiserver
namespace: monitoring

@@ -1,41 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: custom-metrics-apiserver
name: custom-metrics-apiserver
spec:
replicas: 1
selector:
matchLabels:
app: custom-metrics-apiserver
template:
metadata:
labels:
app: custom-metrics-apiserver
name: custom-metrics-apiserver
spec:
serviceAccountName: custom-metrics-apiserver
containers:
- name: custom-metrics-apiserver
image: quay.io/coreos/k8s-prometheus-adapter-amd64:v0.2.0
args:
- /adapter
- --secure-port=6443
- --tls-cert-file=/var/run/serving-cert/serving.crt
- --tls-private-key-file=/var/run/serving-cert/serving.key
- --logtostderr=true
- --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/
- --metrics-relist-interval=30s
- --rate-interval=5m
- --v=10
ports:
- containerPort: 6443
volumeMounts:
- mountPath: /var/run/serving-cert
name: volume-serving-cert
readOnly: true
volumes:
- name: volume-serving-cert
secret:
secretName: cm-adapter-serving-certs

@@ -1,12 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: custom-metrics-resource-reader
name: custom-metrics-server-resources
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: custom-metrics-resource-reader
name: custom-metrics-server-resources
subjects:
- kind: ServiceAccount
name: custom-metrics-apiserver
name: prometheus-adapter
namespace: monitoring

@@ -1,4 +0,0 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: custom-metrics-apiserver

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: custom-metrics-apiserver
spec:
ports:
- port: 443
targetPort: 6443
selector:
app: custom-metrics-apiserver

@@ -4,7 +4,7 @@ metadata:
name: v1beta1.custom.metrics.k8s.io
spec:
service:
name: custom-metrics-apiserver
name: prometheus-adapter
namespace: monitoring
group: custom.metrics.k8s.io
version: v1beta1
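
Once this `APIService` is registered, the kube-aggregator should report it as available; if the serving certificate or the `prometheus-adapter` Service is misconfigured, the condition stays `False`. A quick check (a sketch, not part of the scripts in this directory):

```bash
# AVAILABLE should read True once the adapter answers on its Service.
kubectl get apiservice v1beta1.custom.metrics.k8s.io

# Shows the Available condition and an error message if registration failed.
kubectl describe apiservice v1beta1.custom.metrics.k8s.io
```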

@@ -0,0 +1,98 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: adapter-config
namespace: monitoring
data:
config.yaml: |
rules:
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
seriesFilters: []
resources:
overrides:
namespace:
resource: namespace
pod_name:
resource: pod
name:
matches: ^container_(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
seriesFilters:
- isNot: ^container_.*_seconds_total$
resources:
overrides:
namespace:
resource: namespace
pod_name:
resource: pod
name:
matches: ^container_(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
seriesFilters:
- isNot: ^container_.*_total$
resources:
overrides:
namespace:
resource: namespace
pod_name:
resource: pod
name:
matches: ^container_(.*)$
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_total$
resources:
template: <<.Resource>>
name:
matches: ""
as: ""
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters:
- isNot: .*_seconds_total
resources:
template: <<.Resource>>
name:
matches: ^(.*)_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
seriesFilters: []
resources:
template: <<.Resource>>
name:
matches: ^(.*)_seconds_total$
as: ""
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
resourceRules:
cpu:
containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>)
resources:
overrides:
node:
resource: node
namespace:
resource: namespace
pod_name:
resource: pod
containerLabel: container_name
memory:
containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
resources:
overrides:
node:
resource: node
namespace:
resource: namespace
pod_name:
resource: pod
containerLabel: container_name
window: 1m
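
In each rule above, `seriesQuery` selects candidate series from Prometheus, `name` rewrites the series name into the exposed metric name, and `metricsQuery` is the template the adapter expands when the metric is requested. As a rough illustration (an assumed example, not an exact trace of the adapter): the `http_requests_total` counter exposed by the sample app below is not a `container_*` series, so it matches the `^(.*)_total$` rule and is exposed as `http_requests`:

```bash
# Approximate PromQL the adapter would build from the metricsQuery template,
# assuming the sample app runs in the monitoring namespace:
#
#   sum(rate(http_requests_total{namespace="monitoring",pod_name=~"sample-app-.*"}[1m])) by (pod_name)
#
# The rewritten name should then show up in the adapter's discovery document:
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | grep http_requests
```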

@@ -1,14 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: custom-metrics-resource-reader
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
verbs:
- get
- list

experimental/custom-metrics-api/deploy.sh Executable file → Normal file
@@ -1,13 +1,7 @@
#!/usr/bin/env bash
kubectl create -f custom-metrics-apiserver-auth-delegator-cluster-role-binding.yaml
kubectl create -f custom-metrics-apiserver-auth-reader-role-binding.yaml
kubectl -n monitoring create -f cm-adapter-serving-certs.yaml
kubectl -n monitoring create -f custom-metrics-apiserver-deployment.yaml
kubectl create -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl -n monitoring create -f custom-metrics-apiserver-service-account.yaml
kubectl -n monitoring create -f custom-metrics-apiserver-service.yaml
kubectl create -f custom-metrics-apiservice.yaml
kubectl create -f custom-metrics-cluster-role.yaml
kubectl create -f custom-metrics-resource-reader-cluster-role.yaml
kubectl create -f hpa-custom-metrics-cluster-role-binding.yaml
kubectl apply -n monitoring -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl apply -n monitoring -f custom-metrics-apiservice.yaml
kubectl apply -n monitoring -f custom-metrics-cluster-role.yaml
kubectl apply -n monitoring -f custom-metrics-configmap.yaml
kubectl apply -n monitoring -f hpa-custom-metrics-cluster-role-binding.yaml

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
# exit immediately when a command fails
set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
# Detect if we are on mac or should use GNU base64 options
case $(uname) in
Darwin)
b64_opts='-b=0'
;;
*)
b64_opts='--wrap=0'
esac
go get -v -u github.com/cloudflare/cfssl/cmd/...
export PURPOSE=metrics
openssl req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout ${PURPOSE}-ca.key -out ${PURPOSE}-ca.crt -subj "/CN=ca"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","'${PURPOSE}'"]}}}' > "${PURPOSE}-ca-config.json"
export SERVICE_NAME=custom-metrics-apiserver
export ALT_NAMES='"custom-metrics-apiserver.monitoring","custom-metrics-apiserver.monitoring.svc"'
echo "{\"CN\":\"${SERVICE_NAME}\", \"hosts\": [${ALT_NAMES}], \"key\": {\"algo\": \"rsa\",\"size\": 2048}}" | \
cfssl gencert -ca=metrics-ca.crt -ca-key=metrics-ca.key -config=metrics-ca-config.json - | cfssljson -bare apiserver
cat <<-EOF > cm-adapter-serving-certs.yaml
apiVersion: v1
kind: Secret
metadata:
name: cm-adapter-serving-certs
data:
serving.crt: $(base64 ${b64_opts} < apiserver.pem)
serving.key: $(base64 ${b64_opts} < apiserver-key.pem)
EOF

@@ -0,0 +1,67 @@
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
metadata:
name: sample-app
labels:
app: sample-app
spec:
selector:
matchLabels:
app: sample-app
endpoints:
- port: http
interval: 5s
---
apiVersion: v1
kind: Service
metadata:
name: sample-app
labels:
app: sample-app
spec:
ports:
- name: http
port: 8080
targetPort: 8080
selector:
app: sample-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: sample-app
labels:
app: sample-app
spec:
replicas: 1
selector:
matchLabels:
app: sample-app
template:
metadata:
labels:
app: sample-app
spec:
containers:
- image: luxas/autoscale-demo:v0.1.2
name: metrics-provider
ports:
- name: http
containerPort: 8080
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta1
metadata:
name: sample-app
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: sample-app
minReplicas: 1
maxReplicas: 10
metrics:
- type: Pods
pods:
metricName: http_requests
targetAverageValue: 500m
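
`targetAverageValue: 500m` is a Kubernetes quantity, i.e. 0.5 `http_requests` per second averaged across the sample-app pods; since the adapter serves this metric as a per-second rate, the load generated by `hey` easily exceeds it and the HPA scales out. Roughly, the controller computes `desired = ceil(currentReplicas * measuredAverage / 0.5)`, so an average of 4 req/s on one replica asks for 8 pods, capped at `maxReplicas`. A quick way to inspect the numbers it is acting on (a sketch, assuming the app runs in `monitoring`):

```bash
# Shows the current metric value, the 500m target, and recent scaling events.
kubectl -n monitoring describe hpa sample-app
```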

experimental/custom-metrics-api/teardown.sh Executable file → Normal file
@@ -1,13 +1,7 @@
#!/usr/bin/env bash
kubectl delete -f custom-metrics-apiserver-auth-delegator-cluster-role-binding.yaml
kubectl delete -f custom-metrics-apiserver-auth-reader-role-binding.yaml
kubectl -n monitoring delete -f cm-adapter-serving-certs.yaml
kubectl -n monitoring delete -f custom-metrics-apiserver-deployment.yaml
kubectl delete -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl -n monitoring delete -f custom-metrics-apiserver-service-account.yaml
kubectl -n monitoring delete -f custom-metrics-apiserver-service.yaml
kubectl delete -f custom-metrics-apiservice.yaml
kubectl delete -f custom-metrics-cluster-role.yaml
kubectl delete -f custom-metrics-resource-reader-cluster-role.yaml
kubectl delete -f hpa-custom-metrics-cluster-role-binding.yaml
kubectl delete -n monitoring -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl delete -n monitoring -f custom-metrics-apiservice.yaml
kubectl delete -n monitoring -f custom-metrics-cluster-role.yaml
kubectl delete -n monitoring -f custom-metrics-configmap.yaml
kubectl delete -n monitoring -f hpa-custom-metrics-cluster-role-binding.yaml
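
Note that `teardown.sh` only removes the adapter extension; if you deployed the sample app, remove it separately. A sketch, assuming the same namespace you used for `kubectl apply`:

```bash
# Removes the sample app Deployment, Service, ServiceMonitor, and HPA.
kubectl delete -f sample-app.yaml
```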