Compare commits

773 Commits (84f24095d6 … 6562f95c14)
46 .github/ISSUE_TEMPLATE/support.md (vendored)
@@ -4,48 +4,14 @@ about: If you have questions about kube-prometheus
labels: kind/support
---

<!--
This repository now has the new GitHub Discussions enabled:
https://github.com/coreos/kube-prometheus/discussions

Feel free to ask questions in #prometheus-operator on Kubernetes Slack!
Please create a new discussion to ask for any kind of support, which is not a Bug or Feature Request.

-->
Thank you for being part of this community!

**What did you do?**
---

**Did you expect to see something different?**
We are still happy to chat with you in the #prometheus-operator channel on Kubernetes Slack!

**Environment**

* Prometheus Operator version:

`Insert image tag or Git SHA here`
<!-- Try kubectl -n monitoring describe deployment prometheus-operator -->

* Kubernetes version information:

`kubectl version`
<!-- Replace the command with its output above -->

* Kubernetes cluster kind:

insert how you created your cluster: kops, bootkube, tectonic-installer, etc.

* Manifests:

```
insert manifests relevant to the issue
```

* Prometheus Operator Logs:

```
Insert Prometheus Operator logs relevant to the issue here
```

* Prometheus Logs:

```
Insert Prometheus logs relevant to the issue here
```

**Anything else we need to know?**:

59 .github/workflows/ci.yaml (vendored, Normal file)
@@ -0,0 +1,59 @@
name: ci
on:
- push
- pull_request
env:
  golang-version: '1.15'
  kind-version: 'v0.9.0'
jobs:
  generate:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
        - macos-latest
        - ubuntu-latest
    name: Generate
    steps:
    - uses: actions/checkout@v2
    - uses: actions/setup-go@v2
      with:
        go-version: ${{ env.golang-version }}
    - run: make --always-make generate && git diff --exit-code
  unit-tests:
    runs-on: ubuntu-latest
    name: Unit tests
    steps:
    - uses: actions/checkout@v2
    - run: make --always-make test
  e2e-tests:
    name: E2E tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        kind-image:
        - 'kindest/node:v1.19.0'
        - 'kindest/node:v1.20.0'
    steps:
    - uses: actions/checkout@v2
    - name: Start KinD
      uses: engineerd/setup-kind@v0.5.0
      with:
        version: ${{ env.kind-version }}
        image: ${{ matrix.kind-image }}
    - name: Wait for cluster to finish bootstrapping
      run: |
        until [ "$(kubectl get pods --all-namespaces --no-headers | grep -cEv '([0-9]+)/\1')" -eq 0 ]; do
          sleep 5s
        done
        kubectl cluster-info
        kubectl get pods -A
    - name: Create kube-prometheus stack
      run: |
        kubectl create -f manifests/setup
        until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
        kubectl create -f manifests/
    - name: Run tests
      run: |
        export KUBECONFIG="${HOME}/.kube/config"
        make test-e2e
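
The e2e job can be reproduced locally with the same commands the workflow runs. A sketch, assuming `kind` and `kubectl` are installed and `manifests/` has already been generated:

```shell
kind create cluster --image kindest/node:v1.20.0   # same node image as the CI matrix
kubectl create -f manifests/setup                  # namespace + CRDs first
until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
kubectl create -f manifests/
export KUBECONFIG="${HOME}/.kube/config"
make test-e2e
```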
1 .gitignore (vendored)
@@ -2,3 +2,4 @@ tmp/
minikube-manifests/
vendor/
./auth
.swp

21 .travis.yml
@@ -1,21 +0,0 @@
sudo: required
dist: xenial
language: go

go:
- "1.12.x"
go_import_path: github.com/coreos/kube-prometheus

cache:
  directories:
  - $GOCACHE
  - $GOPATH/pkg/mod

services:
- docker

jobs:
  include:
  - script: make --always-make generate-in-docker && git diff --exit-code
  - script: make --always-make test-in-docker
  - script: GO111MODULE=on ./tests/e2e/travis-e2e.sh
70 Makefile
@@ -1,53 +1,53 @@
JSONNET_FMT := jsonnet fmt -n 2 --max-blank-lines 2 --string-style s --comment-style s
SHELL=/bin/bash -o pipefail

JB_BINARY:=$(GOPATH)/bin/jb
EMBEDMD_BINARY:=$(GOPATH)/bin/embedmd
CONTAINER_CMD:=docker run --rm \
		-u="$(shell id -u):$(shell id -g)" \
		-v "$(shell go env GOCACHE):/.cache/go-build" \
		-v "$(PWD):/go/src/github.com/coreos/kube-prometheus:Z" \
		-w "/go/src/github.com/coreos/kube-prometheus" \
		quay.io/coreos/jsonnet-ci
BIN_DIR?=$(shell pwd)/tmp/bin

EMBEDMD_BIN=$(BIN_DIR)/embedmd
JB_BIN=$(BIN_DIR)/jb
GOJSONTOYAML_BIN=$(BIN_DIR)/gojsontoyaml
JSONNET_BIN=$(BIN_DIR)/jsonnet
JSONNETFMT_BIN=$(BIN_DIR)/jsonnetfmt
TOOLING=$(EMBEDMD_BIN) $(JB_BIN) $(GOJSONTOYAML_BIN) $(JSONNET_BIN) $(JSONNETFMT_BIN)

JSONNETFMT_ARGS=-n 2 --max-blank-lines 2 --string-style s --comment-style s

all: generate fmt test

.PHONY: generate-in-docker
generate-in-docker:
	@echo ">> Compiling assets and generating Kubernetes manifests"
	$(CONTAINER_CMD) $(MAKE) $(MFLAGS) generate
.PHONY: clean
clean:
	# Remove all files and directories ignored by git.
	git clean -Xfd .

.PHONY: generate
generate: manifests **.md

**.md: $(EMBEDMD_BINARY) $(shell find examples) build.sh example.jsonnet
	$(EMBEDMD_BINARY) -w `find . -name "*.md" | grep -v vendor`
**.md: $(EMBEDMD_BIN) $(shell find examples) build.sh example.jsonnet
	$(EMBEDMD_BIN) -w `find . -name "*.md" | grep -v vendor`

manifests: vendor example.jsonnet build.sh
	rm -rf manifests
	./build.sh ./examples/kustomize.jsonnet
manifests: examples/kustomize.jsonnet $(GOJSONTOYAML_BIN) vendor build.sh
	./build.sh $<

vendor: $(JB_BINARY) jsonnetfile.json jsonnetfile.lock.json
vendor: $(JB_BIN) jsonnetfile.json jsonnetfile.lock.json
	rm -rf vendor
	$(JB_BINARY) install
	$(JB_BIN) install

fmt:
	find . -name 'vendor' -prune -o -name '*.libsonnet' -o -name '*.jsonnet' -print | \
		xargs -n 1 -- $(JSONNET_FMT) -i
.PHONY: fmt
fmt: $(JSONNETFMT_BIN)
	find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
		xargs -n 1 -- $(JSONNETFMT_BIN) $(JSONNETFMT_ARGS) -i

test: $(JB_BINARY)
	$(JB_BINARY) install
.PHONY: test
test: $(JB_BIN)
	$(JB_BIN) install
	./test.sh

.PHONY: test-e2e
test-e2e:
	go test -timeout 55m -v ./tests/e2e -count=1

test-in-docker:
	@echo ">> Compiling assets and generating Kubernetes manifests"
	$(CONTAINER_CMD) $(MAKE) $(MFLAGS) test
$(BIN_DIR):
	mkdir -p $(BIN_DIR)

$(JB_BINARY):
	go get -u github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb

$(EMBEDMD_BINARY):
	go get github.com/campoy/embedmd

.PHONY: generate generate-in-docker test test-in-docker fmt
$(TOOLING): $(BIN_DIR)
	@echo Installing tools from scripts/tools.go
	@cd scripts && cat tools.go | grep _ | awk -F'"' '{print $$2}' | xargs -tI % go build -modfile=go.mod -o $(BIN_DIR) %

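With the tooling now built into `tmp/bin`, a typical local workflow is the following (a sketch; each target is defined above):

```shell
make vendor     # fetch jsonnet dependencies with jb
make generate   # rebuild manifests/ and re-embed *.md code snippets
make fmt        # format *.jsonnet and *.libsonnet sources
make test       # jb install + ./test.sh
make test-e2e   # go test ./tests/e2e against a running cluster
```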
1 OWNERS
@@ -1,5 +1,6 @@
reviewers:
- brancz
- kakkoyun
- metalmatze
- mxinden
- s-urbaniak

322 README.md
@@ -8,7 +8,7 @@ The content of this project is written in [jsonnet](http://jsonnet.org/). This p

Components included in this package:

* The [Prometheus Operator](https://github.com/coreos/prometheus-operator)
* The [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
* Highly available [Prometheus](https://prometheus.io/)
* Highly available [Alertmanager](https://github.com/prometheus/alertmanager)
* [Prometheus node-exporter](https://github.com/prometheus/node_exporter)
@@ -20,30 +20,47 @@ This stack is meant for cluster monitoring, so it is pre-configured to collect m

## Table of contents

* [Prerequisites](#prerequisites)
  * [minikube](#minikube)
* [Quickstart](#quickstart)
* [Customizing Kube-Prometheus](#customizing-kube-prometheus)
  * [Installing](#installing)
  * [Compiling](#compiling)
  * [Containerized Installing and Compiling](#containerized-installing-and-compiling)
* [Configuration](#configuration)
* [Customization Examples](#customization-examples)
  * [Cluster Creation Tools](#cluster-creation-tools)
  * [Internal Registries](#internal-registries)
  * [NodePorts](#nodeports)
  * [Prometheus Object Name](#prometheus-object-name)
  * [node-exporter DaemonSet namespace](#node-exporter-daemonset-namespace)
  * [Alertmanager configuration](#alertmanager-configuration)
  * [Static etcd configuration](#static-etcd-configuration)
  * [Pod Anti-Affinity](#pod-anti-affinity)
  * [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
  * [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
  * [Minikube Example](#minikube-example)
* [Troubleshooting](#troubleshooting)
  * [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
  * [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
* [Contributing](#contributing)
- [kube-prometheus](#kube-prometheus)
  - [Table of contents](#table-of-contents)
  - [Prerequisites](#prerequisites)
    - [minikube](#minikube)
  - [Compatibility](#compatibility)
    - [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix)
  - [Quickstart](#quickstart)
    - [Access the dashboards](#access-the-dashboards)
  - [Customizing Kube-Prometheus](#customizing-kube-prometheus)
    - [Installing](#installing)
    - [Compiling](#compiling)
      - [Apply the kube-prometheus stack](#apply-the-kube-prometheus-stack)
    - [Containerized Installing and Compiling](#containerized-installing-and-compiling)
  - [Update from upstream project](#update-from-upstream-project)
    - [Update jb](#update-jb)
    - [Update kube-prometheus](#update-kube-prometheus)
    - [Compile the manifests and apply](#compile-the-manifests-and-apply)
  - [Configuration](#configuration)
  - [Customization Examples](#customization-examples)
    - [Cluster Creation Tools](#cluster-creation-tools)
    - [Internal Registry](#internal-registry)
    - [NodePorts](#nodeports)
    - [Prometheus Object Name](#prometheus-object-name)
    - [node-exporter DaemonSet namespace](#node-exporter-daemonset-namespace)
    - [Alertmanager configuration](#alertmanager-configuration)
    - [Adding additional namespaces to monitor](#adding-additional-namespaces-to-monitor)
      - [Defining the ServiceMonitor for each additional Namespace](#defining-the-servicemonitor-for-each-additional-namespace)
    - [Monitoring all namespaces](#monitoring-all-namespaces)
    - [Static etcd configuration](#static-etcd-configuration)
    - [Pod Anti-Affinity](#pod-anti-affinity)
    - [Stripping container resource limits](#stripping-container-resource-limits)
    - [Customizing Prometheus alerting/recording rules and Grafana dashboards](#customizing-prometheus-alertingrecording-rules-and-grafana-dashboards)
    - [Exposing Prometheus/Alermanager/Grafana via Ingress](#exposing-prometheusalermanagergrafana-via-ingress)
    - [Minikube Example](#minikube-example)
  - [Troubleshooting](#troubleshooting)
    - [Error retrieving kubelet metrics](#error-retrieving-kubelet-metrics)
      - [Authentication problem](#authentication-problem)
      - [Authorization problem](#authorization-problem)
    - [kube-state-metrics resource usage](#kube-state-metrics-resource-usage)
  - [Contributing](#contributing)
  - [License](#license)

## Prerequisites

@@ -51,45 +68,66 @@ You will need a Kubernetes cluster, that's it! By default it is assumed, that th

This means the kubelet configuration must contain these flags:

* `--authentication-token-webhook=true` This flag enables a `ServiceAccount` token to be used to authenticate against the kubelet(s).
* `--authorization-mode=Webhook` This flag enables the kubelet to perform an RBAC request against the API to determine whether the requesting entity (Prometheus in this case) is allowed to access a resource, specifically the `/metrics` endpoint for this project.
* `--authentication-token-webhook=true` This flag enables a `ServiceAccount` token to be used to authenticate against the kubelet(s). This can also be enabled by setting the kubelet configuration value `authentication.webhook.enabled` to `true`.
* `--authorization-mode=Webhook` This flag enables the kubelet to perform an RBAC request against the API to determine whether the requesting entity (Prometheus in this case) is allowed to access a resource, specifically the `/metrics` endpoint for this project. This can also be enabled by setting the kubelet configuration value `authorization.mode` to `Webhook`.

This stack provides [resource metrics](https://github.com/kubernetes/metrics#resource-metrics-api) by deploying the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter/).
This adapter is an Extension API Server and Kubernetes needs to have this feature enabled, otherwise the adapter has no effect, but is still deployed.

### minikube

In order to just try out this stack, start [minikube](https://github.com/kubernetes/minikube) with the following command:
To try out this stack, start [minikube](https://github.com/kubernetes/minikube) with the following command:

```shell
$ minikube delete && minikube start --kubernetes-version=v1.13.5 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
$ minikube delete && minikube start --kubernetes-version=v1.20.0 --memory=6g --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
```

The kube-prometheus stack includes a resource metrics API server, like the metrics-server does. So ensure the metrics-server plugin is disabled on minikube:
The kube-prometheus stack includes a resource metrics API server, so the metrics-server addon is not necessary. Ensure the metrics-server addon is disabled on minikube:

```shell
$ minikube addons disable metrics-server
```

## Compatibility

### Kubernetes compatibility matrix

The following versions are supported and work as we test against these versions in their respective branches. But note that other versions might work!

| kube-prometheus stack | Kubernetes 1.16 | Kubernetes 1.17 | Kubernetes 1.18 | Kubernetes 1.19 | Kubernetes 1.20 |
|-----------------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| `release-0.4` | ✔ (v1.16.5+) | ✔ | ✗ | ✗ | ✗ |
| `release-0.5` | ✗ | ✗ | ✔ | ✗ | ✗ |
| `release-0.6` | ✗ | ✗ | ✔ | ✔ | ✗ |
| `release-0.7` | ✗ | ✗ | ✗ | ✔ | ✔ |
| `HEAD` | ✗ | ✗ | ✗ | ✔ | ✔ |

Note: Due to [two](https://github.com/kubernetes/kubernetes/issues/83778) [bugs](https://github.com/kubernetes/kubernetes/issues/86359) in Kubernetes v1.16.1, and prior to Kubernetes v1.16.5, the kube-prometheus release-0.4 branch only supports v1.16.5 and higher. The `extension-apiserver-authentication-reader` role in the kube-system namespace can be manually edited to include list and watch permissions in order to work around the second issue with Kubernetes v1.16.2 through v1.16.4.

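For example, to pin a project to the branch matching your cluster version from the matrix above, install it with jsonnet-bundler (a sketch; `release-0.7` covers Kubernetes v1.19 and v1.20, see [Installing](#installing) below):

```shell
jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.7
```
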
## Quickstart

> Note: For versions before Kubernetes v1.20.z refer to the [Kubernetes compatibility matrix](#kubernetes-compatibility-matrix) in order to choose a compatible branch.

This project is intended to be used as a library (i.e. the intent is not for you to create your own modified copy of this repository).

For a quickstart, though, a compiled version of the Kubernetes [manifests](manifests) generated with this library (specifically with `example.jsonnet`) is checked into this repository, so the content can be tried out quickly. To try out the stack un-customized, run:
* Simply create the stack:
* Create the monitoring stack using the config in the `manifests` directory:

```shell
$ kubectl create -f manifests/

# It can take a few seconds for the above 'create manifests' command to fully create the following resources, so verify the resources are ready before proceeding.
$ until kubectl get customresourcedefinitions servicemonitors.monitoring.coreos.com ; do date; sleep 1; echo ""; done
$ until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done

$ kubectl apply -f manifests/ # This command sometimes may need to be done twice (to work around a race condition).
# Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources
kubectl create -f manifests/setup
until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
kubectl create -f manifests/
```

We create the namespace and CustomResourceDefinitions first to avoid race conditions when deploying the monitoring components.
Alternatively, the resources in both folders can be applied with a single command
`kubectl create -f manifests/setup -f manifests`, but it may be necessary to run the command multiple times for all components to
be created successfully.

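Once both create steps have gone through, you can watch the stack come up; every pod in the monitoring namespace should eventually report Running (a quick check, assuming the default namespace name):

```shell
kubectl --namespace monitoring get pods
```
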
* And to teardown the stack:
```shell
$ kubectl delete -f manifests/
kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
```

### Access the dashboards
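
The services are not exposed outside the cluster by default, so the quickest way to reach the UIs is `kubectl port-forward`. A sketch, assuming the default `monitoring` namespace and the service names this stack creates:

```shell
kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
kubectl --namespace monitoring port-forward svc/grafana 3000
kubectl --namespace monitoring port-forward svc/alertmanager-main 9093
```

Then browse to http://localhost:9090, http://localhost:3000, and http://localhost:9093 respectively.
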
@@ -137,12 +175,12 @@ Install this library in your own project with [jsonnet-bundler](https://github.c
$ mkdir my-kube-prometheus; cd my-kube-prometheus
$ jb init # Creates the initial/empty `jsonnetfile.json`
# Install the kube-prometheus dependency
$ jb install github.com/coreos/kube-prometheus/jsonnet/kube-prometheus@release-0.1 # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
$ jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.4 # Creates `vendor/` & `jsonnetfile.lock.json`, and fills in `jsonnetfile.json`
```

> `jb` can be installed with `go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb`

> An example of how to install a given version of this library: `jb install github.com/coreos/kube-prometheus/jsonnet/kube-prometheus@release-0.1`
> An example of how to install a given version of this library: `jb install github.com/prometheus-operator/kube-prometheus/jsonnet/kube-prometheus@release-0.4`

In order to update the kube-prometheus dependency, simply use the jsonnet-bundler update functionality:
```shell
@@ -157,17 +195,33 @@ e.g. of how to compile the manifests: `./build.sh example.jsonnet`

Here's [example.jsonnet](example.jsonnet):

> Note: some of the following components must be configured beforehand. See [configuration](#configuration) and [customization-examples](#customization-examples).

[embedmd]:# (example.jsonnet)
```jsonnet
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  // Uncomment the following imports to enable their patches
  // (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-custom-metrics.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-external-metrics.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
@@ -189,12 +243,19 @@ set -x
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail

# Make sure to use project tooling
PATH="$(pwd)/tmp/bin:${PATH}"

# Make sure to start with a clean 'manifests' dir
rm -rf manifests
mkdir manifests
mkdir -p manifests/setup

# optional, but we would like to generate yaml, not json
jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml; rm -f {}' -- {}
# Calling gojsontoyaml is optional, but we would like to generate yaml, not json
jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml' -- {}

# Make sure to remove json files
find manifests -type f ! -name '*.yaml' -delete
rm -f kustomization

```

@@ -207,22 +268,22 @@ The previous steps (compilation) has created a bunch of manifest files in the ma
Now simply use `kubectl` to install Prometheus and Grafana as per your configuration:

```shell
# Update the namespace and CRDs, and then wait for them to be available before creating the remaining resources
$ kubectl apply -f manifests/setup
$ kubectl apply -f manifests/
```
Alternatively, the resources in both folders can be applied with a single command
`kubectl apply -Rf manifests`, but it may be necessary to run the command multiple times for all components to
be created successfully.

Check the monitoring namespace (or the namespace you have specified in `namespace: `) and make sure the pods are running. Prometheus and Grafana should be up and running soon.

### Containerized Installing and Compiling

If you don't care to have `jb` nor `jsonnet` nor `gojsontoyaml` installed, then build the `po-jsonnet` Docker image (this is something you'll need a copy of this repository for). Do the following from this `kube-prometheus` directory:
If you don't care to have `jb` nor `jsonnet` nor `gojsontoyaml` installed, then use the `quay.io/coreos/jsonnet-ci` container image. Do the following from this `kube-prometheus` directory:
```shell
$ make hack/jsonnet-docker-image
```

Then you can do commands such as the following:
```shell
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) po-jsonnet jb update
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) po-jsonnet ./build.sh example.jsonnet
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci jb update
$ docker run --rm -v $(pwd):$(pwd) --workdir $(pwd) quay.io/coreos/jsonnet-ci ./build.sh example.jsonnet
```

## Update from upstream project
@@ -247,7 +308,7 @@ Once updated, just follow the instructions under "Compiling" and "Apply the kube

## Configuration

Jsonnet has the concept of hidden fields. These are fields that are not rendered in the output. This is used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Usage section](#Usage), you can see an example of this, where the `namespace` is being configured to be `monitoring`. To avoid overriding the whole object, use jsonnet's `+::` construct to merge objects; this way you can override individual settings while retaining all other settings and defaults.
Jsonnet has the concept of hidden fields. These are fields that are not rendered in the output. This is used to configure the kube-prometheus components in jsonnet. In the example jsonnet code of the above [Customizing Kube-Prometheus section](#customizing-kube-prometheus), you can see an example of this, where the `namespace` is being configured to be `monitoring`. To avoid overriding the whole object, use jsonnet's `+::` construct to merge objects; this way you can override individual settings while retaining all other settings and defaults.
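
The merge behaviour is easy to see in isolation with the `jsonnet` CLI. A minimal sketch, with made-up field names for illustration:

```shell
# Only the overridden field changes; sibling defaults are retained
jsonnet -e "({ _config+:: { namespace: 'default', replicas: 2 } } + { _config+:: { namespace: 'monitoring' } })._config"
# => { "namespace": "monitoring", "replicas": 2 }
```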

These are the available fields with their respective default values:
```
@@ -256,23 +317,21 @@ These are the available fields with their respective default values:
    namespace: "default",

    versions+:: {
      alertmanager: "v0.16.2",
      nodeExporter: "v0.17.0",
      alertmanager: "v0.17.0",
      nodeExporter: "v0.18.1",
      kubeStateMetrics: "v1.5.0",
      kubeRbacProxy: "v0.4.1",
      addonResizer: "1.8.4",
      prometheusOperator: "v0.29.0",
      prometheus: "v2.7.2",
      prometheusOperator: "v0.30.0",
      prometheus: "v2.10.0",
    },

    imageRepos+:: {
      prometheus: "quay.io/prometheus/prometheus",
      alertmanager: "quay.io/prometheus/alertmanager",
      kubeStateMetrics: "quay.io/coreos/kube-state-metrics",
      kubeRbacProxy: "quay.io/coreos/kube-rbac-proxy",
      addonResizer: "k8s.gcr.io/addon-resizer",
      kubeRbacProxy: "quay.io/brancz/kube-rbac-proxy",
      nodeExporter: "quay.io/prometheus/node-exporter",
      prometheusOperator: "quay.io/coreos/prometheus-operator",
      prometheusOperator: "quay.io/prometheus-operator/prometheus-operator",
    },

    prometheus+:: {
@@ -309,8 +368,6 @@ These are the available fields with their respective default values:

      baseCPU: '100m',
      baseMemory: '150Mi',
      cpuPerNode: '2m',
      memoryPerNode: '30Mi',
    },

    nodeExporter+:: {

@@ -462,13 +519,13 @@ Standard Kubernetes manifests are all written using [ksonnet-lib](https://github

[embedmd]:# (examples/ksonnet-example.jsonnet)
```jsonnet
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local daemonset = k.apps.v1beta2.daemonSet;

((import 'kube-prometheus/kube-prometheus.libsonnet') + {
  nodeExporter+: {
    daemonset+:
      daemonset.mixin.metadata.withNamespace('my-custom-namespace'),
    daemonset+: {
      metadata+: {
        namespace: 'my-custom-namespace',
      },
    },
  },
}).nodeExporter.daemonset
```
@@ -541,6 +598,90 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

#### Defining the ServiceMonitor for each additional Namespace

In order for Prometheus to be able to discover and scrape services inside the additional namespaces specified in the previous step, you need to define a ServiceMonitor resource.

> Typically it is up to the users of a namespace to provision the ServiceMonitor resource, but in case you want to generate it with the same tooling as the rest of the cluster monitoring infrastructure, this is a guide on how to achieve this.

You can define ServiceMonitor resources in your `jsonnet` spec. See the snippet below:

[embedmd]:# (examples/additional-namespaces-servicemonitor.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    prometheus+:: {
      namespaces+: ['my-namespace', 'my-second-namespace'],
    },
  },
  prometheus+:: {
    serviceMonitorMyNamespace: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'my-servicemonitor',
        namespace: 'my-namespace',
      },
      spec: {
        jobLabel: 'app',
        endpoints: [
          {
            port: 'http-metrics',
          },
        ],
        selector: {
          matchLabels: {
            app: 'myapp',
          },
        },
      },
    },
  },

};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

> NOTE: make sure your service resources have the right labels (e.g. `'app': 'myapp'`) applied. Prometheus uses Kubernetes labels to discover resources inside the namespaces.

### Monitoring all namespaces

In case you want to monitor all namespaces in a cluster, you can add the following mixin. Also, make sure to empty the namespaces defined in prometheus so that roleBindings are not created against them.

[embedmd]:# (examples/all-namespaces.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-all-namespaces.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',

      prometheus+:: {
        namespaces: [],
      },
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

> NOTE: This configuration can potentially make your cluster insecure, especially in a multi-tenant cluster. This is because it gives Prometheus visibility over the whole cluster, which might not be expected in a scenario where certain namespaces are locked down for security reasons.

Proceed with [creating ServiceMonitors for the services in the namespaces](#defining-the-servicemonitor-for-each-additional-namespace) you actually want to monitor.

### Static etcd configuration

In order to configure a static etcd cluster to scrape, there is a simple [kube-prometheus-static-etcd.libsonnet](jsonnet/kube-prometheus/kube-prometheus-static-etcd.libsonnet) mixin prepared - see [etcd.jsonnet](examples/etcd.jsonnet) for an example of how to use that mixin, and [Monitoring external etcd](docs/monitoring-external-etcd.md) for more information.
@@ -557,6 +698,29 @@ possible, one can include the [kube-prometheus-anti-affinity.libsonnet](jsonnet/
(import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet')
```

### Stripping container resource limits

Sometimes in small clusters, the CPU/memory limits can get high enough for alerts to be fired continuously. To prevent this, one can strip off the predefined limits.
To do that, one can import the following mixin:

[embedmd]:# (examples/strip-limits.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-strip-limits.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

### Customizing Prometheus alerting/recording rules and Grafana dashboards

See the [developing Prometheus rules and Grafana dashboards](docs/developing-prometheus-rules-and-grafana-dashboards.md) guide.
@@ -569,15 +733,23 @@ See [exposing Prometheus/Alertmanager/Grafana](docs/exposing-prometheus-alertman

To use an easy to reproduce example, see [minikube.jsonnet](examples/minikube.jsonnet), which uses the minikube setup as demonstrated in [Prerequisites](#prerequisites). Because we would like easy access to our Prometheus, Alertmanager and Grafana UIs, `minikube.jsonnet` exposes the services as NodePort type services.

## Continuous Delivery

Working examples of use with continuous delivery tools are found in examples/continuous-delivery.

## Troubleshooting

See the general [guidelines](docs/community-support.md) for getting support from the community.

### Error retrieving kubelet metrics

Should the Prometheus `/targets` page show kubelet targets but fail to successfully scrape the metrics, it is most likely a problem with the authentication and authorization setup of the kubelets.

As described in the [Prerequisites](#prerequisites) section, in order to retrieve metrics from the kubelet, token authentication and authorization must be enabled. Some Kubernetes setup tools do not enable this by default.

If you are using Google's GKE product, see [cAdvisor support](docs/GKE-cadvisor-support.md).
- If you are using Google's GKE product, see [cAdvisor support](docs/GKE-cadvisor-support.md).
- If you are using AWS EKS, see [AWS EKS CNI support](docs/EKS-cni-support.md).
- If you are using Weave Net, see [Weave Net support](docs/weave-net-support.md).

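One way to spot-check the kubelet settings is to read back the kubelet configuration the cluster was provisioned with. The ConfigMap name below is an assumption that holds for kubeadm-based clusters; on older releases it is versioned, e.g. `kubelet-config-1.19`:

```shell
# Both authentication.webhook.enabled: true and authorization.mode: Webhook should appear
kubectl -n kube-system get configmap kubelet-config -o yaml | grep -E -A1 'webhook|mode:'
```
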
#### Authentication problem

@@ -617,5 +789,9 @@ the following process:
2. Commit your changes (This is currently necessary due to our vendoring
process. This is likely to change in the future).
3. Update the pinned kube-prometheus dependency in `jsonnetfile.lock.json`: `jb update`
3. Generate dependent `*.yaml` files: `make generate-in-docker`
3. Generate dependent `*.yaml` files: `make generate`
4. Commit the generated changes.

## License

Apache License 2.0, see [LICENSE](https://github.com/prometheus-operator/kube-prometheus/blob/master/LICENSE).

13 build.sh
@@ -7,10 +7,17 @@ set -x
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail

# Make sure to use project tooling
PATH="$(pwd)/tmp/bin:${PATH}"

# Make sure to start with a clean 'manifests' dir
rm -rf manifests
mkdir manifests
mkdir -p manifests/setup

# optional, but we would like to generate yaml, not json
jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml; rm -f {}' -- {}
# Calling gojsontoyaml is optional, but we would like to generate yaml, not json
jsonnet -J vendor -m manifests "${1-example.jsonnet}" | xargs -I{} sh -c 'cat {} | gojsontoyaml > {}.yaml' -- {}

# Make sure to remove json files
find manifests -type f ! -name '*.yaml' -delete
rm -f kustomization

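A typical invocation, assuming the jsonnet dependencies have already been vendored (`make vendor` or `jb install`) and the tool binaries are in `tmp/bin`:

```shell
./build.sh example.jsonnet
ls manifests manifests/setup   # generated YAML lands here
```
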
42 docs/EKS-cni-support.md (Normal file)
@@ -0,0 +1,42 @@
# CNI monitoring special configuration updates for EKS

AWS EKS uses the [CNI](https://github.com/aws/amazon-vpc-cni-k8s) networking plugin for pod networking in Kubernetes, using Elastic Network Interfaces on AWS.

One fatal issue that can occur is that you run out of IP addresses in your EKS cluster. (This generally happens due to misconfigurations where pods keep getting scheduled.)

You can monitor the `awscni` using kube-prometheus with:
[embedmd]:# (../examples/eks-cni-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-eks.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
    prometheusRules+:: {
      groups+: [
        {
          name: 'example-group',
          rules: [
            {
              record: 'aws_eks_available_ip',
              expr: 'sum by(instance) (awscni_total_ip_addresses) - sum by(instance) (awscni_assigned_ip_addresses) < 10',
            },
          ],
        },
      ],
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
```

After you have the required YAML file, please run:

```
kubectl apply -f manifests/prometheus-serviceMonitorAwsEksCNI.yaml
```
84
docs/community-support.md
Normal file
84
docs/community-support.md
Normal file
@@ -0,0 +1,84 @@
# Community support

For bugs, you can use the GitHub [issue tracker](https://github.com/prometheus-operator/kube-prometheus/issues/new/choose).

For questions, you can use the GitHub [discussions forum](https://github.com/prometheus-operator/kube-prometheus/discussions).

Many of the `kube-prometheus` project's contributors and users can also be found on the #prometheus-operator channel of the [Kubernetes Slack][Kubernetes Slack].

`kube-prometheus` is the aggregation of many projects that all have different channels to reach out for help and support. This community strives to support all users, and you should never be afraid to ask us first. However, if your request relates specifically to one of the projects listed below, it is often more efficient to reach out to that project directly. If you are unsure, please feel free to open an issue in this repository and we will redirect you if applicable.

## prometheus-operator

For documentation, check the project's [documentation directory](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation).

For questions, use the #prometheus-operator channel on the [Kubernetes Slack][Kubernetes Slack].

For bugs, use the GitHub [issue tracker](https://github.com/prometheus-operator/prometheus-operator/issues/new/choose).

## Prometheus, Alertmanager, node_exporter

For documentation, check the Prometheus [online docs](https://prometheus.io/docs/). There is a [section](https://prometheus.io/docs/introduction/media/) with links to blog posts, recorded talks and presentations. This [repository](https://github.com/roaldnefs/awesome-prometheus) (not affiliated with the Prometheus project) also has a curated list of resources related to the Prometheus ecosystem.

For questions, see the Prometheus [community page](https://prometheus.io/community/) for the various channels.

There is also a #prometheus channel on the [CNCF Slack][CNCF Slack].

## kube-state-metrics

For documentation, see the project's [docs directory](https://github.com/kubernetes/kube-state-metrics/tree/master/docs).

For questions, use the #kube-state-metrics channel on the [Kubernetes Slack][Kubernetes Slack].

For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kube-state-metrics/issues/new/choose).

## Kubernetes

For documentation, check the [Kubernetes docs](https://kubernetes.io/docs/home/).

For questions, use the [community forums](https://discuss.kubernetes.io/) and the [Kubernetes Slack][Kubernetes Slack]. Check also the [community page](https://kubernetes.io/community/#discuss).

For bugs, use the GitHub [issue tracker](https://github.com/kubernetes/kubernetes/issues/new/choose).

## Prometheus adapter

For documentation, check the project's [README](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/README.md).

For questions, use the #sig-instrumentation channel on the [Kubernetes Slack][Kubernetes Slack].

For bugs, use the GitHub [issue tracker](https://github.com/DirectXMan12/k8s-prometheus-adapter/issues/new).

## Grafana

For documentation, check the [Grafana docs](https://grafana.com/docs/grafana/latest/).

For questions, use the [community forums](https://community.grafana.com/).

For bugs, use the GitHub [issue tracker](https://github.com/grafana/grafana/issues/new/choose).

## kubernetes-mixin

For documentation, check the project's [README](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/README.md).

For questions, use the #monitoring-mixins channel on the [Kubernetes Slack][Kubernetes Slack].

For bugs, use the GitHub [issue tracker](https://github.com/kubernetes-monitoring/kubernetes-mixin/issues/new).

## Jsonnet

For documentation, check the [Jsonnet](https://jsonnet.org/) website.

For questions, use the [mailing list](https://groups.google.com/forum/#!forum/jsonnet).

[Kubernetes Slack]: https://slack.k8s.io/
[CNCF Slack]: https://slack.cncf.io/
@@ -11,14 +11,28 @@ As a basis, all examples in this guide are based on the base example of the kube
[embedmd]:# (../example.jsonnet)
```jsonnet
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-custom-metrics.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-external-metrics.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
@@ -70,6 +84,7 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

@@ -106,6 +121,7 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

@@ -128,7 +144,12 @@ Then import it in jsonnet:

[embedmd]:# (../examples/prometheus-additional-rendered-rule-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  prometheusAlerts+:: (import 'existingrule.json'),
  _config+:: {
    namespace: 'monitoring',
  },
  prometheusAlerts+:: {
    groups+: (import 'existingrule.json').groups,
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
@@ -137,13 +158,14 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
### Changing default rules

Along with adding additional rules, we give the user the option to filter or adjust the existing rules imported by `kube-prometheus/kube-prometheus.libsonnet`. The recording rules can be found in [kube-prometheus/rules](../jsonnet/kube-prometheus/rules) and [kubernetes-mixin/rules](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/rules) while the alerting rules can be found in [kube-prometheus/alerts](../jsonnet/kube-prometheus/alerts) and [kubernetes-mixin/alerts](https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/alerts).

Knowing which rules to change, the user can now use functions from the [Jsonnet standard library](https://jsonnet.org/ref/stdlib.html) to make these changes. Below are examples of both a filter and an adjustment being made to the default rules. These changes can be assigned to a local variable and then added to the `local kp` object as seen in the examples above.

#### Filter
Here the alert `KubeStatefulSetReplicasMismatch` is being filtered out of the group `kubernetes-apps`. The default rule can be seen [here](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/alerts/apps_alerts.libsonnet).
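
The filter code itself is elided from this diff; the following is a minimal sketch of what such a `filter` local could look like, using `std.map` and `std.filter` in the same style as the other examples in this document:

```jsonnet
local filter = {
  prometheusAlerts+:: {
    groups: std.map(
      function(group)
        if group.name == 'kubernetes-apps' then
          group {
            // drop the KubeStatefulSetReplicasMismatch alert from this group
            rules: std.filter(
              function(rule) rule.alert != 'KubeStatefulSetReplicasMismatch',
              group.rules
            ),
          }
        else
          group,
      super.groups
    ),
  },
};
```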
@@ -193,7 +215,7 @@ local update = {
  },
};
```
Using the example from above about adding in pre-rendered rules, the new local vaiables can be added in as follows:
Using the example from above about adding in pre-rendered rules, the new local variables can be added in as follows:
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + filter + update + {
  prometheusAlerts+:: (import 'existingrule.json'),
@@ -207,7 +229,7 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + filter + updat
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

## Dashboards

Dashboards can be added either using jsonnet or simply as a pre-rendered json dashboard.
@@ -231,30 +253,32 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafanaDashboards+:: {
    'my-dashboard.json':
      dashboard.new('My Dashboard')
      .addTemplate(
        {
          current: {
            text: 'Prometheus',
            value: 'Prometheus',
  grafana+:: {
    dashboards+:: {
      'my-dashboard.json':
        dashboard.new('My Dashboard')
        .addTemplate(
          {
            current: {
              text: 'Prometheus',
              value: 'Prometheus',
            },
            hide: 0,
            label: null,
            name: 'datasource',
            options: [],
            query: 'prometheus',
            refresh: 1,
            regex: '',
            type: 'datasource',
          },
          hide: 0,
          label: null,
          name: 'datasource',
          options: [],
          query: 'prometheus',
          refresh: 1,
          regex: '',
          type: 'datasource',
        },
        )
        .addRow(
          row.new()
          .addPanel(graphPanel.new('My Panel', span=6, datasource='$datasource')
                    .addTarget(prometheus.target('vector(1)')))
        ),
      )
      .addRow(
        row.new()
        .addPanel(graphPanel.new('My Panel', span=6, datasource='$datasource')
                  .addTarget(prometheus.target('vector(1)')))
      ),
    },
  },
};

@@ -277,9 +301,37 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafanaDashboards+:: {
  grafanaDashboards+:: { // monitoring-mixin compatibility
    'my-dashboard.json': (import 'example-grafana-dashboard.json'),
  },
  grafana+:: {
    dashboards+:: { // use this method to import your dashboards to Grafana
      'my-dashboard.json': (import 'example-grafana-dashboard.json'),
    },
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```
In case you have lots of json dashboards exported from the Grafana UI, the above approach can take a lot of time. To improve performance, we can use the `rawDashboards` field and provide its value as a json string by using `importstr`:
[embedmd]:# (../examples/grafana-additional-rendered-dashboard-example-2.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafana+:: {
    rawDashboards+:: {
      'my-dashboard.json': (importstr 'example-grafana-dashboard.json'),
    },
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
```
@@ -27,13 +27,6 @@ In order to use this a secret needs to be created containing the name of the `ht
Also, the applications provide external links to themselves in alerts and various places. When an ingress is used in front of the applications, these links need to be based on the external URLs. This can be configured for each application in jsonnet.

```jsonnet
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local secret = k.core.v1.secret;
local ingress = k.extensions.v1beta1.ingress;
local ingressTls = ingress.mixin.spec.tlsType;
local ingressRule = ingress.mixin.spec.rulesType;
local httpIngressPath = ingressRule.mixin.http.pathsType;

local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  {
@@ -48,30 +41,46 @@ local kp =
      },
    },
    ingress+:: {
      'prometheus-k8s':
        ingress.new() +
        ingress.mixin.metadata.withName($.prometheus.prometheus.metadata.name) +
        ingress.mixin.metadata.withNamespace($.prometheus.prometheus.metadata.namespace) +
        ingress.mixin.metadata.withAnnotations({
          'nginx.ingress.kubernetes.io/auth-type': 'basic',
          'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
          'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
        }) +
        ingress.mixin.spec.withRules(
          ingressRule.new() +
          ingressRule.withHost('prometheus.example.com') +
          ingressRule.mixin.http.withPaths(
            httpIngressPath.new() +
            httpIngressPath.mixin.backend.withServiceName($.prometheus.service.metadata.name) +
            httpIngressPath.mixin.backend.withServicePort('web')
          ),
        ),
      'prometheus-k8s': {
        apiVersion: 'networking.k8s.io/v1',
        kind: 'Ingress',
        metadata: {
          name: $.prometheus.prometheus.metadata.name,
          namespace: $.prometheus.prometheus.metadata.namespace,
          annotations: {
            'nginx.ingress.kubernetes.io/auth-type': 'basic',
            'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
            'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
          },
        },
        spec: {
          rules: [{
            host: 'prometheus.example.com',
            http: {
              paths: [{
                backend: {
                  service: {
                    name: $.prometheus.service.metadata.name,
                    port: 'web',
                  },
                },
              }],
            },
          }],
        },
      },
    },
  } + {
    ingress+:: {
      'basic-auth-secret':
        secret.new('basic-auth', { auth: std.base64(importstr 'auth') }) +
        secret.mixin.metadata.withNamespace($._config.namespace),
      'basic-auth-secret': {
        apiVersion: 'v1',
        kind: 'Secret',
        metadata: {
          name: 'basic-auth',
          namespace: $._config.namespace,
        },
        data: { auth: std.base64(importstr 'auth') },
        type: 'Opaque',
      },
    },
  };

@@ -81,7 +90,7 @@ k.core.v1.list.new([
])
```
In order to expose Alertmanager and Grafana, simply create additional fields containing an ingress object, but simply pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively, for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL.
In order to expose Alertmanager and Grafana, simply create additional fields containing an ingress object, but simply pointing at the `alertmanager` or `grafana` instead of the `prometheus-k8s` Service. Make sure to also use the correct port respectively, for Alertmanager it is also `web`, for Grafana it is `http`. Be sure to also specify the appropriate external URL. Note that the external URL for Grafana is set in a different way than the external URL for Prometheus or Alertmanager. See [ingress.jsonnet](../examples/ingress.jsonnet) for how to set the Grafana external URL.

In order to render the ingress objects in the same way as the other objects, use the pattern demonstrated in the [main readme](../README.md#usage):
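
A minimal sketch of that rendering pattern, mirroring the manifest maps used throughout these docs (the `kp.ingress` field is the one defined in the example above):

```jsonnet
{ ['ingress-' + name]: kp.ingress[name] for name in std.objectFields(kp.ingress) }
```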

@@ -5,14 +5,14 @@ This guide will help you monitor applications in other Namespaces. By default th

You have to give the list of the Namespaces that you want to be able to monitor.
This is done in the variable `prometheus.roleSpecificNamespaces`. You usually set this in your `.jsonnet` file when building the manifests.

Example to create the needed `Role` and `Rolebindig` for the Namespace `foo`:
Example to create the needed `Role` and `RoleBinding` for the Namespace `foo`:
```
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',

    prometheus+:: {
      namespaces: ["default", "kube-system","foo"],
      namespaces: ["default", "kube-system", "foo"],
    },
  },
};
```
69
docs/weave-net-support.md
Normal file
@@ -0,0 +1,69 @@
# Setup Weave Net monitoring using kube-prometheus

[Weave Net](https://kubernetes.io/docs/concepts/cluster-administration/networking/#weave-net-from-weaveworks) is a resilient and simple to use CNI provider for Kubernetes. A well monitored and observed CNI provider helps in troubleshooting Kubernetes networking problems. [Weave Net](https://www.weave.works/docs/net/latest/concepts/how-it-works/) emits [prometheus metrics](https://www.weave.works/docs/net/latest/tasks/manage/metrics/) for monitoring Weave Net. There are many ways to install Weave Net in your cluster. One of them is using [kops](https://github.com/kubernetes/kops/blob/master/docs/networking.md).

Following this document, you can set up Weave Net monitoring for your cluster using kube-prometheus.

## Contents

Using kube-prometheus and kubectl you will be able to install the following for monitoring Weave Net in your cluster:

1. [Service for Weave Net](https://gist.github.com/alok87/379c6234b582f555c141f6fddea9fbce) The service which the [service monitor](https://coreos.com/operators/prometheus/docs/latest/user-guides/cluster-monitoring.html) scrapes.
2. [ServiceMonitor for Weave Net](https://gist.github.com/alok87/e46a7f9a79ef6d1da6964a035be2cfb9) Service monitor to scrape the Weave Net metrics and bring them to Prometheus.
3. [Prometheus Alerts for Weave Net](https://stackoverflow.com/a/60447864) This will set up all the important Weave Net metrics you should be alerted on.
4. [Grafana Dashboard for Weave Net](https://grafana.com/grafana/dashboards/11789) This will set up the per-pod level monitoring for Weave Net.
5. [Grafana Dashboard for Weave Net(Cluster)](https://grafana.com/grafana/dashboards/11804) This will set up the cluster level monitoring for Weave Net.

## Instructions

- You can monitor Weave Net using an example like the one below. **Please note that some alert configurations are environment specific and may require modifications of alert thresholds**. For example: the FastDP flows have never gone below 15000 for us. But if this value is, say, 20000 for you, then you can use an example like the one below to update the alert. The alerts which may require threshold modifications are `WeaveNetFastDPFlowsLow` and `WeaveNetIPAMUnreachable`.

[embedmd]:# (../examples/weave-net-example.jsonnet)
```jsonnet
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-weave-net.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
    prometheusAlerts+:: {
      groups: std.map(
        function(group)
          if group.name == 'weave-net' then
            group {
              rules: std.map(
                function(rule)
                  if rule.alert == 'WeaveNetFastDPFlowsLow' then
                    rule {
                      expr: 'sum(weave_flows) < 20000',
                    }
                  else if rule.alert == 'WeaveNetIPAMUnreachable' then
                    rule {
                      expr: 'weave_ipam_unreachable_percentage > 25',
                    }
                  else
                    rule,
                group.rules
              ),
            }
          else
            group,
        super.groups
      ),
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
```

- After you have the required yaml files, please run

```
kubectl create -f prometheus-serviceWeaveNet.yaml
kubectl create -f prometheus-serviceMonitorWeaveNet.yaml
kubectl apply -f prometheus-rules.yaml
kubectl apply -f grafana-dashboardDefinitions.yaml
kubectl apply -f grafana-deployment.yaml
```
@@ -1,12 +1,26 @@
local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-custom-metrics.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-external-metrics.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
40
examples/additional-namespaces-servicemonitor.jsonnet
Normal file
@@ -0,0 +1,40 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    prometheus+:: {
      namespaces+: ['my-namespace', 'my-second-namespace'],
    },
  },
  prometheus+:: {
    serviceMonitorMyNamespace: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'my-servicemonitor',
        namespace: 'my-namespace',
      },
      spec: {
        jobLabel: 'app',
        endpoints: [
          {
            port: 'http-metrics',
          },
        ],
        selector: {
          matchLabels: {
            app: 'myapp',
          },
        },
      },
    },
  },

};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
@@ -1,6 +1,7 @@
# external alertmanager yaml
global:
  resolve_timeout: 10m
  slack_api_url: url
route:
  group_by: ['job']
  group_wait: 30s
@@ -13,3 +14,17 @@ route:
    receiver: 'null'
receivers:
- name: 'null'
- name: slack
  slack_configs:
  - channel: '#alertmanager-testing'
    send_resolved: true
    title: '[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] Monitoring Event Notification'
    text: |-
      {{ range .Alerts }}
      *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
      *Description:* {{ .Annotations.description }}
      *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
      *Details:*
      {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
      {{ end }}
      {{ end }}
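
A configuration like this can be validated locally before loading it; a sketch, assuming the `amtool` CLI that ships with Alertmanager and that the file is saved as `alertmanager.yaml`:

```
amtool check-config alertmanager.yaml
```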
18
examples/all-namespaces.jsonnet
Normal file
@@ -0,0 +1,18 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-all-namespaces.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',

      prometheus+:: {
        namespaces: [],
      },
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
9
examples/continuous-delivery/argocd/README.md
Normal file
@@ -0,0 +1,9 @@
## ArgoCD Example

This is the simplest working example of an ArgoCD app. The JSON object built is now an array of objects, as that is the preferred format for ArgoCD.

Requirements:

**ArgoCD 1.7+**

Follow the vendor generation steps at the root of this repository and generate a `vendored` folder (referenced in `application.yaml`).
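
A sketch of one way to produce that folder, assuming jsonnet-bundler (`jb`) is installed and a `jsonnetfile.json` exists at the repository root; the copy to `vendored` is only to match the name referenced in `application.yaml`:

```
jb install             # populates vendor/ from jsonnetfile.json
cp -r vendor vendored  # expose the dependencies under the name this example expects
```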
25
examples/continuous-delivery/argocd/application.yaml
Normal file
@@ -0,0 +1,25 @@
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: kube-prometheus
  namespace: argocd
  annotations:
    recipients.argocd-notifications.argoproj.io: "slack:jenkins"
spec:
  destination:
    namespace: monitoring
    server: https://kubernetes.default.svc
  project: monitoring
  source:
    directory:
      jsonnet:
        libs:
        - vendored
      recurse: true
    path: examples/continuous-delivery/argocd/kube-prometheus
    repoURL: git@github.com:prometheus-operator/kube-prometheus.git
    targetRevision: HEAD
  syncPolicy:
    automated: {}
---
22
examples/continuous-delivery/argocd/appproject.yaml
Normal file
@@ -0,0 +1,22 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
  annotations:
    recipients.argocd-notifications.argoproj.io: slack:alerts
  generation: 1
  name: monitoring
  namespace: argocd
spec:
  clusterResourceWhitelist:
  - group: "*"
    kind: "*"
  description: "Monitoring Stack deployment"
  destinations:
  - namespace: kube-system
    server: https://kubernetes.default.svc
  - namespace: default
    server: https://kubernetes.default.svc
  - namespace: monitoring
    server: https://kubernetes.default.svc
  sourceRepos:
  - git@github.com:prometheus-operator/kube-prometheus.git
@@ -0,0 +1,13 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {

  _config+:: {
    namespace: 'monitoring',
  },
};

[kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus)] +
[kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator)] +
[kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter)] +
[kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics)] +
[kp.prometheus[name] for name in std.objectFields(kp.prometheus)] +
[kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter)]
26
examples/eks-cni-example.jsonnet
Normal file
@@ -0,0 +1,26 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-eks.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
    prometheusRules+:: {
      groups+: [
        {
          name: 'example-group',
          rules: [
            {
              record: 'aws_eks_available_ip',
              expr: 'sum by(instance) (awscni_total_ip_addresses) - sum by(instance) (awscni_assigned_ip_addresses) < 10',
            },
          ],
        },
      ],
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) }
@@ -14,12 +14,16 @@ spec:
    port: 8080
    targetPort: web
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
  namespace: default
spec:
  selector:
    matchLabels:
      app: example-app
      version: 1.1.3
  replicas: 4
  template:
    metadata:
@@ -9,30 +9,32 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafanaDashboards+:: {
    'my-dashboard.json':
      dashboard.new('My Dashboard')
      .addTemplate(
        {
          current: {
            text: 'Prometheus',
            value: 'Prometheus',
  grafana+:: {
    dashboards+:: {
      'my-dashboard.json':
        dashboard.new('My Dashboard')
        .addTemplate(
          {
            current: {
              text: 'Prometheus',
              value: 'Prometheus',
            },
            hide: 0,
            label: null,
            name: 'datasource',
            options: [],
            query: 'prometheus',
            refresh: 1,
            regex: '',
            type: 'datasource',
          },
          hide: 0,
          label: null,
          name: 'datasource',
          options: [],
          query: 'prometheus',
          refresh: 1,
          regex: '',
          type: 'datasource',
        },
        )
        .addRow(
          row.new()
          .addPanel(graphPanel.new('My Panel', span=6, datasource='$datasource')
                    .addTarget(prometheus.target('vector(1)')))
        ),
      )
      .addRow(
        row.new()
        .addPanel(graphPanel.new('My Panel', span=6, datasource='$datasource')
                  .addTarget(prometheus.target('vector(1)')))
      ),
    },
  },
};

@@ -0,0 +1,18 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafana+:: {
    rawDashboards+:: {
      'my-dashboard.json': (importstr 'example-grafana-dashboard.json'),
    },
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
@@ -2,9 +2,14 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
  },
  grafanaDashboards+:: {
  grafanaDashboards+:: { // monitoring-mixin compatibility
    'my-dashboard.json': (import 'example-grafana-dashboard.json'),
  },
  grafana+:: {
    dashboards+:: { // use this method to import your dashboards to Grafana
      'my-dashboard.json': (import 'example-grafana-dashboard.json'),
    },
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
@@ -1,9 +1,17 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local secret = k.core.v1.secret;
local ingress = k.extensions.v1beta1.ingress;
local ingressTls = ingress.mixin.spec.tlsType;
local ingressRule = ingress.mixin.spec.rulesType;
local httpIngressPath = ingressRule.mixin.http.pathsType;
local ingress(name, namespace, rules) = {
  apiVersion: 'networking.k8s.io/v1',
  kind: 'Ingress',
  metadata: {
    name: name,
    namespace: namespace,
    annotations: {
      'nginx.ingress.kubernetes.io/auth-type': 'basic',
      'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
      'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
    },
  },
  spec: { rules: rules },
};

local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
@@ -37,67 +45,71 @@ local kp =
  },
  // Create ingress objects per application
  ingress+:: {
    'alertmanager-main':
      ingress.new() +
      ingress.mixin.metadata.withName('alertmanager-main') +
      ingress.mixin.metadata.withNamespace($._config.namespace) +
      ingress.mixin.metadata.withAnnotations({
        'nginx.ingress.kubernetes.io/auth-type': 'basic',
        'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
        'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
      }) +
      ingress.mixin.spec.withRules(
        ingressRule.new() +
        ingressRule.withHost('alertmanager.example.com') +
        ingressRule.mixin.http.withPaths(
          httpIngressPath.new() +
          httpIngressPath.mixin.backend.withServiceName('alertmanager-main') +
          httpIngressPath.mixin.backend.withServicePort('web')
        ),
      ),
    grafana:
      ingress.new() +
      ingress.mixin.metadata.withName('grafana') +
      ingress.mixin.metadata.withNamespace($._config.namespace) +
      ingress.mixin.metadata.withAnnotations({
        'nginx.ingress.kubernetes.io/auth-type': 'basic',
        'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
        'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
      }) +
      ingress.mixin.spec.withRules(
        ingressRule.new() +
        ingressRule.withHost('grafana.example.com') +
        ingressRule.mixin.http.withPaths(
          httpIngressPath.new() +
          httpIngressPath.mixin.backend.withServiceName('grafana') +
          httpIngressPath.mixin.backend.withServicePort('http')
        ),
      ),
    'prometheus-k8s':
      ingress.new() +
      ingress.mixin.metadata.withName('prometheus-k8s') +
      ingress.mixin.metadata.withNamespace($._config.namespace) +
      ingress.mixin.metadata.withAnnotations({
        'nginx.ingress.kubernetes.io/auth-type': 'basic',
        'nginx.ingress.kubernetes.io/auth-secret': 'basic-auth',
        'nginx.ingress.kubernetes.io/auth-realm': 'Authentication Required',
      }) +
      ingress.mixin.spec.withRules(
        ingressRule.new() +
        ingressRule.withHost('prometheus.example.com') +
        ingressRule.mixin.http.withPaths(
          httpIngressPath.new() +
          httpIngressPath.mixin.backend.withServiceName('prometheus-k8s') +
          httpIngressPath.mixin.backend.withServicePort('web')
        ),
      ),
    'alertmanager-main': ingress(
      'alertmanager-main',
      $._config.namespace,
      [{
        host: 'alertmanager.example.com',
        http: {
          paths: [{
            backend: {
              service: {
                name: 'alertmanager-main',
                port: 'web',
              },
            },
          }],
        },
      }]
    ),
    grafana: ingress(
      'grafana',
      $._config.namespace,
      [{
        host: 'grafana.example.com',
        http: {
          paths: [{
            backend: {
              service: {
                name: 'grafana',
                port: 'http',
              },
            },
          }],
        },
      }],
    ),
    'prometheus-k8s': ingress(
      'prometheus-k8s',
      $._config.namespace,
      [{
        host: 'prometheus.example.com',
        http: {
          paths: [{
            backend: {
              service: {
                name: 'prometheus-k8s',
                port: 'web',
              },
            },
          }],
        },
      }],
    ),
  },
} + {
  // Create basic auth secret - replace 'auth' file with your own
  ingress+:: {
    'basic-auth-secret':
      secret.new('basic-auth', { auth: std.base64(importstr 'auth') }) +
      secret.mixin.metadata.withNamespace($._config.namespace),
    'basic-auth-secret': {
      apiVersion: 'v1',
      kind: 'Secret',
      metadata: {
        name: 'basic-auth',
        namespace: $._config.namespace,
      },
      data: { auth: std.base64(importstr 'auth') },
      type: 'Opaque',
    },
  },
};

@@ -1,9 +1,9 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local daemonset = k.apps.v1beta2.daemonSet;

((import 'kube-prometheus/kube-prometheus.libsonnet') + {
  nodeExporter+: {
    daemonset+:
      daemonset.mixin.metadata.withNamespace('my-custom-namespace'),
    daemonset+: {
      metadata+: {
        namespace: 'my-custom-namespace',
      },
    },
  },
}).nodeExporter.daemonset
@@ -6,8 +6,15 @@ local kp =
  };

local manifests =
  { ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
  { ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
  // Uncomment line below to enable vertical auto scaling of kube-state-metrics
  //{ ['ksm-autoscaler-' + name]: kp.ksmAutoscaler[name] for name in std.objectFields(kp.ksmAutoscaler) } +
  { ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
  {
    ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
    for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
  } +
  // serviceMonitor is separated so that it can be created after the CRDs are ready
  { 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
  { ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
  { ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
  { ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
@@ -29,4 +29,5 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }

@@ -23,4 +23,5 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }

@@ -1,5 +1,10 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  prometheusAlerts+:: (import 'existingrule.json'),
  _config+:: {
    namespace: 'monitoring',
  },
  prometheusAlerts+:: {
    groups+: (import 'existingrule.json').groups,
  },
};

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
@@ -8,4 +13,5 @@ local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }

@@ -1,13 +1,11 @@
// Reference info: documentation for https://github.com/ksonnet/ksonnet-lib can be found at http://g.bryan.dev.hepti.center
//
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';  // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
// * https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k8s.libsonnet defines things such as "persistentVolumeClaim:: {"
//
local pvc = k.core.v1.persistentVolumeClaim;  // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')

local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-bootkube.libsonnet') +
  // Uncomment the following imports to enable its patches
  // (import 'kube-prometheus/kube-prometheus-anti-affinity.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-managed-cluster.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') +
  // (import 'kube-prometheus/kube-prometheus-thanos-sidecar.libsonnet') +
  {
    _config+:: {
      namespace: 'monitoring',
@@ -27,22 +25,22 @@ local kp =
      // * PersistentVolumeClaim (and a corresponding PersistentVolume)
      // * the actual volume (per the StorageClassName specified below)
      storage: {  // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
        volumeClaimTemplate:  // (same link as above where the 'pvc' variable is defined)
          pvc.new() +  // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new

          pvc.mixin.spec.withAccessModes('ReadWriteOnce') +

          // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#resourcerequirements-v1-core (defines 'requests'),
          // and https://kubernetes.io/docs/concepts/policy/resource-quotas/#storage-resource-quota (defines 'requests.storage')
          pvc.mixin.spec.resources.withRequests({ storage: '100Gi' }) +

          // A StorageClass of the following name (which can be seen via `kubectl get storageclass` from a node in the given K8s cluster) must exist prior to kube-prometheus being deployed.
          pvc.mixin.spec.withStorageClassName('ssd'),

          // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
          // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
          // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
          //pvc.mixin.spec.selector.withMatchLabels({}),
        volumeClaimTemplate: {  // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
          apiVersion: 'v1',
          kind: 'PersistentVolumeClaim',
          spec: {
            accessModes: ['ReadWriteOnce'],
            // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#resourcerequirements-v1-core (defines 'requests'),
            // and https://kubernetes.io/docs/concepts/policy/resource-quotas/#storage-resource-quota (defines 'requests.storage')
            resources: { requests: { storage: '100Gi' } },
            // A StorageClass of the following name (which can be seen via `kubectl get storageclass` from a node in the given K8s cluster) must exist prior to kube-prometheus being deployed.
            storageClassName: 'ssd',
            // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
            // And note that this is not supported/allowed by AWS - uncommenting the following 'selector' line (when deploying kube-prometheus to a K8s cluster in AWS) will cause the pvc to be stuck in the Pending status and have the following error:
            // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
            // selector: { matchLabels: {} },
          },
        },
      },  // storage
    },  // spec
  },  // prometheus
@@ -50,9 +48,16 @@ local kp =

  };

{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['setup/0namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{
  ['setup/prometheus-operator-' + name]: kp.prometheusOperator[name]
  for name in std.filter((function(name) name != 'serviceMonitor'), std.objectFields(kp.prometheusOperator))
} +
// serviceMonitor is separated so that it can be created after the CRDs are ready
{ 'prometheus-operator-serviceMonitor': kp.prometheusOperator.serviceMonitor } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }

14
examples/strip-limits.jsonnet
Normal file
@@ -0,0 +1,14 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-strip-limits.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
24
examples/tolerations.libsonnet
Normal file
@@ -0,0 +1,24 @@
{
  _config+:: {
    tolerations+:: [
      {
        key: 'key1',
        operator: 'Equal',
        value: 'value1',
        effect: 'NoSchedule',
      },
      {
        key: 'key2',
        operator: 'Exists',
      },
    ],
  },

  prometheus+: {
    prometheus+: {
      spec+: {
        tolerations: [t for t in $._config.tolerations],
      },
    },
  },
}
40
examples/weave-net-example.jsonnet
Normal file
@@ -0,0 +1,40 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-weave-net.libsonnet') + {
    _config+:: {
      namespace: 'monitoring',
    },
    prometheusAlerts+:: {
      groups: std.map(
        function(group)
          if group.name == 'weave-net' then
            group {
              rules: std.map(
                function(rule)
                  if rule.alert == 'WeaveNetFastDPFlowsLow' then
                    rule {
                      expr: 'sum(weave_flows) < 20000',
                    }
                  else if rule.alert == 'WeaveNetIPAMUnreachable' then
                    rule {
                      expr: 'weave_ipam_unreachable_percentage > 25',
                    }
                  else
                    rule,
                group.rules
              ),
            }
          else
            group,
        super.groups
      ),
    },
  };

{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
7
experimental/custom-metrics-api/.gitignore
vendored
@@ -1,7 +0,0 @@
apiserver-key.pem
apiserver.csr
apiserver.pem
metrics-ca-config.json
metrics-ca.crt
metrics-ca.key
cm-adapter-serving-certs.yaml
@@ -1,21 +0,0 @@
# Custom Metrics API

The custom metrics API allows the HPA v2 to scale based on arbitrary metrics.

This directory contains an example deployment which extends the Prometheus Adapter, deployed with kube-prometheus, to serve the [Custom Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md) by talking to the Prometheus running inside the cluster.

Make sure you have the Prometheus Adapter up and running in the `monitoring` namespace.

You can deploy everything in the `monitoring` namespace using `./deploy.sh`.

When you're done, you can tear everything down using the `./teardown.sh` script.

### Sample App

Additionally, this directory contains a sample app that uses the [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale the Deployment's replicas of Pods up and down as needed.
Deploy this app by running `kubectl apply -f sample-app.yaml`.
Make the app accessible on your system, for example by using `kubectl port-forward svc/sample-app 8080`. Next, you need to put some load on its HTTP endpoints.

A tool like [hey](https://github.com/rakyll/hey) is helpful for doing so: `hey -c 20 -n 100000000 http://localhost:8080/metrics`

There is even more detailed information on this sample app at [luxas/kubeadm-workshop](https://github.com/luxas/kubeadm-workshop#deploying-the-prometheus-operator-for-monitoring-services-in-the-cluster).
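Once the sample app is receiving traffic, you can check that the adapter is actually exposing the metric through the aggregated API; for example (a standard Kubernetes raw API query, not part of this diff), `kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/monitoring/pods/*/http_requests"` should return a `MetricValueList` with one entry per pod.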
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: custom-metrics-server-resources
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: custom-metrics-server-resources
subjects:
- kind: ServiceAccount
  name: prometheus-adapter
  namespace: monitoring
@@ -1,13 +0,0 @@
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.custom.metrics.k8s.io
spec:
  service:
    name: prometheus-adapter
    namespace: monitoring
  group: custom.metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
@@ -1,9 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: custom-metrics-server-resources
rules:
- apiGroups:
  - custom.metrics.k8s.io
  resources: ["*"]
  verbs: ["*"]
@@ -1,98 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: adapter-config
  namespace: monitoring
data:
  config.yaml: |
    rules:
    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
      seriesFilters: []
      resources:
        overrides:
          namespace:
            resource: namespace
          pod_name:
            resource: pod
      name:
        matches: ^container_(.*)_seconds_total$
        as: ""
      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
      seriesFilters:
      - isNot: ^container_.*_seconds_total$
      resources:
        overrides:
          namespace:
            resource: namespace
          pod_name:
            resource: pod
      name:
        matches: ^container_(.*)_total$
        as: ""
      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[1m])) by (<<.GroupBy>>)
    - seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
      seriesFilters:
      - isNot: ^container_.*_total$
      resources:
        overrides:
          namespace:
            resource: namespace
          pod_name:
            resource: pod
      name:
        matches: ^container_(.*)$
        as: ""
      metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>)
    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
      seriesFilters:
      - isNot: .*_total$
      resources:
        template: <<.Resource>>
      name:
        matches: ""
        as: ""
      metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
      seriesFilters:
      - isNot: .*_seconds_total
      resources:
        template: <<.Resource>>
      name:
        matches: ^(.*)_total$
        as: ""
      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
    - seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
      seriesFilters: []
      resources:
        template: <<.Resource>>
      name:
        matches: ^(.*)_seconds_total$
        as: ""
      metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
    resourceRules:
      cpu:
        containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
        nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[1m])) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod_name:
              resource: pod
        containerLabel: container_name
      memory:
        containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
        nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)
        resources:
          overrides:
            node:
              resource: node
            namespace:
              resource: namespace
            pod_name:
              resource: pod
        containerLabel: container_name
      window: 1m
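To make the `<<.Series>>`/`<<.LabelMatchers>>`/`<<.GroupBy>>` templating concrete: for a hypothetical pod `sample-app-abc123` in the `monitoring` namespace, the `cpu` resource rule above would expand to roughly `sum(rate(container_cpu_usage_seconds_total{namespace="monitoring",pod_name="sample-app-abc123"}[1m])) by (pod_name)`, with the label matchers derived from the incoming API request and the grouping label from the requested resource (the pod name and exact rendering here are illustrative).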
@@ -1,7 +0,0 @@
#!/usr/bin/env bash

kubectl apply -n monitoring -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl apply -n monitoring -f custom-metrics-apiservice.yaml
kubectl apply -n monitoring -f custom-metrics-cluster-role.yaml
kubectl apply -n monitoring -f custom-metrics-configmap.yaml
kubectl apply -n monitoring -f hpa-custom-metrics-cluster-role-binding.yaml
@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: hpa-controller-custom-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: custom-metrics-server-resources
subjects:
- kind: ServiceAccount
  name: horizontal-pod-autoscaler
  namespace: kube-system
@@ -1,67 +0,0 @@
kind: ServiceMonitor
apiVersion: monitoring.coreos.com/v1
metadata:
  name: sample-app
  labels:
    app: sample-app
spec:
  selector:
    matchLabels:
      app: sample-app
  endpoints:
  - port: http
    interval: 5s
---
apiVersion: v1
kind: Service
metadata:
  name: sample-app
  labels:
    app: sample-app
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  selector:
    app: sample-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sample-app
  labels:
    app: sample-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sample-app
  template:
    metadata:
      labels:
        app: sample-app
    spec:
      containers:
      - image: luxas/autoscale-demo:v0.1.2
        name: metrics-provider
        ports:
        - name: http
          containerPort: 8080
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta1
metadata:
  name: sample-app
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: sample-app
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods
    pods:
      metricName: http_requests
      targetAverageValue: 500m
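Reading the HPA spec above as a worked example: `targetAverageValue: 500m` means 0.5 `http_requests` per second per pod on average. If, say, the pods collectively receive 10 requests/s, the controller scales toward 10 / 0.5 = 20 replicas, which `maxReplicas: 10` then caps at 10 (the traffic figure is illustrative).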
@@ -1,7 +0,0 @@
#!/usr/bin/env bash

kubectl delete -n monitoring -f custom-metrics-apiserver-resource-reader-cluster-role-binding.yaml
kubectl delete -n monitoring -f custom-metrics-apiservice.yaml
kubectl delete -n monitoring -f custom-metrics-cluster-role.yaml
kubectl delete -n monitoring -f custom-metrics-configmap.yaml
kubectl delete -n monitoring -f hpa-custom-metrics-cluster-role-binding.yaml
@@ -14,6 +14,14 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - "apps"
+  resources:
+  - deployments
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - "extensions"
   resources:
@@ -1,4 +1,4 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: metrics-server
35 go.mod
@@ -1,32 +1,11 @@
-module github.com/coreos/kube-prometheus
+module github.com/prometheus-operator/kube-prometheus

-go 1.12
+go 1.15

 require (
-	github.com/Jeffail/gabs v1.2.0
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/gogo/protobuf v1.1.1 // indirect
-	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
-	github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect
-	github.com/imdario/mergo v0.3.7 // indirect
-	github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
-	github.com/pkg/errors v0.8.1
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/spf13/pflag v1.0.3 // indirect
-	github.com/stretchr/testify v1.2.2 // indirect
-	golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a // indirect
-	golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 // indirect
-	golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
-	golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db // indirect
-	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
-	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/yaml.v2 v2.2.2 // indirect
-	k8s.io/api v0.0.0-20190313235455-40a48860b5ab // indirect
-	k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1
-	k8s.io/client-go v11.0.0+incompatible
-	k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 // indirect
-	k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect
-	sigs.k8s.io/yaml v1.1.0 // indirect
+	github.com/Jeffail/gabs v1.4.0
+	github.com/pkg/errors v0.9.1
+	github.com/prometheus/client_golang v1.8.0
+	k8s.io/apimachinery v0.19.3
+	k8s.io/client-go v0.19.3
 )
605 go.sum
@@ -1,67 +1,606 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Jeffail/gabs v1.2.0 h1:uFhoIVTtsX7hV2RxNgWad8gMU+8OJdzFbOathJdhD3o=
github.com/Jeffail/gabs v1.2.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=
github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a h1:Igim7XhdOpBnWPuYJ70XcNpq8q3BCACtVgNfoJxOV7g=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU=
|
||||
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc=
|
||||
k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg=
|
||||
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
|
||||
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA=
|
||||
k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
|
||||
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU=
|
||||
k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs=
|
||||
k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc=
|
||||
k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
|
||||
k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg=
|
||||
k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
||||
@@ -1,11 +1,9 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

{
_config+:: {
namespace: 'default',

versions+:: {
alertmanager: 'v0.17.0',
alertmanager: 'v0.21.0',
},

imageRepos+:: {
@@ -13,30 +11,43 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
},

alertmanager+:: {
name: $._config.alertmanager.name,
name: 'main',
config: {
global: {
resolve_timeout: '5m',
},
inhibit_rules: [{
source_match: {
severity: 'critical',
},
target_match_re: {
severity: 'warning|info',
},
equal: ['namespace', 'alertname'],
}, {
source_match: {
severity: 'warning',
},
target_match_re: {
severity: 'info',
},
equal: ['namespace', 'alertname'],
}],
route: {
group_by: ['job'],
group_by: ['namespace'],
group_wait: '30s',
group_interval: '5m',
repeat_interval: '12h',
receiver: 'null',
receiver: 'Default',
routes: [
{
receiver: 'null',
match: {
alertname: 'Watchdog',
},
},
{ receiver: 'Watchdog', match: { alertname: 'Watchdog' } },
{ receiver: 'Critical', match: { severity: 'critical' } },
],
},
receivers: [
{
name: 'null',
},
{ name: 'Default' },
{ name: 'Watchdog' },
{ name: 'Critical' },
],
},
replicas: 3,
@@ -44,82 +55,93 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
},

alertmanager+:: {
secret:
local secret = k.core.v1.secret;
secret: {
apiVersion: 'v1',
kind: 'Secret',
type: 'Opaque',
metadata: {
name: 'alertmanager-' + $._config.alertmanager.name,
namespace: $._config.namespace,
},
stringData: {
'alertmanager.yaml': if std.type($._config.alertmanager.config) == 'object'
then
std.manifestYamlDoc($._config.alertmanager.config)
else
$._config.alertmanager.config,
},
},

if std.type($._config.alertmanager.config) == 'object' then
secret.new('alertmanager-' + $._config.alertmanager.name, { 'alertmanager.yaml': std.base64(std.manifestYamlDoc($._config.alertmanager.config)) }) +
secret.mixin.metadata.withNamespace($._config.namespace)
else
secret.new('alertmanager-' + $._config.alertmanager.name, { 'alertmanager.yaml': std.base64($._config.alertmanager.config) }) +
secret.mixin.metadata.withNamespace($._config.namespace),
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: {
name: 'alertmanager-' + $._config.alertmanager.name,
namespace: $._config.namespace,
},
},

serviceAccount:
local serviceAccount = k.core.v1.serviceAccount;
service: {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: 'alertmanager-' + $._config.alertmanager.name,
namespace: $._config.namespace,
labels: { alertmanager: $._config.alertmanager.name },
},
spec: {
ports: [
{ name: 'web', targetPort: 'web', port: 9093 },
],
selector: { app: 'alertmanager', alertmanager: $._config.alertmanager.name },
sessionAffinity: 'ClientIP',
},
},

serviceAccount.new('alertmanager-' + $._config.alertmanager.name) +
serviceAccount.mixin.metadata.withNamespace($._config.namespace),

service:
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

local alertmanagerPort = servicePort.newNamed('web', 9093, 'web');

service.new('alertmanager-' + $._config.alertmanager.name, { app: 'alertmanager', alertmanager: $._config.alertmanager.name }, alertmanagerPort) +
service.mixin.spec.withSessionAffinity('ClientIP') +
service.mixin.metadata.withNamespace($._config.namespace) +
service.mixin.metadata.withLabels({ alertmanager: $._config.alertmanager.name }),

serviceMonitor:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'alertmanager',
namespace: $._config.namespace,
labels: {
'k8s-app': 'alertmanager',
},
},
spec: {
selector: {
matchLabels: {
alertmanager: $._config.alertmanager.name,
},
},
endpoints: [
{
port: 'web',
interval: '30s',
},
],
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'alertmanager',
namespace: $._config.namespace,
labels: {
'k8s-app': 'alertmanager',
},
},

alertmanager:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Alertmanager',
metadata: {
name: $._config.alertmanager.name,
namespace: $._config.namespace,
labels: {
spec: {
selector: {
matchLabels: {
alertmanager: $._config.alertmanager.name,
},
},
spec: {
replicas: $._config.alertmanager.replicas,
version: $._config.versions.alertmanager,
baseImage: $._config.imageRepos.alertmanager,
nodeSelector: { 'beta.kubernetes.io/os': 'linux' },
serviceAccountName: 'alertmanager-' + $._config.alertmanager.name,
securityContext: {
runAsUser: 1000,
runAsNonRoot: true,
fsGroup: 2000,
},
endpoints: [
{ port: 'web', interval: '30s' },
],
},
},

alertmanager: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Alertmanager',
metadata: {
name: $._config.alertmanager.name,
namespace: $._config.namespace,
labels: {
alertmanager: $._config.alertmanager.name,
},
},
spec: {
replicas: $._config.alertmanager.replicas,
version: $._config.versions.alertmanager,
image: $._config.imageRepos.alertmanager + ':' + $._config.versions.alertmanager,
nodeSelector: { 'kubernetes.io/os': 'linux' },
serviceAccountName: 'alertmanager-' + $._config.alertmanager.name,
securityContext: {
runAsUser: 1000,
runAsNonRoot: true,
fsGroup: 2000,
},
},
},
},
}
@@ -1,52 +0,0 @@
{
prometheusAlerts+:: {
groups+: [
{
name: 'alertmanager.rules',
rules: [
{
alert: 'AlertmanagerConfigInconsistent',
annotations: {
message: 'The configuration of the instances of the Alertmanager cluster `{{$labels.service}}` are out of sync.',
},
expr: |||
count_values("config_hash", alertmanager_config_hash{%(alertmanagerSelector)s}) BY (service) / ON(service) GROUP_LEFT() label_replace(prometheus_operator_spec_replicas{%(prometheusOperatorSelector)s,controller="alertmanager"}, "service", "alertmanager-$1", "name", "(.*)") != 1
||| % $._config,
'for': '5m',
labels: {
severity: 'critical',
},
},
{
alert: 'AlertmanagerFailedReload',
annotations: {
message: "Reloading Alertmanager's configuration has failed for {{ $labels.namespace }}/{{ $labels.pod}}.",
},
expr: |||
alertmanager_config_last_reload_successful{%(alertmanagerSelector)s} == 0
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'AlertmanagerMembersInconsistent',
annotations: {
message: 'Alertmanager has not found all other members of the cluster.',
},
expr: |||
alertmanager_cluster_members{%(alertmanagerSelector)s}
!= on (service) GROUP_LEFT()
count by (service) (alertmanager_cluster_members{%(alertmanagerSelector)s})
||| % $._config,
'for': '5m',
labels: {
severity: 'critical',
},
},
],
},
],
},
}
@@ -1,5 +1,2 @@
(import 'alertmanager.libsonnet') +
(import 'general.libsonnet') +
(import 'node.libsonnet') +
(import 'prometheus.libsonnet') +
(import 'prometheus-operator.libsonnet')
(import 'node.libsonnet')
@@ -7,9 +7,9 @@
{
alert: 'TargetDown',
annotations: {
message: '{{ $value }}% of the {{ $labels.job }} targets are down.',
message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.',
},
expr: '100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10',
expr: '100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10',
'for': '10m',
labels: {
severity: 'warning',
@@ -1,84 +1,9 @@
{
prometheusAlerts+:: {
groups+: [
{
name: 'kube-prometheus-node-alerting.rules',
rules: [
{
alert: 'NodeDiskRunningFull',
annotations: {
message: 'Device {{ $labels.device }} of node-exporter {{ $labels.namespace }}/{{ $labels.pod }} will be full within the next 24 hours.',
},
expr: |||
(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) < 0)
||| % $._config,
'for': '30m',
labels: {
severity: 'warning',
},
},
{
alert: 'NodeDiskRunningFull',
annotations: {
message: 'Device {{ $labels.device }} of node-exporter {{ $labels.namespace }}/{{ $labels.pod }} will be full within the next 2 hours.',
},
expr: |||
(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) < 0)
||| % $._config,
'for': '10m',
labels: {
severity: 'critical',
},
},
],
},
{
name: 'node-time',
rules: [
{
alert: 'ClockSkewDetected',
annotations: {
message: 'Clock skew detected on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}. Ensure NTP is configured correctly on this host.',
},
expr: |||
abs(node_timex_offset_seconds{%(nodeExporterSelector)s}) > 0.03
||| % $._config,
'for': '2m',
labels: {
severity: 'warning',
},
},
],
},
{
name: 'node-network',
rules: [
{
alert: 'NetworkReceiveErrors',
annotations: {
message: 'Network interface "{{ $labels.device }}" showing receive errors on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}"',
},
expr: |||
rate(node_network_receive_errs_total{%(nodeExporterSelector)s,%(hostNetworkInterfaceSelector)s}[2m]) > 0
||| % $._config,
'for': '2m',
labels: {
severity: 'warning',
},
},
{
alert: 'NetworkTransmitErrors',
annotations: {
message: 'Network interface "{{ $labels.device }}" showing transmit errors on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}"',
},
expr: |||
rate(node_network_transmit_errs_total{%(nodeExporterSelector)s,%(hostNetworkInterfaceSelector)s}[2m]) > 0
||| % $._config,
'for': '2m',
labels: {
severity: 'warning',
},
},
{
alert: 'NodeNetworkInterfaceFlapping',
annotations: {
@@ -1,37 +0,0 @@
{
prometheusAlerts+:: {
groups+: [
{
name: 'prometheus-operator',
rules: [
{
alert: 'PrometheusOperatorReconcileErrors',
expr: |||
rate(prometheus_operator_reconcile_errors_total{%(prometheusOperatorSelector)s}[5m]) > 0.1
||| % $._config,
labels: {
severity: 'warning',
},
annotations: {
message: 'Errors while reconciling {{ $labels.controller }} in {{ $labels.namespace }} Namespace.',
},
'for': '10m',
},
{
alert: 'PrometheusOperatorNodeLookupErrors',
expr: |||
rate(prometheus_operator_node_address_lookup_errors_total{%(prometheusOperatorSelector)s}[5m]) > 0.1
||| % $._config,
labels: {
severity: 'warning',
},
annotations: {
message: 'Errors while reconciling Prometheus in {{ $labels.namespace }} Namespace.',
},
'for': '10m',
},
],
},
],
},
}
@@ -1,151 +0,0 @@
{
prometheusAlerts+:: {
groups+: [
{
name: 'prometheus.rules',
rules: [
{
alert: 'PrometheusConfigReloadFailed',
annotations: {
description: "Reloading Prometheus' configuration has failed for {{$labels.namespace}}/{{$labels.pod}}",
summary: "Reloading Prometheus' configuration failed",
},
expr: |||
prometheus_config_last_reload_successful{%(prometheusSelector)s} == 0
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusNotificationQueueRunningFull',
annotations: {
description: "Prometheus' alert notification queue is running full for {{$labels.namespace}}/{{ $labels.pod}}",
summary: "Prometheus' alert notification queue is running full",
},
expr: |||
predict_linear(prometheus_notifications_queue_length{%(prometheusSelector)s}[5m], 60 * 30) > prometheus_notifications_queue_capacity{%(prometheusSelector)s}
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusErrorSendingAlerts',
annotations: {
description: 'Errors while sending alerts from Prometheus {{$labels.namespace}}/{{ $labels.pod}} to Alertmanager {{$labels.Alertmanager}}',
summary: 'Errors while sending alert from Prometheus',
},
expr: |||
rate(prometheus_notifications_errors_total{%(prometheusSelector)s}[5m]) / rate(prometheus_notifications_sent_total{%(prometheusSelector)s}[5m]) > 0.01
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusErrorSendingAlerts',
annotations: {
description: 'Errors while sending alerts from Prometheus {{$labels.namespace}}/{{ $labels.pod}} to Alertmanager {{$labels.Alertmanager}}',
summary: 'Errors while sending alerts from Prometheus',
},
expr: |||
rate(prometheus_notifications_errors_total{%(prometheusSelector)s}[5m]) / rate(prometheus_notifications_sent_total{%(prometheusSelector)s}[5m]) > 0.03
||| % $._config,
'for': '10m',
labels: {
severity: 'critical',
},
},
{
alert: 'PrometheusNotConnectedToAlertmanagers',
annotations: {
description: 'Prometheus {{ $labels.namespace }}/{{ $labels.pod}} is not connected to any Alertmanagers',
summary: 'Prometheus is not connected to any Alertmanagers',
},
expr: |||
prometheus_notifications_alertmanagers_discovered{%(prometheusSelector)s} < 1
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusTSDBReloadsFailing',
annotations: {
description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}} reload failures over the last four hours.',
summary: 'Prometheus has issues reloading data blocks from disk',
},
expr: |||
increase(prometheus_tsdb_reloads_failures_total{%(prometheusSelector)s}[2h]) > 0
||| % $._config,
'for': '12h',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusTSDBCompactionsFailing',
annotations: {
description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}} compaction failures over the last four hours.',
summary: 'Prometheus has issues compacting sample blocks',
},
expr: |||
increase(prometheus_tsdb_compactions_failed_total{%(prometheusSelector)s}[2h]) > 0
||| % $._config,
'for': '12h',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusTSDBWALCorruptions',
annotations: {
description: '{{$labels.job}} at {{$labels.instance}} has a corrupted write-ahead log (WAL).',
summary: 'Prometheus write-ahead log is corrupted',
},
expr: |||
prometheus_tsdb_wal_corruptions_total{%(prometheusSelector)s} > 0
||| % $._config,
'for': '4h',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusNotIngestingSamples',
annotations: {
description: "Prometheus {{ $labels.namespace }}/{{ $labels.pod}} isn't ingesting samples.",
summary: "Prometheus isn't ingesting samples",
},
expr: |||
rate(prometheus_tsdb_head_samples_appended_total{%(prometheusSelector)s}[5m]) <= 0
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
{
alert: 'PrometheusTargetScrapesDuplicate',
annotations: {
description: '{{$labels.namespace}}/{{$labels.pod}} has many samples rejected due to duplicate timestamps but different values',
summary: 'Prometheus has many samples rejected',
},
expr: |||
increase(prometheus_target_scrapes_sample_duplicate_timestamp_total{%(prometheusSelector)s}[5m]) > 0
||| % $._config,
'for': '10m',
labels: {
severity: 'warning',
},
},
],
},
],
},
}
@@ -0,0 +1,50 @@
[
// Drop all kubelet metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds)',
action: 'drop',
},
// Drop all scheduler metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds)',
action: 'drop',
},
// Drop all apiserver metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs)',
action: 'drop',
},
// Drop all docker metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout)',
action: 'drop',
},
// Drop all reflector metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total)',
action: 'drop',
},
// Drop all etcd metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary)',
action: 'drop',
},
// Drop all transformation metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: 'transformation_(transformation_latencies_microseconds|failures_total)',
action: 'drop',
},
// Drop all other metrics which are deprecated in kubernetes.
{
sourceLabels: ['__name__'],
regex: '(admission_quota_controller_adds|crd_autoregistration_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|AvailableConditionController_retries|crd_openapi_controller_unfinished_work_seconds|APIServiceRegistrationController_retries|admission_quota_controller_longest_running_processor_microseconds|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_unfinished_work_seconds|crd_openapi_controller_adds|crd_autoregistration_controller_retries|crd_finalizer_queue_latency|AvailableConditionController_work_duration|non_structural_schema_condition_controller_depth|crd_autoregistration_controller_unfinished_work_seconds|AvailableConditionController_adds|DiscoveryController_longest_running_processor_microseconds|autoregister_queue_latency|crd_autoregistration_controller_adds|non_structural_schema_condition_controller_work_duration|APIServiceRegistrationController_adds|crd_finalizer_work_duration|crd_naming_condition_controller_unfinished_work_seconds|crd_openapi_controller_longest_running_processor_microseconds|DiscoveryController_adds|crd_autoregistration_controller_longest_running_processor_microseconds|autoregister_unfinished_work_seconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|non_structural_schema_condition_controller_queue_latency|crd_naming_condition_controller_depth|AvailableConditionController_longest_running_processor_microseconds|crdEstablishing_depth|crd_finalizer_longest_running_processor_microseconds|crd_naming_condition_controller_adds|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_unfinished_work_seconds|crd_openapi_controller_depth|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|DiscoveryController_work_duration|autoregister_adds|crd_autoregistration_controller_queue_latency|crd_finalizer_retries|AvailableConditionController_unfinished_work_seconds|autoregister_longest_running_processor_microseconds|non_structural_schema_condition_controller_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_depth|AvailableConditionController_depth|DiscoveryController_retries|admission_quota_controller_depth|crdEstablishing_adds|APIServiceOpenAPIAggregationControllerQueue1_retries|crdEstablishing_queue_latency|non_structural_schema_condition_controller_longest_running_processor_microseconds|autoregister_work_duration|crd_openapi_controller_retries|APIServiceRegistrationController_work_duration|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_openapi_controller_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_queue_latency|crd_autoregistration_controller_depth|AvailableConditionController_queue_latency|admission_quota_controller_queue_latency|crd_naming_condition_controller_work_duration|crd_openapi_controller_work_duration|DiscoveryController_depth|crd_naming_condition_controller_longest_running_processor_microseconds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|crd_finalizer_unfinished_work_seconds|crdEstablishing_retries|admission_quota_controller_unfinished_work_seconds|non_structural_schema_condition_controller_adds|APIServiceRegistrationController_unfinished_work_seconds|admission_quota_controller_work_duration|autoregister_depth|autoregister_retries|kubeproxy_sync_proxy_rules_latency_microseconds|rest_client_request_latency_seconds|non_structural_schema_condition_controller_retries)',
action: 'drop',
},
]
3347  jsonnet/kube-prometheus/grafana-weave-net-cluster.json  Normal file (file diff suppressed because it is too large)
2605  jsonnet/kube-prometheus/grafana-weave-net.json  Normal file (file diff suppressed because it is too large)
@@ -1,54 +1,107 @@
{
"dependencies": [
{
"name": "ksonnet",
"source": {
"git": {
"remote": "https://github.com/ksonnet/ksonnet-lib",
"subdir": ""
}
},
"version": "master"
},
{
"name": "kubernetes-mixin",
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin",
"subdir": ""
}
},
"version": "release-0.1"
},
{
"name": "grafana",
"source": {
"git": {
"remote": "https://github.com/brancz/kubernetes-grafana",
"subdir": "grafana"
}
},
"version": "master"
},
{
"name": "prometheus-operator",
"source": {
"git": {
"remote": "https://github.com/coreos/prometheus-operator",
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "v0.30.0"
},
{
"name": "etcd-mixin",
"source": {
"git": {
"remote": "https://github.com/coreos/etcd",
"subdir": "Documentation/etcd-mixin"
}
},
"version": "master"
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/brancz/kubernetes-grafana",
"subdir": "grafana"
}
}
]
},
"version": "release-0.2"
},
{
"source": {
"git": {
"remote": "https://github.com/etcd-io/etcd",
"subdir": "Documentation/etcd-mixin"
}
},
"version": "master"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus-operator/prometheus-operator",
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "release-0.44"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus-operator/prometheus-operator",
"subdir": "jsonnet/mixin"
}
},
"version": "release-0.44"
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin",
"subdir": ""
}
},
"version": "release-0.6"
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes/kube-state-metrics",
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "release-1.9"
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes/kube-state-metrics",
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "release-1.9"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/node_exporter",
"subdir": "docs/node-mixin"
}
},
"version": "master"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/prometheus",
"subdir": "documentation/prometheus-mixin"
}
},
"version": "release-2.23",
"name": "prometheus"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/alertmanager",
"subdir": "doc/alertmanager-mixin"
}
},
"version": "master",
"name": "alertmanager"
},
{
"source": {
"git": {
"remote": "https://github.com/thanos-io/thanos",
"subdir": "mixin"
}
},
"version": "release-0.17"
}
],
"legacyImports": true
}
128  jsonnet/kube-prometheus/ksm-autoscaler/ksm-autoscaler.libsonnet  Normal file
@@ -0,0 +1,128 @@
{
_config+:: {
versions+:: { clusterVerticalAutoscaler: '0.8.1' },
imageRepos+:: { clusterVerticalAutoscaler: 'gcr.io/google_containers/cpvpa-amd64' },

kubeStateMetrics+:: {
stepCPU: '1m',
stepMemory: '2Mi',
},
},
ksmAutoscaler+:: {
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: { name: 'ksm-autoscaler' },
rules: [{
apiGroups: [''],
resources: ['nodes'],
verbs: ['list', 'watch'],
}],
},

clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: { name: 'ksm-autoscaler' },
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: 'ksm-autoscaler',
},
subjects: [{ kind: 'ServiceAccount', name: 'ksm-autoscaler', namespace: $._config.namespace }],
},

roleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
name: 'ksm-autoscaler',
namespace: $._config.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: 'ksm-autoscaler',
},
subjects: [{ kind: 'ServiceAccount', name: 'ksm-autoscaler' }],
},

role: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: {
name: 'ksm-autoscaler',
namespace: $._config.namespace,
},
rules: [
{
apiGroups: ['extensions'],
resources: ['deployments'],
verbs: ['patch'],
resourceNames: ['kube-state-metrics'],
},
{
apiGroups: ['apps'],
resources: ['deployments'],
verbs: ['patch'],
resourceNames: ['kube-state-metrics'],
},
],
},

serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: {
name: 'ksm-autoscaler',
namespace: $._config.namespace,
},
},

deployment:
local podLabels = { app: 'ksm-autoscaler' };
local c = {
name: 'ksm-autoscaler',
image: $._config.imageRepos.clusterVerticalAutoscaler + ':v' + $._config.versions.clusterVerticalAutoscaler,
args: [
'/cpvpa',
'--target=deployment/kube-state-metrics',
'--namespace=' + $._config.namespace,
'--logtostderr=true',
'--poll-period-seconds=10',
'--default-config={"kube-state-metrics":{"requests":{"cpu":{"base":"' + $._config.kubeStateMetrics.baseCPU + '","step":"' + $._config.kubeStateMetrics.stepCPU + '","nodesPerStep":1},"memory":{"base":"' + $._config.kubeStateMetrics.baseMemory + '","step":"' + $._config.kubeStateMetrics.stepMemory + '","nodesPerStep":1}},"limits":{"cpu":{"base":"' + $._config.kubeStateMetrics.baseCPU + '","step":"' + $._config.kubeStateMetrics.stepCPU + '","nodesPerStep":1},"memory":{"base":"' + $._config.kubeStateMetrics.baseMemory + '","step":"' + $._config.kubeStateMetrics.stepMemory + '","nodesPerStep":1}}}}',
],
resources: {
requests: { cpu: '20m', memory: '10Mi' },
},
};

{
apiVersion: 'apps/v1',
kind: 'Deployment',
metadata: {
name: 'ksm-autoscaler',
namespace: $._config.namespace,
labels: podLabels,
},
spec: {
replicas: 1,
selector: { matchLabels: podLabels },
template: {
metadata: {
labels: podLabels,
},
spec: {
containers: [c],
serviceAccount: 'ksm-autoscaler',
nodeSelector: { 'kubernetes.io/os': 'linux' },
securityContext: {
runAsNonRoot: true,
runAsUser: 65534,
},
},
},
},
},
}
@@ -0,0 +1,11 @@
{
prometheus+:: {
clusterRole+: {
rules+: [{
apiGroups: [''],
resources: ['services', 'endpoints', 'pods'],
verbs: ['get', 'list', 'watch'],
}],
},
},
}
@@ -1,23 +1,22 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local statefulSet = k.apps.v1beta2.statefulSet;
local affinity = statefulSet.mixin.spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecutionType;
local matchExpression = affinity.mixin.podAffinityTerm.labelSelector.matchExpressionsType;

{
local antiaffinity(key, values) = {
local antiaffinity(key, values, namespace) = {
affinity: {
podAntiAffinity: {
preferredDuringSchedulingIgnoredDuringExecution: [
affinity.new() +
affinity.withWeight(100) +
affinity.mixin.podAffinityTerm.withNamespaces($._config.namespace) +
affinity.mixin.podAffinityTerm.withTopologyKey('kubernetes.io/hostname') +
affinity.mixin.podAffinityTerm.labelSelector.withMatchExpressions([
matchExpression.new() +
matchExpression.withKey(key) +
matchExpression.withOperator('In') +
matchExpression.withValues(values),
]),
{
weight: 100,
podAffinityTerm: {
namespaces: [namespace],
topologyKey: 'kubernetes.io/hostname',
labelSelector: {
matchExpressions: [{
key: key,
operator: 'In',
values: values,
}],
},
},
},
],
},
},
@@ -26,14 +25,16 @@ local matchExpression = affinity.mixin.podAffinityTerm.labelSelector.matchExpres
alertmanager+:: {
alertmanager+: {
spec+:
antiaffinity('alertmanager', [$._config.alertmanager.name]),
antiaffinity('alertmanager', [$._config.alertmanager.name], $._config.namespace),
},
},

prometheus+: {
prometheus+:: {
local p = self,

prometheus+: {
spec+:
antiaffinity('prometheus', [$._config.prometheus.name]),
antiaffinity('prometheus', [$._config.prometheus.name], $._config.namespace),
},
},
}
@@ -1,23 +1,42 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local service(name, namespace, labels, selector, ports) = {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: name,
namespace: namespace,
labels: labels,
},
spec: {
ports+: ports,
selector: selector,
clusterIP: 'None',
},
};

{
prometheus+:: {
kubeControllerManagerPrometheusDiscoveryService:
service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
service.mixin.metadata.withNamespace('kube-system') +
service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
service.mixin.spec.withClusterIp('None'),
kubeSchedulerPrometheusDiscoveryService:
service.new('kube-scheduler-prometheus-discovery', { 'k8s-app': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
service.mixin.metadata.withNamespace('kube-system') +
service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
service.mixin.spec.withClusterIp('None'),
kubeDnsPrometheusDiscoveryService:
service.new('kube-dns-prometheus-discovery', { 'k8s-app': 'kube-dns' }, [servicePort.newNamed('http-metrics-skydns', 10055, 10055), servicePort.newNamed('http-metrics-dnsmasq', 10054, 10054)]) +
service.mixin.metadata.withNamespace('kube-system') +
service.mixin.metadata.withLabels({ 'k8s-app': 'kube-dns' }) +
service.mixin.spec.withClusterIp('None'),
kubeControllerManagerPrometheusDiscoveryService: service(
'kube-controller-manager-prometheus-discovery',
'kube-system',
{ 'k8s-app': 'kube-controller-manager' },
{ 'k8s-app': 'kube-controller-manager' },
[{ name: 'https-metrics', port: 10257, targetPort: 10257 }]
),

kubeSchedulerPrometheusDiscoveryService: service(
'kube-scheduler-prometheus-discovery',
'kube-system',
{ 'k8s-app': 'kube-scheduler' },
{ 'k8s-app': 'kube-scheduler' },
[{ name: 'https-metrics', port: 10259, targetPort: 10259 }]
),

kubeDnsPrometheusDiscoveryService: service(
'kube-dns-prometheus-discovery',
'kube-system',
{ 'k8s-app': 'kube-dns' },
{ 'k8s-app': 'kube-dns' },
[{ name: 'http-metrics-skydns', port: 10055, targetPort: 10055 }, { name: 'http-metrics-dnsmasq', port: 10054, targetPort: 10054 }]
),
},
}
@@ -9,9 +9,9 @@ local withImageRepository(repository) = {
    if repository == null then image else repository + '/' + l.imageName(image),
  _config+:: {
    imageRepos:: {
      [field]: substituteRepository(oldRepos[field], repository),
      [field]: substituteRepository(oldRepos[field], repository)
      for field in std.objectFields(oldRepos)
    }
  },
},
};
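A minimal usage sketch of this helper from a consumer file; the import path and the exported name follow the usual kube-prometheus mixin layout and are illustrative, as is the registry URL:

local mixins = import 'kube-prometheus/kube-prometheus-config-mixins.libsonnet';

// Point every imageRepos entry at an internal registry mirror,
// keeping the per-image name via l.imageName as defined above.
(import 'kube-prometheus/kube-prometheus.libsonnet') +
mixins.withImageRepository('internal-registry.example.com/org')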
165 jsonnet/kube-prometheus/kube-prometheus-custom-metrics.libsonnet Normal file
@@ -0,0 +1,165 @@
// Custom metrics API allows the HPA v2 to scale based on arbitrary metrics.
// For more details on usage visit https://github.com/DirectXMan12/k8s-prometheus-adapter#quick-links

{
  _config+:: {
    prometheusAdapter+:: {
      namespace: $._config.namespace,
      // Rules for custom-metrics
      config+:: {
        rules+: [
          {
            seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}',
            seriesFilters: [],
            resources: {
              overrides: {
                namespace: { resource: 'namespace' },
                pod: { resource: 'pod' },
              },
            },
            name: { matches: '^container_(.*)_seconds_total$', as: '' },
            metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)',
          },
          {
            seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}',
            seriesFilters: [
              { isNot: '^container_.*_seconds_total$' },
            ],
            resources: {
              overrides: {
                namespace: { resource: 'namespace' },
                pod: { resource: 'pod' },
              },
            },
            name: { matches: '^container_(.*)_total$', as: '' },
            metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[1m])) by (<<.GroupBy>>)',
          },
          {
            seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}',
            seriesFilters: [
              { isNot: '^container_.*_total$' },
            ],
            resources: {
              overrides: {
                namespace: { resource: 'namespace' },
                pod: { resource: 'pod' },
              },
            },
            name: { matches: '^container_(.*)$', as: '' },
            metricsQuery: 'sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>)',
          },
          {
            seriesQuery: '{namespace!="",__name__!~"^container_.*"}',
            seriesFilters: [
              { isNot: '.*_total$' },
            ],
            resources: { template: '<<.Resource>>' },
            name: { matches: '', as: '' },
            metricsQuery: 'sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)',
          },
          {
            seriesQuery: '{namespace!="",__name__!~"^container_.*"}',
            seriesFilters: [
              { isNot: '.*_seconds_total' },
            ],
            resources: { template: '<<.Resource>>' },
            name: { matches: '^(.*)_total$', as: '' },
            metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)',
          },
          {
            seriesQuery: '{namespace!="",__name__!~"^container_.*"}',
            seriesFilters: [],
            resources: { template: '<<.Resource>>' },
            name: { matches: '^(.*)_seconds_total$', as: '' },
            metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)',
          },
        ],
      },
    },
  },

  prometheusAdapter+:: {
    customMetricsApiService: {
      apiVersion: 'apiregistration.k8s.io/v1',
      kind: 'APIService',
      metadata: {
        name: 'v1beta1.custom.metrics.k8s.io',
      },
      spec: {
        service: {
          name: $.prometheusAdapter.service.metadata.name,
          namespace: $._config.prometheusAdapter.namespace,
        },
        group: 'custom.metrics.k8s.io',
        version: 'v1beta1',
        insecureSkipTLSVerify: true,
        groupPriorityMinimum: 100,
        versionPriority: 100,
      },
    },
    customMetricsApiServiceV1Beta2: {
      apiVersion: 'apiregistration.k8s.io/v1',
      kind: 'APIService',
      metadata: {
        name: 'v1beta2.custom.metrics.k8s.io',
      },
      spec: {
        service: {
          name: $.prometheusAdapter.service.metadata.name,
          namespace: $._config.prometheusAdapter.namespace,
        },
        group: 'custom.metrics.k8s.io',
        version: 'v1beta2',
        insecureSkipTLSVerify: true,
        groupPriorityMinimum: 100,
        versionPriority: 200,
      },
    },
    customMetricsClusterRoleServerResources: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: 'custom-metrics-server-resources',
      },
      rules: [{
        apiGroups: ['custom.metrics.k8s.io'],
        resources: ['*'],
        verbs: ['*'],
      }],
    },
    customMetricsClusterRoleBindingServerResources: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'custom-metrics-server-resources',
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'custom-metrics-server-resources',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: $.prometheusAdapter.serviceAccount.metadata.name,
        namespace: $._config.prometheusAdapter.namespace,
      }],
    },
    customMetricsClusterRoleBindingHPA: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'hpa-controller-custom-metrics',
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'custom-metrics-server-resources',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: 'horizontal-pod-autoscaler',
        namespace: 'kube-system',
      }],
    },
  },
}
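Once the adapter registers custom.metrics.k8s.io, an HPA v2 object can target any series the rules above expose. A minimal illustrative sketch, expressed as a Jsonnet object; the deployment name 'myapp' and the metric 'http_requests' are hypothetical, the latter being the kind of non-container series the fourth rule above would surface:

{
  apiVersion: 'autoscaling/v2beta1',
  kind: 'HorizontalPodAutoscaler',
  metadata: { name: 'myapp', namespace: 'default' },
  spec: {
    scaleTargetRef: { apiVersion: 'apps/v1', kind: 'Deployment', name: 'myapp' },
    minReplicas: 1,
    maxReplicas: 10,
    metrics: [{
      type: 'Pods',
      pods: {
        metricName: 'http_requests',   // hypothetical series exposed by the adapter
        targetAverageValue: '100',
      },
    }],
  },
}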
89 jsonnet/kube-prometheus/kube-prometheus-eks.libsonnet Normal file
@@ -0,0 +1,89 @@
{
  _config+:: {
    eks: {
      minimumAvailableIPs: 10,
      minimumAvailableIPsTime: '10m',
    },
  },
  prometheus+: {
    serviceMonitorCoreDNS+: {
      spec+: {
        endpoints: [
          {
            bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
            interval: '15s',
            targetPort: 9153,
          },
        ],
      },
    },
    AwsEksCniMetricService: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'aws-node',
        namespace: 'kube-system',
        labels: { 'k8s-app': 'aws-node' },
      },
      spec: {
        ports: [
          { name: 'cni-metrics-port', port: 61678, targetPort: 61678 },
        ],
        selector: { 'k8s-app': 'aws-node' },
        clusterIP: 'None',
      },
    },

    serviceMonitorAwsEksCNI: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'awsekscni',
        namespace: $._config.namespace,
        labels: {
          'k8s-app': 'eks-cni',
        },
      },
      spec: {
        jobLabel: 'k8s-app',
        selector: {
          matchLabels: {
            'k8s-app': 'aws-node',
          },
        },
        namespaceSelector: {
          matchNames: [
            'kube-system',
          ],
        },
        endpoints: [
          {
            port: 'cni-metrics-port',
            interval: '30s',
            path: '/metrics',
          },
        ],
      },
    },
  },
  prometheusRules+: {
    groups+: [
      {
        name: 'kube-prometheus-eks.rules',
        rules: [
          {
            expr: 'sum by(instance) (awscni_ip_max) - sum by(instance) (awscni_assigned_ip_addresses) < %s' % $._config.eks.minimumAvailableIPs,
            labels: {
              severity: 'critical',
            },
            annotations: {
              message: 'Instance {{ $labels.instance }} has fewer than %s IPs available.' % $._config.eks.minimumAvailableIPs,
            },
            'for': $._config.eks.minimumAvailableIPsTime,
            alert: 'EksAvailableIPs',
          },
        ],
      },
    ],
  },
}
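Consuming this addon follows the usual kube-prometheus pattern; a sketch, where the import paths match the file names shown in this diff and the override values are illustrative:

local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-eks.libsonnet') + {
    _config+:: {
      eks+: {
        minimumAvailableIPs: 15,        // alert while more headroom remains
        minimumAvailableIPsTime: '5m',  // and after a shorter grace period
      },
    },
  };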
@@ -0,0 +1,95 @@
// External metrics API allows the HPA v2 to scale based on metrics coming from outside of the Kubernetes cluster
// For more details on usage visit https://github.com/DirectXMan12/k8s-prometheus-adapter#quick-links

{
  _config+:: {
    prometheusAdapter+:: {
      namespace: $._config.namespace,
      // Rules for external-metrics
      config+:: {
        externalRules+: [
          // {
          //   seriesQuery: '{__name__=~"^.*_queue$",namespace!=""}',
          //   seriesFilters: [],
          //   resources: {
          //     overrides: {
          //       namespace: { resource: 'namespace' }
          //     },
          //   },
          //   name: { matches: '^.*_queue$', as: '$0' },
          //   metricsQuery: 'max(<<.Series>>{<<.LabelMatchers>>})',
          // },
        ],
      },
    },
  },

  prometheusAdapter+:: {
    externalMetricsApiService: {
      apiVersion: 'apiregistration.k8s.io/v1',
      kind: 'APIService',
      metadata: {
        name: 'v1beta1.external.metrics.k8s.io',
      },
      spec: {
        service: {
          name: $.prometheusAdapter.service.metadata.name,
          namespace: $._config.prometheusAdapter.namespace,
        },
        group: 'external.metrics.k8s.io',
        version: 'v1beta1',
        insecureSkipTLSVerify: true,
        groupPriorityMinimum: 100,
        versionPriority: 100,
      },
    },
    externalMetricsClusterRoleServerResources: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: 'external-metrics-server-resources',
      },
      rules: [{
        apiGroups: ['external.metrics.k8s.io'],
        resources: ['*'],
        verbs: ['*'],
      }],
    },
    externalMetricsClusterRoleBindingServerResources: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'external-metrics-server-resources',
      },

      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'external-metrics-server-resources',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: $.prometheusAdapter.serviceAccount.metadata.name,
        namespace: $._config.prometheusAdapter.namespace,
      }],
    },
    externalMetricsClusterRoleBindingHPA: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'hpa-controller-external-metrics',
      },

      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'external-metrics-server-resources',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: 'horizontal-pod-autoscaler',
        namespace: 'kube-system',
      }],
    },
  },
}
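To actually serve an external metric, an entry like the commented-out template above has to be supplied under externalRules. A sketch that simply uncomments it as a standalone mixin; the queue-depth series it matches is hypothetical:

{
  _config+:: {
    prometheusAdapter+:: {
      config+:: {
        externalRules+: [{
          seriesQuery: '{__name__=~"^.*_queue$",namespace!=""}',
          seriesFilters: [],
          resources: { overrides: { namespace: { resource: 'namespace' } } },
          name: { matches: '^.*_queue$', as: '$0' },
          metricsQuery: 'max(<<.Series>>{<<.LabelMatchers>>})',
        }],
      },
    },
  },
}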
13 jsonnet/kube-prometheus/kube-prometheus-gke.libsonnet Normal file
@@ -0,0 +1,13 @@
(import './kube-prometheus-managed-cluster.libsonnet') + {
  _config+:: {
    prometheusAdapter+:: {
      config+: {
        resourceRules:: null,
      },
    },
  },

  prometheusAdapter+:: {
    apiService:: null,
  },
}
@@ -9,6 +9,9 @@
        scheme: 'http',
        interval: '30s',
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        relabelings: [
          { sourceLabels: ['__metrics_path__'], targetLabel: 'metrics_path' },
        ],
      },
      {
        port: 'http-metrics',
@@ -17,6 +20,18 @@
        interval: '30s',
        honorLabels: true,
        bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
        relabelings: [
          { sourceLabels: ['__metrics_path__'], targetLabel: 'metrics_path' },
        ],
        metricRelabelings: [
          // Drop a bunch of metrics which are disabled but still sent, see
          // https://github.com/google/cadvisor/issues/1925.
          {
            sourceLabels: ['__name__'],
            regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)',
            action: 'drop',
          },
        ],
      },
    ],
  },
@@ -1,13 +1,20 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

{
  prometheus+:: {
    kubeDnsPrometheusDiscoveryService:
      service.new('kube-dns-prometheus-discovery', { 'k8s-app': 'kube-dns' }, [servicePort.newNamed('metrics', 9153, 9153)]) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-dns' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeDnsPrometheusDiscoveryService: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'kube-dns-prometheus-discovery',
        namespace: 'kube-system',
        labels: { 'k8s-app': 'kube-dns' },
      },
      spec: {
        ports: [
          { name: 'metrics', port: 9153, targetPort: 9153 },
        ],
        selector: { 'k8s-app': 'kube-dns' },
        clusterIP: 'None',
      },
    },
  },
}
@@ -1,23 +1,40 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local service(name, namespace, labels, selector, ports) = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: name,
    namespace: namespace,
    labels: labels,
  },
  spec: {
    ports+: ports,
    selector: selector,
    clusterIP: 'None',
  },
};

{
  prometheus+:: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { 'k8s-app': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeDnsPrometheusDiscoveryService:
      service.new('kube-dns-prometheus-discovery', { 'k8s-app': 'kube-dns' }, [servicePort.newNamed('metrics', 10055, 10055), servicePort.newNamed('http-metrics-dnsmasq', 10054, 10054)]) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-dns' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeControllerManagerPrometheusDiscoveryService: service(
      'kube-controller-manager-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-controller-manager' },
      { 'k8s-app': 'kube-controller-manager' },
      [{ name: 'https-metrics', port: 10257, targetPort: 10257 }]
    ),
    kubeSchedulerPrometheusDiscoveryService: service(
      'kube-scheduler-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-scheduler' },
      { 'k8s-app': 'kube-scheduler' },
      [{ name: 'https-metrics', port: 10259, targetPort: 10259 }]
    ),
    kubeDnsPrometheusDiscoveryService: service(
      'kube-dns-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-dns' },
      { 'k8s-app': 'kube-dns' },
      [{ name: 'metrics', port: 10055, targetPort: 10055 }, { name: 'http-metrics-dnsmasq', port: 10054, targetPort: 10054 }]
    ),
  },
}
@@ -1,4 +1,4 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet');
local kp = (import './kube-prometheus/kube-prometheus.libsonnet');

{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
@@ -1,18 +1,33 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local service(name, namespace, labels, selector, ports) = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: name,
    namespace: namespace,
    labels: labels,
  },
  spec: {
    ports+: ports,
    selector: selector,
    clusterIP: 'None',
  },
};

{
  prometheus+: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { 'k8s-app': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeControllerManagerPrometheusDiscoveryService: service(
      'kube-controller-manager-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-controller-manager' },
      { 'k8s-app': 'kube-controller-manager' },
      [{ name: 'https-metrics', port: 10257, targetPort: 10257 }],
    ),
    kubeSchedulerPrometheusDiscoveryService: service(
      'kube-scheduler-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-scheduler' },
      { 'k8s-app': 'kube-scheduler' },
      [{ name: 'https-metrics', port: 10259, targetPort: 10259 }],
    ),
  },
}
@@ -1,18 +1,33 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local service(name, namespace, labels, selector, ports) = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: name,
    namespace: namespace,
    labels: labels,
  },
  spec: {
    ports+: ports,
    selector: selector,
    clusterIP: 'None',
  },
};

{
  prometheus+: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { component: 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { component: 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeControllerManagerPrometheusDiscoveryService: service(
      'kube-controller-manager-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-controller-manager' },
      { component: 'kube-controller-manager' },
      [{ name: 'https-metrics', port: 10257, targetPort: 10257 }]
    ),
    kubeSchedulerPrometheusDiscoveryService: service(
      'kube-scheduler-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-scheduler' },
      { component: 'kube-scheduler' },
      [{ name: 'https-metrics', port: 10259, targetPort: 10259 }],
    ),
  },
}
@@ -1,36 +1,36 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local service(name, namespace, labels, selector, ports) = {
  apiVersion: 'v1',
  kind: 'Service',
  metadata: {
    name: name,
    namespace: namespace,
    labels: labels,
  },
  spec: {
    ports+: ports,
    selector: selector,
    clusterIP: 'None',
  },
};

{

  _config+:: {
    jobs+: {
      CoreDNS: 'job="coredns"',
    },
  },

  prometheus+: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'component': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { 'component': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeControllerManagerPrometheusDiscoveryService: service(
      'kube-controller-manager-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-controller-manager' },
      { 'k8s-app': 'kube-controller-manager' },
      [{ name: 'https-metrics', port: 10257, targetPort: 10257 }]
    ),

    serviceMonitorCoreDNS+: {
      spec+: {
        selector: {
          matchLabels: {
            'k8s-app': 'coredns',
          },
        },
      },
    },
    kubeSchedulerPrometheusDiscoveryService: service(
      'kube-scheduler-prometheus-discovery',
      'kube-system',
      { 'k8s-app': 'kube-scheduler' },
      { 'k8s-app': 'kube-scheduler' },
      [{ name: 'https-metrics', port: 10259, targetPort: 10259 }],
    ),

    serviceMonitorKubeScheduler+: {
      spec+: {
@@ -1,6 +1,5 @@
// On managed Kubernetes clusters some of the control plane components are not exposed to customers.
// Disable scrape jobs and service monitors for these components by overwriting 'kube-prometheus.libsonnet' defaults
// Note this doesn't disable generation of the associated alerting rules, but the rules don't trigger
// Disable scrape jobs, service monitors, and alert groups for these components by overwriting 'kube-prometheus.libsonnet' defaults

{
  _config+:: {
@@ -12,6 +11,18 @@
      for k in std.objectFields(j)
      if !std.setMember(k, ['KubeControllerManager', 'KubeScheduler'])
    },

  // Skip alerting rules too
  prometheus+:: {
    rules+:: {
      local g = super.groups,
      groups: [
        h
        for h in g
        if !std.setMember(h.name, ['kubernetes-system-controller-manager', 'kubernetes-system-scheduler'])
      ],
    },
  },
},

// Same as above but for ServiceMonitors
@@ -21,8 +32,4 @@
    for q in std.objectFields(p)
    if !std.setMember(q, ['serviceMonitorKubeControllerManager', 'serviceMonitorKubeScheduler'])
  },

  // TODO: disable generation of alerting rules
  // manifests/prometheus-rules.yaml:52: - name: kube-scheduler.rules

}
@@ -1,21 +1,18 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local patch(ports) = {
  spec+: {
    ports: ports,
    type: 'NodePort',
  },
};

{
  prometheus+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('web', 9090, 'web') + servicePort.withNodePort(30900)) +
      service.mixin.spec.withType('NodePort'),
    service+: patch([{ name: 'web', port: 9090, targetPort: 'web', nodePort: 30900 }]),
  },
  alertmanager+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('web', 9093, 'web') + servicePort.withNodePort(30903)) +
      service.mixin.spec.withType('NodePort'),
    service+: patch([{ name: 'web', port: 9093, targetPort: 'web', nodePort: 30903 }]),
  },
  grafana+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('http', 3000, 'http') + servicePort.withNodePort(30902)) +
      service.mixin.spec.withType('NodePort'),
    service+: patch([{ name: 'http', port: 3000, targetPort: 'http', nodePort: 30902 }]),
  },
}
@@ -1,6 +1,4 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

(import 'etcd-mixin/mixin.libsonnet') + {
(import 'github.com/etcd-io/etcd/Documentation/etcd-mixin/mixin.libsonnet') + {
  _config+:: {
    etcd: {
      ips: [],
@@ -12,88 +10,93 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
    },
  },
  prometheus+:: {
    serviceEtcd:
      local service = k.core.v1.service;
      local servicePort = k.core.v1.service.mixin.spec.portsType;

      local etcdServicePort = servicePort.newNamed('metrics', 2379, 2379);

      service.new('etcd', null, etcdServicePort) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'etcd' }) +
      service.mixin.spec.withClusterIp('None'),
    endpointsEtcd:
      local endpoints = k.core.v1.endpoints;
      local endpointSubset = endpoints.subsetsType;
      local endpointPort = endpointSubset.portsType;

      local etcdPort = endpointPort.new() +
                       endpointPort.withName('metrics') +
                       endpointPort.withPort(2379) +
                       endpointPort.withProtocol('TCP');

      local subset = endpointSubset.new() +
                     endpointSubset.withAddresses([
                       { ip: etcdIP }
                       for etcdIP in $._config.etcd.ips
                     ]) +
                     endpointSubset.withPorts(etcdPort);

      endpoints.new() +
      endpoints.mixin.metadata.withName('etcd') +
      endpoints.mixin.metadata.withNamespace('kube-system') +
      endpoints.mixin.metadata.withLabels({ 'k8s-app': 'etcd' }) +
      endpoints.withSubsets(subset),
    serviceMonitorEtcd:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
        metadata: {
          name: 'etcd',
          namespace: 'kube-system',
          labels: {
    serviceEtcd: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'etcd',
        namespace: 'kube-system',
        labels: { 'k8s-app': 'etcd' },
      },
      spec: {
        ports: [
          { name: 'metrics', targetPort: 2379, port: 2379 },
        ],
        clusterIP: 'None',
      },
    },
    endpointsEtcd: {
      apiVersion: 'v1',
      kind: 'Endpoints',
      metadata: {
        name: 'etcd',
        namespace: 'kube-system',
        labels: { 'k8s-app': 'etcd' },
      },
      subsets: [{
        addresses: [
          { ip: etcdIP }
          for etcdIP in $._config.etcd.ips
        ],
        ports: [
          { name: 'metrics', port: 2379, protocol: 'TCP' },
        ],
      }],
    },
    serviceMonitorEtcd: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'etcd',
        namespace: 'kube-system',
        labels: {
          'k8s-app': 'etcd',
        },
      },
      spec: {
        jobLabel: 'k8s-app',
        endpoints: [
          {
            port: 'metrics',
            interval: '30s',
            scheme: 'https',
            // Prometheus Operator (and Prometheus) allow us to specify a tlsConfig. This is required, as your etcd metrics endpoints are most likely secured.
            tlsConfig: {
              caFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client-ca.crt',
              keyFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.key',
              certFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.crt',
              [if $._config.etcd.serverName != null then 'serverName']: $._config.etcd.serverName,
              [if $._config.etcd.insecureSkipVerify != null then 'insecureSkipVerify']: $._config.etcd.insecureSkipVerify,
            },
          },
        ],
        selector: {
          matchLabels: {
            'k8s-app': 'etcd',
          },
        },
      spec: {
        jobLabel: 'k8s-app',
        endpoints: [
          {
            port: 'metrics',
            interval: '30s',
            scheme: 'https',
            // Prometheus Operator (and Prometheus) allow us to specify a tlsConfig. This is required, as your etcd metrics endpoints are most likely secured.
            tlsConfig: {
              caFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client-ca.crt',
              keyFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.key',
              certFile: '/etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.crt',
              [if $._config.etcd.serverName != null then 'serverName']: $._config.etcd.serverName,
              [if $._config.etcd.insecureSkipVerify != null then 'insecureSkipVerify']: $._config.etcd.insecureSkipVerify,
            },
          },
        ],
        selector: {
          matchLabels: {
            'k8s-app': 'etcd',
          },
        },
      },
    },
    secretEtcdCerts:
    },
    secretEtcdCerts: {
      // Prometheus Operator allows us to mount secrets in the pod. By loading the secrets as files, they can be made available inside the Prometheus pod.
      local secret = k.core.v1.secret;
      secret.new('kube-etcd-client-certs', {
      apiVersion: 'v1',
      kind: 'Secret',
      type: 'Opaque',
      metadata: {
        name: 'kube-etcd-client-certs',
        namespace: $._config.namespace,
      },
      data: {
        'etcd-client-ca.crt': std.base64($._config.etcd.clientCA),
        'etcd-client.key': std.base64($._config.etcd.clientKey),
        'etcd-client.crt': std.base64($._config.etcd.clientCert),
      }) +
      secret.mixin.metadata.withNamespace($._config.namespace),
    prometheus+:
      {
        // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
        spec+: {
          secrets+: [$.prometheus.secretEtcdCerts.metadata.name],
        },
      },
    },
    prometheus+: {
      // Reference info: https://coreos.com/operators/prometheus/docs/latest/api.html#prometheusspec
      spec+: {
        secrets+: [$.prometheus.secretEtcdCerts.metadata.name],
      },
    },
  },
}
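Wiring this addon up requires supplying the etcd IPs and client certificate material that the config above consumes; a sketch, where the file names and addresses are illustrative:

local kp =
  (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-static-etcd.libsonnet') + {
    _config+:: {
      etcd+:: {
        ips: ['10.0.0.10', '10.0.0.11', '10.0.0.12'],
        // importstr reads the PEM files verbatim; the secret above base64-encodes them.
        clientCA: importstr 'etcd-client-ca.crt',
        clientKey: importstr 'etcd-client.key',
        clientCert: importstr 'etcd-client.crt',
        serverName: 'etcd.kube-system.svc.cluster.local',
      },
    },
  };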
@@ -0,0 +1,35 @@
// Strips spec.containers[].limits for certain containers
// https://github.com/prometheus-operator/kube-prometheus/issues/72
{
  _config+:: {
    resources+:: {
      'addon-resizer'+: {
        limits: {},
      },
      'kube-rbac-proxy'+: {
        limits: {},
      },
      'kube-state-metrics'+: {
        limits: {},
      },
      'node-exporter'+: {
        limits: {},
      },
    },
  },
  prometheusOperator+: {
    deployment+: {
      spec+: {
        template+: {
          spec+: {
            local addArgs(c) =
              if c.name == 'prometheus-operator'
              then c { args+: ['--config-reloader-cpu=0'] }
              else c,
            containers: std.map(addArgs, super.containers),
          },
        },
      },
    },
  },
}
@@ -0,0 +1,80 @@
(import 'github.com/thanos-io/thanos/mixin/alerts/sidecar.libsonnet') +
{
  _config+:: {
    versions+:: { thanos: 'v0.14.0' },
    imageRepos+:: { thanos: 'quay.io/thanos/thanos' },
    thanos+:: {
      objectStorageConfig: {
        key: 'thanos.yaml',  // The key inside the secret that holds the config file
        name: 'thanos-objectstorage',  // This is the name of your Kubernetes secret with the config
      },
    },
  },
  prometheus+:: {
    local p = self,

    // Add the grpc port to the Prometheus service to be able to query it with the Thanos Querier
    service+: {
      spec+: {
        ports+: [
          { name: 'grpc', port: 10901, targetPort: 10901 },
        ],
      },
    },
    // Create a new service that exposes both the sidecar's HTTP metrics port and its gRPC StoreAPI
    serviceThanosSidecar: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'prometheus-' + p.name + '-thanos-sidecar',
        namespace: p.namespace,
        labels: { prometheus: p.name, app: 'thanos-sidecar' },
      },
      spec: {
        ports: [
          { name: 'grpc', port: 10901, targetPort: 10901 },
          { name: 'http', port: 10902, targetPort: 10902 },
        ],
        selector: { app: 'prometheus', prometheus: p.name },
        clusterIP: 'None',
      },
    },
    prometheus+: {
      spec+: {
        thanos+: {
          version: $._config.versions.thanos,
          image: $._config.imageRepos.thanos + ':' + $._config.versions.thanos,
          objectStorageConfig: $._config.thanos.objectStorageConfig,
        },
      },
    },
    serviceMonitorThanosSidecar:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
        metadata: {
          name: 'thanos-sidecar',
          namespace: p.namespace,
          labels: {
            'k8s-app': 'prometheus',
          },
        },
        spec: {
          // Use the service's app label (thanos-sidecar) as the value for the job label.
          jobLabel: 'app',
          selector: {
            matchLabels: {
              prometheus: p.name,
              app: 'thanos-sidecar',
            },
          },
          endpoints: [
            {
              port: 'http',
              interval: '30s',
            },
          ],
        },
      },
  },
}
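The sidecar expects the referenced secret to already exist in the Prometheus namespace. A sketch of a matching Secret; the name and key mirror the objectStorageConfig above, while the namespace and S3 bucket settings are illustrative:

{
  apiVersion: 'v1',
  kind: 'Secret',
  metadata: {
    name: 'thanos-objectstorage',  // must match objectStorageConfig.name above
    namespace: 'monitoring',       // assumption: the Prometheus namespace
  },
  stringData: {
    // the key must match objectStorageConfig.key above
    'thanos.yaml': |||
      type: S3
      config:
        bucket: example-bucket
        endpoint: s3.example.com
    |||,
  },
}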
@@ -1,219 +0,0 @@
|
||||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
versions+:: {
|
||||
thanos: 'v0.3.2',
|
||||
},
|
||||
imageRepos+:: {
|
||||
thanos: 'improbable/thanos',
|
||||
},
|
||||
thanos+:: {
|
||||
objectStorageConfig: {
|
||||
key: 'thanos.yaml', // How the file inside the secret is called
|
||||
name: 'thanos-objstore-config', // This is the name of your Kubernetes secret with the config
|
||||
},
|
||||
},
|
||||
},
|
||||
prometheus+:: {
|
||||
prometheus+: {
|
||||
spec+: {
|
||||
podMetadata+: {
|
||||
labels+: { 'thanos-peers': 'true' },
|
||||
},
|
||||
thanos+: {
|
||||
peers: 'thanos-peers.' + $._config.namespace + '.svc:10900',
|
||||
version: $._config.versions.thanos,
|
||||
baseImage: $._config.imageRepos.thanos,
|
||||
objectStorageConfig: $._config.thanos.objectStorageConfig,
|
||||
},
|
||||
},
|
||||
},
|
||||
thanosPeerService:
|
||||
service.new('thanos-peers', { 'thanos-peers': 'true' }, [
|
||||
servicePort.newNamed('cluster', 10900, 'cluster'),
|
||||
servicePort.newNamed('http', 10902, 'http'),
|
||||
]) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ 'thanos-peers': 'true' }) +
|
||||
service.mixin.spec.withType('ClusterIP') +
|
||||
service.mixin.spec.withClusterIp('None'),
|
||||
|
||||
serviceMonitorThanosPeer:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'thanos-peers',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'thanos-peers',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http',
|
||||
interval: '30s',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'thanos-peers': 'true',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
thanosQueryDeployment:
|
||||
local deployment = k.apps.v1beta2.deployment;
|
||||
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
|
||||
local containerPort = container.portsType;
|
||||
|
||||
local thanosQueryContainer =
|
||||
container.new('thanos-query', $._config.imageRepos.thanos + ':' + $._config.versions.thanos) +
|
||||
container.withPorts([
|
||||
containerPort.newNamed('http', 10902),
|
||||
containerPort.newNamed('grpc', 10901),
|
||||
containerPort.newNamed('cluster', 10900),
|
||||
]) +
|
||||
container.withArgs([
|
||||
'query',
|
||||
'--log.level=debug',
|
||||
'--query.replica-label=prometheus_replica',
|
||||
'--query.auto-downsampling',
|
||||
'--cluster.peers=thanos-peers.' + $._config.namespace + '.svc:10900',
|
||||
]);
|
||||
local podLabels = { app: 'thanos-query', 'thanos-peers': 'true' };
|
||||
deployment.new('thanos-query', 1, thanosQueryContainer, podLabels) +
|
||||
deployment.mixin.metadata.withNamespace($._config.namespace) +
|
||||
deployment.mixin.metadata.withLabels(podLabels) +
|
||||
deployment.mixin.spec.selector.withMatchLabels(podLabels) +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName('prometheus-' + $._config.prometheus.name),
|
||||
thanosQueryService:
|
||||
local thanosQueryPort = servicePort.newNamed('http-query', 9090, 'http');
|
||||
service.new('thanos-query', { app: 'thanos-query' }, thanosQueryPort) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ app: 'thanos-query' }),
|
||||
|
||||
thanosStoreStatefulset:
|
||||
local statefulSet = k.apps.v1beta2.statefulSet;
|
||||
local volume = statefulSet.mixin.spec.template.spec.volumesType;
|
||||
local container = statefulSet.mixin.spec.template.spec.containersType;
|
||||
local containerEnv = container.envType;
|
||||
local containerVolumeMount = container.volumeMountsType;
|
||||
|
||||
local labels = { app: 'thanos', 'thanos-peers': 'true' };
|
||||
|
||||
local c =
|
||||
container.new('thanos-store', $._config.imageRepos.thanos + ':' + $._config.versions.thanos) +
|
||||
container.withArgs([
|
||||
'store',
|
||||
'--log.level=debug',
|
||||
'--data-dir=/var/thanos/store',
|
||||
'--cluster.peers=thanos-peers.' + $._config.namespace + '.svc:10900',
|
||||
'--objstore.config=$(OBJSTORE_CONFIG)',
|
||||
]) +
|
||||
container.withEnv([
|
||||
containerEnv.fromSecretRef(
|
||||
'OBJSTORE_CONFIG',
|
||||
$._config.thanos.objectStorageConfig.name,
|
||||
$._config.thanos.objectStorageConfig.key,
|
||||
),
|
||||
]) +
|
||||
container.withPorts([
|
||||
{ name: 'cluster', containerPort: 10900 },
|
||||
{ name: 'grpc', containerPort: 10901 },
|
||||
{ name: 'http', containerPort: 10902 },
|
||||
]) +
|
||||
container.withVolumeMounts([
|
||||
containerVolumeMount.new('data', '/var/thanos/store', false),
|
||||
]);
|
||||
|
||||
statefulSet.new('thanos-store', 1, c, [], labels) +
|
||||
statefulSet.mixin.metadata.withNamespace($._config.namespace) +
|
||||
statefulSet.mixin.spec.selector.withMatchLabels(labels) +
|
||||
statefulSet.mixin.spec.withServiceName('thanos-store') +
|
||||
statefulSet.mixin.spec.template.spec.withVolumes([
|
||||
volume.fromEmptyDir('data'),
|
||||
]),
|
||||
|
||||
serviceMonitorThanosCompactor:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'thanos-compactor',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'thanos-compactor',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http',
|
||||
interval: '30s',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: 'thanos-compactor',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
thanosCompactorService:
|
||||
service.new(
|
||||
'thanos-compactor',
|
||||
{ app: 'thanos-compactor' },
|
||||
servicePort.newNamed('http', 9090, 'http'),
|
||||
) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ app: 'thanos-compactor' }),
|
||||
|
||||
thanosCompactorStatefulset:
|
||||
local statefulSet = k.apps.v1beta2.statefulSet;
|
||||
local volume = statefulSet.mixin.spec.template.spec.volumesType;
|
||||
local container = statefulSet.mixin.spec.template.spec.containersType;
|
||||
local containerEnv = container.envType;
|
||||
local containerVolumeMount = container.volumeMountsType;
|
||||
|
||||
local labels = { app: 'thanos-compactor' };
|
||||
|
||||
local c =
|
||||
container.new('thanos-compactor', $._config.imageRepos.thanos + ':' + $._config.versions.thanos) +
|
||||
container.withArgs([
|
||||
'compact',
|
||||
'--log.level=debug',
|
||||
'--data-dir=/var/thanos/store',
|
||||
'--objstore.config=$(OBJSTORE_CONFIG)',
|
||||
'--wait',
|
||||
]) +
|
||||
container.withEnv([
|
||||
containerEnv.fromSecretRef(
|
||||
'OBJSTORE_CONFIG',
|
||||
$._config.thanos.objectStorageConfig.name,
|
||||
$._config.thanos.objectStorageConfig.key,
|
||||
),
|
||||
]) +
|
||||
container.withPorts([
|
||||
{ name: 'http', containerPort: 10902 },
|
||||
]) +
|
||||
container.withVolumeMounts([
|
||||
containerVolumeMount.new('data', '/var/thanos/store', false),
|
||||
]);
|
||||
|
||||
statefulSet.new('thanos-compactor', 1, c, [], labels) +
|
||||
statefulSet.mixin.metadata.withNamespace($._config.namespace) +
|
||||
statefulSet.mixin.spec.selector.withMatchLabels(labels) +
|
||||
statefulSet.mixin.spec.withServiceName('thanos-compactor') +
|
||||
statefulSet.mixin.spec.template.spec.withVolumes([
|
||||
volume.fromEmptyDir('data'),
|
||||
]),
|
||||
},
|
||||
}
|
||||
196 jsonnet/kube-prometheus/kube-prometheus-weave-net.libsonnet Normal file
@@ -0,0 +1,196 @@
{
  prometheus+: {
    serviceWeaveNet: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'weave-net',
        namespace: 'kube-system',
        labels: { 'k8s-app': 'weave-net' },
      },
      spec: {
        ports: [
          { name: 'weave-net-metrics', targetPort: 6782, port: 6782 },
        ],
        selector: { name: 'weave-net' },
        clusterIP: 'None',
      },
    },
    serviceMonitorWeaveNet: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'weave-net',
        labels: {
          'k8s-app': 'weave-net',
        },
        namespace: 'monitoring',
      },
      spec: {
        jobLabel: 'k8s-app',
        endpoints: [
          {
            port: 'weave-net-metrics',
            path: '/metrics',
            interval: '15s',
          },
        ],
        namespaceSelector: {
          matchNames: [
            'kube-system',
          ],
        },
        selector: {
          matchLabels: {
            'k8s-app': 'weave-net',
          },
        },
      },
    },
  },
  prometheusRules+: {
    groups+: [
      {
        name: 'weave-net',
        rules: [
          {
            alert: 'WeaveNetIPAMSplitBrain',
            expr: 'max(weave_ipam_unreachable_percentage) - min(weave_ipam_unreachable_percentage) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Percentage of all IP addresses owned by unreachable peers is not the same on every node.',
              description: 'actionable: Weave Net network has a split brain problem. Please find the problem and fix it.',
            },
          },
          {
            alert: 'WeaveNetIPAMUnreachable',
            expr: 'weave_ipam_unreachable_percentage > 25',
            'for': '10m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Percentage of all IP addresses owned by unreachable peers is above threshold.',
              description: 'actionable: Please find the problem and fix it.',
            },
          },
          {
            alert: 'WeaveNetIPAMPendingAllocates',
            expr: 'sum(weave_ipam_pending_allocates) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Number of pending allocates is above the threshold.',
              description: 'actionable: Please find the problem and fix it.',
            },
          },
          {
            alert: 'WeaveNetIPAMPendingClaims',
            expr: 'sum(weave_ipam_pending_claims) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Number of pending claims is above the threshold.',
              description: 'actionable: Please find the problem and fix it.',
            },
          },
          {
            alert: 'WeaveNetFastDPFlowsLow',
            expr: 'sum(weave_flows) < 15000',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'Number of FastDP flows is below the threshold.',
              description: 'actionable: Please find the reason for FastDP flows to go below the threshold and fix it.',
            },
          },
          {
            alert: 'WeaveNetFastDPFlowsOff',
            expr: 'sum(weave_flows == bool 0) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'FastDP flow count is zero.',
              description: 'actionable: Please find the reason for FastDP flows to be off and fix it.',
            },
          },
          {
            alert: 'WeaveNetHighConnectionTerminationRate',
            expr: 'rate(weave_connection_terminations_total[5m]) > 0.1',
            'for': '5m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'A lot of connections are getting terminated.',
              description: 'actionable: Please find the reason for the high connection termination rate and fix it.',
            },
          },
          {
            alert: 'WeaveNetConnectionsConnecting',
            expr: 'sum(weave_connections{state="connecting"}) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'A lot of connections are in connecting state.',
              description: 'actionable: Please find the reason for this and fix it.',
            },
          },
          {
            alert: 'WeaveNetConnectionsRetrying',
            expr: 'sum(weave_connections{state="retrying"}) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'A lot of connections are in retrying state.',
              description: 'actionable: Please find the reason for this and fix it.',
            },
          },
          {
            alert: 'WeaveNetConnectionsPending',
            expr: 'sum(weave_connections{state="pending"}) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'A lot of connections are in pending state.',
              description: 'actionable: Please find the reason for this and fix it.',
            },
          },
          {
            alert: 'WeaveNetConnectionsFailed',
            expr: 'sum(weave_connections{state="failed"}) > 0',
            'for': '3m',
            labels: {
              severity: 'critical',
            },
            annotations: {
              summary: 'A lot of connections are in failed state.',
              description: 'actionable: Please find the reason and fix it.',
            },
          },
        ],
      },
    ],
  },
  grafanaDashboards+:: {
    'weave-net.json': (import './grafana-weave-net.json'),
    'weave-net-cluster.json': (import './grafana-weave-net-cluster.json'),
  },
}
@@ -1,21 +1,96 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local configMapList = k.core.v1.configMapList;
local kubeRbacProxyContainer = import './kube-rbac-proxy/container.libsonnet';

(import 'grafana/grafana.libsonnet') +
(import 'kube-state-metrics/kube-state-metrics.libsonnet') +
(import 'node-exporter/node-exporter.libsonnet') +
(import 'alertmanager/alertmanager.libsonnet') +
(import 'prometheus-operator/prometheus-operator.libsonnet') +
(import 'prometheus/prometheus.libsonnet') +
(import 'prometheus-adapter/prometheus-adapter.libsonnet') +
(import 'kubernetes-mixin/mixin.libsonnet') +
(import 'alerts/alerts.libsonnet') +
(import 'rules/rules.libsonnet') + {
(import 'github.com/brancz/kubernetes-grafana/grafana/grafana.libsonnet') +
(import './kube-state-metrics/kube-state-metrics.libsonnet') +
(import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics-mixin/mixin.libsonnet') +
(import './node-exporter/node-exporter.libsonnet') +
(import 'github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet') +
(import './alertmanager/alertmanager.libsonnet') +
(import 'github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet') +
(import 'github.com/prometheus-operator/prometheus-operator/jsonnet/prometheus-operator/prometheus-operator.libsonnet') +
(import 'github.com/prometheus-operator/prometheus-operator/jsonnet/mixin/mixin.libsonnet') +
(import './prometheus/prometheus.libsonnet') +
(import './prometheus-adapter/prometheus-adapter.libsonnet') +
(import 'github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet') +
(import 'github.com/prometheus/prometheus/documentation/prometheus-mixin/mixin.libsonnet') +
(import './alerts/alerts.libsonnet') +
(import './rules/rules.libsonnet') +
{
  kubePrometheus+:: {
    namespace: k.core.v1.namespace.new($._config.namespace),
    namespace: {
      apiVersion: 'v1',
      kind: 'Namespace',
      metadata: {
        name: $._config.namespace,
      },
    },
  },
  prometheusOperator+::
    {
      service+: {
        spec+: {
          ports: [
            {
              name: 'https',
              port: 8443,
              targetPort: 'https',
            },
          ],
        },
      },
      serviceMonitor+: {
        spec+: {
          endpoints: [
            {
              port: 'https',
              scheme: 'https',
              honorLabels: true,
              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
              tlsConfig: {
                insecureSkipVerify: true,
              },
            },
          ],
        },
      },
      clusterRole+: {
        rules+: [
          {
            apiGroups: ['authentication.k8s.io'],
            resources: ['tokenreviews'],
            verbs: ['create'],
          },
          {
            apiGroups: ['authorization.k8s.io'],
            resources: ['subjectaccessreviews'],
            verbs: ['create'],
          },
        ],
      },
    } +
    (kubeRbacProxyContainer {
      config+:: {
        kubeRbacProxy: {
          local cfg = self,
          image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
          name: 'kube-rbac-proxy',
          securePortName: 'https',
          securePort: 8443,
          secureListenAddress: ':%d' % self.securePort,
          upstream: 'http://127.0.0.1:8080/',
          tlsCipherSuites: $._config.tlsCipherSuites,
        },
      },
    }).deploymentMixin,


  grafana+:: {
    dashboardDefinitions: configMapList.new(super.dashboardDefinitions),
    local dashboardDefinitions = super.dashboardDefinitions,
    dashboardDefinitions: {
      apiVersion: 'v1',
      kind: 'ConfigMapList',
      items: dashboardDefinitions,
    },
    serviceMonitor: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
@@ -29,12 +104,10 @@ local configMapList = k.core.v1.configMapList;
          app: 'grafana',
        },
      },
      endpoints: [
        {
          port: 'http',
          interval: '15s',
        },
      ],
      endpoints: [{
        port: 'http',
        interval: '15s',
      }],
    },
  },
},
@@ -42,43 +115,45 @@ local configMapList = k.core.v1.configMapList;
  _config+:: {
    namespace: 'default',

    versions+:: {
      grafana: '6.0.1',
    },
    versions+:: { grafana: '7.3.4', kubeRbacProxy: 'v0.8.0' },
    imageRepos+:: { kubeRbacProxy: 'quay.io/brancz/kube-rbac-proxy' },

    tlsCipherSuites: [
      'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721
      'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256',  // required by h2: http://golang.org/cl/30721

      // 'TLS_RSA_WITH_RC4_128_SHA',        // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',   // insecure: https://access.redhat.com/articles/2548661
      // 'TLS_RSA_WITH_AES_128_CBC_SHA',    // disabled by h2
      // 'TLS_RSA_WITH_AES_256_CBC_SHA',    // disabled by h2
      'TLS_RSA_WITH_AES_128_CBC_SHA256',
      // 'TLS_RSA_WITH_AES_128_GCM_SHA256', // disabled by h2
      // 'TLS_RSA_WITH_AES_256_GCM_SHA384', // disabled by h2
      // 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',  // insecure: https://access.redhat.com/articles/2548661
      // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',  // disabled by h2
      'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256',
      'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',
      // 'TLS_RSA_WITH_RC4_128_SHA',        // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',   // insecure: https://access.redhat.com/articles/2548661
      // 'TLS_RSA_WITH_AES_128_CBC_SHA',    // disabled by h2
      // 'TLS_RSA_WITH_AES_256_CBC_SHA',    // disabled by h2
      // 'TLS_RSA_WITH_AES_128_CBC_SHA256', // insecure: https://access.redhat.com/security/cve/cve-2013-0169
      // 'TLS_RSA_WITH_AES_128_GCM_SHA256', // disabled by h2
      // 'TLS_RSA_WITH_AES_256_GCM_SHA384', // disabled by h2
      // 'TLS_ECDHE_ECDSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_RSA_WITH_RC4_128_SHA',  // insecure: https://access.redhat.com/security/cve/cve-2013-2566
      // 'TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA',  // insecure: https://access.redhat.com/articles/2548661
      // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA',  // disabled by h2
      // 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256',  // insecure: https://access.redhat.com/security/cve/cve-2013-0169
      // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256',  // insecure: https://access.redhat.com/security/cve/cve-2013-0169

      // disabled by h2 means: https://github.com/golang/net/blob/e514e69ffb8bc3c76a71ae40de0118d794855992/http2/ciphers.go

      // 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',  // TODO: Might not work with h2
      // 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',  // TODO: Might not work with h2
      // 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305',  // TODO: Might not work with h2
      // 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',  // TODO: Might not work with h2
      'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384',
      'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
      'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305',
      'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305',
    ],

    cadvisorSelector: 'job="kubelet"',
    kubeletSelector: 'job="kubelet"',
    runbookURLPattern: 'https://github.com/prometheus-operator/kube-prometheus/wiki/%s',

    cadvisorSelector: 'job="kubelet", metrics_path="/metrics/cadvisor"',
    kubeletSelector: 'job="kubelet", metrics_path="/metrics"',
    kubeStateMetricsSelector: 'job="kube-state-metrics"',
    nodeExporterSelector: 'job="node-exporter"',
    fsSpaceFillingUpCriticalThreshold: 15,
    notKubeDnsSelector: 'job!="kube-dns"',
    kubeSchedulerSelector: 'job="kube-scheduler"',
    kubeControllerManagerSelector: 'job="kube-controller-manager"',
@@ -86,8 +161,11 @@ local configMapList = k.core.v1.configMapList;
    coreDNSSelector: 'job="kube-dns"',
    podLabel: 'pod',

    alertmanagerSelector: 'job="alertmanager-main",namespace="' + $._config.namespace + '"',
    alertmanagerName: '{{ $labels.namespace }}/{{ $labels.pod}}',
    alertmanagerClusterLabels: 'namespace,service',
    alertmanagerSelector: 'job="alertmanager-' + $._config.alertmanager.name + '",namespace="' + $._config.namespace + '"',
    prometheusSelector: 'job="prometheus-' + $._config.prometheus.name + '",namespace="' + $._config.namespace + '"',
    prometheusName: '{{$labels.namespace}}/{{$labels.pod}}',
    prometheusOperatorSelector: 'job="prometheus-operator",namespace="' + $._config.namespace + '"',

    jobs: {
@@ -103,12 +181,25 @@ local configMapList = k.core.v1.configMapList;
      CoreDNS: $._config.coreDNSSelector,
    },

    prometheus+:: {
      rules: $.prometheusRules + $.prometheusAlerts,
    },

    grafana+:: {
      dashboards: $.grafanaDashboards,
|
||||
resources+:: {
|
||||
'addon-resizer': {
|
||||
requests: { cpu: '10m', memory: '30Mi' },
|
||||
limits: { cpu: '50m', memory: '30Mi' },
|
||||
},
|
||||
'kube-rbac-proxy': {
|
||||
requests: { cpu: '10m', memory: '20Mi' },
|
||||
limits: { cpu: '20m', memory: '40Mi' },
|
||||
},
|
||||
'kube-state-metrics': {
|
||||
requests: { cpu: '100m', memory: '150Mi' },
|
||||
limits: { cpu: '100m', memory: '150Mi' },
|
||||
},
|
||||
'node-exporter': {
|
||||
requests: { cpu: '102m', memory: '180Mi' },
|
||||
limits: { cpu: '250m', memory: '180Mi' },
|
||||
},
|
||||
},
|
||||
prometheus+:: { rules: $.prometheusRules + $.prometheusAlerts },
|
||||
grafana+:: { dashboards: $.grafanaDashboards },
|
||||
},
|
||||
}
|
||||
|
||||
93  jsonnet/kube-prometheus/kube-rbac-proxy/container.libsonnet  Normal file
@@ -0,0 +1,93 @@
{
  local krp = self,
  config+:: {
    kubeRbacProxy: {
      image: error 'must provide image',
      name: error 'must provide name',
      securePortName: error 'must provide securePortName',
      securePort: error 'must provide securePort',
      secureListenAddress: error 'must provide secureListenAddress',
      upstream: error 'must provide upstream',
      tlsCipherSuites: error 'must provide tlsCipherSuites',
    },
  },

  specMixin:: {
    local sm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    spec+: {
      template+: {
        spec+: {
          containers+: [{
            name: krp.config.kubeRbacProxy.name,
            image: krp.config.kubeRbacProxy.image,
            args: [
              '--logtostderr',
              '--secure-listen-address=' + krp.config.kubeRbacProxy.secureListenAddress,
              '--tls-cipher-suites=' + std.join(',', krp.config.kubeRbacProxy.tlsCipherSuites),
              '--upstream=' + krp.config.kubeRbacProxy.upstream,
            ],
            ports: [
              { name: krp.config.kubeRbacProxy.securePortName, containerPort: krp.config.kubeRbacProxy.securePort },
            ],
            securityContext: {
              runAsUser: 65532,
              runAsGroup: 65532,
              runAsNonRoot: true,
            },
          }],
        },
      },
    },
  },

  deploymentMixin:: {
    local dm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    deployment+: krp.specMixin {
      config+:: {
        kubeRbacProxy+: dm.config.kubeRbacProxy,
      },
    },
  },

  statefulSetMixin:: {
    local sm = self,
    config+:: {
      kubeRbacProxy: {
        image: error 'must provide image',
        name: error 'must provide name',
        securePortName: error 'must provide securePortName',
        securePort: error 'must provide securePort',
        secureListenAddress: error 'must provide secureListenAddress',
        upstream: error 'must provide upstream',
        tlsCipherSuites: error 'must provide tlsCipherSuites',
      },
    },
    statefulSet+: krp.specMixin {
      config+:: {
        kubeRbacProxy+: sm.config.kubeRbacProxy,
      },
    },
  },
}
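The mixins in this new library are consumed by wrapping it in a config object and adding the result to an existing workload, exactly as the grafana and kube-state-metrics files in this change do. A minimal sketch of that pattern follows; the exampleApp field, the placeholder deployment, and the hardcoded image tag are illustrative and not part of this change:

local kubeRbacProxyContainer = import 'kube-prometheus/kube-rbac-proxy/container.libsonnet';

{
  exampleApp+:: {
    deployment: {},  // an existing apps/v1 Deployment manifest would live here
  } + (kubeRbacProxyContainer {
    config+:: {
      kubeRbacProxy: {
        image: 'quay.io/brancz/kube-rbac-proxy:v0.8.0',  // illustrative; callers above derive this from $._config
        name: 'kube-rbac-proxy',
        securePortName: 'https',
        securePort: 8443,
        secureListenAddress: ':%d' % self.securePort,  // same formatting trick as the callers above
        upstream: 'http://127.0.0.1:8080/',
        tlsCipherSuites: [],  // normally $._config.tlsCipherSuites
      },
    },
  }).deploymentMixin,
}

statefulSetMixin works the same way for StatefulSet-based workloads. Any config field left unset falls through to the error 'must provide ...' defaults above, so a misconfigured caller fails at Jsonnet evaluation time rather than at deploy time.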
@@ -1,315 +1,132 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local kubeRbacProxyContainer = import '../kube-rbac-proxy/container.libsonnet';
local ksm = import 'github.com/kubernetes/kube-state-metrics/jsonnet/kube-state-metrics/kube-state-metrics.libsonnet';

{
  _config+:: {
    namespace: 'default',

    kubeStateMetrics+:: {
      collectors: '',  // empty string gets a default set
      scrapeInterval: '30s',
      scrapeTimeout: '30s',

      baseCPU: '100m',
      baseMemory: '150Mi',
      cpuPerNode: '2m',
      memoryPerNode: '30Mi',
    },

    versions+:: {
      kubeStateMetrics: 'v1.5.0',
      kubeRbacProxy: 'v0.4.1',
      addonResizer: '1.8.4',
      kubeStateMetrics: '1.9.7',
    },

    imageRepos+:: {
      kubeStateMetrics: 'quay.io/coreos/kube-state-metrics',
      kubeRbacProxy: 'quay.io/coreos/kube-rbac-proxy',
      addonResizer: 'k8s.gcr.io/addon-resizer',
    },
    kubeStateMetrics+:: {
      scrapeInterval: '30s',
      scrapeTimeout: '30s',
    },
  },

  kubeStateMetrics+:: {
    clusterRoleBinding:
      local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

      clusterRoleBinding.new() +
      clusterRoleBinding.mixin.metadata.withName('kube-state-metrics') +
      clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      clusterRoleBinding.mixin.roleRef.withName('kube-state-metrics') +
      clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
      clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'kube-state-metrics', namespace: $._config.namespace }]),

    clusterRole:
      local clusterRole = k.rbac.v1.clusterRole;
      local rulesType = clusterRole.rulesType;

      local coreRule = rulesType.new() +
        rulesType.withApiGroups(['']) +
        rulesType.withResources([
          'configmaps',
          'secrets',
          'nodes',
          'pods',
          'services',
          'resourcequotas',
          'replicationcontrollers',
          'limitranges',
          'persistentvolumeclaims',
          'persistentvolumes',
          'namespaces',
          'endpoints',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local extensionsRule = rulesType.new() +
        rulesType.withApiGroups(['extensions']) +
        rulesType.withResources([
          'daemonsets',
          'deployments',
          'replicasets',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local appsRule = rulesType.new() +
        rulesType.withApiGroups(['apps']) +
        rulesType.withResources([
          'statefulsets',
          'daemonsets',
          'deployments',
          'replicasets',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local batchRule = rulesType.new() +
        rulesType.withApiGroups(['batch']) +
        rulesType.withResources([
          'cronjobs',
          'jobs',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local autoscalingRule = rulesType.new() +
        rulesType.withApiGroups(['autoscaling']) +
        rulesType.withResources([
          'horizontalpodautoscalers',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local authenticationRole = rulesType.new() +
        rulesType.withApiGroups(['authentication.k8s.io']) +
        rulesType.withResources([
          'tokenreviews',
        ]) +
        rulesType.withVerbs(['create']);

      local authorizationRole = rulesType.new() +
        rulesType.withApiGroups(['authorization.k8s.io']) +
        rulesType.withResources([
          'subjectaccessreviews',
        ]) +
        rulesType.withVerbs(['create']);

      local policyRule = rulesType.new() +
        rulesType.withApiGroups(['policy']) +
        rulesType.withResources([
          'poddisruptionbudgets',
        ]) +
        rulesType.withVerbs(['list', 'watch']);

      local rules = [coreRule, extensionsRule, appsRule, batchRule, autoscalingRule, authenticationRole, authorizationRole, policyRule];

      clusterRole.new() +
      clusterRole.mixin.metadata.withName('kube-state-metrics') +
      clusterRole.withRules(rules),
    deployment:
      local deployment = k.apps.v1beta2.deployment;
      local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
      local volume = k.apps.v1beta2.deployment.mixin.spec.template.spec.volumesType;
      local containerPort = container.portsType;
      local containerVolumeMount = container.volumeMountsType;
      local podSelector = deployment.mixin.spec.template.spec.selectorType;

      local podLabels = { app: 'kube-state-metrics' };

      local proxyClusterMetrics =
        container.new('kube-rbac-proxy-main', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
        container.withArgs([
          '--logtostderr',
          '--secure-listen-address=:8443',
          '--tls-cipher-suites=' + std.join(',', $._config.tlsCipherSuites),
          '--upstream=http://127.0.0.1:8081/',
        ]) +
        container.withPorts(containerPort.newNamed('https-main', 8443)) +
        container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
        container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' });

      local proxySelfMetrics =
        container.new('kube-rbac-proxy-self', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
        container.withArgs([
          '--logtostderr',
          '--secure-listen-address=:9443',
          '--tls-cipher-suites=' + std.join(',', $._config.tlsCipherSuites),
          '--upstream=http://127.0.0.1:8082/',
        ]) +
        container.withPorts(containerPort.newNamed('https-self', 9443)) +
        container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
        container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' });

      local kubeStateMetrics =
        container.new('kube-state-metrics', $._config.imageRepos.kubeStateMetrics + ':' + $._config.versions.kubeStateMetrics) +
        container.withArgs([
          '--host=127.0.0.1',
          '--port=8081',
          '--telemetry-host=127.0.0.1',
          '--telemetry-port=8082',
        ] + if $._config.kubeStateMetrics.collectors != '' then ['--collectors=' + $._config.kubeStateMetrics.collectors] else []) +
        container.mixin.resources.withRequests({ cpu: $._config.kubeStateMetrics.baseCPU, memory: $._config.kubeStateMetrics.baseMemory }) +
        container.mixin.resources.withLimits({ cpu: $._config.kubeStateMetrics.baseCPU, memory: $._config.kubeStateMetrics.baseMemory });

      local addonResizer =
        container.new('addon-resizer', $._config.imageRepos.addonResizer + ':' + $._config.versions.addonResizer) +
        container.withCommand([
          '/pod_nanny',
          '--container=kube-state-metrics',
          '--cpu=' + $._config.kubeStateMetrics.baseCPU,
          '--extra-cpu=' + $._config.kubeStateMetrics.cpuPerNode,
          '--memory=' + $._config.kubeStateMetrics.baseMemory,
          '--extra-memory=' + $._config.kubeStateMetrics.memoryPerNode,
          '--threshold=5',
          '--deployment=kube-state-metrics',
        ]) +
        container.withEnv([
          {
            name: 'MY_POD_NAME',
            valueFrom: {
              fieldRef: { apiVersion: 'v1', fieldPath: 'metadata.name' },
            },
          },
          {
            name: 'MY_POD_NAMESPACE',
            valueFrom: {
              fieldRef: { apiVersion: 'v1', fieldPath: 'metadata.namespace' },
            },
          },
        ]) +
        container.mixin.resources.withRequests({ cpu: '10m', memory: '30Mi' }) +
        container.mixin.resources.withLimits({ cpu: '50m', memory: '30Mi' });

      local c = [proxyClusterMetrics, proxySelfMetrics, kubeStateMetrics, addonResizer];

      deployment.new('kube-state-metrics', 1, c, podLabels) +
      deployment.mixin.metadata.withNamespace($._config.namespace) +
      deployment.mixin.metadata.withLabels(podLabels) +
      deployment.mixin.spec.selector.withMatchLabels(podLabels) +
      deployment.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
      deployment.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
      deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
      deployment.mixin.spec.template.spec.withServiceAccountName('kube-state-metrics'),

    roleBinding:
      local roleBinding = k.rbac.v1.roleBinding;

      roleBinding.new() +
      roleBinding.mixin.metadata.withName('kube-state-metrics') +
      roleBinding.mixin.metadata.withNamespace($._config.namespace) +
      roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      roleBinding.mixin.roleRef.withName('kube-state-metrics') +
      roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
      roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'kube-state-metrics' }]),

    role:
      local role = k.rbac.v1.role;
      local rulesType = role.rulesType;

      local coreRule = rulesType.new() +
        rulesType.withApiGroups(['']) +
        rulesType.withResources([
          'pods',
        ]) +
        rulesType.withVerbs(['get']);

      local extensionsRule = rulesType.new() +
        rulesType.withApiGroups(['extensions']) +
        rulesType.withResources([
          'deployments',
        ]) +
        rulesType.withVerbs(['get', 'update']) +
        rulesType.withResourceNames(['kube-state-metrics']);

      local appsRule = rulesType.new() +
        rulesType.withApiGroups(['apps']) +
        rulesType.withResources([
          'deployments',
        ]) +
        rulesType.withVerbs(['get', 'update']) +
        rulesType.withResourceNames(['kube-state-metrics']);

      local rules = [coreRule, extensionsRule, appsRule];

      role.new() +
      role.mixin.metadata.withName('kube-state-metrics') +
      role.mixin.metadata.withNamespace($._config.namespace) +
      role.withRules(rules),

    serviceAccount:
      local serviceAccount = k.core.v1.serviceAccount;

      serviceAccount.new('kube-state-metrics') +
      serviceAccount.mixin.metadata.withNamespace($._config.namespace),

    service:
      local service = k.core.v1.service;
      local servicePort = k.core.v1.service.mixin.spec.portsType;

      local ksmServicePortMain = servicePort.newNamed('https-main', 8443, 'https-main');
      local ksmServicePortSelf = servicePort.newNamed('https-self', 9443, 'https-self');

      service.new('kube-state-metrics', $.kubeStateMetrics.deployment.spec.selector.matchLabels, [ksmServicePortMain, ksmServicePortSelf]) +
      service.mixin.metadata.withNamespace($._config.namespace) +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-state-metrics' }) +
      service.mixin.spec.withClusterIp('None'),

    serviceMonitor:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
        metadata: {
          name: 'kube-state-metrics',
          namespace: $._config.namespace,
          labels: {
            'k8s-app': 'kube-state-metrics',
          },
        },
        spec: {
          jobLabel: 'k8s-app',
          selector: {
            matchLabels: {
              'k8s-app': 'kube-state-metrics',
            },
          },
          endpoints: [
  kubeStateMetrics+::
    ksm {
      local version = self.version,
      name:: 'kube-state-metrics',
      namespace:: $._config.namespace,
      version:: $._config.versions.kubeStateMetrics,
      image:: $._config.imageRepos.kubeStateMetrics + ':v' + $._config.versions.kubeStateMetrics,
      service+: {
        spec+: {
          ports: [
            {
              port: 'https-main',
              scheme: 'https',
              interval: $._config.kubeStateMetrics.scrapeInterval,
              scrapeTimeout: $._config.kubeStateMetrics.scrapeTimeout,
              honorLabels: true,
              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
              tlsConfig: {
                insecureSkipVerify: true,
              },
              name: 'https-main',
              port: 8443,
              targetPort: 'https-main',
            },
            {
              port: 'https-self',
              scheme: 'https',
              interval: '30s',
              bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
              tlsConfig: {
                insecureSkipVerify: true,
              },
              name: 'https-self',
              port: 9443,
              targetPort: 'https-self',
            },
          ],
        },
      },
      deployment+: {
        spec+: {
          template+: {
            spec+: {
              containers: std.map(function(c) c {
                ports:: null,
                livenessProbe:: null,
                readinessProbe:: null,
                args: ['--host=127.0.0.1', '--port=8081', '--telemetry-host=127.0.0.1', '--telemetry-port=8082'],
              }, super.containers),
            },
          },
        },
      },
      serviceMonitor:
        {
          apiVersion: 'monitoring.coreos.com/v1',
          kind: 'ServiceMonitor',
          metadata: {
            name: 'kube-state-metrics',
            namespace: $._config.namespace,
            labels: {
              'app.kubernetes.io/name': 'kube-state-metrics',
              'app.kubernetes.io/version': version,
            },
          },
          spec: {
            jobLabel: 'app.kubernetes.io/name',
            selector: {
              matchLabels: {
                'app.kubernetes.io/name': 'kube-state-metrics',
              },
            },
            endpoints: [
              {
                port: 'https-main',
                scheme: 'https',
                interval: $._config.kubeStateMetrics.scrapeInterval,
                scrapeTimeout: $._config.kubeStateMetrics.scrapeTimeout,
                honorLabels: true,
                bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
                relabelings: [
                  {
                    regex: '(pod|service|endpoint|namespace)',
                    action: 'labeldrop',
                  },
                ],
                tlsConfig: {
                  insecureSkipVerify: true,
                },
              },
              {
                port: 'https-self',
                scheme: 'https',
                interval: $._config.kubeStateMetrics.scrapeInterval,
                bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
                tlsConfig: {
                  insecureSkipVerify: true,
                },
              },
            ],
          },
        },
    } +
    (kubeRbacProxyContainer {
      config+:: {
        kubeRbacProxy: {
          local cfg = self,
          image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
          name: 'kube-rbac-proxy-main',
          securePortName: 'https-main',
          securePort: 8443,
          secureListenAddress: ':%d' % self.securePort,
          upstream: 'http://127.0.0.1:8081/',
          tlsCipherSuites: $._config.tlsCipherSuites,
        },
      },
    }).deploymentMixin +
    (kubeRbacProxyContainer {
      config+:: {
        kubeRbacProxy: {
          local cfg = self,
          image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
          name: 'kube-rbac-proxy-self',
          securePortName: 'https-self',
          securePort: 9443,
          secureListenAddress: ':%d' % self.securePort,
          upstream: 'http://127.0.0.1:8082/',
          tlsCipherSuites: $._config.tlsCipherSuites,
        },
      },
    }).deploymentMixin,
}
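Since the scrape settings live under _config, a downstream consumer can tune them without patching the ServiceMonitor itself. A minimal sketch, assuming the file above is imported as 'kube-state-metrics.libsonnet' (the import path is illustrative):

(import 'kube-state-metrics.libsonnet') + {
  _config+:: {
    kubeStateMetrics+:: {
      scrapeInterval: '1m',  // illustrative value; flows into the https-main endpoint above
      scrapeTimeout: '45s',
    },
  },
}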
@@ -5,16 +5,16 @@ local imageName(image) =
  local parts = std.split(image, '/');
  local len = std.length(parts);
  if len == 3 then
    # registry.com/org/image
    // registry.com/org/image
    parts[2]
  else if len == 2 then
    # org/image
    // org/image
    parts[1]
  else if len == 1 then
    # image, ie. busybox
    // image, ie. busybox
    parts[0]
  else
    error 'unknown image format: ' + image;
    error 'unknown image format: ' + image;

{
  imageName:: imageName,
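For reference, imageName keeps only the final path segment of an image reference, covering the three branches above. A small illustrative usage (the import path is assumed, not part of this change):

local lib = import 'image-name.libsonnet';

[
  lib.imageName('quay.io/coreos/kube-state-metrics'),  // 'kube-state-metrics' (registry.com/org/image)
  lib.imageName('coreos/kube-state-metrics'),  // 'kube-state-metrics' (org/image)
  lib.imageName('busybox'),  // 'busybox' (bare image)
]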
@@ -1,120 +1,96 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

{
  _config+:: {
    namespace: 'default',

    versions+:: {
      nodeExporter: 'v0.17.0',
      kubeRbacProxy: 'v0.4.1',
    },

    imageRepos+:: {
      nodeExporter: 'quay.io/prometheus/node-exporter',
      kubeRbacProxy: 'quay.io/coreos/kube-rbac-proxy',
    },
    versions+:: { nodeExporter: 'v1.0.1' },
    imageRepos+:: { nodeExporter: 'quay.io/prometheus/node-exporter' },

    nodeExporter+:: {
      listenAddress: '127.0.0.1',
      port: 9100,
      labels: {
        'app.kubernetes.io/name': 'node-exporter',
        'app.kubernetes.io/version': $._config.versions.nodeExporter,
      },
      selectorLabels: {
        [labelName]: $._config.nodeExporter.labels[labelName]
        for labelName in std.objectFields($._config.nodeExporter.labels)
        if !std.setMember(labelName, ['app.kubernetes.io/version'])
      },
    },
  },

  nodeExporter+:: {
    clusterRoleBinding:
      local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
    clusterRoleBinding: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'node-exporter',
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'node-exporter',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: 'node-exporter',
        namespace: $._config.namespace,
      }],
    },

      clusterRoleBinding.new() +
      clusterRoleBinding.mixin.metadata.withName('node-exporter') +
      clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      clusterRoleBinding.mixin.roleRef.withName('node-exporter') +
      clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
      clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'node-exporter', namespace: $._config.namespace }]),

    clusterRole:
      local clusterRole = k.rbac.v1.clusterRole;
      local policyRule = clusterRole.rulesType;

      local authenticationRole = policyRule.new() +
        policyRule.withApiGroups(['authentication.k8s.io']) +
        policyRule.withResources([
          'tokenreviews',
        ]) +
        policyRule.withVerbs(['create']);

      local authorizationRole = policyRule.new() +
        policyRule.withApiGroups(['authorization.k8s.io']) +
        policyRule.withResources([
          'subjectaccessreviews',
        ]) +
        policyRule.withVerbs(['create']);

      local rules = [authenticationRole, authorizationRole];

      clusterRole.new() +
      clusterRole.mixin.metadata.withName('node-exporter') +
      clusterRole.withRules(rules),
    clusterRole: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: 'node-exporter',
      },
      rules: [
        {
          apiGroups: ['authentication.k8s.io'],
          resources: ['tokenreviews'],
          verbs: ['create'],
        },
        {
          apiGroups: ['authorization.k8s.io'],
          resources: ['subjectaccessreviews'],
          verbs: ['create'],
        },
      ],
    },

    daemonset:
      local daemonset = k.apps.v1beta2.daemonSet;
      local container = daemonset.mixin.spec.template.spec.containersType;
      local volume = daemonset.mixin.spec.template.spec.volumesType;
      local containerPort = container.portsType;
      local containerVolumeMount = container.volumeMountsType;
      local podSelector = daemonset.mixin.spec.template.spec.selectorType;
      local toleration = daemonset.mixin.spec.template.spec.tolerationsType;
      local containerEnv = container.envType;

      local podLabels = { app: 'node-exporter' };

      local noExecuteToleration = toleration.new() +
        toleration.withOperator('Exists') +
        toleration.withEffect('NoExecute');

      local noScheduleToleration = toleration.new() +
        toleration.withOperator('Exists') +
        toleration.withEffect('NoSchedule');

      local procVolumeName = 'proc';
      local procVolume = volume.fromHostPath(procVolumeName, '/proc');
      local procVolumeMount = containerVolumeMount.new(procVolumeName, '/host/proc');

      local sysVolumeName = 'sys';
      local sysVolume = volume.fromHostPath(sysVolumeName, '/sys');
      local sysVolumeMount = containerVolumeMount.new(sysVolumeName, '/host/sys');

      local rootVolumeName = 'root';
      local rootVolume = volume.fromHostPath(rootVolumeName, '/');
      local rootVolumeMount = containerVolumeMount.new(rootVolumeName, '/host/root').
        withMountPropagation('HostToContainer').
        withReadOnly(true);

      local nodeExporter =
        container.new('node-exporter', $._config.imageRepos.nodeExporter + ':' + $._config.versions.nodeExporter) +
        container.withArgs([
          '--web.listen-address=127.0.0.1:' + $._config.nodeExporter.port,
    local nodeExporter = {
      name: 'node-exporter',
      image: $._config.imageRepos.nodeExporter + ':' + $._config.versions.nodeExporter,
      args: [
        '--web.listen-address=' + std.join(':', [$._config.nodeExporter.listenAddress, std.toString($._config.nodeExporter.port)]),
        '--path.procfs=/host/proc',
        '--path.sysfs=/host/sys',
        '--path.rootfs=/host/root',
        '--no-collector.wifi',
        '--no-collector.hwmon',
        '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)',
      ],
      volumeMounts: [
        { name: 'proc', mountPath: '/host/proc', mountPropagation: 'HostToContainer', readOnly: true },
        { name: 'sys', mountPath: '/host/sys', mountPropagation: 'HostToContainer', readOnly: true },
        { name: 'root', mountPath: '/host/root', mountPropagation: 'HostToContainer', readOnly: true },
      ],
      resources: $._config.resources['node-exporter'],
    };

          // The following settings have been taken from
          // https://github.com/prometheus/node_exporter/blob/0662673/collector/filesystem_linux.go#L30-L31
          // Once node exporter is being released with those settings, this can be removed.
          '--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)',
          '--collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$',
        ]) +
        container.withVolumeMounts([procVolumeMount, sysVolumeMount, rootVolumeMount]) +
        container.mixin.resources.withRequests({ cpu: '102m', memory: '180Mi' }) +
        container.mixin.resources.withLimits({ cpu: '250m', memory: '180Mi' });

      local ip = containerEnv.fromFieldPath('IP', 'status.podIP');
      local proxy =
        container.new('kube-rbac-proxy', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
        container.withArgs([
    local proxy = {
      name: 'kube-rbac-proxy',
      image: $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy,
      args: [
        '--logtostderr',
        '--secure-listen-address=$(IP):' + $._config.nodeExporter.port,
        '--secure-listen-address=[$(IP)]:' + $._config.nodeExporter.port,
        '--tls-cipher-suites=' + std.join(',', $._config.tlsCipherSuites),
        '--upstream=http://127.0.0.1:' + $._config.nodeExporter.port + '/',
        ]) +
      ],
      env: [
        { name: 'IP', valueFrom: { fieldRef: { fieldPath: 'status.podIP' } } },
      ],
      // Keep `hostPort` here, rather than in the node-exporter container
      // because Kubernetes mandates that if you define a `hostPort` then
      // `containerPort` must match. In our case, we are splitting the
@@ -123,76 +99,114 @@ local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
      // used by the service is tied to the proxy container. We *could*
      // forgo declaring the host port, however it is important to declare
      // it so that the scheduler can decide if the pod is schedulable.
        container.withPorts(containerPort.new($._config.nodeExporter.port) + containerPort.withHostPort($._config.nodeExporter.port) + containerPort.withName('https')) +
        container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
        container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' }) +
        container.withEnv([ip]);
      ports: [
        { name: 'https', containerPort: $._config.nodeExporter.port, hostPort: $._config.nodeExporter.port },
      ],
      resources: $._config.resources['kube-rbac-proxy'],
      securityContext: {
        runAsUser: 65532,
        runAsGroup: 65532,
        runAsNonRoot: true,
      },
    };

      local c = [nodeExporter, proxy];

      daemonset.new() +
      daemonset.mixin.metadata.withName('node-exporter') +
      daemonset.mixin.metadata.withNamespace($._config.namespace) +
      daemonset.mixin.metadata.withLabels(podLabels) +
      daemonset.mixin.spec.selector.withMatchLabels(podLabels) +
      daemonset.mixin.spec.template.metadata.withLabels(podLabels) +
      daemonset.mixin.spec.template.spec.withTolerations([noExecuteToleration, noScheduleToleration]) +
      daemonset.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
      daemonset.mixin.spec.template.spec.withContainers(c) +
      daemonset.mixin.spec.template.spec.withVolumes([procVolume, sysVolume, rootVolume]) +
      daemonset.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
      daemonset.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
      daemonset.mixin.spec.template.spec.withServiceAccountName('node-exporter') +
      daemonset.mixin.spec.template.spec.withHostPid(true) +
      daemonset.mixin.spec.template.spec.withHostNetwork(true),

    serviceAccount:
      local serviceAccount = k.core.v1.serviceAccount;

      serviceAccount.new('node-exporter') +
      serviceAccount.mixin.metadata.withNamespace($._config.namespace),

    serviceMonitor:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
      apiVersion: 'apps/v1',
      kind: 'DaemonSet',
      metadata: {
        name: 'node-exporter',
        namespace: $._config.namespace,
        labels: {
          'k8s-app': 'node-exporter',
        },
        labels: $._config.nodeExporter.labels,
      },
      spec: {
        jobLabel: 'k8s-app',
        selector: {
          matchLabels: {
            'k8s-app': 'node-exporter',
        selector: { matchLabels: $._config.nodeExporter.selectorLabels },
        updateStrategy: {
          type: 'RollingUpdate',
          rollingUpdate: { maxUnavailable: '10%' },
        },
        template: {
          metadata: { labels: $._config.nodeExporter.labels },
          spec: {
            nodeSelector: { 'kubernetes.io/os': 'linux' },
            tolerations: [{
              operator: 'Exists',
            }],
            containers: [nodeExporter, proxy],
            volumes: [
              { name: 'proc', hostPath: { path: '/proc' } },
              { name: 'sys', hostPath: { path: '/sys' } },
              { name: 'root', hostPath: { path: '/' } },
            ],
            serviceAccountName: 'node-exporter',
            securityContext: {
              runAsUser: 65534,
              runAsNonRoot: true,
            },
            hostPID: true,
            hostNetwork: true,
          },
        },
        endpoints: [
          {
            port: 'https',
            scheme: 'https',
            interval: '30s',
            bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
            tlsConfig: {
              insecureSkipVerify: true,
            },
          },
        ],
      },
    },

    service:
      local service = k.core.v1.service;
      local servicePort = k.core.v1.service.mixin.spec.portsType;
    serviceAccount: {
      apiVersion: 'v1',
      kind: 'ServiceAccount',
      metadata: {
        name: 'node-exporter',
        namespace: $._config.namespace,
      },
    },

      local nodeExporterPort = servicePort.newNamed('https', $._config.nodeExporter.port, 'https');
    serviceMonitor: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: 'node-exporter',
        namespace: $._config.namespace,
        labels: $._config.nodeExporter.labels,
      },
      spec: {
        jobLabel: 'app.kubernetes.io/name',
        selector: {
          matchLabels: $._config.nodeExporter.selectorLabels,
        },
        endpoints: [{
          port: 'https',
          scheme: 'https',
          interval: '15s',
          bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          relabelings: [
            {
              action: 'replace',
              regex: '(.*)',
              replacement: '$1',
              sourceLabels: ['__meta_kubernetes_pod_node_name'],
              targetLabel: 'instance',
            },
          ],
          tlsConfig: {
            insecureSkipVerify: true,
          },
        }],
      },
    },

      service.new('node-exporter', $.nodeExporter.daemonset.spec.selector.matchLabels, nodeExporterPort) +
      service.mixin.metadata.withNamespace($._config.namespace) +
      service.mixin.metadata.withLabels({ 'k8s-app': 'node-exporter' }) +
      service.mixin.spec.withClusterIp('None'),
    service: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: 'node-exporter',
        namespace: $._config.namespace,
        labels: $._config.nodeExporter.labels,
      },
      spec: {
        ports: [
          { name: 'https', targetPort: 'https', port: $._config.nodeExporter.port },
        ],
        selector: $._config.nodeExporter.selectorLabels,
        clusterIP: 'None',
      },
    },
  },
}
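Because the listen address and port are surfaced through _config.nodeExporter, the container args, the hostPort, the proxy upstream, and the Service all move together when one value changes. A minimal sketch, assuming the file above is imported as 'node-exporter.libsonnet' (the path is illustrative):

(import 'node-exporter.libsonnet') + {
  _config+:: {
    nodeExporter+:: {
      port: 9101,  // illustrative value; updates args, hostPort, and the Service in one place
    },
  },
}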
@@ -1,221 +1,277 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

{
  _config+:: {
    namespace: 'default',

    versions+:: {
      prometheusAdapter: 'v0.4.1',
    },

    imageRepos+:: {
      prometheusAdapter: 'quay.io/coreos/k8s-prometheus-adapter-amd64',
    },
    versions+:: { prometheusAdapter: 'v0.8.2' },
    imageRepos+:: { prometheusAdapter: 'directxman12/k8s-prometheus-adapter' },

    prometheusAdapter+:: {
      name: 'prometheus-adapter',
      namespace: $._config.namespace,
      labels: { name: $._config.prometheusAdapter.name },
      prometheusURL: 'http://prometheus-' + $._config.prometheus.name + '.' + $._config.namespace + '.svc:9090/',
      config: |||
        resourceRules:
          cpu:
            containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container_name!="POD",container_name!="",pod_name!=""}[1m])) by (<<.GroupBy>>)
            nodeQuery: sum(1 - rate(node_cpu_seconds_total{mode="idle"}[1m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
            resources:
              overrides:
                node:
                  resource: node
                namespace:
                  resource: namespace
                pod_name:
                  resource: pod
            containerLabel: container_name
          memory:
            containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container_name!="POD",container_name!="",pod_name!=""}) by (<<.GroupBy>>)
            nodeQuery: sum(node:node_memory_bytes_total:sum{<<.LabelMatchers>>} - node:node_memory_bytes_available:sum{<<.LabelMatchers>>}) by (<<.GroupBy>>)
            resources:
              overrides:
                node:
                  resource: node
                namespace:
                  resource: namespace
                pod_name:
                  resource: pod
            containerLabel: container_name
          window: 1m
      |||,
      prometheusURL: 'http://prometheus-' + $._config.prometheus.name + '.' + $._config.namespace + '.svc.cluster.local:9090/',
      config: {
        resourceRules: {
          cpu: {
            containerQuery: 'sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)',
            nodeQuery: 'sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)',
            resources: {
              overrides: {
                node: { resource: 'node' },
                namespace: { resource: 'namespace' },
                pod: { resource: 'pod' },
              },
            },
            containerLabel: 'container',
          },
          memory: {
            containerQuery: 'sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)',
            nodeQuery: 'sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)',
            resources: {
              overrides: {
                instance: { resource: 'node' },
                namespace: { resource: 'namespace' },
                pod: { resource: 'pod' },
              },
            },
            containerLabel: 'container',
          },
          window: '5m',
        },
      },
    },
  },

  prometheusAdapter+:: {
    apiService:
      {
        apiVersion: 'apiregistration.k8s.io/v1',
        kind: 'APIService',
        metadata: {
          name: 'v1beta1.metrics.k8s.io',
        },
        spec: {
          service: {
            name: $.prometheusAdapter.service.metadata.name,
            namespace: $._config.namespace,
          },
          group: 'metrics.k8s.io',
          version: 'v1beta1',
          insecureSkipTLSVerify: true,
          groupPriorityMinimum: 100,
          versionPriority: 100,
        },
    apiService: {
      apiVersion: 'apiregistration.k8s.io/v1',
      kind: 'APIService',
      metadata: {
        name: 'v1beta1.metrics.k8s.io',
      },
      spec: {
        service: {
          name: $.prometheusAdapter.service.metadata.name,
          namespace: $._config.prometheusAdapter.namespace,
        },
        group: 'metrics.k8s.io',
        version: 'v1beta1',
        insecureSkipTLSVerify: true,
        groupPriorityMinimum: 100,
        versionPriority: 100,
      },
    },

    configMap:
      local configmap = k.core.v1.configMap;
    configMap: {
      apiVersion: 'v1',
      kind: 'ConfigMap',
      metadata: {
        name: 'adapter-config',
        namespace: $._config.prometheusAdapter.namespace,
      },
      data: { 'config.yaml': std.manifestYamlDoc($._config.prometheusAdapter.config) },
    },

      configmap.new('adapter-config', { 'config.yaml': $._config.prometheusAdapter.config }) +
      configmap.mixin.metadata.withNamespace($._config.namespace),
    serviceMonitor: {
      apiVersion: 'monitoring.coreos.com/v1',
      kind: 'ServiceMonitor',
      metadata: {
        name: $._config.prometheusAdapter.name,
        namespace: $._config.prometheusAdapter.namespace,
        labels: $._config.prometheusAdapter.labels,
      },
      spec: {
        selector: {
          matchLabels: $._config.prometheusAdapter.labels,
        },
        endpoints: [
          {
            port: 'https',
            interval: '30s',
            scheme: 'https',
            tlsConfig: {
              insecureSkipVerify: true,
            },
            bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
          },
        ],
      },
    },

    service:
      local service = k.core.v1.service;
      local servicePort = k.core.v1.service.mixin.spec.portsType;

      service.new(
        $._config.prometheusAdapter.name,
        $._config.prometheusAdapter.labels,
        servicePort.newNamed('https', 443, 6443),
      ) +
      service.mixin.metadata.withNamespace($._config.namespace) +
      service.mixin.metadata.withLabels($._config.prometheusAdapter.labels),
    service: {
      apiVersion: 'v1',
      kind: 'Service',
      metadata: {
        name: $._config.prometheusAdapter.name,
        namespace: $._config.prometheusAdapter.namespace,
        labels: $._config.prometheusAdapter.labels,
      },
      spec: {
        ports: [
          { name: 'https', targetPort: 6443, port: 443 },
        ],
        selector: $._config.prometheusAdapter.labels,
      },
    },

    deployment:
      local deployment = k.apps.v1beta2.deployment;
      local volume = deployment.mixin.spec.template.spec.volumesType;
      local container = deployment.mixin.spec.template.spec.containersType;
      local containerVolumeMount = container.volumeMountsType;

      local c =
        container.new($._config.prometheusAdapter.name, $._config.imageRepos.prometheusAdapter + ':' + $._config.versions.prometheusAdapter) +
        container.withArgs([
      local c = {
        name: $._config.prometheusAdapter.name,
        image: $._config.imageRepos.prometheusAdapter + ':' + $._config.versions.prometheusAdapter,
        args: [
          '--cert-dir=/var/run/serving-cert',
          '--config=/etc/adapter/config.yaml',
          '--logtostderr=true',
          '--metrics-relist-interval=1m',
          '--prometheus-url=' + $._config.prometheusAdapter.prometheusURL,
          '--secure-port=6443',
        ]) +
        container.withPorts([{ containerPort: 6443 }]) +
        container.withVolumeMounts([
          containerVolumeMount.new('tmpfs', '/tmp'),
          containerVolumeMount.new('volume-serving-cert', '/var/run/serving-cert'),
          containerVolumeMount.new('config', '/etc/adapter'),
        ],);
        ],
        ports: [{ containerPort: 6443 }],
        volumeMounts: [
          { name: 'tmpfs', mountPath: '/tmp', readOnly: false },
          { name: 'volume-serving-cert', mountPath: '/var/run/serving-cert', readOnly: false },
          { name: 'config', mountPath: '/etc/adapter', readOnly: false },
        ],
      };

      deployment.new($._config.prometheusAdapter.name, 1, c, $._config.prometheusAdapter.labels) +
      deployment.mixin.metadata.withNamespace($._config.namespace) +
      deployment.mixin.spec.selector.withMatchLabels($._config.prometheusAdapter.labels) +
      deployment.mixin.spec.template.spec.withServiceAccountName($.prometheusAdapter.serviceAccount.metadata.name) +
      deployment.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
      deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(1) +
      deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(0) +
      deployment.mixin.spec.template.spec.withVolumes([
        volume.fromEmptyDir(name='tmpfs'),
        volume.fromEmptyDir(name='volume-serving-cert'),
        { name: 'config', configMap: { name: 'adapter-config' } },
      ]),
      {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata: {
          name: $._config.prometheusAdapter.name,
          namespace: $._config.prometheusAdapter.namespace,
        },
        spec: {
          replicas: 1,
          selector: { matchLabels: $._config.prometheusAdapter.labels },
          strategy: {
            rollingUpdate: {
              maxSurge: 1,
              maxUnavailable: 0,
            },
          },
          template: {
            metadata: { labels: $._config.prometheusAdapter.labels },
            spec: {
              containers: [c],
              serviceAccountName: $.prometheusAdapter.serviceAccount.metadata.name,
              nodeSelector: { 'kubernetes.io/os': 'linux' },
              volumes: [
                { name: 'tmpfs', emptyDir: {} },
                { name: 'volume-serving-cert', emptyDir: {} },
                { name: 'config', configMap: { name: 'adapter-config' } },
              ],
            },
          },
        },
      },

    serviceAccount:
      local serviceAccount = k.core.v1.serviceAccount;
    serviceAccount: {
      apiVersion: 'v1',
      kind: 'ServiceAccount',
      metadata: {
        name: $._config.prometheusAdapter.name,
        namespace: $._config.prometheusAdapter.namespace,
      },
    },

      serviceAccount.new($._config.prometheusAdapter.name) +
      serviceAccount.mixin.metadata.withNamespace($._config.namespace),
    clusterRole: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: $._config.prometheusAdapter.name,
      },
      rules: [{
        apiGroups: [''],
        resources: ['nodes', 'namespaces', 'pods', 'services'],
        verbs: ['get', 'list', 'watch'],
      }],
    },

    clusterRole:
      local clusterRole = k.rbac.v1.clusterRole;
      local policyRule = clusterRole.rulesType;

      local rules =
        policyRule.new() +
        policyRule.withApiGroups(['']) +
        policyRule.withResources(['nodes', 'namespaces', 'pods', 'services']) +
        policyRule.withVerbs(['get', 'list', 'watch']);

      clusterRole.new() +
      clusterRole.mixin.metadata.withName($._config.prometheusAdapter.name) +
      clusterRole.withRules(rules),

    clusterRoleBinding:
      local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

      clusterRoleBinding.new() +
      clusterRoleBinding.mixin.metadata.withName($._config.prometheusAdapter.name) +
      clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      clusterRoleBinding.mixin.roleRef.withName($.prometheusAdapter.clusterRole.metadata.name) +
      clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
      clusterRoleBinding.withSubjects([{
    clusterRoleBinding: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: $._config.prometheusAdapter.name,
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: $.prometheusAdapter.clusterRole.metadata.name,
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: $.prometheusAdapter.serviceAccount.metadata.name,
        namespace: $._config.namespace,
      }]),
        namespace: $._config.prometheusAdapter.namespace,
      }],
    },

    clusterRoleBindingDelegator:
      local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

      clusterRoleBinding.new() +
      clusterRoleBinding.mixin.metadata.withName('resource-metrics:system:auth-delegator') +
      clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      clusterRoleBinding.mixin.roleRef.withName('system:auth-delegator') +
      clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
      clusterRoleBinding.withSubjects([{
    clusterRoleBindingDelegator: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRoleBinding',
      metadata: {
        name: 'resource-metrics:system:auth-delegator',
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'ClusterRole',
        name: 'system:auth-delegator',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: $.prometheusAdapter.serviceAccount.metadata.name,
        namespace: $._config.namespace,
      }]),
        namespace: $._config.prometheusAdapter.namespace,
      }],
    },

    clusterRoleServerResources:
      local clusterRole = k.rbac.v1.clusterRole;
      local policyRule = clusterRole.rulesType;
    clusterRoleServerResources: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: 'resource-metrics-server-resources',
      },
      rules: [{
        apiGroups: ['metrics.k8s.io'],
        resources: ['*'],
        verbs: ['*'],
      }],
    },

      local rules =
        policyRule.new() +
        policyRule.withApiGroups(['metrics.k8s.io']) +
        policyRule.withResources(['*']) +
        policyRule.withVerbs(['*']);
    clusterRoleAggregatedMetricsReader: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'ClusterRole',
      metadata: {
        name: 'system:aggregated-metrics-reader',
        labels: {
          'rbac.authorization.k8s.io/aggregate-to-admin': 'true',
          'rbac.authorization.k8s.io/aggregate-to-edit': 'true',
          'rbac.authorization.k8s.io/aggregate-to-view': 'true',
        },
      },
      rules: [{
        apiGroups: ['metrics.k8s.io'],
        resources: ['pods', 'nodes'],
        verbs: ['get', 'list', 'watch'],
      }],
    },

      clusterRole.new() +
      clusterRole.mixin.metadata.withName('resource-metrics-server-resources') +
      clusterRole.withRules(rules),

    clusterRoleAggregatedMetricsReader:
      local clusterRole = k.rbac.v1.clusterRole;
      local policyRule = clusterRole.rulesType;

      local rules =
        policyRule.new() +
        policyRule.withApiGroups(['metrics.k8s.io']) +
        policyRule.withResources(['pods']) +
        policyRule.withVerbs(['get','list','watch']);

      clusterRole.new() +
      clusterRole.mixin.metadata.withName('system:aggregated-metrics-reader') +
      clusterRole.mixin.metadata.withLabels({
        "rbac.authorization.k8s.io/aggregate-to-admin": "true",
        "rbac.authorization.k8s.io/aggregate-to-edit": "true",
        "rbac.authorization.k8s.io/aggregate-to-view": "true",
      }) +
      clusterRole.withRules(rules),

    roleBindingAuthReader:
      local roleBinding = k.rbac.v1.roleBinding;

      roleBinding.new() +
      roleBinding.mixin.metadata.withName('resource-metrics-auth-reader') +
      roleBinding.mixin.metadata.withNamespace('kube-system') +
      roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
      roleBinding.mixin.roleRef.withName('extension-apiserver-authentication-reader') +
      roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
      roleBinding.withSubjects([{
    roleBindingAuthReader: {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'RoleBinding',
      metadata: {
        name: 'resource-metrics-auth-reader',
        namespace: 'kube-system',
      },
      roleRef: {
        apiGroup: 'rbac.authorization.k8s.io',
        kind: 'Role',
        name: 'extension-apiserver-authentication-reader',
      },
      subjects: [{
        kind: 'ServiceAccount',
        name: $.prometheusAdapter.serviceAccount.metadata.name,
        namespace: $._config.namespace,
      }]),
        namespace: $._config.prometheusAdapter.namespace,
      }],
    },
  },
}
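Note that the adapter config switches from an embedded YAML string (|||) to a Jsonnet object rendered with std.manifestYamlDoc, so downstream users can patch individual rules instead of re-supplying the whole document. A minimal sketch of such an override, assuming the file above is imported as 'prometheus-adapter.libsonnet' (the path and the value are illustrative):

(import 'prometheus-adapter.libsonnet') + {
  _config+:: {
    prometheusAdapter+:: {
      config+: {
        resourceRules+: {
          window: '10m',  // merged into the object before std.manifestYamlDoc serializes it
        },
      },
    },
  },
}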
@@ -1,434 +1,463 @@
|
||||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
local relabelings = import 'kube-prometheus/dropping-deprecated-metrics-relabelings.libsonnet';
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'default',
|
||||
|
||||
versions+:: {
|
||||
prometheus: 'v2.7.2',
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
prometheus: 'quay.io/prometheus/prometheus',
|
||||
},
|
||||
|
||||
alertmanager+:: {
|
||||
name: 'main',
|
||||
},
|
||||
versions+:: { prometheus: 'v2.22.1' },
|
||||
imageRepos+:: { prometheus: 'quay.io/prometheus/prometheus' },
|
||||
alertmanager+:: { name: 'main' },
|
||||
|
||||
prometheus+:: {
|
||||
name: 'k8s',
|
||||
replicas: 2,
|
||||
rules: {},
|
||||
renderedRules: {},
|
||||
namespaces: ['default', 'kube-system', $._config.namespace],
|
||||
},
|
||||
},
|
||||
prometheus+:: {
serviceAccount:
local serviceAccount = k.core.v1.serviceAccount;
local p = self,

serviceAccount.new('prometheus-' + $._config.prometheus.name) +
serviceAccount.mixin.metadata.withNamespace($._config.namespace),
service:
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
name:: $._config.prometheus.name,
namespace:: $._config.namespace,
roleBindingNamespaces:: $._config.prometheus.namespaces,
replicas:: $._config.prometheus.replicas,
prometheusRules:: $._config.prometheus.rules,
alertmanagerName:: $.alertmanager.service.metadata.name,

local prometheusPort = servicePort.newNamed('web', 9090, 'web');

service.new('prometheus-' + $._config.prometheus.name, { app: 'prometheus', prometheus: $._config.prometheus.name }, prometheusPort) +
service.mixin.spec.withSessionAffinity('ClientIP') +
service.mixin.metadata.withNamespace($._config.namespace) +
service.mixin.metadata.withLabels({ prometheus: $._config.prometheus.name }),
[if $._config.prometheus.rules != null && $._config.prometheus.rules != {} then 'rules']:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: {
labels: {
prometheus: $._config.prometheus.name,
role: 'alert-rules',
},
name: 'prometheus-' + $._config.prometheus.name + '-rules',
namespace: $._config.namespace,
},
spec: {
groups: $._config.prometheus.rules.groups,
},
serviceAccount: {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: {
name: 'prometheus-' + p.name,
namespace: p.namespace,
},
},

service: {
apiVersion: 'v1',
kind: 'Service',
metadata: {
name: 'prometheus-' + p.name,
namespace: p.namespace,
labels: { prometheus: p.name },
},
spec: {
ports: [
{ name: 'web', targetPort: 'web', port: 9090 },
],
selector: { app: 'prometheus', prometheus: p.name },
sessionAffinity: 'ClientIP',
},
},

rules: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'PrometheusRule',
metadata: {
labels: {
prometheus: p.name,
role: 'alert-rules',
},
name: 'prometheus-' + p.name + '-rules',
namespace: p.namespace,
},
spec: {
groups: p.prometheusRules.groups,
},
},

roleBindingSpecificNamespaces:
local roleBinding = k.rbac.v1.roleBinding;
local newSpecificRoleBinding(namespace) = {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
name: 'prometheus-' + p.name,
namespace: namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: 'prometheus-' + p.name,
},
subjects: [{
kind: 'ServiceAccount',
name: 'prometheus-' + p.name,
namespace: p.namespace,
}],
};
{
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBindingList',
items: [newSpecificRoleBinding(x) for x in p.roleBindingNamespaces],
},

local newSpecificRoleBinding(namespace) =
roleBinding.new() +
roleBinding.mixin.metadata.withName('prometheus-' + $._config.prometheus.name) +
roleBinding.mixin.metadata.withNamespace(namespace) +
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
roleBinding.mixin.roleRef.withName('prometheus-' + $._config.prometheus.name) +
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-' + $._config.prometheus.name, namespace: $._config.namespace }]);
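The rewrite drops the ksonnet builder chains (`roleBinding.new() + ...mixin...`) in favor of plain jsonnet objects, with `local p = self` giving every manifest access to the hidden `name::`/`namespace::` fields. A stripped-down sketch of that pattern (all values illustrative):

    {
      prometheus+:: {
        local p = self,
        name:: 'k8s',
        namespace:: 'default',

        serviceAccount: {
          apiVersion: 'v1',
          kind: 'ServiceAccount',
          metadata: { name: 'prometheus-' + p.name, namespace: p.namespace },
        },
      },
    }

Overriding the hidden field once, e.g. `{ prometheus+:: { name:: 'payments' } }`, renames every manifest derived from it.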
clusterRole: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRole',
metadata: { name: 'prometheus-' + p.name },
rules: [
{
apiGroups: [''],
resources: ['nodes/metrics'],
verbs: ['get'],
},
{
nonResourceURLs: ['/metrics'],
verbs: ['get'],
},
],
},

local roleBindigList = k.rbac.v1.roleBindingList;
roleBindigList.new([newSpecificRoleBinding(x) for x in $._config.prometheus.namespaces]),
clusterRole:
local clusterRole = k.rbac.v1.clusterRole;
local policyRule = clusterRole.rulesType;
roleConfig: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: {
name: 'prometheus-' + p.name + '-config',
namespace: p.namespace,
},
rules: [{
apiGroups: [''],
resources: ['configmaps'],
verbs: ['get'],
}],
},

local nodeMetricsRule = policyRule.new() +
policyRule.withApiGroups(['']) +
policyRule.withResources(['nodes/metrics']) +
policyRule.withVerbs(['get']);
roleBindingConfig: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
name: 'prometheus-' + p.name + '-config',
namespace: p.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: 'prometheus-' + p.name + '-config',
},
subjects: [{
kind: 'ServiceAccount',
name: 'prometheus-' + p.name,
namespace: p.namespace,
}],
},

local metricsRule = policyRule.new() +
policyRule.withNonResourceUrls('/metrics') +
policyRule.withVerbs(['get']);
clusterRoleBinding: {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'ClusterRoleBinding',
metadata: { name: 'prometheus-' + p.name },
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: 'prometheus-' + p.name,
},
subjects: [{
kind: 'ServiceAccount',
name: 'prometheus-' + p.name,
namespace: p.namespace,
}],
},

local rules = [nodeMetricsRule, metricsRule];

clusterRole.new() +
clusterRole.mixin.metadata.withName('prometheus-' + $._config.prometheus.name) +
clusterRole.withRules(rules),
roleConfig:
local role = k.rbac.v1.role;
local policyRule = role.rulesType;

local configmapRule = policyRule.new() +
policyRule.withApiGroups(['']) +
policyRule.withResources([
'configmaps',
]) +
policyRule.withVerbs(['get']);

role.new() +
role.mixin.metadata.withName('prometheus-' + $._config.prometheus.name + '-config') +
role.mixin.metadata.withNamespace($._config.namespace) +
role.withRules(configmapRule),
roleBindingConfig:
local roleBinding = k.rbac.v1.roleBinding;

roleBinding.new() +
roleBinding.mixin.metadata.withName('prometheus-' + $._config.prometheus.name + '-config') +
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
roleBinding.mixin.roleRef.withName('prometheus-' + $._config.prometheus.name + '-config') +
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-' + $._config.prometheus.name, namespace: $._config.namespace }]),
clusterRoleBinding:
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

clusterRoleBinding.new() +
clusterRoleBinding.mixin.metadata.withName('prometheus-' + $._config.prometheus.name) +
clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
clusterRoleBinding.mixin.roleRef.withName('prometheus-' + $._config.prometheus.name) +
clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-' + $._config.prometheus.name, namespace: $._config.namespace }]),
roleSpecificNamespaces:
local role = k.rbac.v1.role;
local policyRule = role.rulesType;
local coreRule = policyRule.new() +
policyRule.withApiGroups(['']) +
policyRule.withResources([
'services',
'endpoints',
'pods',
]) +
policyRule.withVerbs(['get', 'list', 'watch']);

local newSpecificRole(namespace) =
role.new() +
role.mixin.metadata.withName('prometheus-' + $._config.prometheus.name) +
role.mixin.metadata.withNamespace(namespace) +
role.withRules(coreRule);

local roleList = k.rbac.v1.roleList;
roleList.new([newSpecificRole(x) for x in $._config.prometheus.namespaces]),
prometheus:
local statefulSet = k.apps.v1beta2.statefulSet;
local container = statefulSet.mixin.spec.template.spec.containersType;
local resourceRequirements = container.mixin.resourcesType;
local selector = statefulSet.mixin.spec.selectorType;

local resources =
resourceRequirements.new() +
resourceRequirements.withRequests({ memory: '400Mi' });

{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Prometheus',
local newSpecificRole(namespace) = {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: {
name: $._config.prometheus.name,
namespace: $._config.namespace,
labels: {
prometheus: $._config.prometheus.name,
},
name: 'prometheus-' + p.name,
namespace: namespace,
},
spec: {
replicas: $._config.prometheus.replicas,
version: $._config.versions.prometheus,
baseImage: $._config.imageRepos.prometheus,
serviceAccountName: 'prometheus-' + $._config.prometheus.name,
serviceMonitorSelector: {},
serviceMonitorNamespaceSelector: {},
nodeSelector: { 'beta.kubernetes.io/os': 'linux' },
ruleSelector: selector.withMatchLabels({
rules: [
{
apiGroups: [''],
resources: ['services', 'endpoints', 'pods'],
verbs: ['get', 'list', 'watch'],
},
{
apiGroups: ['extensions'],
resources: ['ingresses'],
verbs: ['get', 'list', 'watch'],
},
],
};
{
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleList',
items: [newSpecificRole(x) for x in p.roleBindingNamespaces],
},
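Both the old ksonnet `roleList.new(...)` and the new plain object build the per-namespace Roles with a list comprehension. The pattern in isolation (namespaces and role name illustrative):

    local newSpecificRole(namespace) = {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'Role',
      metadata: { name: 'prometheus-k8s', namespace: namespace },
      rules: [{
        apiGroups: [''],
        resources: ['services', 'endpoints', 'pods'],
        verbs: ['get', 'list', 'watch'],
      }],
    };

    {
      apiVersion: 'rbac.authorization.k8s.io/v1',
      kind: 'RoleList',
      items: [newSpecificRole(ns) for ns in ['default', 'kube-system', 'monitoring']],
    }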
prometheus: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'Prometheus',
metadata: {
name: p.name,
namespace: p.namespace,
labels: { prometheus: p.name },
},
spec: {
replicas: p.replicas,
version: $._config.versions.prometheus,
image: $._config.imageRepos.prometheus + ':' + $._config.versions.prometheus,
serviceAccountName: 'prometheus-' + p.name,
serviceMonitorSelector: {},
podMonitorSelector: {},
probeSelector: {},
serviceMonitorNamespaceSelector: {},
podMonitorNamespaceSelector: {},
probeNamespaceSelector: {},
nodeSelector: { 'kubernetes.io/os': 'linux' },
ruleSelector: {
matchLabels: {
role: 'alert-rules',
prometheus: $._config.prometheus.name,
}),
resources: resources,
alerting: {
alertmanagers: [
prometheus: p.name,
},
},
resources: {
requests: { memory: '400Mi' },
},
alerting: {
alertmanagers: [{
namespace: p.namespace,
name: p.alertmanagerName,
port: 'web',
}],
},
securityContext: {
runAsUser: 1000,
runAsNonRoot: true,
fsGroup: 2000,
},
},
},
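Note the move from `baseImage` to a fully qualified `image`, the new `podMonitor*`/`probe*` selectors, and the `beta.kubernetes.io/os` to `kubernetes.io/os` node-selector rename. Since the CR is now a plain object, spec tweaks are ordinary merges; a sketch (the `retention` field is a standard Prometheus CRD field, values illustrative):

    local kp = import 'kube-prometheus/kube-prometheus.libsonnet';

    kp + {
      prometheus+:: {
        prometheus+: {
          spec+: {
            retention: '30d',   // keep 30 days of TSDB data
            replicas: 3,        // overrides the p.replicas default
          },
        },
      },
    }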
serviceMonitor: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'prometheus',
namespace: p.namespace,
labels: { 'k8s-app': 'prometheus' },
},
spec: {
selector: {
matchLabels: { prometheus: p.name },
},
endpoints: [{
port: 'web',
interval: '30s',
}],
},
},

serviceMonitorKubeScheduler: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-scheduler',
namespace: p.namespace,
labels: { 'k8s-app': 'kube-scheduler' },
},
spec: {
jobLabel: 'k8s-app',
endpoints: [{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: { insecureSkipVerify: true },
}],
selector: {
matchLabels: { 'k8s-app': 'kube-scheduler' },
},
namespaceSelector: {
matchNames: ['kube-system'],
},
},
},

serviceMonitorKubelet: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kubelet',
namespace: p.namespace,
labels: { 'k8s-app': 'kubelet' },
},
spec: {
jobLabel: 'k8s-app',
endpoints: [
{
port: 'https-metrics',
scheme: 'https',
interval: '30s',
honorLabels: true,
tlsConfig: { insecureSkipVerify: true },
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: relabelings,
relabelings: [{
sourceLabels: ['__metrics_path__'],
targetLabel: 'metrics_path',
}],
},
{
port: 'https-metrics',
scheme: 'https',
path: '/metrics/cadvisor',
interval: '30s',
honorLabels: true,
honorTimestamps: false,
tlsConfig: {
insecureSkipVerify: true,
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
relabelings: [{
sourceLabels: ['__metrics_path__'],
targetLabel: 'metrics_path',
}],
metricRelabelings: [
// Drop a bunch of metrics which are disabled but still sent, see
// https://github.com/google/cadvisor/issues/1925.
{
namespace: $._config.namespace,
name: 'alertmanager-' + $._config.alertmanager.name,
port: 'web',
sourceLabels: ['__name__'],
regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)',
action: 'drop',
},
],
},
securityContext: {
runAsUser: 1000,
runAsNonRoot: true,
fsGroup: 2000,
{
port: 'https-metrics',
scheme: 'https',
path: '/metrics/probes',
interval: '30s',
honorLabels: true,
tlsConfig: { insecureSkipVerify: true },
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
relabelings: [{
sourceLabels: ['__metrics_path__'],
targetLabel: 'metrics_path',
}],
},
],
selector: {
matchLabels: { 'k8s-app': 'kubelet' },
},
namespaceSelector: {
matchNames: ['kube-system'],
},
},
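The cadvisor endpoint also gains `honorTimestamps: false` and a `__metrics_path__` relabeling, so the `/metrics`, `/metrics/cadvisor`, and `/metrics/probes` scrapes stay distinguishable in the resulting series. Sharing the drop rule between endpoints is a natural jsonnet refactor; a sketch (the local name is hypothetical):

    // Metrics that cadvisor exports even when the underlying collectors are
    // disabled; see https://github.com/google/cadvisor/issues/1925.
    local dropDisabledCadvisorMetrics = [{
      sourceLabels: ['__name__'],
      regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)',
      action: 'drop',
    }];

    {
      endpoints: [{
        port: 'https-metrics',
        path: '/metrics/cadvisor',
        metricRelabelings: dropDisabledCadvisorMetrics,
      }],
    }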
serviceMonitor:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'prometheus',
namespace: $._config.namespace,
labels: {
'k8s-app': 'prometheus',
},

serviceMonitorKubeControllerManager: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-controller-manager',
namespace: p.namespace,
labels: { 'k8s-app': 'kube-controller-manager' },
},
spec: {
jobLabel: 'k8s-app',
endpoints: [{
port: 'https-metrics',
interval: '30s',
scheme: 'https',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
tlsConfig: {
insecureSkipVerify: true,
},
},
spec: {
selector: {
matchLabels: {
prometheus: $._config.prometheus.name,
},
},
endpoints: [
metricRelabelings: relabelings + [
{
port: 'web',
interval: '30s',
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|request|server).*',
action: 'drop',
},
],
}],
selector: {
matchLabels: { 'k8s-app': 'kube-controller-manager' },
},
namespaceSelector: {
matchNames: ['kube-system'],
},
},
serviceMonitorKubeScheduler:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-scheduler',
namespace: $._config.namespace,
labels: {
'k8s-app': 'kube-scheduler',
},

serviceMonitorApiserver: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-apiserver',
namespace: p.namespace,
labels: { 'k8s-app': 'apiserver' },
},
spec: {
jobLabel: 'component',
selector: {
matchLabels: {
component: 'apiserver',
provider: 'kubernetes',
},
},
spec: {
jobLabel: 'k8s-app',
endpoints: [
namespaceSelector: {
matchNames: ['default'],
},
endpoints: [{
port: 'https',
interval: '30s',
scheme: 'https',
tlsConfig: {
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
serverName: 'kubernetes',
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: relabelings + [
{
port: 'http-metrics',
interval: '30s',
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|server).*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__', 'le'],
regex: 'apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50)',
action: 'drop',
},
],
selector: {
matchLabels: {
'k8s-app': 'kube-scheduler',
},
},
namespaceSelector: {
matchNames: [
'kube-system',
],
},
},
}],
},
serviceMonitorKubelet:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kubelet',
namespace: $._config.namespace,
labels: {
'k8s-app': 'kubelet',
},
},
spec: {
jobLabel: 'k8s-app',
endpoints: [
{
port: 'https-metrics',
scheme: 'https',
interval: '30s',
honorLabels: true,
tlsConfig: {
insecureSkipVerify: true,
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
},
{
port: 'https-metrics',
scheme: 'https',
path: '/metrics/cadvisor',
interval: '30s',
honorLabels: true,
tlsConfig: {
insecureSkipVerify: true,
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: [
// Drop a bunch of metrics which are disabled but still sent, see
// https://github.com/google/cadvisor/issues/1925.
{
sourceLabels: ['__name__'],
regex: 'container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)',
action: 'drop',
},
],
},
],
selector: {
matchLabels: {
'k8s-app': 'kubelet',
},
},
namespaceSelector: {
matchNames: [
'kube-system',
],
},
},
},

serviceMonitorCoreDNS: {
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'coredns',
namespace: p.namespace,
labels: { 'k8s-app': 'coredns' },
},
serviceMonitorKubeControllerManager:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-controller-manager',
namespace: $._config.namespace,
labels: {
'k8s-app': 'kube-controller-manager',
},
spec: {
jobLabel: 'k8s-app',
selector: {
matchLabels: { 'k8s-app': 'kube-dns' },
},
spec: {
jobLabel: 'k8s-app',
endpoints: [
{
port: 'http-metrics',
interval: '30s',
metricRelabelings: [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|request|server).*',
action: 'drop',
},
],
},
],
selector: {
matchLabels: {
'k8s-app': 'kube-controller-manager',
},
},
namespaceSelector: {
matchNames: [
'kube-system',
],
},
},
},
serviceMonitorApiserver:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'kube-apiserver',
namespace: $._config.namespace,
labels: {
'k8s-app': 'apiserver',
},
},
spec: {
jobLabel: 'component',
selector: {
matchLabels: {
component: 'apiserver',
provider: 'kubernetes',
},
},
namespaceSelector: {
matchNames: [
'default',
],
},
endpoints: [
{
port: 'https',
interval: '30s',
scheme: 'https',
tlsConfig: {
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
serverName: 'kubernetes',
},
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
metricRelabelings: [
{
sourceLabels: ['__name__'],
regex: 'etcd_(debugging|disk|request|server).*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_controller_admission_latencies_seconds_.*',
action: 'drop',
},
{
sourceLabels: ['__name__'],
regex: 'apiserver_admission_step_admission_latencies_seconds_.*',
action: 'drop',
},
],
},
],
},
},
serviceMonitorCoreDNS:
{
apiVersion: 'monitoring.coreos.com/v1',
kind: 'ServiceMonitor',
metadata: {
name: 'coredns',
namespace: $._config.namespace,
labels: {
'k8s-app': 'coredns',
},
},
spec: {
jobLabel: 'k8s-app',
selector: {
matchLabels: {
'k8s-app': 'kube-dns',
},
},
namespaceSelector: {
matchNames: [
'kube-system',
],
},
endpoints: [
{
port: 'metrics',
interval: '15s',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
},
],
namespaceSelector: {
matchNames: ['kube-system'],
},
endpoints: [{
port: 'metrics',
interval: '15s',
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
}],
},
},
},
}
jsonnet/kube-prometheus/rules/general.libsonnet (new file, 19 lines)
@@ -0,0 +1,19 @@
{
prometheusRules+:: {
groups+: [
{
name: 'kube-prometheus-general.rules',
rules: [
{
expr: 'count without(instance, pod, node) (up == 1)',
record: 'count:up1',
},
{
expr: 'count without(instance, pod, node) (up == 0)',
record: 'count:up0',
},
],
},
],
},
}
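`count:up1` and `count:up0` aggregate target health by stripping the instance-identifying labels, so a dashboard can show "N targets up / M down" per job without touching raw `up` series. To render just these groups, the hidden field can be addressed directly; a sketch, assuming the file sits on the import path (the render file name is hypothetical):

    // render-general.jsonnet -- evaluate with:
    // jsonnet -J jsonnet/kube-prometheus/rules render-general.jsonnet
    (import 'general.libsonnet').prometheusRules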
@@ -5,13 +5,9 @@
name: 'kube-prometheus-node-recording.rules',
rules: [
{
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[3m])) BY (instance)',
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) BY (instance)',
record: 'instance:node_cpu:rate:sum',
},
{
expr: 'sum((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"})) BY (instance)',
record: 'instance:node_filesystem_usage:sum',
},
{
expr: 'sum(rate(node_network_receive_bytes_total[3m])) BY (instance)',
record: 'instance:node_network_receive_bytes:rate:sum',
@@ -21,11 +17,11 @@
record: 'instance:node_network_transmit_bytes:rate:sum',
},
{
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance)',
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance)',
record: 'instance:node_cpu:ratio',
},
{
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m]))',
expr: 'sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m]))',
record: 'cluster:node_cpu:sum_rate5m',
},
{
@@ -1 +1,2 @@
(import 'node-rules.libsonnet')
(import 'node-rules.libsonnet') +
(import 'general.libsonnet')
@@ -1,14 +1,14 @@
{
"dependencies": [
{
"name": "kube-prometheus",
"source": {
"git": {
"remote": ".",
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "."
"version": 1,
"dependencies": [
{
"source": {
"local": {
"directory": "jsonnet/kube-prometheus"
}
]
},
"version": ""
}
],
"legacyImports": true
}
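This is the jsonnet-bundler v1 manifest format: an explicit `"version": 1`, structured per-dependency `source` objects (here a `local` source instead of the old self-referencing git remote), and `"legacyImports": true`, which, as I understand jb's behavior, keeps it creating the old short `vendor/<name>` symlinks. Existing short-form imports should therefore keep resolving; a sketch:

    // Still expected to work with "legacyImports": true, because
    // vendor/kube-prometheus remains a symlink to the qualified vendor path:
    local kp = import 'kube-prometheus/kube-prometheus.libsonnet';
    kp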
@@ -1,84 +1,167 @@
{
"dependencies": [
{
"name": "kube-prometheus",
"source": {
"git": {
"remote": ".",
"subdir": "jsonnet/kube-prometheus"
}
},
"version": "81b9c9f9f6886ba1fbd61b05cdf0cc4f4d95eba8"
},
{
"name": "ksonnet",
"source": {
"git": {
"remote": "https://github.com/ksonnet/ksonnet-lib",
"subdir": ""
}
},
"version": "d03da231d6c8bd74437b74a1e9e8b966f13dffa2"
},
{
"name": "kubernetes-mixin",
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin",
"subdir": ""
}
},
"version": "ae58a33e85b191a8760a8d1bd8d3cda2fd046d05"
},
{
"name": "grafonnet",
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib",
"subdir": "grafonnet"
}
},
"version": "a6896d19aedc46ecf80dd64967191b9fd6f75f45"
},
{
"name": "grafana-builder",
"source": {
"git": {
"remote": "https://github.com/kausalco/public",
"subdir": "grafana-builder"
}
},
"version": "a73d6c3e7f5804fc7a16f592b42a62384605046c"
},
{
"name": "grafana",
"source": {
"git": {
"remote": "https://github.com/brancz/kubernetes-grafana",
"subdir": "grafana"
}
},
"version": "b6db6bdbdc8d7f2f8834a8044897ea6322a0f6ad"
},
{
"name": "prometheus-operator",
"source": {
"git": {
"remote": "https://github.com/coreos/prometheus-operator",
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "7a25bf6b6bb2347dacb235659b73bc210117acc7"
},
{
"name": "etcd-mixin",
"source": {
"git": {
"remote": "https://github.com/coreos/etcd",
"subdir": "Documentation/etcd-mixin"
}
},
"version": "919b93b742c76b12a83bdf8885fa75f11db6bcac"
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/brancz/kubernetes-grafana.git",
"subdir": "grafana"
}
]
},
"version": "014301fd5f71d8305a395b2fb437089a7b1a3999",
"sum": "RHtpk2c0CcliWyt6F4DIgwpi4cEfHADK7nAxIw6RTGs="
},
{
"source": {
"git": {
"remote": "https://github.com/etcd-io/etcd.git",
"subdir": "Documentation/etcd-mixin"
}
},
"version": "ca866c02422ff3f3d1f0876898a30c33dd7bcccf",
"sum": "bLqTqEr0jky9zz5MV/7ucn6H5mph2NlXas0TVnGNB1Y="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/grafonnet-lib.git",
"subdir": "grafonnet"
}
},
"version": "356bd73e4792ffe107725776ca8946895969c191",
"sum": "CSMZ3dJrpJpwvffie8BqcfrIVVwiKNqdPEN+1XWRBGU="
},
{
"source": {
"git": {
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "grafana-builder"
}
},
"version": "9c3fb8096e1f80e2f3a84566566906ff187f5a8c",
"sum": "9/eJqljTTtJeq9QRjabdKWL6yD8a7VzLmGKBK3ir77k="
},
{
"source": {
"git": {
"remote": "https://github.com/ksonnet/ksonnet-lib.git",
"subdir": ""
}
},
"version": "0d2f82676817bbf9e4acf6495b2090205f323b9f",
"sum": "h28BXZ7+vczxYJ2sCt8JuR9+yznRtU/iA6DCpQUrtEg=",
"name": "ksonnet"
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
"subdir": ""
}
},
"version": "1941868d86a7c37e5505a14e3d567bda90e80357",
"sum": "ypWxhZVFWF53k7qIkSpUvnI6IGyFBNKmgrzjNtLwMIM="
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
"subdir": "lib/promgrafonnet"
}
},
"version": "ead45674dba3c8712e422d99223453177aac6bf4",
"sum": "zv7hXGui6BfHzE9wPatHI/AGZa4A2WKo6pq7ZdqBsps="
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes/kube-state-metrics.git",
"subdir": "jsonnet/kube-state-metrics"
}
},
"version": "89aaf6c524ee891140c4c8f2a05b1b16f5847309",
"sum": "zD/pbQLnQq+5hegEelaheHS8mn1h09GTktFO74iwlBI="
},
{
"source": {
"git": {
"remote": "https://github.com/kubernetes/kube-state-metrics.git",
"subdir": "jsonnet/kube-state-metrics-mixin"
}
},
"version": "89aaf6c524ee891140c4c8f2a05b1b16f5847309",
"sum": "E1GGavnf9PCWBm4WVrxWnc0FIj72UcbcweqGioWrOdU="
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus-operator/prometheus-operator.git",
"subdir": "jsonnet/mixin"
}
},
"version": "d8b7d3766225908d0239fd0d78258892cd0fc384",
"sum": "6reUygVmQrLEWQzTKcH8ceDbvM+2ztK3z2VBR2K2l+U="
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus-operator/prometheus-operator.git",
"subdir": "jsonnet/prometheus-operator"
}
},
"version": "d8b7d3766225908d0239fd0d78258892cd0fc384",
"sum": "Nl+N/h76bzD9tZ8tx7tuNIKHwCIJ9zyOsAWplH8HvAE="
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/alertmanager.git",
"subdir": "doc/alertmanager-mixin"
}
},
"version": "193ebba04d1e70d971047e983a0b489112610460",
"sum": "QcftU7gjCQyj7B6M4YJeCAeaPd0kwxd4J4rolo7AnLE=",
"name": "alertmanager"
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/node_exporter.git",
"subdir": "docs/node-mixin"
}
},
"version": "8b466360a35581e0301bd22918be7011cf4203c3",
"sum": "rvyiD/yCB4BeYAWqYF53bP8c+aCUt2ipLHW2Ea8ELO8="
},
{
"source": {
"git": {
"remote": "https://github.com/prometheus/prometheus.git",
"subdir": "documentation/prometheus-mixin"
}
},
"version": "26d89b4b0776fe4cd5a3656dfa520f119a375273",
"sum": "1VRVMuxAEZ9vdGHFlndmG9iQzDD6AoIXrX80CDpGDaU=",
"name": "prometheus"
},
{
"source": {
"git": {
"remote": "https://github.com/thanos-io/thanos.git",
"subdir": "mixin"
}
},
"version": "37e6ef61566c7c70793ba6d128f00c4c66cb2402",
"sum": "OptiWUMOHFrRGTZhSfxV1RCeXZ90qsefGNTD4lDYVG0="
},
{
"source": {
"local": {
"directory": "jsonnet/kube-prometheus"
}
},
"version": ""
}
],
"legacyImports": false
}
@@ -1,17 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./manifests/00namespace-namespace.yaml
- ./manifests/0prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
- ./manifests/0prometheus-operator-0prometheusCustomResourceDefinition.yaml
- ./manifests/0prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
- ./manifests/0prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
- ./manifests/0prometheus-operator-clusterRole.yaml
- ./manifests/0prometheus-operator-clusterRoleBinding.yaml
- ./manifests/0prometheus-operator-deployment.yaml
- ./manifests/0prometheus-operator-service.yaml
- ./manifests/0prometheus-operator-serviceAccount.yaml
- ./manifests/0prometheus-operator-serviceMonitor.yaml
- ./manifests/alertmanager-alertmanager.yaml
- ./manifests/alertmanager-secret.yaml
- ./manifests/alertmanager-service.yaml
@@ -27,8 +16,6 @@ resources:
- ./manifests/kube-state-metrics-clusterRole.yaml
- ./manifests/kube-state-metrics-clusterRoleBinding.yaml
- ./manifests/kube-state-metrics-deployment.yaml
- ./manifests/kube-state-metrics-role.yaml
- ./manifests/kube-state-metrics-roleBinding.yaml
- ./manifests/kube-state-metrics-service.yaml
- ./manifests/kube-state-metrics-serviceAccount.yaml
- ./manifests/kube-state-metrics-serviceMonitor.yaml
@@ -49,8 +36,10 @@ resources:
- ./manifests/prometheus-adapter-roleBindingAuthReader.yaml
- ./manifests/prometheus-adapter-service.yaml
- ./manifests/prometheus-adapter-serviceAccount.yaml
- ./manifests/prometheus-adapter-serviceMonitor.yaml
- ./manifests/prometheus-clusterRole.yaml
- ./manifests/prometheus-clusterRoleBinding.yaml
- ./manifests/prometheus-operator-serviceMonitor.yaml
- ./manifests/prometheus-prometheus.yaml
- ./manifests/prometheus-roleBindingConfig.yaml
- ./manifests/prometheus-roleBindingSpecificNamespaces.yaml
@@ -65,3 +54,17 @@ resources:
- ./manifests/prometheus-serviceMonitorKubeControllerManager.yaml
- ./manifests/prometheus-serviceMonitorKubeScheduler.yaml
- ./manifests/prometheus-serviceMonitorKubelet.yaml
- ./manifests/setup/0namespace-namespace.yaml
- ./manifests/setup/prometheus-operator-0alertmanagerConfigCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0alertmanagerCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0podmonitorCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0probeCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0prometheusCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0servicemonitorCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-0thanosrulerCustomResourceDefinition.yaml
- ./manifests/setup/prometheus-operator-clusterRole.yaml
- ./manifests/setup/prometheus-operator-clusterRoleBinding.yaml
- ./manifests/setup/prometheus-operator-deployment.yaml
- ./manifests/setup/prometheus-operator-service.yaml
- ./manifests/setup/prometheus-operator-serviceAccount.yaml
(Two file diffs were suppressed because they are too large, and some files were not shown because too many files changed in this diff.)