223 Commits

Author SHA1 Message Date
6cfd02bc26 rompr new 2025-12-08 20:00:39 +01:00
0033a5a231 bogus commit for rompr 2024-10-29 10:01:12 +01:00
70ccdf43ef Merge branch 'main' of ssh://gitea.service.nr5:2222/chaos/docker-images
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build is failing
2024-10-29 09:52:10 +01:00
401acdc54f new rompr version 2024-10-29 09:47:26 +01:00
c6a8464bb2 why _?111; git status; kubectl apply -n kube-system -f descheduler-cronjob.yaml
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone Build is passing
2024-09-13 20:09:41 +02:00
d1247a3b02 listing
Some checks failed
continuous-integration/drone/push Build is failing
2024-09-13 20:07:39 +02:00
83e3907708 only apps
Some checks failed
continuous-integration/drone/push Build is failing
2024-09-13 09:58:44 +02:00
630f321651 only apps 2024-09-13 09:57:03 +02:00
65318147c7 with git/testing
Some checks failed
continuous-integration/drone/push Build is running
continuous-integration/drone Build is failing
2024-04-21 19:23:45 +02:00
5b5c21b67b does this work like this? right? - man-db may stay
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-21 17:44:07 +02:00
3dac0b92f1 does this work like this? right? - man-db may stay
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-21 17:38:45 +02:00
35ec70792c does this work like this? right?
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-21 17:36:58 +02:00
4ccfd0d648 building testing with git
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-21 17:27:46 +02:00
ccbe462a76 building testing with git
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-04-21 17:26:41 +02:00
98234e569a WHOA Sun 21 Apr 17:23:21 CEST 2024
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:23:21 +02:00
8c96788392 Sun 21 Apr 17:17:50 CEST 2024
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:17:50 +02:00
60417861fc more changes
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:13:27 +02:00
dafa848d80 more changes
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:09:07 +02:00
4579621b03 more changes
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:07:27 +02:00
542fc02720 more changes
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:05:43 +02:00
4b2f5d8c9f merged
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 17:02:48 +02:00
7da16def78 .gitignore 2024-04-21 17:02:29 +02:00
bcd8242061 what is happening here, for all hail's sake
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 16:29:41 +02:00
6639d8d0c2 what's happening
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 16:16:12 +02:00
3ced13f704 what's happening
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 16:15:00 +02:00
d4f052787f cleanup
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 16:06:32 +02:00
d55511e84e bogus change
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-21 16:04:49 +02:00
11c3f3174d bogus change
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-04-21 15:51:03 +02:00
a770e55f47 loops and all in one pipeline 2024-04-21 12:50:23 +02:00
ac02ddcc00 Merge branch 'main' of ssh://gitea.service.nr5:2222/chaos/docker-images 2024-04-21 11:36:08 +02:00
0b93d83014 git log step
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-15 17:42:09 +02:00
0da2ea2477 removal clearer typed
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-08 19:27:09 +02:00
5751f2c82e removing man-db in first run
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-08 18:58:03 +02:00
9d83926159 git in debian-stable image
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-08 18:47:43 +02:00
dd52955602 one character less optimization 2024-04-08 18:02:10 +02:00
b451999d77 dry_run and cache_from its own image
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-08 16:53:49 +02:00
1d84d11f37 new ROMPR Version
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-04-08 16:29:04 +02:00
3067ebd5de new rompr version 2024-03-21 21:44:59 +01:00
fb1a6e307f all images again
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build was killed
2024-02-27 18:18:39 +01:00
82d001e962 distcc stuff removed
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-02-27 18:10:35 +01:00
cde42fcd56 distcc stuff removed
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build is failing
2024-02-27 17:32:20 +01:00
801e76f0d3 distcc stuff removed
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-02-27 17:25:55 +01:00
323f9eaff0 only openwrt image
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-02-27 17:21:33 +01:00
09c98d766a only openwrt image
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-27 17:21:00 +01:00
2ebc1ec635 project rename
Some checks reported errors
continuous-integration/drone Build was killed
2024-02-26 17:02:22 +01:00
67787c4fe0 enabling openwrt image
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-02-26 16:57:24 +01:00
fef81d7c28 using our own image
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:53:18 +01:00
7fbaf62415 using our own image
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:51:04 +01:00
7a70000833 using our own image 2024-02-26 16:50:40 +01:00
5058b10769 openwrt builder
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:48:16 +01:00
3b7ac02aed openwrt builder
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:46:46 +01:00
fc591f4dac openwrt builder
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:45:25 +01:00
36c7b2d0b5 openwrt builder
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2024-02-26 16:43:54 +01:00
cf8ac80bc5 all packs
Some checks are pending
continuous-integration/drone/push Build is running
continuous-integration/drone Build is passing
2024-01-17 18:28:19 +01:00
5c2bded912 ENV var fix 2024-01-17 18:07:20 +01:00
55ace2881c using fpm-socket
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 18:04:49 +01:00
75edd26772 php-fpm proper version
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 17:48:02 +01:00
21fab1e23f php-fpm proper version
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 17:38:06 +01:00
45ffac4318 fewer layers in rompr image 2024-01-17 17:32:45 +01:00
e702963a01 all packs again 2024-01-17 17:26:20 +01:00
ca165f5c5e how to run 2024-01-17 17:24:59 +01:00
44ae607709 removed obsolete kubernetes stuff 2024-01-17 17:13:28 +01:00
e0824bf3c1 rompr version 2.x
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 17:07:42 +01:00
95e8c6f363 all packages again
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 14:09:07 +01:00
123eeddf49 using debian again, we need chmod
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 13:45:08 +01:00
5a96d89fc2 experimental features and copy chmod
Some checks failed
continuous-integration/drone/push Build is failing
2024-01-17 12:46:58 +01:00
296ab18421 chmod before copy
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-17 12:13:54 +01:00
3477d59e07 from scratch and not debian
Some checks failed
continuous-integration/drone/push Build is failing
2024-01-17 11:26:21 +01:00
0b3cbc584f chmod
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-01-17 11:04:53 +01:00
0075dac22d chmod?
Some checks failed
continuous-integration/drone/push Build is failing
2024-01-16 14:38:00 +01:00
9ce1a6b610 all of them again
Some checks failed
continuous-integration/drone/push Build is failing
2024-01-16 13:50:16 +01:00
e811e80f25 here we go
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-16 13:18:26 +01:00
397dd88ebb The right image might help
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-01-16 12:57:20 +01:00
da88bfdfc0 The right image might help
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-01-16 12:55:29 +01:00
7c94d1d7a7 all images again 2024-01-10 16:11:24 +01:00
598253193b downloading mods
Some checks reported errors
continuous-integration/drone/push Build encountered an error
continuous-integration/drone Build is failing
2024-01-10 11:34:46 +01:00
ec3e999375 fewer images
Some checks failed
continuous-integration/drone/push Build is failing
2023-12-21 11:59:04 +01:00
b423324a75 packages as steps
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build was killed
2023-12-19 13:55:34 +01:00
a2143bfc0a as steps 2
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-19 13:46:05 +01:00
2e76ec3da9 as steps
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-19 13:45:17 +01:00
01208f9413 as steps
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-19 13:44:30 +01:00
c72f7b7a1c as steps
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-19 13:43:46 +01:00
67edba2276 mosquitto prometheus exporter image build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-12-19 12:46:37 +01:00
315d8bd632 apps (some of them), typo
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-15 18:52:13 +01:00
13898378cd apps (some of them)
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 18:51:23 +01:00
1815e60a37 registry typo fix
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-12-15 18:37:54 +01:00
72aeb85a2e looping again
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-12-15 18:24:39 +01:00
a6d2e03707 looping again
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 18:23:11 +01:00
da199f3fe0 new sources format, who knew?
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-15 18:12:50 +01:00
c686d6fe91 sources.list gone?
Some checks failed
continuous-integration/drone/push Build is failing
2023-12-15 18:06:59 +01:00
86855f541a context for drone and cleanup/update
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-15 18:04:23 +01:00
3debf1dabc platform part, not an array, and it's plugins/docker
Some checks failed
continuous-integration/drone/push Build is failing
2023-12-15 17:58:30 +01:00
af467c339e platform part, not an array
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:55:27 +01:00
47c4908ffe platform part
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:54:12 +01:00
4cb9b0c3b5 platform part
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:53:35 +01:00
f316936acc no loops for now2
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-12-15 17:40:43 +01:00
f353210a42 no loops for now
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:40:20 +01:00
eca7f86f4f no loops for now
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:39:47 +01:00
64196d7209 what
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-15 17:33:00 +01:00
065ff0a85d drone as jsonnet
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-13 18:55:09 +01:00
2604d026e4 drone as jsonnet
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-13 18:54:06 +01:00
dfd2866c06 drone as jsonnet
Some checks reported errors
continuous-integration/drone/push Build encountered an error
2023-12-13 18:53:30 +01:00
5e271a7593 drone as jsonnet
Some checks reported errors
continuous-integration/drone Build encountered an error
2023-12-13 18:51:45 +01:00
77a646866d more obsolete stuff cleanup 2023-12-13 18:11:54 +01:00
e60be3ab70 removing kubernetes stuff 2023-12-13 18:09:45 +01:00
757ab5a092 removed submodules 2023-12-13 18:03:49 +01:00
2e3bb35f86 coredns update 2023-10-15 19:17:51 +02:00
47cbd88587 coredns / cluster upgrade 2023-01-16 18:57:58 +01:00
dd74762778 tekton PVC? required? 2023-01-12 20:54:31 +01:00
07d7f45e64 other things 2023-01-12 20:53:46 +01:00
536c0c4ddc flannel 0.20 upgrade 2023-01-12 20:53:23 +01:00
fcb2e69615 upgrade galore from 1.23 to 1.26, and the cluster is still at 1.25? See: Readme.md 2023-01-12 20:52:46 +01:00
e2e032ac94 another nfs-client provisioner 2022-12-08 17:51:23 +01:00
4bbf79569c another nfs-client provisioner 2022-12-08 17:47:14 +01:00
273fb0e252 more updates 2022-12-08 17:09:38 +01:00
62f5788742 changing output dir 2022-12-08 16:47:19 +01:00
9b2d2a9d95 php-fpm 2022-12-08 16:43:36 +01:00
b5ff289f66 stuff 2022-12-08 16:39:52 +01:00
7cb8d572e7 stuff 2022-12-08 14:03:01 +01:00
14aceae467 new version and create dirs on run 2022-12-08 13:57:10 +01:00
604d065252 new version and create dirs on run 2022-12-08 13:09:24 +01:00
b50d6de8f7 cleanup 2022-11-18 10:26:13 +01:00
79c4e5e0c7 tekton stuff and install 2022-11-18 10:24:39 +01:00
d7241c7563 removed obsolete submods 2022-11-18 10:21:37 +01:00
8fbf07efdf removed descheduler, helm is on its way 2022-10-25 14:03:10 +02:00
beb1bfe0da nginx ingress is installed via helm now 2022-10-25 14:01:34 +02:00
8b62746bcc cleanup 2022-10-12 13:20:42 +02:00
94b39a804b merged 2022-09-19 16:58:14 +02:00
43d17581b3 gitea and apt-cacher 2022-09-19 16:56:40 +02:00
180d28fe80 Merge branch 'master' of git.lan:chaos/kubernetes 2022-09-19 16:54:53 +02:00
30ba290918 don't know why this shit doesn't run anymore 2022-09-10 13:32:34 +02:00
b111463cf5 Merge branch 'master' of git.lan:chaos/kubernetes 2022-08-24 19:17:10 +02:00
c2f6c546eb gitea uses ebin02 2022-08-24 19:16:24 +02:00
748b94f069 local changes 2022-07-30 12:54:52 +02:00
59c019727d rompr version 1.61 2022-07-30 12:51:17 +02:00
17f8b2f5cb mosquitto and prometheus 2022-07-30 12:43:56 +02:00
105e051d64 grav and tekton 2022-07-30 12:33:26 +02:00
9b92cf35e0 Merge branch 'master' of git.lan:chaos/kubernetes 2022-07-30 12:29:55 +02:00
41a2ba8c82 Dockerfile using our debian image 2022-07-30 12:29:43 +02:00
3b552f3134 my changes 2022-07-30 11:47:09 +02:00
7c778d3794 pipeline for mariadb prometheus 2022-07-29 18:42:25 +02:00
a608ac1297 mariadb pipeline 2022-07-29 18:41:01 +02:00
89c3eaac22 dolibarr and curl 2022-07-28 19:08:22 +02:00
7505262bc9 pipelinerun for nextcloud 2022-07-28 18:57:19 +02:00
9c88f4bc6c nextcloud pipelinerun 2022-07-28 18:52:51 +02:00
f96313a307 descheduler 2022-06-22 21:00:51 +02:00
1d3eb09904 descheduler 2022-06-22 21:00:18 +02:00
287458f48b gitea liveness probes and some config updates 2022-06-21 12:29:35 +02:00
5affbfd886 gitea liveness probes and some config updates 2022-06-21 12:27:57 +02:00
c1b864155e nextcloud 24 2022-05-08 11:33:56 +02:00
2827dac20c nextcloud 24 2022-05-07 10:47:51 +02:00
0c8338cd86 nextcloud 24 2022-05-06 19:44:19 +02:00
62aa39b493 descheduler still a mystery 2022-03-20 11:23:40 +01:00
c626429abf more refactoring 2022-03-16 19:58:37 +01:00
237981b8b2 multiarch-support is gone in bullseye 2022-03-16 19:28:46 +01:00
7763958f0f using another src dir 2022-03-16 19:06:17 +01:00
d904f51d20 migrated base images to pipeline runs 2022-03-16 18:33:09 +01:00
613da54d99 migrated base images to pipeline runs 2022-03-16 18:30:18 +01:00
06c173e650 refactoring 2022-03-16 18:11:11 +01:00
23e696c1a5 listening on localhost tcp 2022-03-15 14:02:19 +01:00
17f490accb finding our way 2022-03-15 13:40:50 +01:00
b4d33528a2 finding our way 2022-03-15 13:09:50 +01:00
857641a92e git-clone tasks and updates 2022-03-15 12:55:42 +01:00
5a2b1a1521 git-clone tasks and updates 2022-03-15 12:34:17 +01:00
e1930c8fae moved to files some configs I have 2022-03-14 16:53:07 +01:00
48cc3f2b66 using bullseye 2022-03-14 16:52:04 +01:00
19ffec27ff using bullseye 2022-03-14 16:45:24 +01:00
d860eee58e using bullseye 2022-03-14 16:41:24 +01:00
84acbf3c2c using bullseye 2022-03-14 16:38:44 +01:00
74901c0cb8 merged with v0.10.33 2022-03-14 09:55:34 +01:00
6bed4a690d php-fpm www.conf listen tcp 2022-03-14 09:10:27 +01:00
2f775470fd php-fpm www.conf listen tcp 2022-03-13 14:18:44 +01:00
2efe4378cb php-fpm www.conf listen tcp 2022-03-13 13:44:50 +01:00
f722ad99bf php-fpm www.conf listen tcp 2022-03-13 13:41:48 +01:00
2926f20542 php-fpm www.conf listen tcp 2022-03-13 13:32:12 +01:00
cc7cef4abe php-fpm debian image 2022-03-13 13:25:29 +01:00
9aab08d889 php-fpm debian image 2022-03-13 12:59:51 +01:00
4b4f0055cc php-fpm debian image 2022-03-13 12:53:19 +01:00
ae30b7dc0b php-fpm debian image 2022-03-13 12:43:39 +01:00
f703c33044 php-fpm debian image 2022-03-13 12:39:31 +01:00
e3e8308416 php-fpm debian image 2022-03-13 12:25:40 +01:00
785c24e32f php-fpm debian image 2022-03-13 12:16:32 +01:00
942b54c3fc php-fpm debian image 2022-03-13 12:13:17 +01:00
8c67a09c57 php-fpm debian image 2022-03-13 12:05:46 +01:00
2e4717d508 php-fpm debian image 2022-03-13 12:02:59 +01:00
bf902aebc5 dolibarr CRM 2022-03-12 19:43:18 +01:00
3c16cbdc59 dolibarr CRM 2022-03-12 16:26:12 +01:00
8732c960bb dolibarr CRM 2022-03-12 15:36:23 +01:00
edb5dd8e83 dolibarr CRM 2022-03-12 15:30:57 +01:00
c0c6d618c7 dolibarr CRM 2022-03-12 15:30:01 +01:00
1387477174 dolibarr CRM 2022-03-12 15:11:38 +01:00
4ed19d3f76 dolibarr CRM 2022-03-12 14:57:28 +01:00
de9e3c4602 dolibarr CRM 2022-03-12 14:35:29 +01:00
423dc10e5f dolibarr CRM 2022-03-12 13:18:59 +01:00
40fbf50ae1 dolibarr CRM 2022-03-12 13:12:45 +01:00
97e55e032b dolibarr CRM 2022-03-12 13:09:51 +01:00
8d5193dc16 dolibarr CRM 2022-03-12 13:06:51 +01:00
a69ebc9779 dolibarr CRM 2022-03-12 13:04:55 +01:00
057be561c5 dolibarr CRM 2022-03-12 12:59:30 +01:00
b42e651e03 this stuff doesn't quite work 2022-02-01 22:38:32 +01:00
27724423fb using kaniko task from tectoncd catalog 2022-02-01 20:38:21 +01:00
2a32977d80 docker-reg-ui needs to be latest 2022-02-01 18:56:27 +01:00
9f117548c3 armhf experiments 2022-02-01 18:56:08 +01:00
cfd86e1aa7 also building armhf 2022-02-01 17:42:51 +01:00
1abb9c3d48 also building armhf 2022-02-01 17:19:42 +01:00
225c04e35b also building armhf 2022-02-01 17:19:36 +01:00
50ee3e875a new nextcloud version 23 2022-01-28 12:00:09 +01:00
af4e616d6d error page upgrade for rompr 2022-01-19 18:11:41 +01:00
0828492d42 new rompr version 2022-01-19 17:57:14 +01:00
93b01e5abb stuff 2022-01-19 17:42:42 +01:00
83241b2602 instructions 2021-11-03 20:31:28 +01:00
a438815b1a consul helm values, unused for now 2021-10-26 20:09:31 +02:00
f372124fa9 removed mosquitto-exporter submod 2021-10-26 20:09:07 +02:00
320cd6eb34 coredns now also serves .lan 2021-10-21 12:16:34 +02:00
a46743bc96 authelia works 2021-10-19 13:35:23 +02:00
aa44e55363 version string in config needs to be updated, hooray 2021-10-18 14:57:13 +02:00
b55e6b247d version string in config needs to be updated, hooray 2021-10-18 14:32:05 +02:00
4e37047691 wtf 2021-10-18 14:24:16 +02:00
b90687f69c Nextcloud 22 2021-10-18 13:30:12 +02:00
1896ea8be1 Nextcloud 22 2021-10-18 13:09:05 +02:00
688485987f antiaffinities and namespaces 2021-10-16 18:23:19 +02:00
f006923f61 I'm going slightly mad 2021-10-14 19:31:52 +02:00
1eb12be2e8 adapting memory requests 2021-10-07 11:31:38 +02:00
143 changed files with 1563 additions and 11184 deletions

.drone.jsonnet (new file, +77)

@@ -0,0 +1,77 @@
#local dirs = ['_CI-CD', 'apps'];
local dirs = ['apps'];
local packages = ['debian-stable', 'debian-stable-build-essential', 'debian-stable-openwrt',
                  'debian-golang', 'debian-stable-php-fpm', 'debian-testing'];
#local packages = ['debian-stable-openwrt'];
local apps = ['rompr', 'apt-cacher-ng', 'curl', 'mosquitto', 'mosquitto-prometheus-exporter'];
#local apps = ['rompr'];

local build(dir, package) = {
  name: '%(package)s' % { package: package },
  image: 'plugins/docker',
  settings: {
    context: '%(dir)s/%(package)s' % { dir: dir, package: package },
    dockerfile: '%(dir)s/%(package)s/Dockerfile' % { dir: dir, package: package },
    registry: 'http://cr.wks',
    insecure: 'true',
    purge: 'false',
    experimental: 'true',
    tags: ['latest'],
    repo: 'cr.wks/%(package)s' % { package: package },
    cache_from: 'cr.wks/%(package)s:latest' % { package: package },
  },
};

[
  {
    kind: 'pipeline',
    type: 'docker',
    name: 'Build Changes',
    platform: {
      os: 'linux',
      arch: 'arm64',
    },
    steps: [
      {
        name: 'git log',
        image: 'cr.wks/debian-testing',
        commands: [ 'bin/find_changes.sh', 'ls -la' ]
      },
      # [
      #   build('_CI-CD', app)
      #   for app in packages
      # ],
      # [
      #   build('apps', app)
      #   for app in apps
      # ]
    ],
  },
  #{
  #  kind: 'pipeline',
  #  type: 'docker',
  #  name: '_CI-CD',
  #  platform: {
  #    os: 'linux',
  #    arch: 'arm64',
  #  },
  #  steps: [
  #    build('_CI-CD', pkg)
  #    for pkg in packages
  #  ],
  #},
  {
    kind: 'pipeline',
    type: 'docker',
    name: 'apps',
    platform: {
      os: 'linux',
      arch: 'arm64',
    },
    steps: [
      build('apps', app)
      for app in apps
    ],
  },
]
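
To sanity-check what this jsonnet expands to before pushing, the Drone CLI can render it locally; a minimal sketch, assuming a drone CLI recent enough to ship the jsonnet subcommand:

    # render .drone.jsonnet into a multi-document .drone.yml, one document per pipeline
    drone jsonnet --source .drone.jsonnet --target .drone.yml --stream --format
    cat .drone.yml   # inspect the generated 'Build Changes' and 'apps' pipelines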

.gitignore (vendored, 2 changed lines)

@@ -1 +1 @@
csi-s3/storage-csi-s3/cmd/s3driver/s3driver
*.swp

.gitmodules (vendored, deleted, -51)

@@ -1,51 +0,0 @@
[submodule "kube-prometheus"]
path = kube-prometheus
url = https://github.com/coreos/kube-prometheus.git
[submodule "cluster-monitoring"]
path = cluster-monitoring
url = https://github.com/carlosedp/cluster-monitoring.git
[submodule "gluster-kubernetes"]
path = gluster-kubernetes
url = https://github.com/jayflory/gluster-kubernetes.git
[submodule "kubernetes-ingress"]
path = kubernetes-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "pihole-kubernetes"]
path = pihole-kubernetes
url = https://github.com/MoJo2600/pihole-kubernetes.git
[submodule "pihole-helm"]
path = pihole-helm
url = https://github.com/ChrisPhillips-cminion/pihole-helm.git
[submodule "helm"]
path = helm
url = https://github.com/helm/helm.git
[submodule "docker-apt-cacher-ng"]
path = docker-apt-cacher-ng
url = https://github.com/sameersbn/docker-apt-cacher-ng.git
[submodule "mosquitto/charts"]
path = mosquitto/charts
url = https://github.com/smizy/charts.git
[submodule "mosquitto-exporter"]
path = mosquitto-exporter
url = https://github.com/sapcc/mosquitto-exporter.git
[submodule "csi-s3/storage-csi-s3"]
path = csi-s3/storage-csi-s3
url = https://github.com/ctrox/csi-s3.git
[submodule "csi-s3/external-attacher"]
path = csi-s3/external-attacher
url = https://github.com/kubernetes-csi/external-attacher.git
[submodule "csi-s3/external-provisioner"]
path = csi-s3/external-provisioner
url = https://github.com/kubernetes-csi/external-provisioner.git
[submodule "csi-s3/node-driver-registrar"]
path = csi-s3/node-driver-registrar
url = https://github.com/kubernetes-csi/node-driver-registrar.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git
[submodule "_sys/haproxy-ingress"]
path = _sys/haproxy-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "nfs-subdir-external-provisioner"]
path = nfs-subdir-external-provisioner
url = https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git

(.project, Eclipse project description)

@@ -1,10 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
	<name>kubernetes</name>
	<name>docker-images</name>
	<comment></comment>
	<projects>
	</projects>
	<buildSpec>
		<buildCommand>
			<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
			<arguments>
			</arguments>
		</buildCommand>
		<buildCommand>
			<name>org.python.pydev.PyDevBuilder</name>
			<arguments>
@@ -13,5 +18,6 @@
	</buildSpec>
	<natures>
		<nature>org.python.pydev.pythonNature</nature>
		<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
	</natures>
</projectDescription>

(Dockerfile, debian-golang image)

@@ -1,9 +1,11 @@
FROM debian:stable-slim
FROM cr.wks/debian-stable
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
golang make git && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
RUN apt-get update && apt-get install -y \
golang make git
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
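
For reference, the build() settings in the .drone.jsonnet above amount to roughly this manual equivalent for the debian-golang Dockerfile; a sketch only, with the _CI-CD/debian-golang context path taken from the Tekton defaults further down, and cr.wks assumed to be listed in the Docker daemon's insecure-registries (the drone settings mark it insecure):

    # sketch of what the plugins/docker step does for one package
    docker build \
        --cache-from cr.wks/debian-golang:latest \
        -t cr.wks/debian-golang:latest \
        -f _CI-CD/debian-golang/Dockerfile _CI-CD/debian-golang
    docker push cr.wks/debian-golang:latest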

(Tekton build manifests for the debian-golang image, deleted)

@@ -1,84 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-debian-golang-stable
spec:
  type: image
  params:
    - name: url
      value: cr.lan/debian-golang-stable
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-debian-golang
spec:
  params:
    - name: pathToContainerFile
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-golang/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-golang
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToContainerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --skip-tls-verify
  #workspaces:
  #  - name: workspace
  #    mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-debian-golang
spec:
  taskRef:
    name: build-debian-golang
  params:
    - name: pathToContainerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-debian-golang-stable
#  workspaces:
#    - name: workspace
#      persistentVolumeClaim:
#        claimName: tektoncd-workspaces
#        subPath: workspaces
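
These v1alpha1 PipelineResource/Task/TaskRun manifests, deleted here in favor of the Drone pipeline, were one-shot: the TaskRun started as soon as it was applied. Roughly, assuming the tkn CLI and the manifests saved under a hypothetical filename:

    kubectl apply -f debian-golang-build.yaml   # filename is an assumption
    tkn taskrun logs img-debian-golang -f       # follow the kaniko build
    kubectl delete taskrun img-debian-golang    # TaskRuns run once; delete before re-applying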

(Dockerfile, debian-stable-build-essential image)

@@ -1,13 +1,15 @@
FROM debian:stable-slim
FROM cr.wks/debian-stable
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash \
build-essential make ccache distcc-pump distcc g++ \
libncursesw5-dev && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
libncursesw5-dev
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

(Tekton build manifests for the debian-stable-build-essential image, deleted)

@@ -1,85 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-debian-stable-build-essential
spec:
  type: image
  params:
    - name: url
      value: cr.lan/debian-stable-build-essential
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-debian-stable-build-essential
spec:
  params:
    - name: pathToContainerFile
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-stable-build-essential/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-stable-build-essential
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToContainerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --snapshotMode=redo
        - --skip-tls-verify
  #workspaces:
  #  - name: workspace
  #    mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-debian-stable-build-essential
spec:
  taskRef:
    name: build-debian-stable-build-essential
  params:
    - name: pathToContainerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-debian-stable-build-essential
#  workspaces:
#    - name: workspace
#      persistentVolumeClaim:
#        claimName: tektoncd-workspaces
#        subPath: workspaces

(Dockerfile, debian-stable-openwrt image, new file)

@@ -0,0 +1,14 @@
FROM cr.wks/debian-stable-build-essential
RUN apt update -y; \
    apt install -y build-essential ccache ecj fastjar file g++ gawk \
    gettext git java-propose-classpath libelf-dev libncurses5-dev \
    libncursesw5-dev libssl-dev python3 python3-dev unzip wget \
    python3-distutils python3-setuptools rsync subversion swig time \
    xsltproc zlib1g-dev make distcc distcc-pump nfs-common clang flex bison g++ gawk \
    gcc-multilib-mips-linux-gnu git libncurses-dev libssl-dev && \
    apt-get remove --purge -y exim* && \
    apt-get autoremove --purge -y && \
    apt-get clean -y && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /var/cache/apt/*

(Dockerfile, debian-stable-php-fpm image, new file)

@@ -0,0 +1,21 @@
FROM debian:stable AS baseimage
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y \
    dnsutils procps nmap bash iputils-ping bash openssl \
    php-fpm php-zip php-sqlite3 php-pgsql php-mysqli php-json php-readline \
    php-xml php-intl php-xmlrpc php-imagick php-gd php-cli php-curl \
    php-bz2 php-mbstring
#cleanup
RUN apt-get remove -y --purge man-db ;\
    apt-get autoremove -y --purge ;\
    apt-get clean -y ;\
    rm -rf /var/lib/apt/lists/* ;\
    rm -rf /var/cache/apt/*

FROM baseimage as final
ADD etc_php-fpm/www.conf /etc/php/8.4/fpm/pool.d
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

(docker-entrypoint.sh, new file)

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"

(etc_php-fpm/www.conf, php-fpm pool configuration, new file)

@@ -0,0 +1,440 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
; listen = /run/php/php7.4-fpm.sock
listen = 127.0.0.1:9000
; Set listen(2) backlog.
; Default Value: 511 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
; or group is differrent than the master process user. It allows to create process
; core dump and ptrace the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 5
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following informations:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then informations are related to the
; last request the process has served. Otherwise informations are related to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php/7.4/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with embraces to specify the name of the env
; variable. Some exemples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be associated with embraces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by 'request_terminate_timeout' ini option is not engaged after
; application calls 'fastcgi_finish_request' or when application has finished and
; shutdown functions are being called (registered via register_shutdown_function).
; This option will enable timeout limit to be applied unconditionally
; even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environement, this can cause some delay in the page
; process time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with prefix and suffix containing information about
; the child that writes to the log and if stdout or stderr is used as well as
; log level and time. This options is used only if catch_workers_output is yes.
; Settings to "no" will output data as written to the stdout or stderr.
; Default value: yes
;decorate_workers_output = no
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M
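
The notable change from the Debian default in this pool file is listen = 127.0.0.1:9000: the pool accepts FastCGI over TCP instead of the stock Unix socket, so a web server in another container can reach it. A quick smoke test, assuming cgi-fcgi from libfcgi-bin is available and ping.path = /ping has been uncommented in the pool config:

    # expects "pong" back if the ping page is enabled
    SCRIPT_NAME=/ping SCRIPT_FILENAME=/ping REQUEST_METHOD=GET \
        cgi-fcgi -bind -connect 127.0.0.1:9000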

(Dockerfile, debian-stable image)

@@ -1,11 +1,15 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
RUN sed -i 's@deb.debian.org@apt-cache.service.nr5/deb.debian.org@g' /etc/apt/sources.list.d/debian.sources && \
sed -i 's@security.debian.org@apt-cache.service.nr5/security.debian.org@g' /etc/apt/sources.list.d/debian.sources
RUN apt-get update && apt-get install -y \
man-db- \
dnsutils procps nmap bash iputils-ping bash git
RUN apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
ENTRYPOINT ["/docker-entrypoint.sh"]
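
The man-db- entry in the install list above uses apt's trailing-minus syntax: a package name suffixed with '-' inside apt-get install is removed in the same transaction, so man-db never gets installed and its slow trigger never runs in this layer. For example:

    # installs git and removes/blocks man-db in one transaction
    apt-get update && apt-get install -y git man-db-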

(Tekton build manifests for the debian-stable image, deleted)

@@ -1,85 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-debian-stable
spec:
  type: image
  params:
    - name: url
      value: cr.lan/debian-stable
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-debian-stable
spec:
  params:
    - name: pathToContainerFile
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-stable/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-stable
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToContainerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --snapshotMode=redo
        - --skip-tls-verify
  #workspaces:
  #  - name: workspace
  #    mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-debian-stable
spec:
  taskRef:
    name: build-debian-stable
  params:
    - name: pathToContainerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-debian-stable
#  workspaces:
#    - name: workspace
#      persistentVolumeClaim:
#        claimName: tektoncd-workspaces
#        subPath: workspaces

(Dockerfile, debian-testing image)

@@ -1,11 +1,15 @@
FROM debian:testing-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
RUN sed -i 's@deb.debian.org@apt-cache.service.nr5/deb.debian.org@g' /etc/apt/sources.list.d/debian.sources && \
sed -i 's@security.debian.org@apt-cache.service.nr5/security.debian.org@g' /etc/apt/sources.list.d/debian.sources
RUN apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash git
RUN apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
ENTRYPOINT ["/docker-entrypoint.sh"]

(Tekton build manifests for the debian-testing image, deleted)

@@ -1,85 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-debian-testing
spec:
  type: image
  params:
    - name: url
      value: cr.lan/debian-testing
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-debian-testing
spec:
  params:
    - name: pathToContainerFile
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-testing/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/_CI-CD/debian-testing
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToContainerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --snapshotMode=redo
        - --skip-tls-verify
  #workspaces:
  #  - name: workspace
  #    mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-debian-testing
spec:
  taskRef:
    name: build-debian-testing
  params:
    - name: pathToContainerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-debian-testing
#  workspaces:
#    - name: workspace
#      persistentVolumeClaim:
#        claimName: tektoncd-workspaces
#        subPath: workspaces

(Dockerfile, distcc image)

@@ -1,18 +1,17 @@
FROM debian:stable-slim
FROM cr.wks/debian-stable-build-essential
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
RUN apt-get update && \
apt-get install -y \
gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf \
multiarch-support dpkg-dev distcc ccache \
build-essential gcc cpp g++ clang llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*; \
rm -rf /var/cache/apt/*; \
#removing distcc conf, no zeroconf
rm -fv /etc/distcc/hosts
dpkg-dev distcc ccache \
build-essential gcc cpp g++ clang llvm
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
# Op port
EXPOSE 3632
# Stats port

(Tekton build manifests for the distcc image, deleted)

@@ -1,76 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-distcc
spec:
  type: image
  params:
    - name: url
      value: cr.lan/distcc
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-distcc
spec:
  params:
    - name: pathToDockerFile
      type: string
      default: $(resources.inputs.source.path)/apps/distcc/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/apps/distcc
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToDockerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-distcc
spec:
  #serviceAccountName: dockerhub-service
  taskRef:
    name: build-distcc
  params:
    - name: pathToDockerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-distcc

(git-secret Kubernetes Secret, deleted)

@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: git-secret
type: Opaque
data:
  token: Nzk1YTFhMGQxMWQ0MDJiY2FiOGM3MjkyZDk5ODIyMzg2NDNkM2U3OQo=

(Python script compute_allocated_resources, deleted)

@@ -1,73 +0,0 @@
#!/usr/bin/python3
import kubernetes as k8s
from pint import UnitRegistry
from collections import defaultdict

__all__ = ["compute_allocated_resources"]


def compute_allocated_resources():
    ureg = UnitRegistry()
    ureg.load_definitions('kubernetes_units.txt')
    Q_ = ureg.Quantity
    data = {}
    # doing this computation within a k8s cluster
    k8s.config.load_kube_config()
    core_v1 = k8s.client.CoreV1Api()
    # print("Listing pods with their IPs:")
    # ret = core_v1.list_pod_for_all_namespaces(watch=False)
    # for i in ret.items:
    #     print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
    for node in core_v1.list_node().items:
        stats = {}
        node_name = node.metadata.name
        allocatable = node.status.allocatable
        max_pods = int(int(allocatable["pods"]) * 1.5)
        # print("{} ALLOC: {} MAX_PODS: {}".format(node_name, allocatable, max_pods))
        field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
                          "spec.nodeName=" + node_name)
        stats["cpu_alloc"] = Q_(allocatable["cpu"])
        stats["mem_alloc"] = Q_(allocatable["memory"])
        pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
                                                   field_selector=field_selector).items
        # compute the allocated resources
        cpureqs, cpulmts, memreqs, memlmts = [], [], [], []
        for pod in pods:
            for container in pod.spec.containers:
                res = container.resources
                reqs = defaultdict(lambda: 0, res.requests or {})
                lmts = defaultdict(lambda: 0, res.limits or {})
                cpureqs.append(Q_(reqs["cpu"]))
                memreqs.append(Q_(reqs["memory"]))
                cpulmts.append(Q_(lmts["cpu"]))
                memlmts.append(Q_(lmts["memory"]))
        stats["cpu_req"] = sum(cpureqs)
        stats["cpu_lmt"] = sum(cpulmts)
        stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
        stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
        stats["mem_req"] = sum(memreqs)
        stats["mem_lmt"] = sum(memlmts)
        stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
        stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
        data[node_name] = stats
    return data


if __name__ == "__main__":
    # execute only if run as a script
    print(compute_allocated_resources())

View File

@@ -1,20 +0,0 @@
# memory units
kmemunits = 1 = [kmemunits]
Ki = 1024 * kmemunits
Mi = Ki^2
Gi = Ki^3
Ti = Ki^4
Pi = Ki^5
Ei = Ki^6
# cpu units
kcpuunits = 1 = [kcpuunits]
m = 1/1000 * kcpuunits
k = 1000 * kcpuunits
M = k^2
G = k^3
T = k^4
P = k^5
E = k^6
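# sanity check (illustrative): with these definitions pint parses kubectl quantities,
# e.g. Quantity("250m") -> 0.25 kcpuunits and Quantity("512Mi") -> 512 Mi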

View File

@@ -1,6 +0,0 @@
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml
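# local cronjob + policy variant kept in this directory (filenames assumed):
# kubectl apply -n kube-system -f descheduler-policy-configmap.yaml
# kubectl apply -n kube-system -f descheduler-cronjob.yaml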

View File

@@ -1,47 +0,0 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/15 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap

View File

@@ -1,37 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: descheduler-policy-configmap
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 8
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 50
"memory": 70
"pods": 10
targetThresholds:
"cpu": 70
"memory": 70
"pods": 20
nodeFit: true
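# note: HighNodeUtilization (packs pods onto fewer nodes) and LowNodeUtilization
# (drains overutilized nodes) pull in opposite directions; the upstream docs
# suggest enabling only one of the two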

View File

@@ -1,10 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
#namespace: nginx-ingress
namespace: default
data:
proxy-connect-timeout: "10s"
proxy-read-timeout: "10s"
client-max-body-size: "0"

View File

@@ -1,674 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.1
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
namespace: ingress-nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000

View File

@@ -1,223 +0,0 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.23.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
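# "Network" in net-conf.json must match the cluster's pod CIDR
# (e.g. kubeadm init --pod-network-cidr=172.23.0.0/16)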
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.14.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

View File

@@ -1,21 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: loki-data
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/loki-data
server: ebin02
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: storage-loki-0
namespace: monitoring
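# claimRef pre-binds this volume to the storage-loki-0 PVC in monitoring,
# so no other claim can grab it before Loki's StatefulSet does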

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 172.23.255.1-172.23.255.254

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 172.23.255.1-172.23.255.254

View File

@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-openwrt
type: Opaque
data:
username: b3BlbndydAo=
password: ZUZWbmVnOEkwOE1zRTN0Q2VCRFB4c011OU0yVjJGdnkK
endpoint: aHR0cHM6Ly9taW5pby5saXZlLWluZnJhLnN2Yy5jbHVzdGVyLmxvY2FsOjk0NDMK
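# note: these values decode with a trailing newline (encoded via `echo ... | base64`
# instead of `echo -n ... | base64`); consumers that are strict about this need
# re-encoded values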

View File

@@ -1,36 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd
provisioner: nfs-ssd # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin01
provisioner: nfs-ssd-ebin01 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-hdd-ebin01
provisioner: nfs-hdd-ebin01 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin02
provisioner: nfs-ssd-ebin02 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain

View File

@@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: default
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-ssd/k8s-data

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-hdd-ebin01
labels:
app: nfs-hdd-ebin01
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-hdd-ebin01
template:
metadata:
labels:
app: nfs-hdd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-hdd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-hdd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-hdd/k8s-data
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-hdd/k8s-data

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin01
labels:
app: nfs-ssd-ebin01
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin01
template:
metadata:
labels:
app: nfs-ssd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-ssd/k8s-data

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin02
labels:
app: nfs-ssd-ebin02
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin02
template:
metadata:
labels:
app: nfs-ssd-ebin02
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin02
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin02
- name: NFS_SERVER
value: ebin02
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
volumes:
- name: nfs-client-root
nfs:
server: ebin02
path: /data/raid1-ssd/k8s-data

View File

@@ -1,65 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: live-env
---
apiVersion: v1
kind: Namespace
metadata:
name: test-env
---
apiVersion: v1
kind: Namespace
metadata:
name: live-infra
---
apiVersion: v1
kind: Namespace
metadata:
name: test-infra

View File

@@ -1,4 +1,4 @@
-FROM debian:bullseye
+FROM debian:stable
ENV DEBIAN_FRONTEND noninteractive
ARG DEVPKGS="git make cmake gcc g++ python-dev libsqlcipher-dev"
@@ -34,4 +34,4 @@ RUN apt-get remove -y --purge ${DEVPKGS} && \
USER almond-cloud
WORKDIR /home/almond-cloud
-ENTRYPOINT ["/opt/almond-cloud/start.sh"]
+ENTRYPOINT ["/opt/almond-cloud/start.sh"]

View File

@@ -1,4 +1,4 @@
-FROM debian:stable-slim
+FROM cr.wks/debian-stable
RUN apt-get update && apt-get install -y \
apt-cacher-ng && \
@@ -8,5 +8,5 @@ RUN apt-get update && apt-get install -y \
RUN echo 'PassThroughPattern: .*' >> /etc/apt-cacher-ng/acng.conf
-EXPOSE 3142
+EXPOSE 3142
CMD /usr/sbin/apt-cacher-ng -c /etc/apt-cacher-ng pidfile=/var/run/apt-cacher-ng/pid SocketPath=/var/run/apt-cacher-ng/socket foreground=1
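# clients can either set Acquire::http::Proxy "http://apt-cache.lan:3142"; in apt.conf
# or rewrite their sources to apt-cache.lan/deb.debian.org/... as the build images in
# this repo do (hostname taken from the Dockerfiles above)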

View File

@@ -76,9 +76,30 @@ kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume
spec:
-storageClassName: nfs-ssd
+storageClassName: nfs-ssd-ebin02
+volumeName: apt-cacher-ng
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+name: apt-cacher-ng
+spec:
+storageClassName: "nfs-ssd-ebin02"
+nfs:
+path: /data/raid1-ssd/k8s-data/apt-cacher-ng
+server: ebin02
+capacity:
+storage: 40Gi
+accessModes:
+- ReadWriteOnce
+volumeMode: Filesystem
+persistentVolumeReclaimPolicy: Retain
+claimRef:
+kind: PersistentVolumeClaim
+name: apt-cacher-volume
+namespace: live-infra

View File

@@ -1,76 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-apt-cacher-ng
spec:
type: image
params:
- name: url
value: cr.lan/apt-cacher-ng
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-apt-cacher-ng
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/apt-cacher-ng/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/apt-cacher-ng
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-apt-cacher-ng
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-apt-cacher-ng
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-apt-cacher-ng

View File

@@ -1,73 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apt-cacher-ng-test
namespace: test
labels:
app: apt-cacher-ng-test
spec:
replicas: 1
selector:
matchLabels:
app: apt-cacher-ng-test
strategy:
type: Recreate
template:
metadata:
labels:
app: apt-cacher-ng-test
spec:
containers:
- name: apt-cacher-ng-test
image: docker-registry.lan/apt-cacher-ng:arm64
imagePullPolicy: Always
ports:
- containerPort: 3142
protocol: TCP
volumeMounts:
- mountPath: /var/cache/apt-cacher-ng
name: data
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
volumes:
- name: data
persistentVolumeClaim:
claimName: apt-cacher-volume-test
#---
#apiVersion: v1
#kind: Service
#metadata:
# name: apt-cacher-ng
# labels:
# app: apt-cacher-ng
#spec:
# type: LoadBalancer
# loadBalancerIP: 172.23.255.1
# ports:
# - name: apt-cacher-ng
# port: 3142
# targetPort: 3142
# protocol: TCP
# selector:
# app: apt-cacher-ng
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume-test
namespace: test
#annotations:
# volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: csi-s3-slow
#storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi

View File

@@ -1,7 +0,0 @@
FROM: https://tanzu.vmware.com/developer/guides/ci-cd/argocd-gs/
# kubectl apply -f namespace.yaml
# (superseded by the local install.yaml below) kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# kubectl apply -n argocd -f install.yaml (needs changes for ARM builds)
# kubectl apply -n argocd -f ingress.yaml
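# fetch the initial admin password (Argo CD v2+, assuming the standard secret name):
# kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d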

View File

@@ -1,18 +0,0 @@
#https://argoproj.github.io/argo-cd/operator-manual/ingress/#kubernetesingress-nginx
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server
namespace: argocd
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: argocd.lan
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https

File diff suppressed because it is too large

View File

@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd

apps/authelia/README.md Normal file
View File

@@ -0,0 +1,3 @@
### Apply new config
$ kubectl -n live-infra create configmap authelia-config --from-file=configMaps/ -o yaml --dry-run | kubectl apply -f -
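### Roll out the new config
Authelia only reads its configuration at startup, so restart the deployment (name assumed from the manifests in this directory):
$ kubectl -n live-infra rollout restart deployment/authelia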

View File

@@ -7,14 +7,14 @@
## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to
## the system certificates store.
## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem.
-# certificates_directory: /config/certificates
+certificates_directory: /etc/pki/chain
## The theme to display: light, dark, grey, auto.
theme: dark
## The secret used to generate JWT tokens when validating user identity by email confirmation. JWT Secret can also be
## set using a secret: https://www.authelia.com/docs/configuration/secrets.html
-jwt_secret: a_very_important_secret2
+jwt_secret: hAnFzapSCusyF2W83JAg6PRqc6v7iQvN7sP3PQ70HAbPBshJzAMz
## Default redirection URL
##
@@ -60,10 +60,12 @@ server:
## Authelia by default doesn't accept TLS communication on the server port. This section overrides this behaviour.
tls:
## The path to the DER base64/PEM format private key.
#key: "/etc/pki/private.key"
key: ""
## The path to the DER base64/PEM format public certificate.
certificate: ""
#certificate: "/etc/pki/auth.lan.crt"
certificate : ""
##
## Log Configuration
@@ -76,7 +78,7 @@ log:
format: text
## File path where the logs will be written. If not set logs are written to stdout.
-file_path: /config-nfs/authelia.log
+file_path: "" #/config-nfs/authelia.log
## Whether to also log to stdout when a log_file_path is defined.
# keep_stdout: false
@@ -241,12 +243,13 @@ authentication_backend:
file:
path: /config-nfs/users_database.yml
password:
-algorithm: argon2id
-iterations: 1
-key_length: 32
+algorithm: sha512
salt_length: 16
memory: 1024
parallelism: 8
+#algorithm: argon2id
+#iterations: 1
+#key_length: 32
+#memory: 32
+#parallelism: 4
##
## Access Control Configuration
##

View File

@@ -24,7 +24,6 @@ spec:
containers:
- name: authelia
image: authelia/authelia:latest
-imagePullPolicy: IfNotPresent
env:
#- name: AUTHELIA_SERVER_PORT
# value: "9091"
@@ -35,6 +34,8 @@ spec:
mountPath: /config-nfs
- name: authelia-config
mountPath: /config
+- name: pki
+mountPath: /etc/pki
ports:
- name: http
containerPort: 9091
@@ -65,6 +66,10 @@ spec:
items:
- key: configuration.yml
path: configuration.yml
+- name: pki
+hostPath:
+path: /etc/pki
+type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
@@ -112,6 +117,9 @@ spec:
targetPort: http
protocol: TCP
name: http
+- port: 443
+targetPort: http
+name: https
selector:
app: authelia
release: latest
@@ -122,8 +130,8 @@ metadata:
name: authelia
annotations:
kubernetes.io/ingress.class: nginx
-nginx.ingress.kubernetes.io/auth-url: http://authelia.live-infra.svc.cluster.local/api/verify
-nginx.ingress.kubernetes.io/auth-signin: http://auth.lan
+nginx.ingress.kubernetes.io/auth-url: https://authelia.live-infra.svc.cluster.local/api/verify
+nginx.ingress.kubernetes.io/auth-signin: https://auth.lan
nginx.ingress.kubernetes.io/auth-response-headers: Remote-User,Remote-Name,Remote-Groups,Remote-Email
nginx.ingress.kubernetes.io/auth-snippet: |
proxy_set_header X-Forwarded-Method $request_method;

View File

@@ -1,5 +1,4 @@
-FROM debian:stable-slim
+FROM cr.wks/debian-stable
RUN apt-get update && apt-get install -y \
curl procps && \
apt-get clean -y && \

View File

@@ -1,76 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-curl
spec:
type: image
params:
- name: url
value: cr.lan/curl
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-curl
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/curl/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/curl
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-curl-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-curl
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-curl

View File

@@ -20,8 +20,7 @@ spec:
spec:
containers:
- name: registry-ui
-#image: cr.lan/docker-registry-ui:arm64
-image: docker.io/joxit/docker-registry-ui:main-debian
+image: docker.io/joxit/docker-registry-ui:latest
imagePullPolicy: Always
env:
- name: NGINX_PROXY_PASS_URL

apps/dolibarr/Dockerfile Normal file
View File

@@ -0,0 +1,116 @@
FROM cr.lan/debian-stable-php-fpm
# see https://wiki.dolibarr.org/index.php/Dependencies_and_external_libraries
# Prepare folders
ENV DEBIAN_FRONTEND noninteractive
RUN set -ex; \
apt-get update -q; \
apt-get install -y --no-install-recommends \
bzip2 \
default-mysql-client \
cron \
rsync \
unzip \
zip php-soap;\
mkdir -p /var/www/documents; \
chown -R www-data:root /var/www; \
chmod -R g=u /var/www
# Cleanup
RUN apt-get autoremove --purge -y && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*
VOLUME /var/www/html /var/www/documents /var/www/scripts
# Runtime env var
ENV DOLI_AUTO_CONFIGURE=1 \
DOLI_DB_TYPE=mysqli \
DOLI_DB_HOST= \
DOLI_DB_PORT=3306 \
DOLI_DB_USER=dolibarr \
DOLI_DB_PASSWORD='' \
DOLI_DB_NAME=dolibarr \
DOLI_DB_PREFIX=llx_ \
DOLI_DB_CHARACTER_SET=utf8 \
DOLI_DB_COLLATION=utf8_unicode_ci \
DOLI_DB_ROOT_LOGIN='' \
DOLI_DB_ROOT_PASSWORD='' \
DOLI_ADMIN_LOGIN=admin \
DOLI_MODULES='' \
DOLI_URL_ROOT='http://localhost' \
DOLI_AUTH=dolibarr \
DOLI_LDAP_HOST= \
DOLI_LDAP_PORT=389 \
DOLI_LDAP_VERSION=3 \
DOLI_LDAP_SERVERTYPE=openldap \
DOLI_LDAP_LOGIN_ATTRIBUTE=uid \
DOLI_LDAP_DN='' \
DOLI_LDAP_FILTER='' \
DOLI_LDAP_ADMIN_LOGIN='' \
DOLI_LDAP_ADMIN_PASS='' \
DOLI_LDAP_DEBUG=false \
DOLI_HTTPS=0 \
DOLI_PROD=0 \
DOLI_NO_CSRF_CHECK=0 \
WWW_USER_ID=33 \
WWW_GROUP_ID=33 \
PHP_INI_DATE_TIMEZONE='UTC' \
PHP_MEMORY_LIMIT=256M \
PHP_MAX_UPLOAD=20M \
PHP_MAX_EXECUTION_TIME=300
# Build time env var
ARG DOLI_VERSION=13.0.4
# Get Dolibarr
ADD https://github.com/Dolibarr/dolibarr/archive/${DOLI_VERSION}.zip /tmp/dolibarr.zip
# Install Dolibarr from tag archive
RUN set -ex; \
mkdir -p /tmp/dolibarr; \
unzip -q /tmp/dolibarr.zip -d /tmp/dolibarr; \
rm /tmp/dolibarr.zip; \
mkdir -p /usr/src/dolibarr; \
cp -r "/tmp/dolibarr/dolibarr-${DOLI_VERSION}"/* /usr/src/dolibarr; \
rm -rf /tmp/dolibarr; \
chmod +x /usr/src/dolibarr/scripts/*; \
echo "${DOLI_VERSION}" > /usr/src/dolibarr/.docker-image-version
COPY entrypoint.sh /
RUN set -ex; \
chmod 755 /entrypoint.sh ;\
mkdir -p /run/php
ENTRYPOINT ["/entrypoint.sh"]
CMD ["php-fpm7.4", "--nodaemonize", "-c", "/etc/php/7.4/fpm/php.ini", "--fpm-config", "/etc/php/7.4/fpm/php-fpm.conf"]
# Arguments to label built container
ARG VCS_REF
ARG BUILD_DATE
# Container labels (http://label-schema.org/)
# Container annotations (https://github.com/opencontainers/image-spec)
LABEL maintainer="Monogramm maintainers <opensource at monogramm dot io>" \
product="Dolibarr" \
version=${DOLI_VERSION} \
org.label-schema.vcs-ref=${VCS_REF} \
org.label-schema.vcs-url="https://github.com/Monogramm/docker-dolibarr" \
org.label-schema.build-date=${BUILD_DATE} \
org.label-schema.name="Dolibarr" \
org.label-schema.description="Open Source ERP & CRM for Business" \
org.label-schema.url="https://www.dolibarr.org/" \
org.label-schema.vendor="Dolibarr" \
org.label-schema.version=$DOLI_VERSION \
org.label-schema.schema-version="1.0" \
org.opencontainers.image.revision=${VCS_REF} \
org.opencontainers.image.source="https://github.com/Monogramm/docker-dolibarr" \
org.opencontainers.image.created=${BUILD_DATE} \
org.opencontainers.image.title="Dolibarr" \
org.opencontainers.image.description="Open Source ERP & CRM for Business" \
org.opencontainers.image.url="https://www.dolibarr.org/" \
org.opencontainers.image.vendor="Dolibarr" \
org.opencontainers.image.version=${DOLI_VERSION} \
org.opencontainers.image.authors="Monogramm maintainers <opensource at monogramm dot io>"

apps/dolibarr/README.md Normal file
View File

@@ -0,0 +1,3 @@
Create the nginx config map used by the nginx-proxy sidecar:
kubectl -n live-env create configmap dolibarr-nginx-site --from-file=nginx-site.configmap.conf
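To update an existing config map, the same dry-run pattern as in the authelia README should work:
kubectl -n live-env create configmap dolibarr-nginx-site --from-file=nginx-site.configmap.conf -o yaml --dry-run | kubectl apply -f -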

View File

@@ -0,0 +1,104 @@
#we use postgresql:
#create database dolibarr;
#create user dolibarr with encrypted password 'secret';
#grant all privileges on database dolibarr to dolibarr;
apiVersion: apps/v1
kind: Deployment
metadata:
name: dolibarr
labels:
app: dolibarr
release: latest
spec:
replicas: 1
selector:
matchLabels:
app: dolibarr
release: latest
template:
metadata:
labels:
app: dolibarr
release: latest
spec:
volumes:
- name: dolibarr-nginx-site
configMap:
name: dolibarr-nginx-site
- name: www-data
emptyDir: {}
containers:
- name: nginx-proxy
image: nginx
volumeMounts:
- name: dolibarr-nginx-site
mountPath: /etc/nginx/conf.d
- name: www-data
mountPath: /var/www/html
ports:
- name: http
containerPort: 80
protocol: TCP
- name: dolibarr
image: cr.lan/dolibarr:latest
volumeMounts:
- name: www-data
mountPath: /var/www/html
env:
- name: TZ
value: "Europe/Berlin"
- name: DOLI_DB_HOST
value: postgres.live-env.svc.cluster.local
- name: DOLI_DB_PORT
value: "5432"
- name: DOLI_DB_NAME
value: dolibarr
- name: DOLI_DB_USER
value: dolibarr
- name: DOLI_DB_PASSWORD
value: Vb7yHzmE5HIjfU4hjghjghj6AnMdB
- name: DOLI_DB_TYPE
value: pgsql
ports:
- name: php-fpm
containerPort: 9000
protocol: TCP
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "3000m"
---
apiVersion: v1
kind: Service
metadata:
name: dolibarr
spec:
ports:
- name: http
port: 80
selector:
app: dolibarr
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dolibarr
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
spec:
rules:
- host: dolibarr.lan
http:
paths:
- backend:
service:
name: dolibarr
port:
name: http
path: /
pathType: Prefix

apps/dolibarr/entrypoint.sh Normal file
View File

@@ -0,0 +1,271 @@
#!/bin/sh
set -e
log() {
echo "[$0] [$(date +%Y-%m-%dT%H:%M:%S)] $*"
}
# version_greater A B returns whether A > B
version_greater() {
[ "$(printf '%s\n' "$@" | sort -t '.' -n -k1,1 -k2,2 -k3,3 -k4,4 | head -n 1)" != "$1" ]
}
# return true if specified directory is empty
directory_empty() {
[ -z "$(ls -A "$1/")" ]
}
run_as() {
if [ "$(id -u)" = 0 ]; then
su - www-data -s /bin/sh -c "$1"
else
sh -c "$1"
fi
}
if [ ! -f /usr/local/etc/php/php.ini ]; then
log "Initializing PHP configuration..."
cat <<EOF > /etc/php/7.4/fpm/php.ini
date.timezone = "${PHP_INI_DATE_TIMEZONE}"
memory_limit = ${PHP_MEMORY_LIMIT}
file_uploads = On
upload_max_filesize = ${PHP_MAX_UPLOAD}
post_max_size = ${PHP_MAX_UPLOAD}
max_execution_time = ${PHP_MAX_EXECUTION_TIME}
sendmail_path = /usr/sbin/sendmail -t -i
extension = calendar.so
EOF
fi
if [ ! -d /var/www/documents ]; then
log "Initializing Dolibarr documents directory..."
mkdir -p /var/www/documents
fi
log "Updating Dolibarr users and group..."
usermod -u "$WWW_USER_ID" www-data
groupmod -g "$WWW_GROUP_ID" www-data
log "Updating Dolibarr folder ownership..."
chown -R www-data:www-data /var/www
if [ ! -d /var/www/html/conf/ ]; then
log "Initializing Dolibarr HTML configuration directory..."
mkdir -p /var/www/html/conf/
fi
# Create a default config if autoconfig enabled
if [ -n "$DOLI_AUTO_CONFIGURE" ] && [ ! -f /var/www/html/conf/conf.php ]; then
log "Initializing Dolibarr HTML configuration..."
cat <<EOF > /var/www/html/conf/conf.php
<?php
// Config file for Dolibarr ${DOLI_VERSION} ($(date +%Y-%m-%dT%H:%M:%S%:z))
// ###################
// # Main parameters #
// ###################
\$dolibarr_main_url_root='${DOLI_URL_ROOT}';
\$dolibarr_main_document_root='/var/www/html';
\$dolibarr_main_url_root_alt='/custom';
\$dolibarr_main_document_root_alt='/var/www/html/custom';
\$dolibarr_main_data_root='/var/www/documents';
\$dolibarr_main_db_host='${DOLI_DB_HOST}';
\$dolibarr_main_db_port='${DOLI_DB_PORT}';
\$dolibarr_main_db_name='${DOLI_DB_NAME}';
\$dolibarr_main_db_prefix='${DOLI_DB_PREFIX}';
\$dolibarr_main_db_user='${DOLI_DB_USER}';
\$dolibarr_main_db_pass='${DOLI_DB_PASSWORD}';
\$dolibarr_main_db_type='${DOLI_DB_TYPE}';
\$dolibarr_main_db_character_set='${DOLI_DB_CHARACTER_SET}';
\$dolibarr_main_db_collation='${DOLI_DB_COLLATION}';
// ##################
// # Login #
// ##################
\$dolibarr_main_authentication='${DOLI_AUTH}';
\$dolibarr_main_auth_ldap_host='${DOLI_LDAP_HOST}';
\$dolibarr_main_auth_ldap_port='${DOLI_LDAP_PORT}';
\$dolibarr_main_auth_ldap_version='${DOLI_LDAP_VERSION}';
\$dolibarr_main_auth_ldap_servertype='${DOLI_LDAP_SERVERTYPE}';
\$dolibarr_main_auth_ldap_login_attribute='${DOLI_LDAP_LOGIN_ATTRIBUTE}';
\$dolibarr_main_auth_ldap_dn='${DOLI_LDAP_DN}';
\$dolibarr_main_auth_ldap_filter ='${DOLI_LDAP_FILTER}';
\$dolibarr_main_auth_ldap_admin_login='${DOLI_LDAP_ADMIN_LOGIN}';
\$dolibarr_main_auth_ldap_admin_pass='${DOLI_LDAP_ADMIN_PASS}';
\$dolibarr_main_auth_ldap_debug='${DOLI_LDAP_DEBUG}';
// ##################
// # Security #
// ##################
\$dolibarr_main_prod='${DOLI_PROD}';
\$dolibarr_main_force_https='${DOLI_HTTPS}';
\$dolibarr_main_restrict_os_commands='mysqldump, mysql, pg_dump, pgrestore';
\$dolibarr_nocsrfcheck='${DOLI_NO_CSRF_CHECK}';
\$dolibarr_main_cookie_cryptkey='$(openssl rand -hex 32)';
\$dolibarr_mailing_limit_sendbyweb='0';
EOF
chown www-data:www-data /var/www/html/conf/conf.php
chmod 766 /var/www/html/conf/conf.php
fi
# Detect Docker container version (ie. previous installed version)
installed_version="0.0.0.0"
if [ -f /var/www/documents/.docker-container-version ]; then
# shellcheck disable=SC2016
installed_version="$(cat /var/www/documents/.docker-container-version)"
fi
if [ -f /var/www/documents/install.version ]; then
# shellcheck disable=SC2016
installed_version="$(cat /var/www/documents/install.version)"
mv \
/var/www/documents/install.version \
/var/www/documents/.docker-container-version
fi
# Detect Docker image version (docker specific solution)
# shellcheck disable=SC2016
image_version="${DOLI_VERSION}"
if [ -f /usr/src/dolibarr/.docker-image-version ]; then
# shellcheck disable=SC2016
image_version="$(cat /usr/src/dolibarr/.docker-image-version)"
fi
if version_greater "$installed_version" "$image_version"; then
log "Can't start Dolibarr because the version of the data ($installed_version) is higher than the docker image version ($image_version) and downgrading is not supported. Are you sure you have pulled the newest image version?"
exit 1
fi
# Initialize image
if version_greater "$image_version" "$installed_version"; then
log "Dolibarr initialization..."
if [ "$(id -u)" = 0 ]; then
rsync_options="-rvlDog --chown www-data:root"
else
rsync_options="-rvlD"
fi
mkdir -p /var/www/scripts
rsync $rsync_options /usr/src/dolibarr/scripts/ /var/www/scripts/
rsync $rsync_options --delete --exclude /conf/ --exclude /custom/ --exclude /theme/ /usr/src/dolibarr/htdocs/ /var/www/html/
for dir in conf custom; do
if [ ! -d "/var/www/html/$dir" ] || directory_empty "/var/www/html/$dir"; then
rsync $rsync_options --include "/$dir/" --exclude '/*' /usr/src/dolibarr/htdocs/ /var/www/html/
fi
done
# The theme folder contains custom and official themes. We must copy even if folder is not empty, but not delete content either
for dir in theme; do
rsync $rsync_options --include "/$dir/" --exclude '/*' /usr/src/dolibarr/htdocs/ /var/www/html/
done
if [ "$installed_version" != "0.0.0.0" ]; then
# Call upgrade if needed
# https://wiki.dolibarr.org/index.php/Installation_-_Upgrade#With_Dolibarr_.28standard_.zip_package.29
log "Dolibarr upgrade from $installed_version to $image_version..."
if [ -f /var/www/documents/install.lock ]; then
rm /var/www/documents/install.lock
fi
base_version=$(echo "${installed_version}" | sed -e 's|\(.*\..*\)\..*|\1|g')
target_version=$(echo "${image_version}" | sed -e 's|\(.*\..*\)\..*|\1|g')
run_as "cd /var/www/html/install/ && php upgrade.php ${base_version}.0 ${target_version}.0"
run_as "cd /var/www/html/install/ && php upgrade2.php ${base_version}.0 ${target_version}.0"
run_as "cd /var/www/html/install/ && php step5.php ${base_version}.0 ${target_version}.0"
log 'This is a lock file to prevent use of install pages (generated by container entrypoint)' > /var/www/documents/install.lock
chown www-data:www-data /var/www/documents/install.lock
chmod 400 /var/www/documents/install.lock
elif [ -n "$DOLI_AUTO_CONFIGURE" ] && [ ! -f /var/www/documents/install.lock ]; then
log "Create forced values for first Dolibarr install..."
cat <<EOF > /var/www/html/install/install.forced.php
<?php
// Forced install config file for Dolibarr ${DOLI_VERSION} ($(date +%Y-%m-%dT%H:%M:%S%:z))
/** @var bool Hide PHP informations */
\$force_install_nophpinfo = true;
/** @var int 1 = Lock and hide environment variables, 2 = Lock all set variables */
\$force_install_noedit = 2;
/** @var string Information message */
\$force_install_message = 'Dolibarr installation (Docker)';
/** @var string Data root absolute path (documents folder) */
\$force_install_main_data_root = '/var/www/documents';
/** @var bool Force HTTPS */
\$force_install_mainforcehttps = !empty('${DOLI_HTTPS}');
/** @var string Database name */
\$force_install_database = '${DOLI_DB_NAME}';
/** @var string Database driver (mysql|mysqli|pgsql|mssql|sqlite|sqlite3) */
\$force_install_type = '${DOLI_DB_TYPE}';
/** @var string Database server host */
\$force_install_dbserver = '${DOLI_DB_HOST}';
/** @var int Database server port */
\$force_install_port = '${DOLI_DB_PORT}';
/** @var string Database tables prefix */
\$force_install_prefix = '${DOLI_DB_PREFIX}';
/** @var string Database username */
\$force_install_databaselogin = '${DOLI_DB_USER}';
/** @var string Database password */
\$force_install_databasepass = '${DOLI_DB_PASSWORD}';
/** @var bool Force database user creation */
\$force_install_createuser = false;
/** @var bool Force database creation */
\$force_install_createdatabase = !empty('${DOLI_DB_ROOT_LOGIN}');
/** @var string Database root username */
\$force_install_databaserootlogin = '${DOLI_DB_ROOT_LOGIN}';
/** @var string Database root password */
\$force_install_databaserootpass = '${DOLI_DB_ROOT_PASSWORD}';
/** @var string Dolibarr super-administrator username */
\$force_install_dolibarrlogin = '${DOLI_ADMIN_LOGIN}';
/** @var bool Force install locking */
\$force_install_lockinstall = true;
/** @var string Enable module(s) (Comma separated class names list) */
\$force_install_module = '${DOLI_MODULES}';
EOF
log "You shall complete Dolibarr install manually at '${DOLI_URL_ROOT}/install'"
fi
fi
if [ ! -d /var/www/htdocs ]; then
log "Adding a symlink to /var/www/htdocs..."
ln -s /var/www/html /var/www/htdocs
fi
if [ ! -d /var/www/scripts ]; then
log "Initializing Dolibarr scripts directory..."
cp -r /usr/src/dolibarr/scripts /var/www/scripts
fi
if [ -f /var/www/documents/install.lock ]; then
log "Updating Dolibarr installed version..."
echo "$image_version" > /var/www/documents/.docker-container-version
fi
log "Serving Dolibarr...$@"
exec "$@"

View File

@@ -0,0 +1,60 @@
server {
listen 80;
listen [::]:80;
add_header Referrer-Policy origin; # make sure outgoing links don't show the URL to the Matomo instance
root /var/www/html;
index index.php index.html;
try_files $uri $uri/ =404;
## only allow accessing the following php files
location ~ \.php$ {
# regex to split $uri to $fastcgi_script_name and $fastcgi_path
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_index index.php;
# Check that the PHP script exists before passing it
try_files $fastcgi_script_name =404;
proxy_connect_timeout 3600;
proxy_send_timeout 3600;
proxy_read_timeout 3600;
send_timeout 3600;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param HTTP_PROXY ""; # prohibit httpoxy: https://httpoxy.org/
fastcgi_pass 127.0.0.1:9000;
}
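# fastcgi_pass targets 127.0.0.1:9000 because the nginx-proxy and dolibarr (php-fpm)
# containers share the pod's network namespace (see the deployment manifest)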
## disable all access to the following directories
location ~ /\.ht {
deny all;
return 403;
}
location ~ /\.git {
deny all;
}
location ~ \.(gif|ico|jpg|png|svg|js|css|htm|html|mp3|mp4|wav|ogg|avi|ttf|eot|woff|woff2|json)$ {
allow all;
## Cache images,CSS,JS and webfonts for an hour
## Increasing the duration may improve the load-time, but may cause old files to show after a Matomo upgrade
expires 1h;
add_header Pragma public;
add_header Cache-Control "public";
}
location ~ /(libs|vendor|plugins|misc/user) {
deny all;
return 403;
}
## properly display textfiles in root directory
location ~ /(.*\.md|LEGALNOTICE|LICENSE) {
default_type text/plain;
}
}
# vim: filetype=nginx

apps/dolibarr/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-dolibarr
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/dolibarr
- name: path-to-dockerfile
value: apps/dolibarr/Dockerfile
- name: image-name
value: cr.lan/dolibarr
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/dolibarr
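To kick off the build, apply the PipelineRun and follow its logs (the second step assumes the tkn CLI is installed):
```
kubectl apply -f apps/dolibarr/tekton.yaml
tkn pipelinerun logs img-dolibarr -f
```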


@@ -11,6 +11,8 @@ metadata:
release: latest
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: gitea
@@ -24,7 +26,6 @@ spec:
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: Always
env:
- name: USER_UID
value: "1000"
@@ -32,6 +33,8 @@ spec:
value: "1000"
- name: TZ
value: "Europe/Berlin"
- name: GITEA__lfs__PATH
value: /data/git/lfs
- name: DB_TYPE
value: postgres
- name: DB_HOST
@@ -42,6 +45,26 @@ spec:
value: gitea
- name: DB_PASSWD
value: giteaEu94XSS4gKpheSBoMsIs
#- name: GITEA__indexer__ISSUE_INDEXER
#value: redis
#- name: GITEA__indexer__ISSUE_INDEXER_QUEUE_CONN_STR
#value: addrs=redis-standalone.live-env.svc.cluster.local:6379 db=1
- name: GITEA__packages__ENABLED
value: "true"
- name: GITEA__log__LEVEL
value: warn
- name: GITEA__log__MODE
value: file
- name: GITEA__log__ROUTER
value: file
- name: GITEA__log__MACARON
value: file
#- name: GITEA__queue__TYPE
#value: redis
#- name: GITEA__queue__CONN_STR
#value: redis://redis-standalone.live-env.svc.cluster.local:6379/0
- name: GITEA__server__ROOT_URL
value: http://git-ui.lan/
volumeMounts:
- name: gitea
mountPath: /data
@@ -53,20 +76,24 @@ spec:
containerPort: 22
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
port: http
readinessProbe:
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
port: http
resources:
requests:
memory: "200Mi"
memory: "300Mi"
cpu: "150m"
limits:
memory: "312Mi"
cpu: "500m"
memory: "512Mi"
cpu: "1000m"
volumes:
- name: gitea
persistentVolumeClaim:
@@ -79,7 +106,8 @@ metadata:
labels:
app: gitea
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin02
volumeName: gitea
accessModes:
- ReadWriteOnce
resources:
@@ -87,6 +115,26 @@ spec:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: gitea
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/gitea-data
server: ebin02
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: gitea
namespace: live-env
---
apiVersion: v1
kind: Service
metadata:
name: gitea
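The GITEA__section__KEY variables added above use Gitea's standard environment-to-app.ini mapping; a hypothetical check inside the running pod, assuming the image's default config path /data/gitea/conf/app.ini:
```
# GITEA__lfs__PATH=/data/git/lfs  ->  [lfs] PATH = /data/git/lfs
# GITEA__log__LEVEL=warn          ->  [log] LEVEL = warn
kubectl -n live-env exec deploy/gitea -- grep -A1 '^\[lfs\]' /data/gitea/conf/app.ini
```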


@@ -1,6 +1,6 @@
FROM debian:bullseye-slim
FROM cr.lan/debian-stable-php-fpm
ENV DEBIAN_FRONTEND noninteractive
ARG GRAV_VERSION=1.6.28
ARG GRAV_VERSION=1.7.34
ARG DEV_PKGS="zlib1g-dev libpng-dev libjpeg-dev libfreetype6-dev \
libcurl4-gnutls-dev libxml2-dev libonig-dev"


@@ -1,5 +1,5 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
RUN set -ex; \
apt-get update; \


@@ -1,5 +1,5 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql


@@ -1,76 +1,23 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: chaos-kubernetes-git
name: img-mariadb-prometheus-node-exporter
spec:
type: git
pipelineRef:
name: kaniko-pipeline
params:
- name: revision
value: master
- name: url
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mariadb-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mariadb-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-prometheus-exporter-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb-prometheus-exporter
- name: git-revision
value: master
- name: path-to-image-context
value: apps/mariadb/mariadb-prometheus
- name: path-to-dockerfile
value: apps/mariadb/mariadb-prometheus/Dockerfile
- name: image-name
value: cr.lan/mariadb-prometheus-node-exporter
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mariadb-prometheus-node-exporter


@@ -1,76 +1,23 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mariadb
spec:
type: image
pipelineRef:
name: kaniko-pipeline
params:
- name: url
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/mariadb/mariadb
- name: path-to-dockerfile
value: apps/mariadb/mariadb/Dockerfile
- name: image-name
value: cr.lan/mariadb
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mariadb
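PipelineRuns are immutable once created, so re-triggering this build after the PipelineResource-to-PipelineRun migration means deleting and re-applying (the file path is assumed to match the context above):
```
kubectl delete pipelinerun img-mariadb --ignore-not-found
kubectl apply -f apps/mariadb/mariadb/tekton.yaml
```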


@@ -0,0 +1,19 @@
FROM cr.wks/debian-golang AS build
ENV GOARCH=arm64
ENV GOPATH=/usr/src/gopath
ENV GOCACHE=/usr/src/gocache
RUN go env
WORKDIR /usr/src
RUN go install github.com/sapcc/mosquitto-exporter@latest
#RUN go mod download
FROM cr.wks/debian-stable
LABEL source_repository="https://github.com/sapcc/mosquitto-exporter"
COPY --from=build /usr/src/gopath/bin/mosquitto-exporter /mosquitto-exporter
RUN chmod 0755 /mosquitto-exporter
EXPOSE 9234
ENTRYPOINT [ "/mosquitto-exporter" ]
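A hypothetical local build-and-run of this two-stage image; the --endpoint flag matches the args used by the mosquitto deployment further down:
```
docker build -t cr.wks/mosquitto-exporter .
docker run --rm -p 9234:9234 cr.wks/mosquitto-exporter --endpoint tcp://mqtt.lan:1883
curl -s http://localhost:9234/metrics | head   # Prometheus metrics on the exposed port
```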


@@ -1,8 +1,6 @@
FROM debian:stable-slim
FROM cr.wks/debian-stable
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
RUN apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto procps && \
apt-get clean -y && \

0
apps/mosquitto/bla Normal file

@@ -62,7 +62,8 @@ spec:
name: mosquitto-data
subPath: mosquitto/data
- name: mosquitto-exporter
image: cr.lan/mosquitto-exporter
image: cr.lan/mosquitto-prometheus-exporter
args: ["--endpoint", "tcp://mqtt.lan:1883"]
imagePullPolicy: Always
ports:
- containerPort: 9234


@@ -1,93 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: github-mosquitto-prometheus-exporter
spec:
type: git
params:
- name: revision
value: master
- name: url
value: https://github.com/sapcc/mosquitto-exporter.git
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-binary
image: cr.lan/debian-golang-stable
script: |
#!/usr/bin/env bash
cd $(resources.inputs.source.path)
ls -al
export GOARCH=arm64
export GOPATH=/usr/src/gopath
export GOCACHE=/usr/src/gocache
go env
go get github.com/sapcc/mosquitto-exporter
make -j4 build CGO_ENABLED=0
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
workspaces:
- name: usr-src
mountPath: /usr/src
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-prometheus-exporter
spec:
taskRef:
name: build-mosquitto-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: github-mosquitto-prometheus-exporter
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto-prometheus-exporter
workspaces:
- name: usr-src
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: usr_src


@@ -1,77 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mosquitto/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mosquitto
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mosquitto
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto


@@ -1,4 +1,4 @@
FROM nextcloud:21-fpm
FROM nextcloud:24-fpm
#needed for some reason
ENV NEXTCLOUD_UPDATE=1
@@ -6,7 +6,7 @@ ENV NEXTCLOUD_UPDATE=1
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
procps bash iputils-ping libmagickcore-6.q16-6-extra
procps bash iputils-ping libmagickcore-6.q16-6-extra vim-tiny
RUN apt-get clean -y && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*


@@ -3,7 +3,7 @@
// Manually deployed by yourself
//
$CONFIG = array(
'config_is_read_only' => true,
'config_is_read_only' => false,
'htaccess.RewriteBase' => '/',
'memcache.local' => '\\OC\\Memcache\\APCu',
'apps_paths' => array(
@@ -46,7 +46,7 @@ $CONFIG = array(
),
'datadirectory' => '/var/www/html/data',
'dbtype' => 'pgsql',
'version' => '20.0.9.1',
'version' => '24.0.0',
'overwrite.cli.url' => 'http://nc.lan',
'dbname' => 'nextcloud',
'dbhost' => 'postgres.live-env.svc.cluster.local:5432',
@@ -57,4 +57,4 @@ $CONFIG = array(
'installed' => true,
'default_phone_region' => 'DE',
'updater.release.channel' => 'stable',
);
);


@@ -1,77 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-nextcloud
spec:
type: image
params:
- name: url
value: cr.lan/nextcloud
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-nextcloud
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/nextcloud/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/nextcloud
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-nextcloud
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-nextcloud
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-nextcloud


@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-nextcloud
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/nextcloud
- name: path-to-dockerfile
value: apps/nextcloud/Dockerfile
- name: image-name
value: cr.lan/nextcloud
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/nextcloud


@@ -1,42 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
selector:
matchLabels:
run: nginx-deployment
template:
metadata:
labels:
run: nginx-deployment
spec:
containers:
- image: nginx
name: nginx-webserver
---
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
type: NodePort
selector:
run: nginx-deployment
ports:
- port: 80
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx-test
spec:
rules:
- host: nginx-test.lan
http:
paths:
- backend:
serviceName: nginx-service
servicePort: 80


@@ -1,4 +1,4 @@
FROM debian:stable-slim
FROM cr.lan/debian-stable
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \


@@ -41,6 +41,17 @@ spec:
value: pg2020
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- prometheus
- loki
topologyKey: kubernetes.io/hostname
# - name: prometheus-exporter
# image: wrouesnel/postgres_exporter
# env:
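The new podAntiAffinity keeps postgres off any node already running a pod labelled app=prometheus or app=loki; one way to verify placement after a rollout (namespace live-env assumed from the service DNS names used elsewhere in this diff):
```
kubectl get pods -A -l 'app in (prometheus,loki)' -o wide
kubectl -n live-env get pods -o wide | grep postgres   # should land on a different node
```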

103
apps/redis.yaml Normal file

@@ -0,0 +1,103 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: redis-cm
namespace: live-env
data:
redis.conf: |-
bind * -::*
appendonly yes
maxmemory 5mb
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis-standalone
namespace: live-env
spec:
serviceName: redis-standalone
replicas: 1
selector:
matchLabels:
app: redis-standalone
template:
metadata:
labels:
app: redis-standalone
spec:
containers:
- name: redis-standalone
image: redis
command: ["redis-server"]
args: ["/usr/local/etc/redis/redis.conf"]
resources:
limits:
memory: "128Mi"
cpu: "50m"
ports:
- containerPort: 6379
volumeMounts:
- name: redis-standalone-pv
mountPath: /data
- name: config
mountPath: /usr/local/etc/redis
volumes:
- name: config
configMap:
name: redis-cm
- name: redis-standalone-pv
persistentVolumeClaim:
claimName: redis-standalone-pv
---
apiVersion: v1
kind: Service
metadata:
name: redis-standalone
labels:
app: redis-standalone
env: live-env
spec:
selector:
app: redis-standalone
type: LoadBalancer
loadBalancerIP: 172.23.255.6
ports:
- name: redis-standalone
port: 6379
targetPort: 6379
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: redis-standalone-pv
labels:
app: redis-standalone
spec:
storageClassName: nfs-ssd-ebin02
volumeName: redis-standalone-pv
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: redis-standalone-pv
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/redis-standalone-pv
server: ebin02
capacity:
storage: 100Mi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: redis-standalone-pv
namespace: live-env
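A quick smoke test for the manifest above, using the StatefulSet pod name and the LoadBalancer IP it declares:
```
kubectl apply -f apps/redis.yaml
kubectl -n live-env exec redis-standalone-0 -- redis-cli ping   # expect PONG
redis-cli -h 172.23.255.6 ping                                  # via the LoadBalancer IP
```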


@@ -1,54 +1,42 @@
FROM debian:buster-slim
FROM cr.chaos/debian-stable-php-fpm as baseimage
ARG ROMPR_VERSION=1.58
ARG ROMPR_VERSION=2.24
# Install packages
ENV DEBIAN_FRONTEND noninteractive
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get -y install \
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get -y install \
nginx \
php-fpm \
curl \
php-mysql \
php-curl \
php-gd \
unzip \
imagemagick \
php-json \
php-xml \
php-mbstring \
php-sqlite3 \
php-intl
unzip
# Cleanup
RUN apt-get remove -y --purge ${DEV_PKGS} && \
apt-get autoremove --purge -y && \
RUN apt-get autoremove --purge -y && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*
RUN curl -k -L -o rompr.zip https://github.com/fatg3erman/RompR/releases/download/${ROMPR_VERSION}/rompr-${ROMPR_VERSION}.zip
RUN mkdir -p /app
RUN mkdir -p /app /rompr
RUN unzip -d /app rompr.zip && rm rompr.zip
RUN mkdir -p /rompr
RUN ln -sf /rompr/prefs /app/rompr/prefs
RUN ln -sf /rompr/albumart /app/rompr/albumart
RUN ln -sf /rompr/prefs /app/rompr/prefs; ln -sf /rompr/albumart /app/rompr/albumart;
RUN chown -R www-data:www-data /app/rompr /rompr
COPY nginx_default /etc/nginx/sites-available/default
RUN pwd; ls -la .;ls -la /etc/php/
ADD files/nginx_default /etc/nginx/sites-available/default
RUN mkdir -p /run/php/
FROM baseimage as final
#Environment variables to configure php
RUN sed -ri -e 's/^allow_url_fopen =.*/allow_url_fopen = On/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^memory_limit =.*/memory_limit = 128M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^max_execution_time =.*/max_execution_time = 1800/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^post_max_size =.*/post_max_size = 256M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^upload_max_filesize =.*/upload_max_filesize = 8M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^max_file_uploads =.*/max_file_uploads = 50/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^display_errors =.*/display_errors = On/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^display_startup_errors =.*/display_startup_errors = On/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^allow_url_fopen =.*/allow_url_fopen = On/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^memory_limit =.*/memory_limit = 128M/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^max_execution_time =.*/max_execution_time = 1800/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^post_max_size =.*/post_max_size = 256M/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^upload_max_filesize =.*/upload_max_filesize = 8M/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^max_file_uploads =.*/max_file_uploads = 50/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^display_errors =.*/display_errors = On/g' /etc/php/8.4/fpm/php.ini && \
sed -ri -e 's/^display_startup_errors =.*/display_startup_errors = On/g' /etc/php/8.4/fpm/php.ini
RUN echo "<?php phpinfo(); ?>" > /app/rompr/phpinfo.php
RUN update-rc.d php7.3-fpm defaults
COPY run-httpd /usr/local/bin/
RUN update-rc.d php8.4-fpm defaults
ADD files/run-httpd /usr/local/bin/
RUN chmod 755 /usr/local/bin/run-httpd
EXPOSE 80
VOLUME ["/rompr"]
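To confirm the sed-based php.ini overrides made it into the final stage (PHP 8.4 paths taken from the Dockerfile, image name from the README below):
```
docker run --rm cr.wks/rompr:latest \
  grep -E '^(memory_limit|max_execution_time|post_max_size|upload_max_filesize)' \
  /etc/php/8.4/fpm/php.ini
```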


@@ -1,3 +1,5 @@
lighttpd is configured in etc_lighttpd
generate a configmap with:
kubectl create configmap rompr-lighttpd-config --from-file etc_lighttpd/
Run with:
```
podman run --pull=always -d --replace -p 127.0.0.1:8081:80 \
  --mount=type=bind,source=/var/lib/rompr,destination=/rompr \
  --tz=Europe/Berlin --name=rompr cr.wks/rompr:latest
```


@@ -1,73 +0,0 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
name: rompr
spec:
selector:
matchLabels:
app: rompr
strategy:
type: Recreate
template:
metadata:
labels:
app: rompr
spec:
containers:
- image: cr.lan/rompr
name: rompr
imagePullPolicy: Always
ports:
- containerPort: 80
name: http
volumeMounts:
- name: rompr-data
mountPath: /rompr
volumes:
- name: rompr-data
persistentVolumeClaim:
claimName: rompr-data
---
apiVersion: v1
kind: Service
metadata:
name: rompr
spec:
ports:
- name: http
port: 80
selector:
app: rompr
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rompr
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: musik.lan
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: rompr
port:
name: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rompr-data
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 6Gi


@@ -20,13 +20,13 @@ server {
index index.php;
location ~ \.php {
try_files $uri index.php =404;
fastcgi_pass unix:/var/run/php/php7.3-fpm.sock;
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $request_filename;
include /etc/nginx/fastcgi_params;
fastcgi_read_timeout 1800;
}
error_page 404 = /404.php;
error_page 404 = /rompr/404.php;
try_files $uri $uri/ =404;
location ~ /albumart/* {
expires -1s;


@@ -0,0 +1,8 @@
#!/bin/sh
rm -f /var/run/nginx.pid
mkdir -p /var/log/nginx
set -e
mkdir -p /rompr/albumart /rompr/prefs
chown www-data:www-data -R /rompr/albumart /rompr/prefs
/etc/init.d/php8.4-fpm restart
exec /usr/sbin/nginx -g 'daemon off;'


@@ -1,7 +0,0 @@
#!/bin/sh
rm -f /var/run/nginx.pid
mkdir -p /var/log/nginx
set -e
/etc/init.d/php7.3-fpm restart
exec /usr/sbin/nginx -g 'daemon off;'


@@ -1,77 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-rompr
spec:
type: image
params:
- name: url
value: cr.lan/rompr
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-rompr
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/rompr/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/rompr
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-rompr-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-rompr
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-rompr


@@ -45,7 +45,7 @@ spec:
periodSeconds: 5
resources:
requests:
memory: "92Mi"
memory: "200Mi"
cpu: "250m"
limits:
memory: "256Mi"


@@ -1,8 +0,0 @@
Install:
# Pipelines: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml@
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard:
## update submodule in ./dashboard
## Build: @docker build -t tekton-dashboard:arm64 -t docker-registry.lan/tekton-dashboard:arm64 --platform linux/arm64 --build-arg GOARCH=arm64 .@
## apply deployment.yaml


@@ -1,60 +0,0 @@
# Copyright 2020 Tekton Authors LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: config-registry-cert
namespace: tekton-pipelines
labels:
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-pipelines
data:
# Registry's self-signed certificate
# TODO: somehow automate this with salt
cert: |
-----BEGIN CERTIFICATE-----
MIIFujCCA6KgAwIBAgIEYsvT+zANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE
RTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFDASBgNVBAMMC3R1
bW9yLmNoYW9zMB4XDTIxMDIxMjE4MzAzM1oXDTIyMDIxMjE4MzAzM1owLzELMAkG
A1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMIICIjAN
BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAog4t352wKHS4pflQK4NlWH6yv1FK
MnqNJiNnIgkWrNABTu9ES3cmUwdEhf+Um7MJYvQivOZFIH65wBBmOxfnYWB+NPwn
XAi/o3BcePIdbwEGs0cxgIEKbmL9fY0SCXq0pXRu8Y7WAhqdTNp6/HY2fTMx7ghX
RNQPoeNlcfAZgpsJlZdkSzMYoFpGIW+Tvj3INNuIuHo1pagckWW/hGUIqY0NuUV9
Aj8LOHhHB+vKtjbq5DMVAob4kKOPJFmq/1D6fmRh3W1YAGikowVv3V45jAmnkcBj
Z8BIEiOnBy1AyW9o8Tc5000MAGNrm9IGpRfBBTptSAApZmK1V6zKreqCiCpgOBbh
6U1Bf1L39u8aLVRxeyzQbxqBM1VTbjKxygFSIR/7rVd9BEhx6VA95EG+EdPLpKDp
mymElCcVgv2ZhKBRxtne4CAQD5ng2SoEqLdjvZdC44QNapnj+6jlaNvKRJ1q63kq
B5Y4shJxYOc6QDQp2+Eh2d7qQNiTE3FJC/aeXDNQ+dqeV7chU+PbcbMQoxnIN6ou
Zc2IdtNL87+Apgh6vqZX9pELBXUN1Nu3NI88T8tw1CdqfFfh4Z2EEBBCsPD0yZPV
UrHZsAMiHh5prRkwsBVzDBIaLYd6glf/w9W8sWxe5wceDNhxD8VAfq/ZXeuE1Pme
cTVYsBNj8idC9tECAwEAAaOBxzCBxDAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF
4DAdBgNVHQ4EFgQUa7ADNR68XrDsLtLtngmdJQ9UtOswcAYDVR0jBGkwZ4AU9l9v
D1+dukLLV/uDnP3eB4i6ZyihSaRHMEUxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZC
ZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEUMBIGA1UEAwwLdHVtb3IuY2hhb3OCBBKa
C88wFgYDVR0RBA8wDYILdHVtb3IuY2hhb3MwDQYJKoZIhvcNAQELBQADggIBAKK3
S8qKrsarBflGrDI4diG+QOcMG3/y6juARp3vxQf3fDqC6HZCl+kWAp+Cq3Sp/hU7
GKM7qraWpvGxgmDyaevAirLdFlYQBgcIl9frPI8yfLWbZHWvx3PFXNqg2Ckm98xX
vSUacPTPp/tKFBquJ5+j+/YS2U4qWWNIYYtDEI+3lswfoeh0CIEPSxDk0wHDAyfZ
Vh30ZuZhsf3F63xMggw/RpEHeTTCr0YGOAmzpb7jItcbP/EER1qTQ4T+3ExuC40C
EdOAeL377O2rr7zjcmJWk8B5FaQ8K8UdE/iQGM7tP5ieMNTVACe21KFpqIIXaIka
HqRTyvRmJGUrVf1NeXE16yKirIqAjEV/B/4S244wxYcwqweZObbI0PnbnEMn3PMF
TV+e1CUmVOKyGIxfHH7j/VKQfmH/W0jOlGWI7OkbdU5GckoX4Knjrv2MmT9i2ENy
6dID3BJVm6hK2SjJLc7SxbPXMG3I6BrlA5/3LaXzl+2fWAk5OA1jnGZz0P4XcdOO
iAulB4I3PdmNRdSYAXVRdo5OLoq/7iBcqSrCXRw1IbgJm0VlS2AI6hGEXDQvjQwP
38ijZUV/ch2lGyUZOfQymI7Ylh+Airn8ctqyMS8FeZBAyny4/t7xrhWuGO1awUzp
4p/sEjg6kqp3oLai5yhaz9S+y7Ao5XmGDdzfalWH
-----END CERTIFICATE-----


@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: tekton.lan
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: tekton-dashboard
port:
number: 9097


@@ -1,526 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: extensions.dashboard.tekton.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.apiVersion
name: API version
type: string
- JSONPath: .spec.name
name: Kind
type: string
- JSONPath: .spec.displayname
name: Display name
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: dashboard.tekton.dev
names:
categories:
- tekton
- tekton-dashboard
kind: Extension
plural: extensions
shortNames:
- ext
- exts
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- create
- update
- delete
- patch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.dashboard.tekton.dev/aggregate-to-dashboard: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
- taskruns/finalizers
- pipelineruns/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- taskruns/finalizers
- pipelineruns/finalizers
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-backend
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
ports:
- name: http
port: 9097
protocol: TCP
targetPort: 9097
selector:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
template:
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
name: tekton-dashboard
spec:
containers:
- args:
- --port=9097
- --logout-url=
- --pipelines-namespace=tekton-pipelines
- --triggers-namespace=tekton-pipelines
- --read-only=false
- --csrf-secure-cookie=false
- --log-level=info
- --log-format=json
- --namespace=
- --openshift=false
- --stream-logs=false
- --external-logs=
env:
- name: INSTALLED_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
- name: TEKTON_PIPELINES_WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
#image: gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard@sha256:744eb92d7d0365bbfb2405df4ba4d2a66c01edc26028c362bd5675e2bc1b9626
image: docker-registry.lan/tekton-dashboard:arm64
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /health
port: 9097
name: tekton-dashboard
ports:
- containerPort: 9097
readinessProbe:
httpGet:
path: /readiness
port: 9097
securityContext:
runAsNonRoot: true
runAsUser: 65532
serviceAccountName: tekton-dashboard
volumes: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-pipelines
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-dashboard
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-triggers
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-tenant
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-extensions
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
spec:
rules:
- host: tekton.lan
http:
paths:
- backend:
serviceName: tekton-dashboard
servicePort: 9097

File diff suppressed because it is too large.


@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tektoncd-workspaces
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi

14
bin/find_changes.sh Executable file

@@ -0,0 +1,14 @@
#!/bin/bash
# list the directories touched by the most recent commit
declare -a CH=()
i=0
echo "$(git --version)"
while read -r line; do
WHAT=$(dirname "${line}")
echo "LIN: ${line} WHAT: ${WHAT}"
CH[$i]=$WHAT
i=$((i + 1))   # i=$((i++)) never increments in bash
done < <(git diff-tree --no-commit-id --name-only HEAD -r | grep -E '^_')
#echo "UNIQ:"
UNIQ=$(printf '%s\n' "${CH[@]}" | sort | uniq)   # ${CH} alone expands only element 0
echo "${UNIQ}"
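Example invocation from the repository root right after a commit; the output below is invented for illustration:
```
./bin/find_changes.sh
# git version 2.x
# LIN: _apps/foo/Dockerfile WHAT: _apps/foo
# _apps/foo
```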


@@ -1,5 +0,0 @@
from :https://github.com/coreos/prometheus-operator/blob/master/Documentation/additional-scrape-config.md
# create new secret:
kubectl create secret generic additional-scrape-configs --from-file=prometheus-additional.yaml --dry-run -oyaml > additional-scrape-configs.yaml
# add "namespace: monitoring"
# apply


@@ -1,7 +0,0 @@
apiVersion: v1
data:
prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0Ci0gam9iX25hbWU6IGhhcHJveHkKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGFkbTAxLndrczo5MTAxCiAgICAtIGRydWNraS53a3M6OTEwMQogICAgLSBhdXRvMDIuY2hhb3M6OTEwMQotIGpvYl9uYW1lOiBrbGlwcGVyCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjM5MDMKLSBqb2JfbmFtZTogb2N0b3ByaW50CiAgbWV0cmljc19wYXRoOiAvcGx1Z2luL3Byb21ldGhldXNfZXhwb3J0ZXIvbWV0cmljcwogIHBhcmFtczoKICAgIGFwaWtleToKICAgIC0gMzBFOEIwMUJGRDY3NEU1QkJENDQ2RDA4QzQ3MzBERjQKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS53a3M6ODAKLSBqb2JfbmFtZTogaGFzc2lvCiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUpoTXpCbVlqVTFaamN5WkdFMFl6YzJZbVUyTm1ZME5qbGpOVEF5TWpkalpDSXNJbWxoZENJNk1UWXhNamc0TXpJNU55d2laWGh3SWpveE9USTRNalF6TWprM2ZRLjFJQ3NIbGlVWFIwQ0c0SDh2UVJZSjVqVnFGd21xS1NCMGZTY1NpdEMtUTQnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gaGFzc2lvLmxhbjo4MAotIGpvYl9uYW1lOiBoYXNzaW9fcmluZzg2CiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUkwT0dGalpUSmlObVEzT1RnME1qYzNZV0ZtTW1ObVptVTFZemM0TlRFME5DSXNJbWxoZENJNk1UWXhNakU1TWprME1Dd2laWGh3SWpveE9USTNOVFV5T1RRd2ZRLkJiSUFYbTlScTBqYjZvdXFnVkhOZDZLZWV6M05QM3loLTd3eWZ1b0I4WWsnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gYXV0by5jaGFvczo4MAotIGpvYl9uYW1lOiBwb3N0Z3JlcwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIHBvc3RncmVzLmxpdmUtZW52LnN2Yy5jbHVzdGVyLmxvY2FsOjkxODcKLSBqb2JfbmFtZTogbm9kZQogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gYWRtMDEud2tzOjkxMDAKICAgIC0gZHVtb250LXdrcy53a3M6OTEwMAogICAgLSBkcnVja2kud2tzOjkxMDAKICAgIC0gZWJpbjAxLndrczo5MTAwCiAgICAtIGViaW4wMi53a3M6OTEwMAogICAgLSBvc21jLndrczo5MTAwCiAgICAtIHJpb3QwMS53a3M6OTEwMAogICAgLSB0cnVoZS5jaGFvczo5MTAwCiAgICAtIGF1dG8wMS5jaGFvczo5MTAwCiAgICAtIGF1dG8wMi5jaGFvczo5MTAwCiAgICAtIGR1bW9udC5jaGFvczo5MTAwCiAgICAtIHR1bW9yLmNoYW9zOjkxMDAKICAgIC0gd29obnouY2hhb3M6OTEwMAo=
kind: Secret
metadata:
creationTimestamp: null
name: additional-scrape-configs


@@ -1,30 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s
namespace: metallb-system
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring


@@ -1,62 +0,0 @@
- job_name: gitea
static_configs:
- targets:
- git-ui.lan
- job_name: mysqld
static_configs:
- targets:
- mariadb.lan:9104
- job_name: mqtt.mosquitto
static_configs:
- targets:
- mqtt.lan:9234
- job_name: haproxy
static_configs:
- targets:
- adm01.wks:9101
- drucki.wks:9101
- auto02.chaos:9101
- job_name: klipper
static_configs:
- targets:
- drucki.wks:3903
- job_name: octoprint
metrics_path: /plugin/prometheus_exporter/metrics
params:
apikey:
- 30E8B01BFD674E5BBD446D08C4730DF4
static_configs:
- targets:
- drucki.wks:80
- job_name: hassio
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4'
static_configs:
- targets:
- hassio.lan:80
- job_name: hassio_ring86
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiI0OGFjZTJiNmQ3OTg0Mjc3YWFmMmNmZmU1Yzc4NTE0NCIsImlhdCI6MTYxMjE5Mjk0MCwiZXhwIjoxOTI3NTUyOTQwfQ.BbIAXm9Rq0jb6ouqgVHNd6Keez3NP3yh-7wyfuoB8Yk'
static_configs:
- targets:
- auto.chaos:80
- job_name: postgres
static_configs:
- targets:
- postgres.live-env.svc.cluster.local:9187
- job_name: node
static_configs:
- targets:
- adm01.wks:9100
- dumont-wks.wks:9100
- drucki.wks:9100
- ebin01.wks:9100
- ebin02.wks:9100
- osmc.wks:9100
- riot01.wks:9100
- truhe.chaos:9100
- auto01.chaos:9100
- auto02.chaos:9100
- dumont.chaos:9100
- tumor.chaos:9100
- wohnz.chaos:9100

Some files were not shown because too many files have changed in this diff.