292 Commits
123 ... master

Author SHA1 Message Date
2e3bb35f86 coreddns update 2023-10-15 19:17:51 +02:00
47cbd88587 coredns / cluster upgrade 2023-01-16 18:57:58 +01:00
dd74762778 tekton PVC? required? 2023-01-12 20:54:31 +01:00
07d7f45e64 other stings 2023-01-12 20:53:46 +01:00
536c0c4ddc flannel 0.20 upgrade 2023-01-12 20:53:23 +01:00
fcb2e69615 upgrade galore from 1.23 to 1.26. and cluster ist still at 1.25? See: Readme.md 2023-01-12 20:52:46 +01:00
e2e032ac94 another nfs -client provisioner 2022-12-08 17:51:23 +01:00
4bbf79569c another nfs -client provisioner 2022-12-08 17:47:14 +01:00
273fb0e252 more updates 2022-12-08 17:09:38 +01:00
62f5788742 changing output dir 2022-12-08 16:47:19 +01:00
9b2d2a9d95 php-fpm 2022-12-08 16:43:36 +01:00
b5ff289f66 stuff 2022-12-08 16:39:52 +01:00
7cb8d572e7 stuff 2022-12-08 14:03:01 +01:00
14aceae467 new version and create dirs on run 2022-12-08 13:57:10 +01:00
604d065252 new version and create dirs on run 2022-12-08 13:09:24 +01:00
b50d6de8f7 cleanup 2022-11-18 10:26:13 +01:00
79c4e5e0c7 tekton stuff and install 2022-11-18 10:24:39 +01:00
d7241c7563 removed obsolete submods 2022-11-18 10:21:37 +01:00
8fbf07efdf removed descheduler, helm is on its way 2022-10-25 14:03:10 +02:00
beb1bfe0da nginx ingress is installed via helm now 2022-10-25 14:01:34 +02:00
8b62746bcc cleanup 2022-10-12 13:20:42 +02:00
94b39a804b merged 2022-09-19 16:58:14 +02:00
43d17581b3 gitea and apt-cacher 2022-09-19 16:56:40 +02:00
180d28fe80 Merge branch 'master' of git.lan:chaos/kubernetes 2022-09-19 16:54:53 +02:00
30ba290918 don't know why this shit doesn't run anymore 2022-09-10 13:32:34 +02:00
b111463cf5 Merge branch 'master' of git.lan:chaos/kubernetes 2022-08-24 19:17:10 +02:00
c2f6c546eb gitea uses ebin02 2022-08-24 19:16:24 +02:00
748b94f069 local changes 2022-07-30 12:54:52 +02:00
59c019727d rompr version 1.61 2022-07-30 12:51:17 +02:00
17f8b2f5cb mosquitto and prometheus 2022-07-30 12:43:56 +02:00
105e051d64 grav and tekton 2022-07-30 12:33:26 +02:00
9b92cf35e0 Merge branch 'master' of git.lan:chaos/kubernetes 2022-07-30 12:29:55 +02:00
41a2ba8c82 Dockerfile using our debian image 2022-07-30 12:29:43 +02:00
3b552f3134 my changes 2022-07-30 11:47:09 +02:00
7c778d3794 pipeline for mariadb prometheus 2022-07-29 18:42:25 +02:00
a608ac1297 mariadb pipeline 2022-07-29 18:41:01 +02:00
89c3eaac22 dolibarr and curl 2022-07-28 19:08:22 +02:00
7505262bc9 pipelinerun for nextcloud 2022-07-28 18:57:19 +02:00
9c88f4bc6c nextcloud pipelinerun 2022-07-28 18:52:51 +02:00
f96313a307 deschduler 2022-06-22 21:00:51 +02:00
1d3eb09904 deschduler 2022-06-22 21:00:18 +02:00
287458f48b gitea liveness probes and some config updates 2022-06-21 12:29:35 +02:00
5affbfd886 gitea liveness probes and some config updates 2022-06-21 12:27:57 +02:00
c1b864155e nextcloud 24 2022-05-08 11:33:56 +02:00
2827dac20c nextcloud 24 2022-05-07 10:47:51 +02:00
0c8338cd86 nextcloud 24 2022-05-06 19:44:19 +02:00
62aa39b493 descheduler still amystery 2022-03-20 11:23:40 +01:00
c626429abf more rfactoring 2022-03-16 19:58:37 +01:00
237981b8b2 multiarch-support is gone in bullseye 2022-03-16 19:28:46 +01:00
7763958f0f using another src dir 2022-03-16 19:06:17 +01:00
d904f51d20 migrated base images to pipeline runs 2022-03-16 18:33:09 +01:00
613da54d99 migrated base images to pipeline runs 2022-03-16 18:30:18 +01:00
06c173e650 refactoring 2022-03-16 18:11:11 +01:00
23e696c1a5 listening on localhost tcp 2022-03-15 14:02:19 +01:00
17f490accb finding our way 2022-03-15 13:40:50 +01:00
b4d33528a2 finding our way 2022-03-15 13:09:50 +01:00
857641a92e git-clone taks and updates 2022-03-15 12:55:42 +01:00
5a2b1a1521 git-clone taks and updates 2022-03-15 12:34:17 +01:00
e1930c8fae moved to files fome configs i have 2022-03-14 16:53:07 +01:00
48cc3f2b66 using bullseye 2022-03-14 16:52:04 +01:00
19ffec27ff using bullseye 2022-03-14 16:45:24 +01:00
d860eee58e using bullseye 2022-03-14 16:41:24 +01:00
84acbf3c2c using bullseye 2022-03-14 16:38:44 +01:00
74901c0cb8 merged with v0.10.33 2022-03-14 09:55:34 +01:00
6bed4a690d php-fpm www.conf listen tcp 2022-03-14 09:10:27 +01:00
2f775470fd php-fpm www.conf listen tcp 2022-03-13 14:18:44 +01:00
2efe4378cb php-fpm www.conf listen tcp 2022-03-13 13:44:50 +01:00
f722ad99bf php-fpm www.conf listen tcp 2022-03-13 13:41:48 +01:00
2926f20542 php-fpm www.conf listen tcp 2022-03-13 13:32:12 +01:00
cc7cef4abe php-fpm debian image 2022-03-13 13:25:29 +01:00
9aab08d889 php-fpm debian image 2022-03-13 12:59:51 +01:00
4b4f0055cc php-fpm debian image 2022-03-13 12:53:19 +01:00
ae30b7dc0b php-fpm debian image 2022-03-13 12:43:39 +01:00
f703c33044 php-fpm debian image 2022-03-13 12:39:31 +01:00
e3e8308416 php-fpm debian image 2022-03-13 12:25:40 +01:00
785c24e32f php-fpm debian image 2022-03-13 12:16:32 +01:00
942b54c3fc php-fpm debian image 2022-03-13 12:13:17 +01:00
8c67a09c57 php-fpm debian image 2022-03-13 12:05:46 +01:00
2e4717d508 php-fpm debian image 2022-03-13 12:02:59 +01:00
bf902aebc5 dolibarr CRM 2022-03-12 19:43:18 +01:00
3c16cbdc59 dolibarr CRM 2022-03-12 16:26:12 +01:00
8732c960bb dolibarr CRM 2022-03-12 15:36:23 +01:00
edb5dd8e83 dolibarr CRM 2022-03-12 15:30:57 +01:00
c0c6d618c7 dolibarr CRM 2022-03-12 15:30:01 +01:00
1387477174 dolibarr CRM 2022-03-12 15:11:38 +01:00
4ed19d3f76 dolibarr CRM 2022-03-12 14:57:28 +01:00
de9e3c4602 dolibarr CRM 2022-03-12 14:35:29 +01:00
423dc10e5f dolibarr CRM 2022-03-12 13:18:59 +01:00
40fbf50ae1 dolibarr CRM 2022-03-12 13:12:45 +01:00
97e55e032b dolibarr CRM 2022-03-12 13:09:51 +01:00
8d5193dc16 dolibarr CRM 2022-03-12 13:06:51 +01:00
a69ebc9779 dolibarr CRM 2022-03-12 13:04:55 +01:00
057be561c5 dolibarr CRM 2022-03-12 12:59:30 +01:00
b42e651e03 this stuff doesn't quite work 2022-02-01 22:38:32 +01:00
27724423fb using kaniko task from tectoncd catalog 2022-02-01 20:38:21 +01:00
2a32977d80 docker-reg-ui needs to be latest 2022-02-01 18:56:27 +01:00
9f117548c3 armhf experiments 2022-02-01 18:56:08 +01:00
cfd86e1aa7 also building armhf 2022-02-01 17:42:51 +01:00
1abb9c3d48 also building armhf 2022-02-01 17:19:42 +01:00
225c04e35b also building armhf 2022-02-01 17:19:36 +01:00
50ee3e875a new nexctcloud version 23 2022-01-28 12:00:09 +01:00
af4e616d6d error page upgrade for rompr 2022-01-19 18:11:41 +01:00
0828492d42 new rompr version 2022-01-19 17:57:14 +01:00
93b01e5abb stoeff 2022-01-19 17:42:42 +01:00
83241b2602 instructiones 2021-11-03 20:31:28 +01:00
a438815b1a consule helm values, unused by now 2021-10-26 20:09:31 +02:00
f372124fa9 removed mosquitto-exporter submod 2021-10-26 20:09:07 +02:00
320cd6eb34 coredns now also serves .lan 2021-10-21 12:16:34 +02:00
a46743bc96 authelia works 2021-10-19 13:35:23 +02:00
aa44e55363 version string in config needs to be updated, hooray 2021-10-18 14:57:13 +02:00
b55e6b247d version string in config needs to be updated, hooray 2021-10-18 14:32:05 +02:00
4e37047691 wtf 2021-10-18 14:24:16 +02:00
b90687f69c Nextcloud 22 2021-10-18 13:30:12 +02:00
1896ea8be1 Nextcloud 22 2021-10-18 13:09:05 +02:00
688485987f antiaffinities and namespaces 2021-10-16 18:23:19 +02:00
f006923f61 I'm going slightly mad 2021-10-14 19:31:52 +02:00
1eb12be2e8 adapting memory requests 2021-10-07 11:31:38 +02:00
4fb80042a2 nc 21 again 2021-10-04 21:52:13 +02:00
fcbbf57031 postgres 13, forced
gitea with limits adapted
descheduler, still
2021-10-04 21:29:08 +02:00
6b8d34f88c nextcloud 22 2021-10-04 21:10:17 +02:00
82adc0d6ae migrated Ingresses to nginx 1.0.0
https://blog.hycorve.com/migrating-from-ingress-networking-k8s-io-v1beta1-to-v1/
2021-09-21 18:53:33 +02:00
90e89bf867 flannel 1.14 upgrade 2021-09-20 19:31:50 +02:00
6c7ba4385c postgres monitoring 2021-08-21 10:43:58 +02:00
7578dca854 refactored postgres 2021-08-21 10:26:07 +02:00
666db73722 adding exporter to postgres 2021-08-21 10:21:24 +02:00
8e3e8c0e45 trusted proxies 2021-08-20 12:36:29 +02:00
3108ceeebc config_is_read_only does not work 2021-08-20 12:13:44 +02:00
bb607f8774 config_is_read_only does not work 2021-08-20 12:13:18 +02:00
928a3a942a no more supervisor 2021-08-20 11:46:31 +02:00
da2c41de37 no more apache 2021-08-20 11:46:08 +02:00
8fc6757dbd nextcloud config from configmap 2021-08-20 11:45:44 +02:00
455f519fca yes, nginx we are 2021-08-19 20:44:29 +02:00
d1d240f4e3 going nginx/fpm 2021-08-19 15:14:00 +02:00
8dd75bef5d using fpm version 2021-08-19 08:31:10 +02:00
ed4cb41728 na, its to early 2021-08-17 10:35:48 +02:00
afb58dfacf running upgrade on init 2021-08-17 10:19:47 +02:00
8f4f586964 loki pv, installed via helm 2021-08-13 19:20:38 +02:00
07e6ec3779 addign magickcore with svg support 2021-08-10 08:56:26 +02:00
68255f12fd descheduler, I don't quite get it 2021-08-06 14:35:46 +02:00
2f0dfffc5c no 'full' nextcloud 2021-06-28 10:24:09 +02:00
b3919708e8 chown to www-data for supervisord dirs 2021-06-28 09:51:58 +02:00
08bbda1278 no access to stderr 2021-06-26 16:42:16 +02:00
f20dbd975f setting user to www-data 2021-06-26 11:48:01 +02:00
66eed92fd2 nextcloud 21 2021-06-25 15:56:53 +02:00
2ffeb004ba supervisord logs to stderr and pidfile in /tmp/ 2021-06-25 15:41:18 +02:00
2b1767271a vim and other pkgs removed 2021-06-25 14:56:21 +02:00
e10cfa28fe nextcloud full with supervisord and cron 2021-06-23 18:25:11 +02:00
c5c5605031 applied new paths 2021-06-23 18:10:55 +02:00
eb93167600 applied new paths 2021-06-23 18:10:13 +02:00
c83fdd990a removed tekton dashboard submod 2021-06-22 09:31:29 +02:00
2e1eb475e9 removed docker-registry-ui submod 2021-06-22 09:29:48 +02:00
20501a36d0 new rompr version 2021-06-21 15:59:03 +02:00
01255383e1 new rompr version 2021-06-21 15:56:00 +02:00
6201447116 refactored, noone needs the web folder 2021-05-28 19:45:35 +02:00
5e07cdc688 bucket update and no apps install yet 2021-05-20 11:36:28 +02:00
8e74a4c0ef no .htaccess but our own apache config 2021-05-15 19:20:24 +02:00
09a4c58638 we're still debugging 2021-05-14 21:39:59 +02:00
b6e45339f1 make firmware, it deploys that shit 2021-05-12 16:13:57 +02:00
cc0b6f92dc do we need the entrypoint and cmd 2021-05-12 15:59:48 +02:00
a15b29ad38 do we need the entrypoint and cmd 2021-05-12 15:46:04 +02:00
522899fd14 do we need the entrypoint and cmd 2021-05-12 15:25:30 +02:00
99185edd98 we need our own image, goddammit 2021-05-11 16:42:40 +02:00
8a7f54cf1b testing, realky. 2021-05-11 15:48:16 +02:00
fa3cd5ea7c debian testing image 2021-05-08 11:59:12 +02:00
60dbeadebc what the freaking f 2021-05-06 10:56:40 +02:00
219c25f8f4 no patch any more, we want the CA.crt 2021-05-05 13:15:24 +02:00
0e13b3446a dockerfile fix 2021-05-05 12:23:09 +02:00
0badcc15c6 adding ca.crt to pod (minio access) 2021-05-05 12:04:17 +02:00
c34600a47b adding ca.crt to pod (minio access) 2021-05-05 11:30:16 +02:00
84833b0e94 yes 2021-04-16 19:49:39 +02:00
4e45f3954c build-essential image 2021-04-11 21:52:18 +02:00
41f36deb08 build-essential image 2021-04-11 21:51:57 +02:00
0a42103c21 touch .ocdata 2021-04-10 21:46:11 +02:00
71157dfa19 hassio limits 2021-04-10 21:26:59 +02:00
437bf9a96f touch .ocdata 2021-04-10 20:53:07 +02:00
f1f0e0b98e debug for docker and our own image 2021-04-10 20:35:28 +02:00
29b7576c83 debug for docker and our own image 2021-04-10 20:34:43 +02:00
bd5a19fcb6 debug for docker and our own image 2021-04-10 20:16:46 +02:00
f47e96617f our own nextcloud image, of course! 2021-04-10 20:12:04 +02:00
ecc1041761 hdd ebin01 archive storagio 2021-04-10 16:17:12 +02:00
13cb0c1929 deschulder stops working, nfs-hdd-ebin01 storage class 2021-04-09 22:16:32 +02:00
741698526f systemd -dev libs 2021-03-24 19:32:02 +01:00
f740ce7ffa python-deps 2021-03-24 10:18:30 +01:00
223729647b sqlcipher includ 2021-03-24 08:45:49 +01:00
5a577afdd5 we actually need to fetch that thang! 2021-03-23 16:29:04 +01:00
c46975c112 we actually need to fetch that thang! 2021-03-23 16:09:04 +01:00
40c9df0bea we actually need to fetch that thang! 2021-03-23 15:46:03 +01:00
8773365477 we actually need to fetch that thang! 2021-03-23 15:29:03 +01:00
08f48796e8 npm install doesn't do a thing? 2021-03-23 15:16:40 +01:00
8f5d268fdc npm install doesn't do a thing? 2021-03-23 15:15:34 +01:00
7a1400e1f1 start.sh copying and starting fix, deployment 2021-03-23 14:38:37 +01:00
3506a17bc7 start.sh copying and starting fix, deployment 2021-03-23 11:10:01 +01:00
07ca7a7833 start.sh copying and starting fix, deployment 2021-03-23 10:19:07 +01:00
8365cebcb9 tensorboard? 2021-03-22 19:18:50 +01:00
7d50c1df5c tensorboard? 2021-03-22 19:03:29 +01:00
65e679cea6 npm, we need ! 2021-03-22 18:46:30 +01:00
deb3c618fe npm, we need ! 2021-03-22 18:43:31 +01:00
03938dc864 use apt-cache, you fool! 2021-03-22 18:36:26 +01:00
bb56ea5b39 tekton, baby! 2021-03-22 18:32:04 +01:00
f1c5493d95 verbose mosquitto 2021-03-19 21:36:00 +01:00
173c2a9d01 faster snapshots 2021-03-19 21:20:27 +01:00
b18dea273f procps 2021-03-19 21:20:08 +01:00
bfc01803a2 fixing some run issues and creating /rompr before volume 2021-03-19 14:23:48 +01:00
a625c7351e fixing some run issues and creating /rompr before volume 2021-03-19 14:04:32 +01:00
6e58e75668 migrated to debian 2021-03-19 12:34:01 +01:00
cc66ee9eae migrated to debian 2021-03-19 12:11:13 +01:00
aa55a0314e deployment adapt and tekton image-build 2021-03-19 12:00:30 +01:00
96ec8b5555 rompr tekton build 2021-03-19 11:04:06 +01:00
9431f73ead Dockerfile again 2021-03-14 10:14:19 +01:00
5912fd84f0 deprecate default StorageClass 2021-03-02 19:40:20 +01:00
f6a0f2af5e migrated to _sys/nfs-... 2021-03-02 19:38:59 +01:00
56fd09b49d nfs-client-provisioner, need tekton git status .! 2021-03-02 19:34:47 +01:00
4325eff624 new namespaces 2021-03-02 19:15:58 +01:00
ec3e530a36 new namespaces 2021-03-02 19:15:24 +01:00
eaa5d94a72 new namespaces 2021-03-02 19:15:11 +01:00
033118eb89 no deployment for debian-stable and descheduler policy update 2021-02-27 14:11:44 +01:00
8bf0fe5f10 removing sleep infinity 2021-02-27 13:24:12 +01:00
d26981cd78 distcc moved to _CI-CD 2021-02-23 21:26:21 +01:00
878359e846 also install g++ gnueabihf 2021-02-23 17:49:44 +01:00
a4b1be1bc2 removing unecessary archs 2021-02-22 19:52:33 +01:00
e9212313d6 debian-stable kept running 2021-02-22 19:20:27 +01:00
6582f5093a removing zeroconf /etc/distcc/hosts 2021-02-22 18:43:27 +01:00
f1fc18a594 building an debian-stable image 2021-02-22 13:45:27 +01:00
aea6550d6e building an debian-stable image 2021-02-22 13:37:55 +01:00
d4cc44a1ea building an debian-stable image 2021-02-22 13:13:10 +01:00
aaaf6fa29f building an debian-stable image 2021-02-22 13:08:48 +01:00
3538f407e8 mariadb and postgres and all of that 2021-02-20 22:40:49 +01:00
276e41fde2 obsolete 2021-02-19 22:35:43 +01:00
b25f6ca608 apt-cacher image doesn't use apt-cache.lan... yes,yes! 2021-02-19 22:19:00 +01:00
b66179023a debug 2021-02-19 22:17:47 +01:00
a397bdc71e debug 2021-02-19 22:06:09 +01:00
4ec401f348 debug 2021-02-19 22:03:29 +01:00
fd569d894a refactor _sys and namespaces 2021-02-19 22:02:19 +01:00
ed3e8cdddc apt-cacher image doesn't use apt-cache.lan... yes,yes! 2021-02-19 21:35:26 +01:00
9cda7c9f76 docker-reg-ui 2021-02-19 20:53:39 +01:00
c094e99451 obsolete 2021-02-19 20:53:25 +01:00
5a16e4cf40 no procps 2021-02-19 20:53:07 +01:00
be069c53bf distcc in tekton 2021-02-19 20:52:44 +01:00
ce329ca353 golang image 2021-02-18 23:52:00 +01:00
b45a4489fc golang image 2021-02-18 23:28:38 +01:00
1ac9cc0b4c stuff 2021-02-18 23:08:00 +01:00
38cac7a57f debian-golang image 2021-02-18 23:04:53 +01:00
9dd3b2b4e0 debian-golang image 2021-02-18 22:57:56 +01:00
0f6c04a0f0 debian-golang image 2021-02-18 22:55:40 +01:00
3a28bebcda mosquitto in tekton 2021-02-18 22:16:26 +01:00
f17dea5dff mosquitto in tekton 2021-02-18 22:16:12 +01:00
1a42071c26 mosquitto in tekton 2021-02-18 21:49:53 +01:00
0e79b36875 tekton pipelines for apps 2021-02-18 21:42:24 +01:00
c918c39e6e using apt-cache.lan 2021-02-18 21:08:56 +01:00
5932220ead Dockerfile again, because kaniko! 2021-02-18 21:03:57 +01:00
36fa98e78b CI-CD Stuff, mainly tekton 2021-02-18 20:55:44 +01:00
6b7b23dd71 tekton local configs 2021-02-18 20:55:20 +01:00
b131b76916 apt-cacher-ng in tekton 2021-02-18 20:54:53 +01:00
fb33950bc8 updated systems: descheduler runs at root.... 2021-02-18 20:54:24 +01:00
3970c20e3a updated systems 2021-02-18 20:54:06 +01:00
1cc1de7ed8 no more dockerfile, we're podmanning now :) 2021-02-10 14:58:10 +01:00
b91ea42a41 updates 2021-02-10 14:49:14 +01:00
f616346ac6 sweet caroline 2021-02-09 20:26:53 +01:00
315520baa6 static pvs for essential services 2021-01-24 00:20:55 +01:00
76c036fa79 static pvs 2021-01-21 12:56:33 +01:00
5ce8a3b5be all subs 2021-01-21 12:56:11 +01:00
0bdd4a2db0 all subs 2021-01-21 12:54:20 +01:00
76e516c7f3 pvs for grafana and prometheus 2021-01-21 12:53:10 +01:00
c7363d513e using flannel now 2021-01-21 12:52:46 +01:00
8d66cb1f66 postgres svc fix 2021-01-21 12:52:18 +01:00
f9269f2c2c persistent grafana/prometheus pvs 2021-01-21 10:06:01 +01:00
9b9b551907 new run 2021-01-20 15:33:26 +01:00
ab96839f50 removed external-storage 2021-01-20 15:32:30 +01:00
a3bd4349e2 nummer5 in wks 2021-01-07 21:50:45 +01:00
4dcb961e81 tekton for the masses 2020-12-08 17:12:45 +01:00
9561cb8d82 grav on php74/bullseye 2020-11-30 19:34:31 +01:00
2e3e37062a new and old scrapes 2020-11-19 18:38:25 +01:00
f85ff91873 doesn;t work yet 2020-11-12 18:03:29 +01:00
62cb2881c2 d-ui needs less resources 2020-11-11 21:00:58 +01:00
bb4fb01b7c hack: running socat and z2mqtt in one container with supervisor 2020-11-10 22:32:33 +01:00
e8c1fa3cef hassio 2020-11-09 19:33:15 +01:00
5efad7226b migrated to stats.lan 2020-11-09 13:59:58 +01:00
a5ca5799c7 hairpin mode does the trick 2020-11-09 13:59:42 +01:00
2224fc50c8 grav 2020-11-08 10:45:46 +01:00
54a80d25d9 grav+nginx+phpfpm 2020-11-02 16:22:38 +01:00
c4d78c6805 webapps refactoring 2020-10-31 23:47:09 +01:00
3bb2b5072a webapps 2020-10-31 23:45:24 +01:00
fef8a517ee entrypoint 2020-10-31 22:43:47 +01:00
6419eec2af urubu python CMS 2020-10-30 21:16:40 +01:00
c7bb0632d1 we-re traefik now 2020-10-30 21:16:03 +01:00
879a375a8c removed ingress-nginx submod 2020-10-30 21:15:26 +01:00
1fb381f2db removed ingress-nginx submod 2020-10-30 21:14:38 +01:00
163792a913 haproxy replaces nginx? 2020-10-29 12:13:59 +01:00
76318f92bd codetogether original - never mind... 2020-10-23 23:05:49 +02:00
1489869898 codetogether original 2020-10-23 23:00:44 +02:00
e0df26962a no connection to mqqt.chaos 2020-10-22 18:19:45 +02:00
f0729d9055 tumor ist der meister, immerhin! 2020-10-20 23:10:41 +02:00
151 changed files with 11540 additions and 1936 deletions

23
.gitmodules vendored
View File

@@ -3,16 +3,13 @@
url = https://github.com/coreos/kube-prometheus.git
[submodule "cluster-monitoring"]
path = cluster-monitoring
url = https://github.com/carlosedp/cluster-monitoring.git
url = git@git.lan:chaos/k8s-cluster-monitoring.git
[submodule "gluster-kubernetes"]
path = gluster-kubernetes
url = https://github.com/jayflory/gluster-kubernetes.git
[submodule "kubernetes-ingress"]
path = kubernetes-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "ingress-nginx"]
path = ingress-nginx
url = https://github.com/kubernetes/ingress-nginx.git
[submodule "pihole-kubernetes"]
path = pihole-kubernetes
url = https://github.com/MoJo2600/pihole-kubernetes.git
@@ -28,12 +25,6 @@
[submodule "mosquitto/charts"]
path = mosquitto/charts
url = https://github.com/smizy/charts.git
[submodule "external-storage"]
path = external-storage
url = https://github.com/kubernetes-incubator/external-storage.git
[submodule "mosquitto-exporter"]
path = mosquitto-exporter
url = https://github.com/sapcc/mosquitto-exporter.git
[submodule "csi-s3/storage-csi-s3"]
path = csi-s3/storage-csi-s3
url = https://github.com/ctrox/csi-s3.git
@@ -46,3 +37,15 @@
[submodule "csi-s3/node-driver-registrar"]
path = csi-s3/node-driver-registrar
url = https://github.com/kubernetes-csi/node-driver-registrar.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git
[submodule "_sys/haproxy-ingress"]
path = _sys/haproxy-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "nfs-subdir-external-provisioner"]
path = nfs-subdir-external-provisioner
url = https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
[submodule "descheduler"]
path = descheduler
url = https://github.com/kubernetes-sigs/descheduler.git

View File

@@ -5,6 +5,11 @@
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
@@ -13,5 +18,6 @@
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
</natures>
</projectDescription>

9
TODO.md Normal file
View File

@@ -0,0 +1,9 @@
root@pine01:/etc/kubernetes# kubeadm upgrade apply v1.24.9 --ignore-preflight-errors=CoreDNSUnsupportedPlugins
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0112 18:28:48.533830 21616 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
CoreDNS v1.8.6 v1.9.3

View File

@@ -0,0 +1,11 @@
FROM cr.lan/debian-stable
RUN apt-get update && apt-get install -y \
golang make git
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*

View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-debian-golang
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: _CI-CD/debian-golang
- name: path-to-dockerfile
value: _CI-CD/debian-golang/Dockerfile
- name: image-name
value: cr.lan/debian-stable-golang
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/debian-stable-golang

View File

@@ -0,0 +1,15 @@
FROM cr.lan/debian-stable
RUN apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash \
build-essential make ccache distcc-pump distcc g++ \
libncursesw5-dev
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"

View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-debian-stable-build-essential
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: _CI-CD/debian-stable-build-essential
- name: path-to-dockerfile
value: _CI-CD/debian-stable-build-essential/Dockerfile
- name: image-name
value: cr.lan/debian-stable-build-essential
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/debian-stable-build-essential

View File

@@ -0,0 +1,19 @@
FROM cr.lan/debian-stable
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash openssl \
php-fpm php-zip php-sqlite3 php-pgsql php-mysqli php-json php-readline \
php-xml php-ldap php-imap php-intl php-xmlrpc php-imagick php-gd php-cli php-curl \
php-bz2 php-mbstring php-memcache php-redis
#cleanup
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD etc_php-fpm/www.conf /etc/php/7.4/fpm/pool.d
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"

View File

@@ -0,0 +1,440 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
; listen = /run/php/php7.4-fpm.sock
listen = 127.0.0.1:9000
; Set listen(2) backlog.
; Default Value: 511 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
; or group is different from the master process user. It allows to create process
; core dump and ptrace the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 5
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following informations:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then informations are related to the
; last request the process has served. Otherwise informations are related to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php/7.4/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with embraces to specify the name of the env
; variable. Some exemples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be associated with embraces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by 'request_terminate_timeout' ini option is not engaged after
; application calls 'fastcgi_finish_request' or when application has finished and
; shutdown functions are being called (registered via register_shutdown_function).
; This option will enable timeout limit to be applied unconditionally
; even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environement, this can cause some delay in the page
; process time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with prefix and suffix containing information about
; the child that writes to the log and if stdout or stderr is used as well as
; log level and time. This options is used only if catch_workers_output is yes.
; Settings to "no" will output data as written to the stdout or stderr.
; Default value: yes
;decorate_workers_output = no
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M

View File

@@ -0,0 +1,23 @@
# Builds and pushes cr.lan/debian-stable-php-fpm via the kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-debian-stable-php-fpm
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
  - name: git-url
    value: http://git-ui.lan/chaos/kubernetes.git
  - name: git-revision
    value: master
  - name: path-to-image-context
    value: _CI-CD/debian-stable-php-fpm
  - name: path-to-dockerfile
    value: _CI-CD/debian-stable-php-fpm/Dockerfile
  - name: image-name
    value: cr.lan/debian-stable-php-fpm
  workspaces:
  - name: git-source
    persistentVolumeClaim:
      claimName: tektoncd-workspaces
    # Each image build gets its own subPath on the shared workspace PVC.
    subPath: tekton/debian-stable-php-fpm

View File

@@ -0,0 +1,14 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping bash && \
RUN apt-get remove -y --purge man-db ;\
apt-get autoremove -y --purge ;\
apt-get clean -y ;\
rm -rf /var/lib/apt/lists/* ;\
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Pass-through entrypoint: exit on any error, then replace this shell with
# the requested command so it runs as PID 1 and receives container signals.
set -e
exec "$@"

View File

@@ -0,0 +1,23 @@
# Builds and pushes cr.lan/debian-stable via the kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-debian-stable
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
  - name: git-url
    value: http://git-ui.lan/chaos/kubernetes.git
  - name: git-revision
    value: master
  - name: path-to-image-context
    value: _CI-CD/debian-stable
  - name: path-to-dockerfile
    value: _CI-CD/debian-stable/Dockerfile
  - name: image-name
    value: cr.lan/debian-stable
  workspaces:
  - name: git-source
    persistentVolumeClaim:
      claimName: tektoncd-workspaces
    # Each image build gets its own subPath on the shared workspace PVC.
    subPath: tekton/debian-stable

View File

@@ -0,0 +1,15 @@
# Debian testing base image routed through the local apt cache.
FROM debian:testing-slim
# Point apt at the local cache, install debug utilities, and purge/clean in
# the SAME layer — a later `RUN rm -rf` cannot shrink the image because the
# files already live in the earlier layer. (Duplicate `bash` removed.)
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list \
 && sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list \
 && apt-get update \
 && apt-get install -y \
    bash dnsutils iputils-ping nmap procps \
 && apt-get remove -y --purge man-db \
 && apt-get autoremove -y --purge \
 && apt-get clean -y \
 && rm -rf /var/lib/apt/lists/* /var/cache/apt/*
# COPY instead of ADD: ADD's extra behaviors (tar extraction, URL fetch) are not wanted.
COPY docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]

View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Pass-through entrypoint: exit on any error, then replace this shell with
# the requested command so it runs as PID 1 and receives container signals.
set -e
exec "$@"

View File

@@ -0,0 +1,23 @@
# Builds and pushes cr.lan/debian-testing via the kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-debian-testing
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
  - name: git-url
    value: http://git-ui.lan/chaos/kubernetes.git
  - name: git-revision
    value: master
  - name: path-to-image-context
    value: _CI-CD/debian-testing
  - name: path-to-dockerfile
    value: _CI-CD/debian-testing/Dockerfile
  - name: image-name
    value: cr.lan/debian-testing
  workspaces:
  - name: git-source
    persistentVolumeClaim:
      claimName: tektoncd-workspaces
    # Each image build gets its own subPath on the shared workspace PVC.
    subPath: tekton/debian-testing

23
_CI-CD/distcc/Dockerfile Normal file
View File

@@ -0,0 +1,23 @@
# distcc worker image: native + ARM cross toolchains on the build-essential base.
FROM cr.lan/debian-stable-build-essential
# Install toolchains and clean the apt caches in the SAME layer so the
# cleanup actually shrinks the image (a later RUN cannot remove bytes from
# an earlier layer).
RUN apt-get update \
 && apt-get install -y \
    gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf \
    dpkg-dev distcc ccache \
    build-essential gcc cpp g++ clang llvm \
 && apt-get remove -y --purge man-db \
 && apt-get autoremove -y --purge \
 && apt-get clean -y \
 && rm -rf /var/lib/apt/lists/* /var/cache/apt/*
# distcc job port (EXPOSE is documentation only; it publishes nothing)
EXPOSE 3632
# statistics port
EXPOSE 3633
# Drop root; the `distccd` system user is created by the distcc package.
USER distccd
# Exec form + `exec` keeps distccd as PID 1 so it receives SIGTERM from the
# runtime (the old shell form left /bin/sh as PID 1 and ate the signal);
# the sh -c wrapper is still required to expand $OPTIONS at start time.
ENTRYPOINT ["/bin/sh", "-c", "exec /usr/bin/distccd --no-detach --daemon --stats --log-level error --log-stderr $OPTIONS"]

View File

@@ -1,27 +1,25 @@
apiVersion: apps/v1
kind: Deployment
kind: StatefulSet
metadata:
labels:
app: distcc
release: buster
release: stable
name: distcc
namespace: default
spec:
replicas: 3
serviceName: distcc
replicas: 4
selector:
matchLabels:
app: distcc
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: distcc
release: buster
release: stable
spec:
containers:
- name: distcc
image: docker-registry.lan/distcc:armhf
image: cr.lan/distcc
imagePullPolicy: Always
#env:
#- name: OPTIONS
@@ -35,12 +33,11 @@ spec:
protocol: TCP
resources:
limits:
cpu: 1
cpu: 4
memory: 128Mi
requests:
cpu: 1
cpu: 50m
memory: 64Mi
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
@@ -61,11 +58,9 @@ kind: Service
metadata:
labels:
app: distcc
release: buster
namespace: default
release: stable
name: distcc
spec:
externalTrafficPolicy: Cluster
ports:
- name: distcc-data
port: 3632
@@ -77,4 +72,3 @@ spec:
protocol: TCP
selector:
app: distcc
type: LoadBalancer

23
_CI-CD/distcc/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Builds and pushes cr.lan/distcc via the kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-distcc
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
  - name: git-url
    value: http://git-ui.lan/chaos/kubernetes.git
  - name: git-revision
    value: master
  - name: path-to-image-context
    value: _CI-CD/distcc
  - name: path-to-dockerfile
    value: _CI-CD/distcc/Dockerfile
  - name: image-name
    value: cr.lan/distcc
  workspaces:
  - name: git-source
    persistentVolumeClaim:
      claimName: tektoncd-workspaces
    # Each image build gets its own subPath on the shared workspace PVC.
    subPath: tekton/distcc

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
  name: git-secret
type: Opaque
data:
  # WARNING(review): a real-looking access token is committed here in plain
  # base64 (base64 is encoding, not encryption — anyone with repo access can
  # decode it). Rotate this credential and inject it via a sealed/external
  # secret mechanism instead of committing it.
  token: Nzk1YTFhMGQxMWQ0MDJiY2FiOGM3MjkyZDk5ODIyMzg2NDNkM2U3OQo=

33
_CI-CD/tekton-pvc.yaml Normal file
View File

@@ -0,0 +1,33 @@
# Shared NFS-backed workspace storage for all Tekton PipelineRuns.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tektoncd-workspaces
  namespace: default
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 40Gi
  storageClassName: nfs-ssd-ebin02
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tektoncd-workspaces
spec:
  storageClassName: "nfs-ssd-ebin02"
  nfs:
    path: /data/raid1-ssd/k8s-data/tektoncd-workspaces
    server: ebin02
  capacity:
    storage: 40Gi
  accessModes:
  # BUG FIX: was ReadWriteOnce — a PV must offer the access mode the PVC
  # requests, so the ReadWriteMany claim above could never bind to this
  # volume. NFS supports RWX natively.
  - ReadWriteMany
  volumeMode: Filesystem
  persistentVolumeReclaimPolicy: Retain
  # Pre-bind this PV to the claim above so nothing else can grab it.
  claimRef:
    kind: PersistentVolumeClaim
    name: tektoncd-workspaces
    namespace: default

View File

@@ -0,0 +1,101 @@
# Trimmed-down copy of the Tekton catalog `git-clone` Task: clones a repo
# into the `output` workspace using the upstream git-init helper image and
# publishes the checked-out commit SHA as the `commit` result.
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: git-clone
spec:
  workspaces:
  - name: output
    description: The git repo will be cloned onto the volume backing this workspace
  params:
  - name: url
    description: git url to clone
    type: string
    default: http://git-ui.lan/chaos/kubernetes.git
  - name: revision
    description: git revision to checkout (branch, tag, sha, ref…)
    type: string
    default: master
  - name: refspec
    description: (optional) git refspec to fetch before checking out revision
    default: ""
  - name: submodules
    description: defines if the resource should initialize and fetch the submodules
    type: string
    default: "true"
  - name: depth
    description: performs a shallow clone where only the most recent commit(s) will be fetched
    type: string
    default: "1"
  - name: sslVerify
    description: defines if http.sslVerify should be set to true or false in the global git config
    type: string
    default: "true"
  - name: subdirectory
    description: subdirectory inside the "output" workspace to clone the git repo into
    type: string
    default: ""
  - name: deleteExisting
    description: clean out the contents of the repo's destination directory (if it already exists) before trying to clone the repo there
    type: string
    default: "true"
  - name: httpProxy
    description: git HTTP proxy server for non-SSL requests
    type: string
    default: ""
  - name: httpsProxy
    description: git HTTPS proxy server for SSL requests
    type: string
    default: ""
  - name: noProxy
    description: git no proxy - opt out of proxying HTTP/HTTPS requests
    type: string
    default: ""
  results:
  - name: commit
    description: The precise commit SHA that was fetched by this Task
  steps:
  - name: clone
    image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.30.2
    script: |
      CHECKOUT_DIR="$(workspaces.output.path)/$(params.subdirectory)"
      cleandir() {
        # Delete any existing contents of the repo directory if it exists.
        #
        # We don't just "rm -rf $CHECKOUT_DIR" because $CHECKOUT_DIR might be "/"
        # or the root of a mounted volume.
        if [[ -d "$CHECKOUT_DIR" ]] ; then
          # Delete non-hidden files and directories
          rm -rf "$CHECKOUT_DIR"/*
          # Delete files and directories starting with . but excluding ..
          rm -rf "$CHECKOUT_DIR"/.[!.]*
          # Delete files and directories starting with .. plus any other character
          rm -rf "$CHECKOUT_DIR"/..?*
        fi
      }
      if [[ "$(params.deleteExisting)" == "true" ]] ; then
        cleandir
      fi
      test -z "$(params.httpProxy)" || export HTTP_PROXY=$(params.httpProxy)
      test -z "$(params.httpsProxy)" || export HTTPS_PROXY=$(params.httpsProxy)
      test -z "$(params.noProxy)" || export NO_PROXY=$(params.noProxy)
      /ko-app/git-init \
        -url "$(params.url)" \
        -revision "$(params.revision)" \
        -refspec "$(params.refspec)" \
        -path "$CHECKOUT_DIR" \
        -sslVerify="$(params.sslVerify)" \
        -submodules="$(params.submodules)" \
        -depth "$(params.depth)"
      cd "$CHECKOUT_DIR"
      RESULT_SHA="$(git rev-parse HEAD | tr -d '\n')"
      EXIT_CODE="$?"
      if [ "$EXIT_CODE" != 0 ]
      then
        exit $EXIT_CODE
      fi
      # Make sure we don't add a trailing newline to the result!
      echo -n "$RESULT_SHA" > $(results.commit.path)
View File

@@ -0,0 +1,45 @@
# Generic clone-then-build pipeline used by all img-* PipelineRuns.
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
  # BUG FIX: was `kaniko` — every PipelineRun in _CI-CD references
  # `kaniko-pipeline`, so no run could resolve its pipelineRef. The new name
  # also no longer shadows the `kaniko` Task.
  name: kaniko-pipeline
spec:
  params:
  - name: git-url
  - name: git-revision
  - name: image-name
  - name: path-to-image-context
  - name: path-to-dockerfile
  workspaces:
  - name: git-source
  tasks:
  - name: fetch-from-git
    taskRef:
      name: git-clone
    params:
    - name: url
      value: $(params.git-url)
    - name: revision
      value: $(params.git-revision)
    - name: submodules
      # Tekton string param — a bare YAML boolean is rejected at validation.
      value: "false"
    # The malformed `- subdirectory:` entry (a param with no `name:` key) was
    # removed: cloning at the workspace root keeps the CONTEXT/DOCKERFILE
    # paths passed by the PipelineRuns (e.g. _CI-CD/<img>/Dockerfile)
    # resolvable as-is.
    workspaces:
    # BUG FIX: the git-clone Task declares its workspace as `output`, not
    # `source`; binding a non-existent name fails validation.
    - name: output
      workspace: git-source
  - name: build-image
    # BUG FIX: without runAfter both tasks start in parallel, so kaniko
    # raced the clone and built from an empty/partial checkout.
    runAfter:
    - fetch-from-git
    taskRef:
      name: kaniko
    params:
    - name: IMAGE
      value: $(params.image-name)
    - name: CONTEXT
      value: $(params.path-to-image-context)
    - name: DOCKERFILE
      value: $(params.path-to-dockerfile)
    workspaces:
    - name: source
      workspace: git-source
# If you want you can add a Task that uses the IMAGE_DIGEST from the kaniko task
# via $(tasks.build-image.results.IMAGE_DIGEST) - this was a feature we hadn't been
# able to fully deliver with the Image PipelineResource!

View File

@@ -0,0 +1,78 @@
# Kaniko build Task (derived from the Tekton catalog kaniko task v0.5):
# builds the workspace context into an image and pushes it to the registry.
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: kaniko
  labels:
    app.kubernetes.io/version: "0.5"
  annotations:
    tekton.dev/pipelines.minVersion: "0.17.0"
    tekton.dev/categories: Image Build
    tekton.dev/tags: image-build
    tekton.dev/displayName: "Build and upload container image using Kaniko"
    tekton.dev/platforms: "linux/arm64"
spec:
  description: >-
    This Task builds source into a container image using Google's kaniko tool.
    Kaniko doesn't depend on a Docker daemon and executes each
    command within a Dockerfile completely in userspace. This enables
    building container images in environments that can't easily or
    securely run a Docker daemon, such as a standard Kubernetes cluster.
  params:
  - name: IMAGE
    description: Name (reference) of the image to build.
  - name: DOCKERFILE
    description: Path to the Dockerfile to build.
    default: ./Dockerfile
  - name: CONTEXT
    description: The build context used by Kaniko.
    default: ./
  - name: EXTRA_ARGS
    type: array
    default: []
  - name: BUILDER_IMAGE
    description: The image on which builds will run (default is v1.5.1)
    default: gcr.io/kaniko-project/executor:v1.9.1
  workspaces:
  - name: source
    description: Holds the context and docker file
  - name: dockerconfig
    description: Includes a docker `config.json`
    optional: true
    mountPath: /kaniko/.docker
  results:
  - name: IMAGE-DIGEST
    description: Digest of the image just built.
  steps:
  # NOTE(review): the `bash` image below is untagged (implicit :latest) —
  # consider pinning. This step only dumps environment/mount info for debugging.
  - name: debug
    workingDir: $(workspaces.source.path)
    image: bash
    script: |
      #!/usr/bin/env bash
      export
      pwd
      mount
      ls -al
  - name: build-and-push
    workingDir: $(workspaces.source.path)
    image: $(params.BUILDER_IMAGE)
    args:
    - $(params.EXTRA_ARGS[*])
    - --dockerfile=$(params.DOCKERFILE)
    - --context=$(params.CONTEXT) # The user does not need to care the workspace and the source.
    - --destination=$(params.IMAGE)
    # Written to the Tekton results path so IMAGE-DIGEST is published above.
    - --digest-file=/tekton/results/IMAGE-DIGEST
    - --snapshotMode=redo
    - --single-snapshot
    - --use-new-run
    # NOTE(review): disables registry TLS verification — acceptable only
    # because cr.lan is an internal registry; do not copy to public pushes.
    - --skip-tls-verify
    - --cache
    - --cache-copy-layers
    - --cache-dir=/workspace/cache
    # kaniko assumes it is running as root, which means this example fails on platforms
    # that default to run containers as random uid (like OpenShift). Adding this securityContext
    # makes it explicit that it needs to run as root.
    securityContext:
      runAsUser: 0

90
_sys/README.md Normal file
View File

@@ -0,0 +1,90 @@
Upgrade:
```
export KV=1.26.0-00;
apt-mark unhold kubeadm kubectl kubelet;
apt install -y kubeadm=$KV;
```
```
kubeadm upgrade node #Other pines in the wood
```
```
#pine01
kubeadm upgrade plan --ignore-preflight-errors=CoreDNSUnsupportedPlugins;
kubeadm config images pull;
kubeadm upgrade apply ${KV/\-*/} --ignore-preflight-errors=CoreDNSUnsupportedPlugins --certificate-renewal=false; #sometimes true
```
```
apt install kubectl=$KV kubelet=$KV;
systemctl daemon-reload && systemctl restart kubelet;
apt-mark hold kubeadm kubectl kubelet;
echo 'You can now uncordon, der Geraet';
```
# Infos:
```
$ kubectl -n kube-system get cm kubeadm-config -o yaml
apiVersion: v1
data:
ClusterConfiguration: |
apiServer:
extraArgs:
authorization-mode: Node,RBAC
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: v1.23.15
networking:
dnsDomain: cluster.local
podSubnet: 172.23.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
ClusterStatus: |
apiEndpoints:
pine01:
advertiseAddress: 172.16.23.21
bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus
kind: ConfigMap
metadata:
creationTimestamp: "2021-01-20T14:55:12Z"
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:data:
.: {}
f:ClusterConfiguration: {}
f:ClusterStatus: {}
manager: kubeadm
operation: Update
time: "2021-01-20T14:55:12Z"
name: kubeadm-config
namespace: kube-system
resourceVersion: "441685033"
uid: c70fefd3-02c3-44c8-a37d-7b17ec445455
```
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml

2348
_sys/consul-values.yaml Normal file

File diff suppressed because it is too large Load Diff

122
_sys/coredns-1.26-x.yaml Normal file
View File

@@ -0,0 +1,122 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  # FIX: server-managed fields (uid, resourceVersion, generation,
  # creationTimestamp, deployment.kubernetes.io/revision) were dropped from
  # this manifest -- they belong to the live object only, and manifests that
  # carry a resourceVersion are rejected on create and can conflict on apply.
  labels:
    k8s-app: kube-dns
spec:
  replicas: 2
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
              - key: Corefile
                path: Corefile
            defaultMode: 420
      containers:
        - name: coredns
          image: registry.k8s.io/coredns/coredns:v1.9.3
          args:
            - '-conf'
            - /etc/coredns/Corefile
          ports:
            - name: dns
              containerPort: 53
              protocol: UDP
            - name: dns-tcp
              containerPort: 53
              protocol: TCP
            - name: metrics
              containerPort: 9153
              protocol: TCP
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          volumeMounts:
            - name: config-volume
              readOnly: true
              mountPath: /etc/coredns
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
            allowPrivilegeEscalation: false
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      # Default = use the node's resolv.conf, so CoreDNS forwards upstream
      # instead of resolving through itself.
      dnsPolicy: Default
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: coredns
      serviceAccount: coredns
      securityContext: {}
      # Prefer spreading the two replicas onto different nodes.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: k8s-app
                      operator: In
                      values:
                        - kube-dns
                topologyKey: kubernetes.io/hostname
      schedulerName: default-scheduler
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
      priorityClassName: system-cluster-critical
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600

View File

@@ -0,0 +1,202 @@
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
           lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        # Serve the local "lan." zone from the file shipped in this ConfigMap
        # (mounted at /etc/coredns by the coredns Deployment).
        file /etc/coredns/lan.db lan
        prometheus :9153
        forward . /etc/resolv.conf {
           max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
  lan.db: |
    ;lan. zone file
    $ORIGIN lan.
    @ 600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2022032201 7200 600 1209600 600
    ; FIX: NS RDATA must be a domain name, not an IP literal -- a bare
    ; "172.23.255.252" here is parsed as the relative name
    ; "172.23.255.252.lan.".  Point the NS record at the "ns" host below.
      3600 IN NS ns.lan.
    ns IN A 172.23.255.252
    salt IN A 192.168.10.2
    mqtt IN A 172.16.23.1
    www-proxy IN A 172.23.255.1
    git IN A 172.23.255.2
    postgresql IN A 172.23.255.4
    mariadb IN A 172.23.255.5
    redis IN A 172.23.255.6
    pihole IN A 172.23.255.253
    adm IN CNAME adm01.wks.
    ; HTTP vhosts all terminate at the ingress load balancer.
    prometheus IN CNAME www-proxy
    alertmanager IN CNAME www-proxy
    stats IN CNAME www-proxy
    cr-ui IN CNAME www-proxy
    apt IN CNAME www-proxy
    apt-cache IN CNAME www-proxy
    nodered IN CNAME www-proxy
    foto IN CNAME www-proxy
    musik IN CNAME www-proxy
    hassio IN CNAME www-proxy
    hassio-conf IN CNAME www-proxy
    git-ui IN CNAME www-proxy
    grav IN CNAME www-proxy
    tekton IN CNAME www-proxy
    nc IN CNAME www-proxy
    dolibarr IN CNAME www-proxy
    auth IN CNAME www-proxy
    public.auth IN CNAME www-proxy
    secure.auth IN CNAME www-proxy
    ; Registry / logging endpoints live on the admin host.
    docker-registry IN CNAME adm
    cr IN CNAME adm
    dr-mirror IN CNAME adm
    log IN CNAME adm
---
apiVersion: v1
kind: Service
metadata:
  name: dns-ext
  namespace: kube-system
spec:
  ports:
    - name: dns-udp
      protocol: UDP
      port: 53
      targetPort: 53
  selector:
    k8s-app: kube-dns
  # Publishes cluster DNS to the LAN on a fixed load-balancer address.
  # NOTE(review): only UDP/53 is exposed here; TCP/53 (truncated-response
  # retries, large answers) is not -- confirm that is intentional.
  type: LoadBalancer
  loadBalancerIP: 172.23.255.252
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  # NOTE(review): no namespace is set on this copy, so it lands wherever
  # `kubectl apply -n ...` points it; the ConfigMap above is explicitly in
  # kube-system -- confirm this Deployment is applied there too.
  labels:
    k8s-app: kube-dns
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kube-dns
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      containers:
        - args:
            - -conf
            - /etc/coredns/Corefile
          image: registry.k8s.io/coredns/coredns:v1.9.3
          imagePullPolicy: IfNotPresent
          # FIX: `livenessProbe` was declared twice on this container with
          # identical settings; duplicate mapping keys are invalid YAML and
          # rejected by kubectl's strict decoding.  One copy kept.
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /health
              port: 8080
              scheme: HTTP
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          name: coredns
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /ready
              port: 8181
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources:
            limits:
              memory: 170Mi
            requests:
              cpu: 100m
              memory: 70Mi
          # FIX: `allowPrivilegeEscalation` appeared twice in this
          # securityContext; the duplicate is removed.
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - NET_BIND_SERVICE
              drop:
                - all
            readOnlyRootFilesystem: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /etc/coredns
              name: config-volume
              readOnly: true
      dnsPolicy: Default
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: coredns
      serviceAccountName: coredns
      terminationGracePeriodSeconds: 30
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
      volumes:
        - configMap:
            defaultMode: 420
            items:
              - key: Corefile
                path: Corefile
              - key: lan.db
                path: lan.db
            name: coredns
          name: config-volume

View File

@@ -0,0 +1,47 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: descheduler-cronjob
  namespace: kube-system
spec:
  # NOTE(review): "*/50" in the minute field fires at minute 0 AND minute 50
  # of every hour -- not "every 50 minutes".  Confirm twice-hourly runs are
  # intended.
  schedule: "*/50 * * * *"
  concurrencyPolicy: "Forbid"
  jobTemplate:
    spec:
      template:
        metadata:
          name: descheduler-pod
        spec:
          priorityClassName: system-cluster-critical
          containers:
            - name: descheduler
              # FIX: k8s.gcr.io is frozen and merely redirects; pull from its
              # successor registry.k8s.io (same image and tag).
              image: registry.k8s.io/descheduler/descheduler:v0.25.0
              volumeMounts:
                - mountPath: /policy-dir
                  name: policy-volume
              command:
                - "/bin/descheduler"
              args:
                - "--policy-config-file"
                - "/policy-dir/policy.yaml"
                - "--v"
                - "3"
              resources:
                requests:
                  cpu: "500m"
                  memory: "256Mi"
              securityContext:
                allowPrivilegeEscalation: false
                capabilities:
                  drop:
                    - ALL
                privileged: false
                readOnlyRootFilesystem: true
                runAsNonRoot: false
          restartPolicy: "Never"
          serviceAccountName: descheduler-sa
          volumes:
            - name: policy-volume
              configMap:
                name: descheduler-policy-configmap

View File

@@ -0,0 +1,34 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: descheduler-policy-configmap
  namespace: kube-system
data:
  # v1alpha1 DeschedulerPolicy, consumed via --policy-config-file by the
  # descheduler CronJob in this directory.
  policy.yaml: |
    apiVersion: "descheduler/v1alpha1"
    kind: "DeschedulerPolicy"
    # Evict at most one pod per node per run.
    maxNoOfPodsToEvictPerNode: 1
    strategies:
      "RemoveDuplicates":
        enabled: true
      "RemovePodsViolatingInterPodAntiAffinity":
        enabled: true
      "LowNodeUtilization":
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            # Nodes below all of these (%) count as underutilized...
            thresholds:
              "cpu": 20
              "memory": 40
              "pods": 20
            # ...and receive pods evicted from nodes above any of these.
            targetThresholds:
              "cpu": 50
              "memory": 60
              "pods": 20
            #nodeFit: true
      "RemovePodsViolatingTopologySpreadConstraint":
        enabled: true
        params:
          includeSoftConstraints: false

View File

@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-config
  #namespace: nginx-ingress
  namespace: default
data:
  # Ingress-controller tuning.  NOTE(review): ConfigMap key names differ
  # between the kubernetes/ingress-nginx and nginxinc controllers -- confirm
  # these match the controller actually installed (helm, per repo history).
  proxy-connect-timeout: "10s"
  proxy-read-timeout: "10s"
  # "0" disables the client request-body size limit.
  client-max-body-size: "0"

205
_sys/kube-flannel.yml Normal file
View File

@@ -0,0 +1,205 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  # CNI conflist copied to /etc/cni/net.d/10-flannel.conflist by the
  # install-cni init container of the DaemonSet below.
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  # "Network" matches the cluster's kubeadm podSubnet (172.23.0.0/16).
  net-conf.json: |
    {
      "Network": "172.23.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@@ -1,53 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-router
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- nodes
- endpoints
verbs:
- list
- get
- watch
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- list
- get
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-router
subjects:
- kind: ServiceAccount
name: kube-router
namespace: kube-system

View File

@@ -1,137 +0,0 @@
#https://gist.github.com/jjo/8c616aaf795284bb5b85d02143745f63
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"hairpinMode":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-router
namespace: kube-system
labels:
k8s-app: kube-router
spec:
selector:
matchLabels:
k8s-app: kube-router
template:
metadata:
labels:
k8s-app: kube-router
spec:
priorityClassName: system-node-critical
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--bgp-graceful-restart=true"
- "--hairpin-mode=true"
- "--enable-cni=true"
- "--advertise-cluster-ip=true"
- "--advertise-external-ip=true"
- "--advertise-loadbalancer-ip=true"
- "--kubeconfig=/var/lib/kube-router/kubeconfig"
#- "--master=https://192.168.10.13:6443"
securityContext:
privileged: true
imagePullPolicy: Always
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /var/lib/kube-router/kubeconfig
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
initContainers:
- name: install-cni
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kube-router-cfg
mountPath: /etc/kube-router
hostNetwork: true
serviceAccountName: kube-router
serviceAccount: kube-router
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg
- name: kubeconfig
hostPath:
path: /var/lib/kube-router/kubeconfig
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate

View File

@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: loki-data
spec:
  storageClassName: "nfs-ssd-ebin02"
  nfs:
    path: /data/raid1-ssd/k8s-data/loki-data
    server: ebin02
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  # Keep the NFS data if the claim is deleted.
  persistentVolumeReclaimPolicy: Retain
  # Pre-bind this PV to loki's StatefulSet claim so no other PVC grabs it.
  claimRef:
    kind: PersistentVolumeClaim
    name: storage-loki-0
    namespace: monitoring

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: minio-openwrt
type: Opaque
data:
  # NOTE(review): all three base64 values decode with a trailing newline
  # (e.g. username decodes to "openwrt\n"), typical of `echo ... | base64`
  # without -n.  Confirm the consumer strips it, or re-encode with
  # `printf %s ... | base64`.
  username: b3BlbndydAo=
  password: ZUZWbmVnOEkwOE1zRTN0Q2VCRFB4c011OU0yVjJGdnkK
  endpoint: aHR0cHM6Ly9taW5pby5saXZlLWluZnJhLnN2Yy5jbHVzdGVyLmxvY2FsOjk0NDMK

View File

@@ -0,0 +1,36 @@
---
# One StorageClass per NFS export; each `provisioner:` value must match the
# PROVISIONER_NAME env of the corresponding provisioner Deployment.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-ssd
provisioner: nfs-ssd # must match the deployment's env PROVISIONER_NAME
parameters:
  # Delete the backing directory outright instead of renaming it archived-*.
  archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-ssd-ebin01
provisioner: nfs-ssd-ebin01 # must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-hdd-ebin01
provisioner: nfs-hdd-ebin01 # must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-ssd-ebin02
provisioner: nfs-ssd-ebin02 # must match the deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Retain

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-hdd-ebin01
  namespace: live-infra
  labels:
    app: nfs-hdd-ebin01
    service: nfs
spec:
  replicas: 1
  # Recreate: never run two provisioner instances against the same export.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-hdd-ebin01
  template:
    metadata:
      labels:
        app: nfs-hdd-ebin01
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-hdd-ebin01
          # FIX: k8s.gcr.io is frozen and redirects; pull from its successor
          # registry.k8s.io (same image and tag).
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must equal the `provisioner:` of the nfs-hdd-ebin01 StorageClass.
            - name: PROVISIONER_NAME
              value: nfs-hdd-ebin01
            - name: NFS_SERVER
              value: ebin01
            - name: NFS_PATH
              value: /data/k8s-data-hdd
      # Keep the NFS provisioners on distinct nodes.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: service
                    operator: In
                    values:
                      - nfs
              topologyKey: kubernetes.io/hostname
      volumes:
        - name: nfs-client-root
          nfs:
            server: ebin01
            path: /data/k8s-data-hdd

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-ssd-ebin01
  namespace: live-infra
  labels:
    app: nfs-ssd-ebin01
    service: nfs
spec:
  replicas: 1
  # Recreate: never run two provisioner instances against the same export.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-ssd-ebin01
  template:
    metadata:
      labels:
        app: nfs-ssd-ebin01
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-ssd-ebin01
          # FIX: k8s.gcr.io is frozen and redirects; pull from its successor
          # registry.k8s.io (same image and tag).
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must equal the `provisioner:` of the nfs-ssd-ebin01 StorageClass.
            - name: PROVISIONER_NAME
              value: nfs-ssd-ebin01
            - name: NFS_SERVER
              value: ebin01
            - name: NFS_PATH
              value: /data/raid1-ssd/k8s-data
      # Keep the NFS provisioners on distinct nodes.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: service
                    operator: In
                    values:
                      - nfs
              topologyKey: kubernetes.io/hostname
      volumes:
        - name: nfs-client-root
          nfs:
            server: ebin01
            path: /data/raid1-ssd/k8s-data

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-ssd-ebin02
  namespace: live-infra
  labels:
    app: nfs-ssd-ebin02
    service: nfs
spec:
  replicas: 1
  # Recreate: never run two provisioner instances against the same export.
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-ssd-ebin02
  template:
    metadata:
      labels:
        app: nfs-ssd-ebin02
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-ssd-ebin02
          # FIX: k8s.gcr.io is frozen and redirects; pull from its successor
          # registry.k8s.io (same image and tag).
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            # Must equal the `provisioner:` of the nfs-ssd-ebin02 StorageClass.
            - name: PROVISIONER_NAME
              value: nfs-ssd-ebin02
            - name: NFS_SERVER
              value: ebin02
            - name: NFS_PATH
              value: /data/raid1-ssd/k8s-data
      # Keep the NFS provisioners on distinct nodes.
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: service
                    operator: In
                    values:
                      - nfs
              topologyKey: kubernetes.io/hostname
      volumes:
        - name: nfs-client-root
          nfs:
            server: ebin02
            path: /data/raid1-ssd/k8s-data

View File

@@ -0,0 +1,65 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,19 @@
# Environment namespaces: live/test, each split into application (-env) and
# infrastructure (-infra) halves.
apiVersion: v1
kind: Namespace
metadata:
  name: live-env
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-env
---
apiVersion: v1
kind: Namespace
metadata:
  name: live-infra
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-infra

View File

@@ -0,0 +1,37 @@
# syntax=docker/dockerfile:1
FROM debian:bullseye
# Build-time only: silence debconf prompts without baking the setting into
# the runtime environment (hence ARG, not ENV).
ARG DEBIAN_FRONTEND=noninteractive
# Build-only toolchain, purged again in the cleanup layer below.
# FIX: bullseye no longer ships a "python-dev" package; genienlp is Python 3,
# so the dev headers come from python3-dev.
ARG DEVPKGS="git make cmake gcc g++ python3-dev libsqlcipher-dev"
# Route apt through the LAN cache, install runtime + build packages, and
# fetch the pinned almond-cloud sources in one layer.
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
    sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
    apt-get update && \
    apt-get -y install --no-install-recommends ${DEVPKGS} python3-pip python3-torch python3-dateutil python3-filelock python3-tqdm python3-pyparsing python3-joblib \
        python3-portalocker python3-click python3-packaging python3-regex python3-docopt python3-systemd \
        libsystemd-dev graphicsmagick zip unzip bubblewrap sqlcipher gettext nodejs npm && \
    pip3 install --no-cache-dir tensorboardX && \
    pip3 install --no-cache-dir 'git+https://github.com/stanford-oval/genienlp@0969c6ea74376b20982c0c8bea9a4732547b15cb#egg=genienlp' && \
    git clone --depth=1 --branch v1.99.0 https://github.com/stanford-oval/almond-cloud.git /opt/almond-cloud
# Unprivileged service account the app runs as.
RUN useradd -ms /bin/bash -r almond-cloud && id almond-cloud
WORKDIR /opt/almond-cloud/
# npm must build node-sqlite3 from source against the system sqlcipher
# (.npmrc knobs + CPLUS_INCLUDE_PATH).  The tree is handed to the build user
# only for the install step and returned to root afterwards.
# FIX: the original used "echo ...; cat package.json" -- the ';' silently
# discarded failures of the preceding chown/.npmrc steps; chain is now '&&'
# throughout.
RUN chown -R almond-cloud:almond-cloud /opt/almond-cloud && \
    echo "build_from_source = true" > ~almond-cloud/.npmrc && \
    echo "sqlite = external" >> ~almond-cloud/.npmrc && \
    echo "sqlite_libname = sqlcipher" >> ~almond-cloud/.npmrc && \
    echo "======== package.json =============" && cat package.json && \
    su almond-cloud -c 'CPLUS_INCLUDE_PATH=/usr/include/sqlcipher npm install' && \
    chown -R root:root /opt/almond-cloud
COPY --chown=almond-cloud:almond-cloud start.sh /opt/almond-cloud/
# Cleanup: purge the toolchain again.
# NOTE(review): this runs in a later layer, so it does NOT shrink the image
# (the packages still exist in the earlier layer); a multi-stage build would
# be needed to actually reclaim the space.
RUN apt-get remove -y --purge ${DEVPKGS} && \
    apt-get autoremove --purge -y && \
    apt-get clean -y && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/* /root/.cache
USER almond-cloud
WORKDIR /home/almond-cloud
ENTRYPOINT ["/opt/almond-cloud/start.sh"]

View File

@@ -0,0 +1,67 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: almond-cloud
spec:
  selector:
    matchLabels:
      app: almond-cloud
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: almond-cloud
    spec:
      containers:
        - image: cr.lan/almond-cloud
          name: almond-cloud
          imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
          volumeMounts:
            - name: almond-cloud-data
              mountPath: /home/almond-cloud
      volumes:
        - name: almond-cloud-data
          persistentVolumeClaim:
            claimName: almond-cloud-data
---
apiVersion: v1
kind: Service
metadata:
  name: almond-cloud
spec:
  ports:
    # NOTE(review): with no targetPort, traffic goes to pod port 3000, while
    # the container above declares containerPort 80 (named "http").  Confirm
    # which port almond-cloud actually listens on and align the two.
    - name: http
      port: 3000
  selector:
    app: almond-cloud
---
# FIX: networking.k8s.io/v1beta1 Ingress was removed in Kubernetes 1.22;
# rewritten in the networking.k8s.io/v1 schema (same host routing).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: almond-cloud
spec:
  rules:
    - host: almond.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: almond-cloud
                port:
                  name: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: almond-cloud-data
spec:
  storageClassName: nfs-ssd-ebin01
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 6Gi

3
apps/almond-cloud/start.sh Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
# Launch the almond-cloud server with a bounded V8 old-space heap.
# NODE_MAX_OLD_SPACE_SIZE (MiB) may be supplied by the environment; default 500.
: "${NODE_MAX_OLD_SPACE_SIZE:=500}"
# exec replaces this shell so node becomes PID 1 and receives signals directly.
exec node --max_old_space_size="${NODE_MAX_OLD_SPACE_SIZE}" /opt/almond-cloud/main.js "$@"

View File

@@ -0,0 +1,77 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-almond-cloud
spec:
type: image
params:
- name: url
value: cr.lan/almond-cloud
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-almond-cloud
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/almond-cloud/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/almond-cloud
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-almond-cloud-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-almond-cloud
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-almond-cloud

View File

@@ -1,17 +1,12 @@
FROM debian:stable-slim
FROM cr.lan/debian-stable
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
apt-cacher-ng procps && \
apt-cacher-ng && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
RUN echo 'PassThroughPattern: .*' >> /etc/apt-cacher-ng/acng.conf
CMD chown apt-cacher-ng:apt-cacher-ng /var/cache/apt-cacher-ng
EXPOSE 3142
USER apt-cacher-ng
#CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
CMD /usr/sbin/apt-cacher-ng -c /etc/apt-cacher-ng pidfile=/var/run/apt-cacher-ng/pid SocketPath=/var/run/apt-cacher-ng/socket foreground=1

View File

@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: apt-cacher-ng
image: docker-registry.lan/apt-cacher-ng:arm64
image: cr.lan/apt-cacher-ng:latest
ports:
- containerPort: 3142
protocol: TCP
@@ -27,10 +27,10 @@ spec:
name: data
resources:
requests:
memory: "24Mi"
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
memory: "192Mi"
cpu: "100m"
volumes:
- name: data
@@ -52,29 +52,54 @@ spec:
selector:
app: apt-cacher-ng
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apt-cacher-ng
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: apt-cache.lan
http:
paths:
- backend:
serviceName: apt-cacher-ng
servicePort: 3142
- path: /
pathType: Prefix
backend:
service:
name: apt-cacher-ng
port:
number: 3142
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume
#annotations:
# volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin02
volumeName: apt-cacher-ng
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: apt-cacher-ng
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/apt-cacher-ng
server: ebin02
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: apt-cacher-volume
namespace: live-infra

View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-apt-cacher-ng
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/apt-cacher-ng
- name: path-to-dockerfile
value: apps/apt-cacher-ng/Dockerfile
- name: image-name
value: cr.lan/apt-cacher-ng
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/apt-cacher-ng

View File

@@ -1,73 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apt-cacher-ng-test
namespace: test
labels:
app: apt-cacher-ng-test
spec:
replicas: 1
selector:
matchLabels:
app: apt-cacher-ng-test
strategy:
type: Recreate
template:
metadata:
labels:
app: apt-cacher-ng-test
spec:
containers:
- name: apt-cacher-ng-test
image: docker-registry.lan/apt-cacher-ng:arm64
imagePullPolicy: Always
ports:
- containerPort: 3142
protocol: TCP
volumeMounts:
- mountPath: /var/cache/apt-cacher-ng
name: data
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 128Mi
cpu: 100m
volumes:
- name: data
persistentVolumeClaim:
claimName: apt-cacher-volume-test
#---
#apiVersion: v1
#kind: Service
#metadata:
# name: apt-cacher-ng
# labels:
# app: apt-cacher-ng
#spec:
# type: LoadBalancer
# loadBalancerIP: 172.23.255.1
# ports:
# - name: apt-cacher-ng
# port: 3142
# targetPort: 3142
# protocol: TCP
# selector:
# app: apt-cacher-ng
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume-test
namespace: test
#annotations:
# volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: csi-s3-slow
#storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi

View File

@@ -1,464 +0,0 @@
# Argo Workflows install manifest (upstream auto-generated copy; this hunk
# shows the file being deleted from the repo).
# NOTE(review): indentation restored — the extracted copy was flattened,
# which is invalid YAML.
# NOTE(review): the CRDs use apiextensions.k8s.io/v1beta1, which was removed
# in Kubernetes 1.22 — this manifest only applies on clusters <= 1.21.
# NOTE(review): images are pinned to :latest; pin a release tag for
# reproducible deployments.
apiVersion: v1
kind: Namespace
metadata:
  name: argo
---
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterworkflowtemplates.argoproj.io
spec:
  group: argoproj.io
  names:
    kind: ClusterWorkflowTemplate
    listKind: ClusterWorkflowTemplateList
    plural: clusterworkflowtemplates
    shortNames:
    - clusterwftmpl
    - cwft
    singular: clusterworkflowtemplate
  scope: Cluster
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: cronworkflows.argoproj.io
spec:
  group: argoproj.io
  names:
    kind: CronWorkflow
    listKind: CronWorkflowList
    plural: cronworkflows
    shortNames:
    - cwf
    - cronwf
    singular: cronworkflow
  scope: Namespaced
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: workflows.argoproj.io
spec:
  # Extra kubectl columns: workflow phase and start time.
  additionalPrinterColumns:
  - JSONPath: .status.phase
    description: Status of the workflow
    name: Status
    type: string
  - JSONPath: .status.startedAt
    description: When the workflow was started
    format: date-time
    name: Age
    type: date
  group: argoproj.io
  names:
    kind: Workflow
    listKind: WorkflowList
    plural: workflows
    shortNames:
    - wf
    singular: workflow
  scope: Namespaced
  subresources: {}
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: workflowtemplates.argoproj.io
spec:
  group: argoproj.io
  names:
    kind: WorkflowTemplate
    listKind: WorkflowTemplateList
    plural: workflowtemplates
    shortNames:
    - wftmpl
    singular: workflowtemplate
  scope: Namespaced
  version: v1alpha1
  versions:
  - name: v1alpha1
    served: true
    storage: true
---
# Service account for the workflow controller.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo
---
# Service account for the argo-server UI/API.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: argo-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: argo-role
rules:
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
---
# Aggregated ClusterRoles: grant Argo CRD access to the built-in
# admin / edit / view roles via label aggregation.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
  name: argo-aggregate-to-admin
rules:
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflows/finalizers
  - workflowtemplates
  - workflowtemplates/finalizers
  - cronworkflows
  - cronworkflows/finalizers
  - clusterworkflowtemplates
  - clusterworkflowtemplates/finalizers
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
  name: argo-aggregate-to-edit
rules:
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflows/finalizers
  - workflowtemplates
  - workflowtemplates/finalizers
  - cronworkflows
  - cronworkflows/finalizers
  - clusterworkflowtemplates
  - clusterworkflowtemplates/finalizers
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: argo-aggregate-to-view
rules:
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflows/finalizers
  - workflowtemplates
  - workflowtemplates/finalizers
  - cronworkflows
  - cronworkflows/finalizers
  - clusterworkflowtemplates
  - clusterworkflowtemplates/finalizers
  verbs:
  - get
  - list
  - watch
---
# Permissions needed by the workflow controller itself.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: argo-cluster-role
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - pods/exec
  verbs:
  - create
  - get
  - list
  - watch
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  verbs:
  - create
  - delete
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflows/finalizers
  verbs:
  - get
  - list
  - watch
  - update
  - patch
  - delete
  - create
- apiGroups:
  - argoproj.io
  resources:
  - workflowtemplates
  - workflowtemplates/finalizers
  - clusterworkflowtemplates
  - clusterworkflowtemplates/finalizers
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - list
- apiGroups:
  - argoproj.io
  resources:
  - cronworkflows
  - cronworkflows/finalizers
  verbs:
  - get
  - list
  - watch
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - create
  - get
  - delete
---
# Permissions needed by the argo-server UI/API.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: argo-server-cluster-role
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - watch
  - list
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - pods/exec
  - pods/log
  verbs:
  - get
  - list
  - watch
  - delete
- apiGroups:
  - argoproj.io
  resources:
  - workflows
  - workflowtemplates
  - cronworkflows
  - clusterworkflowtemplates
  verbs:
  - create
  - get
  - list
  - watch
  - update
  - patch
  - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: argo-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: argo-role
subjects:
- kind: ServiceAccount
  name: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: argo-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argo-cluster-role
subjects:
- kind: ServiceAccount
  name: argo
  namespace: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: argo-server-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: argo-server-cluster-role
subjects:
- kind: ServiceAccount
  name: argo-server
  namespace: argo
---
# Intentionally empty: the controller reads its config from this ConfigMap.
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
---
apiVersion: v1
kind: Service
metadata:
  name: argo-server
spec:
  ports:
  - name: web
    port: 2746
    targetPort: 2746
  selector:
    app: argo-server
---
apiVersion: v1
kind: Service
metadata:
  name: workflow-controller-metrics
spec:
  ports:
  - name: metrics
    port: 9090
    protocol: TCP
    targetPort: 9090
  selector:
    app: workflow-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: argo-server
spec:
  selector:
    matchLabels:
      app: argo-server
  template:
    metadata:
      labels:
        app: argo-server
    spec:
      containers:
      - args:
        - server
        image: argoproj/argocli:latest
        name: argo-server
        ports:
        - containerPort: 2746
          name: web
        readinessProbe:
          httpGet:
            path: /
            port: 2746
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 20
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: argo-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: workflow-controller
spec:
  selector:
    matchLabels:
      app: workflow-controller
  template:
    metadata:
      labels:
        app: workflow-controller
    spec:
      containers:
      - args:
        - --configmap
        - workflow-controller-configmap
        - --executor-image
        - argoproj/argoexec:latest
        command:
        - workflow-controller
        image: argoproj/workflow-controller:latest
        name: workflow-controller
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: argo

7
apps/argocd/README.md Normal file
View File

@@ -0,0 +1,7 @@
FROM: https://tanzu.vmware.com/developer/guides/ci-cd/argocd-gs/
# kubectl apply -f namespace.yaml
# (superseded — use the local install.yaml below) kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# kubectl apply -n argocd -f install.yaml (needs changes for ARM builds)
# kubectl apply -n argocd -f ingress.yaml

18
apps/argocd/ingress.yaml Normal file
View File

@@ -0,0 +1,18 @@
#https://argoproj.github.io/argo-cd/operator-manual/ingress/#kubernetesingress-nginx
# NOTE(review): migrated from extensions/v1beta1 (Ingress API removed in
# Kubernetes 1.22; this repo's cluster is on 1.25+) to networking.k8s.io/v1,
# which requires pathType and the service-style backend. Indentation restored
# — the extracted copy was flattened, which is invalid YAML.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server
  namespace: argocd
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    # TLS is terminated by argocd-server itself; pass the raw TLS stream through.
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
  rules:
    - host: argocd.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  name: https

2726
apps/argocd/install.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: test
name: argocd

3
apps/authelia/README.md Normal file
View File

@@ -0,0 +1,3 @@
### Apply new config
$ kubectl -n live-infra create configmap authelia-config --from-file=configMaps/ -o yaml --dry-run=client | kubectl apply -f -

View File

@@ -0,0 +1,678 @@
# yamllint disable rule:comments-indentation
---
###############################################################################
# Authelia Configuration #
###############################################################################
## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to
## the system certificates store.
## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem.
certificates_directory: /etc/pki/chain
## The theme to display: light, dark, grey, auto.
theme: dark
## The secret used to generate JWT tokens when validating user identity by email confirmation. JWT Secret can also be
## set using a secret: https://www.authelia.com/docs/configuration/secrets.html
jwt_secret: hAnFzapSCusyF2W83JAg6PRqc6v7iQvN7sP3PQ70HAbPBshJzAMz
## Default redirection URL
##
## If user tries to authenticate without any referer, Authelia does not know where to redirect the user to at the end
## of the authentication process. This parameter allows you to specify the default redirection URL Authelia will use
## in such a case.
##
## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication.
default_redirection_url: http://nc.lan
##
## Server Configuration
##
server:
## The address to listen on.
host: 0.0.0.0
## The port to listen on.
port: 9091
## Set the single level path Authelia listens on.
## Must be alphanumeric chars and should not contain any slashes.
path: ""
## Buffers usually should be configured to be the same value.
## Explanation at https://www.authelia.com/docs/configuration/server.html
## Read buffer size adjusts the server's max incoming request size in bytes.
## Write buffer size does the same for outgoing responses.
read_buffer_size: 4096
write_buffer_size: 4096
## Enables the pprof endpoint.
enable_pprof: false
## Enables the expvars endpoint.
enable_expvars: false
## Disables writing the health check vars to /app/.healthcheck.env which makes healthcheck.sh return exit code 0.
## This is disabled by default if either /app/.healthcheck.env or /app/healthcheck.sh do not exist.
disable_healthcheck: false
## Authelia by default doesn't accept TLS communication on the server port. This section overrides this behaviour.
tls:
## The path to the DER base64/PEM format private key.
#key: "/etc/pki/private.key"
key: ""
## The path to the DER base64/PEM format public certificate.
#certificate: "/etc/pki/auth.lan.crt"
certificate: ""
##
## Log Configuration
##
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
## Format the logs are written as: json, text.
format: text
## File path where the logs will be written. If not set logs are written to stdout.
file_path: "" #/config-nfs/authelia.log
## Whether to also log to stdout when a log_file_path is defined.
# keep_stdout: false
##
## TOTP Configuration
##
## Parameters used for TOTP generation.
totp:
## The issuer name displayed in the Authenticator application of your choice
## See: https://github.com/google/google-authenticator/wiki/Key-Uri-Format for more info on issuer names
issuer: auth.lan
## The period in seconds a one-time password is current for. Changing this will require all users to register
## their TOTP applications again. Warning: before changing period read the docs link below.
period: 30
## The skew controls number of one-time passwords either side of the current one that are valid.
## Warning: before changing skew read the docs link below.
skew: 1
## See: https://www.authelia.com/docs/configuration/one-time-password.html#period-and-skew to read the documentation.
##
## Duo Push API Configuration
##
## Parameters used to contact the Duo API. Those are generated when you protect an application of type
## "Partner Auth API" in the management panel.
duo_api:
hostname: api.auth.lan
integration_key: AUTHELIA
## Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
secret_key: 8Jp5e822KP
##
## Authentication Backend Provider Configuration
##
## Used for verifying user passwords and retrieve information such as email address and groups users belong to.
##
## The available providers are: `file`, `ldap`. You must use only one of these providers.
authentication_backend:
## Disable both the HTML element and the API for reset password functionality.
disable_reset_password: false
## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation.
## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will
## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP.
## To force update on every request you can set this to '0' or 'always', this will increase processor demand.
## See the below documentation for more information.
## Duration Notation docs: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
## Refresh Interval docs: https://www.authelia.com/docs/configuration/authentication/ldap.html#refresh-interval
refresh_interval: 5m
##
## LDAP (Authentication Provider)
##
## This is the recommended Authentication Provider in production
## because it allows Authelia to offload the stateful operations
## onto the LDAP service.
# ldap:
# ## The LDAP implementation, this affects elements like the attribute utilised for resetting a password.
# ## Acceptable options are as follows:
# ## - 'activedirectory' - For Microsoft Active Directory.
# ## - 'custom' - For custom specifications of attributes and filters.
# ## This currently defaults to 'custom' to maintain existing behaviour.
# ##
# ## Depending on the option here certain other values in this section have a default value, notably all of the
# ## attribute mappings have a default value that this config overrides, you can read more about these default values
# ## at https://www.authelia.com/docs/configuration/authentication/ldap.html#defaults
# implementation: custom
#
# ## The url to the ldap server. Format: <scheme>://<address>[:<port>].
# ## Scheme can be ldap or ldaps in the format (port optional).
# url: ldap://127.0.0.1
#
# ## The dial timeout for LDAP.
# timeout: 5s
#
# ## Use StartTLS with the LDAP connection.
# start_tls: false
#
# tls:
# ## Server Name for certificate validation (in case it's not set correctly in the URL).
# # server_name: ldap.example.com
#
# ## Skip verifying the server certificate (to allow a self-signed certificate).
# ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
# ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
# skip_verify: false
#
# ## Minimum TLS version for either Secure LDAP or LDAP StartTLS.
# minimum_version: TLS1.2
#
# ## The distinguished name of the container searched for objects in the directory information tree.
# ## See also: additional_users_dn, additional_groups_dn.
# base_dn: dc=example,dc=com
#
# ## The attribute holding the username of the user. This attribute is used to populate the username in the session
# ## information. It was introduced due to #561 to handle case insensitive search queries. For you information,
# ## Microsoft Active Directory usually uses 'sAMAccountName' and OpenLDAP usually uses 'uid'. Beware that this
# ## attribute holds the unique identifiers for the users binding the user and the configuration stored in database.
# ## Therefore only single value attributes are allowed and the value must never be changed once attributed to a user
# ## otherwise it would break the configuration for that user. Technically, non-unique attributes like 'mail' can also
# ## be used but we don't recommend using them, we instead advise to use the attributes mentioned above
# ## (sAMAccountName and uid) to follow https://www.ietf.org/rfc/rfc2307.txt.
# # username_attribute: uid
#
# ## The additional_users_dn is prefixed to base_dn and delimited by a comma when searching for users.
# ## i.e. with this set to OU=Users and base_dn set to DC=a,DC=com; OU=Users,DC=a,DC=com is searched for users.
# additional_users_dn: ou=users
#
# ## The users filter used in search queries to find the user profile based on input filled in login form.
# ## Various placeholders are available in the user filter which you can read about in the documentation which can
# ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#users-filter-replacements
# ##
# ## Recommended settings are as follows:
# ## - Microsoft Active Directory: (&({username_attribute}={input})(objectCategory=person)(objectClass=user))
# ## - OpenLDAP:
# ## - (&({username_attribute}={input})(objectClass=person))
# ## - (&({username_attribute}={input})(objectClass=inetOrgPerson))
# ##
# ## To allow sign in both with username and email, one can use a filter like
# ## (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
# users_filter: (&({username_attribute}={input})(objectClass=person))
#
# ## The additional_groups_dn is prefixed to base_dn and delimited by a comma when searching for groups.
# ## i.e. with this set to OU=Groups and base_dn set to DC=a,DC=com; OU=Groups,DC=a,DC=com is searched for groups.
# additional_groups_dn: ou=groups
#
# ## The groups filter used in search queries to find the groups based on relevant authenticated user.
# ## Various placeholders are available in the groups filter which you can read about in the documentation which can
# ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#groups-filter-replacements
# ##
# ## If your groups use the `groupOfUniqueNames` structure use this instead:
# ## (&(uniqueMember={dn})(objectClass=groupOfUniqueNames))
# groups_filter: (&(member={dn})(objectClass=groupOfNames))
#
# ## The attribute holding the name of the group.
# # group_name_attribute: cn
#
# ## The attribute holding the mail address of the user. If multiple email addresses are defined for a user, only the
# ## first one returned by the LDAP server is used.
# # mail_attribute: mail
#
# ## The attribute holding the display name of the user. This will be used to greet an authenticated user.
# # display_name_attribute: displayName
#
# ## The username and password of the admin user.
# user: cn=admin,dc=example,dc=com
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: password
#
##
## File (Authentication Provider)
##
## With this backend, the users database is stored in a file which is updated when users reset their passwords.
## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia
## to be scaled to more than one instance. The options under 'password' have sane defaults, and as it has security
## implications it is highly recommended you leave the default values. Before considering changing these settings
## please read the docs page below:
## https://www.authelia.com/docs/configuration/authentication/file.html#password-hash-algorithm-tuning
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
file:
path: /config-nfs/users_database.yml
password:
algorithm: sha512
salt_length: 16
#algorithm: argon2id
#iterations: 1
#key_length: 32
#memory: 32
#parallelism: 4
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 10.10.0.0/16
- 172.23.0.0/16
- 172.16.23.0/24
- 192.168.10.0/24
- name: VPN
networks: 10.14.0.0/27
rules:
## Rules applied to everyone
- domain: public.auth.lan
policy: bypass
- domain: secure.auth.lan
policy: one_factor
## Network based rule, if not provided any network matches.
networks:
- internal
- VPN
- domain:
- secure.auth.lan
- private.auth.lan
policy: two_factor
- domain: singlefactor.auth.lan
policy: one_factor
## Rules applied to 'admins' group
- domain: "mx2.mail.example.com"
subject: "group:admins"
policy: deny
- domain: "*.auth.lan"
subject:
- "group:admins"
- "group:moderators"
policy: two_factor
## Rules applied to 'dev' group
- domain: dev.auth.lan
resources:
- "^/groups/dev/.*$"
subject: "group:dev"
policy: two_factor
## Rules applied to user 'john'
- domain: dev.auth.lan
resources:
- "^/users/john/.*$"
subject: "user:john"
policy: two_factor
## Rules applied to user 'harry'
- domain: dev.auth.lan
resources:
- "^/users/harry/.*$"
subject: "user:harry"
policy: two_factor
## Rules applied to user 'bob'
- domain: "*.mail.auth.lan"
subject: "user:bob"
policy: two_factor
- domain: "dev.auth.lan"
resources:
- "^/users/bob/.*$"
subject: "user:bob"
policy: two_factor
##
## Session Provider Configuration
##
## The session cookies identify the user once logged in.
## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined.
session:
## The name of the session cookie.
name: authelia_session
## The domain to protect.
## Note: the authenticator must also be in that domain.
## If empty, the cookie is restricted to the subdomain of the issuer.
domain: lan
## Sets the Cookie SameSite value. Possible options are none, lax, or strict.
## Please read https://www.authelia.com/docs/configuration/session.html#same_site
same_site: lax
## The secret to encrypt the session data. This is only used with Redis / Redis Sentinel.
## Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
secret: insecure_session_secret
## The value for expiration, inactivity, and remember_me_duration are in seconds or the duration notation format.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
## All three of these values affect the cookie/session validity period. Longer periods are considered less secure
## because a stolen cookie will last longer giving attackers more time to spy or attack.
## The time before the cookie expires and the session is destroyed if remember me IS NOT selected.
expiration: 1h
## The inactivity time before the session is reset. If expiration is set to 1h, and this is set to 5m, if the user
## does not select the remember me option their session will get destroyed after 1h, or after 5m since the last time
## Authelia detected user activity.
inactivity: 5m
## The time before the cookie expires and the session is destroyed if remember me IS selected.
## Value of 0 disables remember me.
remember_me_duration: 1M
##
## Redis Provider
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
#redis:
# host: 127.0.0.1
# port: 6379
# ## Use a unix socket instead
# # host: /var/run/redis/redis.sock
#
# ## Username used for redis authentication. This is optional and a new feature in redis 6.0.
# # username: authelia
#
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: authelia
#
# ## This is the Redis DB Index https://redis.io/commands/select (sometimes referred to as database number, DB, etc).
# database_index: 0
#
# ## The maximum number of concurrent active connections to Redis.
# maximum_active_connections: 8
#
# ## The target number of idle connections to have open ready for work. Useful when opening connections is slow.
# minimum_idle_connections: 0
#
# ## The Redis TLS configuration. If defined will require a TLS connection to the Redis instance(s).
# # tls:
# ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option).
# # server_name: myredis.example.com
#
# ## Skip verifying the server certificate (to allow a self-signed certificate).
# ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
# ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
# # skip_verify: false
#
# ## Minimum TLS version for the connection.
# # minimum_version: TLS1.2
#
# ## The Redis HA configuration options.
# ## This provides specific options to Redis Sentinel, sentinel_name must be defined (Master Name).
# # high_availability:
# ## Sentinel Name / Master Name.
# # sentinel_name: mysentinel
#
# ## Specific password for Redis Sentinel. The node username and password is configured above.
# # sentinel_password: sentinel_specific_pass
#
# ## The additional nodes to pre-seed the redis provider with (for sentinel).
# ## If the host in the above section is defined, it will be combined with this list to connect to sentinel.
# ## For high availability to be used you must have either defined; the host above or at least one node below.
# # nodes:
# # - host: sentinel-node1
# # port: 6379
# # - host: sentinel-node2
# # port: 6379
#
# ## Choose the host with the lowest latency.
# # route_by_latency: false
#
# ## Choose the host randomly.
# # route_randomly: false
##
## Regulation Configuration
##
## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are made
## in a short period of time.
regulation:
## The number of failed login attempts before user is banned. Set it to 0 to disable regulation.
max_retries: 3
## The time range during which the user can attempt login before being banned. The user is banned if the
## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
find_time: 2m
## The length of time before a banned user can login again. Ban Time accepts duration notation.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
##
## Local (Storage Provider)
##
## This stores the data in a SQLite3 Database.
## This is only recommended for lightweight non-stateful installations.
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
# local:
# path: /config/db.sqlite3
##
## MySQL / MariaDB (Storage Provider)
##
#mysql:
# host: 127.0.0.1
# port: 3306
# database: authelia
# username: authelia
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: mypassword
# timeout: 5s
#
##
## PostgreSQL (Storage Provider)
##
postgres:
host: postgres.live-env.svc.cluster.local
port: 5432
database: authelia
username: authelia
## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
password: auth2021
timeout: 5s
sslmode: disable
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a U2F registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
  ## You can disable the notifier startup check by setting this to true.
  disable_startup_check: false

  ##
  ## File System (Notification Provider)
  ##
  ## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
  ##
  ## NOTE(review): with the filesystem provider, reset/registration links are only
  ## written to this file on the NFS volume — users never receive mail. Confirm an
  ## operator reads this file, or switch to the smtp provider below.
  filesystem:
    filename: /config-nfs/notification.txt

  ##
  ## SMTP (Notification Provider)
  ##
  ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate.
  ## [Security] By default Authelia will:
  ##   - force all SMTP connections over TLS including unauthenticated connections
  ##     - use the disable_require_tls boolean value to disable this requirement
  ##       (only works for unauthenticated connections)
  ##   - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates
  ##     (configure in tls section)
  # smtp:
  #   ## The SMTP host to connect to.
  #   host: 127.0.0.1
  #
  #   ## The port to connect to the SMTP host on.
  #   port: 1025
  #
  #   ## The connection timeout.
  #   timeout: 5s
  #
  #   ## The username used for SMTP authentication.
  #   username: test
  #
  #   ## The password used for SMTP authentication.
  #   ## Can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
  #   password: password
  #
  #   ## The address to send the email FROM.
  #   sender: admin@example.com
  #
  #   ## HELO/EHLO Identifier. Some SMTP Servers may reject the default of localhost.
  #   identifier: localhost
  #
  #   ## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
  #   subject: "[Authelia] {title}"
  #
  #   ## This address is used during the startup check to verify the email configuration is correct.
  #   ## It's not important what it is except if your email server only allows local delivery.
  #   startup_check_address: test@authelia.com
  #
  #   ## By default we require some form of TLS. This disables this check though is not advised.
  #   disable_require_tls: false
  #
  #   ## Disables sending HTML formatted emails.
  #   disable_html_emails: false
  #
  #   tls:
  #     ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option).
  #     # server_name: smtp.example.com
  #
  #     ## Skip verifying the server certificate (to allow a self-signed certificate).
  #     ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
  #     ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
  #     skip_verify: false
  #
  #     ## Minimum TLS version for either StartTLS or SMTPS.
  #     minimum_version: TLS1.2
##
## Identity Providers
##
# identity_providers:
##
## OpenID Connect (Identity Provider)
##
## It's recommended you read the documentation before configuration of this section:
## https://www.authelia.com/docs/configuration/identity-providers/oidc.html
# oidc:
## The hmac_secret is used to sign OAuth2 tokens (authorization code, access tokens and refresh tokens).
## HMAC Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# hmac_secret: this_is_a_secret_abc123abc123abc
## The issuer_private_key is used to sign the JWT forged by OpenID Connect.
## Issuer Private Key can also be set using a secret: https://docs.authelia.com/configuration/secrets.html
# issuer_private_key: |
# --- KEY START
# --- KEY END
## The lifespans configure the expiration for these token types.
# access_token_lifespan: 1h
# authorize_code_lifespan: 1m
# id_token_lifespan: 1h
# refresh_token_lifespan: 90m
## Enables additional debug messages.
# enable_client_debug_messages: false
## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it below 8 for
## security reasons.
# minimum_parameter_entropy: 8
## Clients is a list of known clients and their configuration.
# clients:
# -
## The ID is the OpenID Connect ClientID which is used to link an application to a configuration.
# id: myapp
## The description to show to users when they end up on the consent screen. Defaults to the ID above.
# description: My Application
## The client secret is a shared secret between Authelia and the consumer of this client.
# secret: this_is_a_secret
## Sets the client to public. This should typically not be set, please see the documentation for usage.
# public: false
## The policy to require for this client; one_factor or two_factor.
# authorization_policy: two_factor
## Audience this client is allowed to request.
# audience: []
## Scopes this client is allowed to request.
# scopes:
# - openid
# - groups
# - email
# - profile
## Redirect URI's specifies a list of valid case-sensitive callbacks for this client.
# redirect_uris:
# - https://oidc.example.com:8080/oauth2/callback
## Grant Types configures which grants this client can obtain.
## It's not recommended to define this unless you know what you're doing.
# grant_types:
# - refresh_token
# - authorization_code
## Response Types configures which responses this client can be sent.
## It's not recommended to define this unless you know what you're doing.
# response_types:
# - code
## Response Modes configures which response modes this client supports.
# response_modes:
# - form_post
# - query
# - fragment
## The algorithm used to sign userinfo endpoint responses for this client, either none or RS256.
# userinfo_signing_algorithm: none
...

View File

@@ -0,0 +1,172 @@
# Database bootstrap (run once against the cluster PostgreSQL instance):
#   create database authelia;
#   create user authelia with encrypted password 'secret';
#   grant all privileges on database authelia to authelia;
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authelia
  labels:
    app: authelia
    release: latest
spec:
  replicas: 1
  selector:
    matchLabels:
      app: authelia
      release: latest
  template:
    metadata:
      labels:
        app: authelia
        release: latest
    spec:
      containers:
        - name: authelia
          # NOTE(review): floating ":latest" tag — pin a version for reproducible rollouts.
          image: authelia/authelia:latest
          env:
            #- name: AUTHELIA_SERVER_PORT
            #  value: "9091"
            - name: TZ
              value: "Europe/Berlin"
          volumeMounts:
            # NFS-backed state; the notifier writes /config-nfs/notification.txt here.
            - name: authelia
              mountPath: /config-nfs
            # configuration.yml delivered via ConfigMap.
            - name: authelia-config
              mountPath: /config
            # Host CA bundle so the container trusts internal certificates.
            - name: pki
              mountPath: /etc/pki
          ports:
            - name: http
              containerPort: 9091
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "1000Mi"
              cpu: "1500m"
      enableServiceLinks: false
      volumes:
        - name: authelia
          persistentVolumeClaim:
            claimName: authelia
        - name: authelia-config
          configMap:
            name: authelia-config
            items:
              - key: configuration.yml
                path: configuration.yml
        # hostPath mount ties scheduling to nodes that actually carry /etc/pki.
        - name: pki
          hostPath:
            path: /etc/pki
            type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: authelia
  labels:
    app: authelia
spec:
  # Binds to the hand-made PV below via storageClassName + the PV's claimRef.
  storageClassName: nfs-ssd-ebin02
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: authelia
spec:
  storageClassName: "nfs-ssd-ebin02"
  nfs:
    path: /data/raid1-ssd/k8s-data/authelia
    server: ebin02
  capacity:
    storage: 100Mi
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  # Keep the NFS data if the claim is ever deleted.
  persistentVolumeReclaimPolicy: Retain
  # Pre-bind this PV to the authelia PVC.
  # NOTE(review): claimRef targets namespace "live-infra" — the PVC must be created
  # in that namespace or this PV will never bind; confirm the deploy namespace.
  claimRef:
    kind: PersistentVolumeClaim
    name: authelia
    namespace: live-infra
---
apiVersion: v1
kind: Service
metadata:
  name: authelia
  labels:
    app: authelia
spec:
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
    # NOTE(review): port 443 also forwards to the plain-HTTP container port (9091);
    # no TLS is terminated here. Verify the container actually serves TLS, otherwise
    # anything connecting to "https" gets cleartext HTTP.
    - port: 443
      targetPort: http
      name: https
  selector:
    app: authelia
    release: latest
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: authelia
  annotations:
    kubernetes.io/ingress.class: nginx
    # Forward-auth endpoint consumed by the other ingresses in the cluster.
    # NOTE(review): the scheme is https but the authelia Service only forwards to the
    # plain-HTTP container port — confirm TLS is actually served on this path,
    # otherwise nginx auth subrequests will fail the handshake.
    nginx.ingress.kubernetes.io/auth-url: https://authelia.live-infra.svc.cluster.local/api/verify
    nginx.ingress.kubernetes.io/auth-signin: https://auth.lan
    nginx.ingress.kubernetes.io/auth-response-headers: Remote-User,Remote-Name,Remote-Groups,Remote-Email
    nginx.ingress.kubernetes.io/auth-snippet: |
      proxy_set_header X-Forwarded-Method $request_method;
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header X-Forwarded-Method $request_method;
spec:
  rules:
    - host: auth.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: authelia
                port:
                  name: http
    - host: secure.auth.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: authelia
                port:
                  name: http
    - host: public.auth.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: authelia
                port:
                  name: http

133
apps/codetogether.yaml Normal file
View File

@@ -0,0 +1,133 @@
# ========================================================================
# Secret: CodeTogether License Values
# ========================================================================
apiVersion: v1
kind: Secret
metadata:
  name: codetogether-license
  namespace: default
type: Opaque
stringData:
  # Configure as needed for your deployment, should match your SSL certificate
  CT_SERVER_URL: "https://cd.lan"
  CT_TRUST_ALL_CERTS: "true"
  # Provided by your Genuitec Sales Representative
  # *values must match exactly
  CT_LICENSEE: "Werkstatt"
  CT_MAXCONNECTIONS: "0"
  # NOTE(review): this expiration date is in the past — renew the license before deploying.
  CT_EXPIRATION: "2022/10/01"
  CT_SIGNATURE: "xXM4cwzG...619bef4"
---
# ========================================================================
# Secret: SSL Key and Certificate for SSL used by Ingress
# ========================================================================
apiVersion: v1
kind: Secret
metadata:
  name: codetogether-sslsecret
  namespace: default
type: kubernetes.io/tls
data:
  # value from "cat ssl.crt | base64 -w 0"
  tls.crt: "LS0tLS1CRUdJTi...UZJQ0FURS0tLS0tDQo="
  # value from "cat ssl.key | base64 -w 0"
  tls.key: "LS0tLS1CRUdJTi...EUgS0VZLS0tLS0NCg=="
---
# ========================================================================
# Ingress: Expose the HTTPS service to the network
# ========================================================================
# Migrated from networking.k8s.io/v1beta1 (removed in Kubernetes 1.22) to the GA
# networking.k8s.io/v1 schema — the serviceName/servicePort form no longer applies
# on this cluster; matches the other v1 ingresses in this repository.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: codetogether
spec:
  tls:
    - hosts:
        - SERVERFQDN
      secretName: codetogether-sslsecret
  rules:
    - host: SERVERFQDN
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: codetogether
                port:
                  number: 80
---
# ========================================================================
# Service: Map the HTTP port from the container
# ========================================================================
apiVersion: v1
kind: Service
metadata:
  name: codetogether
  labels:
    run: codetogether
spec:
  ports:
    - port: 80
      name: http
      targetPort: 1080
      protocol: TCP
  selector:
    run: codetogether
---
# ========================================================================
# Deployment: Configure the Container Deployment
# ========================================================================
apiVersion: apps/v1
kind: Deployment
metadata:
  name: codetogether
  namespace: default
spec:
  selector:
    matchLabels:
      run: codetogether
  replicas: 1
  template:
    metadata:
      labels:
        run: codetogether
    spec:
      containers:
        - name: codetogether
          # NOTE(review): floating ":latest" tag — pin a version for reproducible rollouts.
          image: hub.edge.codetogether.com/latest/codetogether:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 1080
          env:
            - name: CT_LOCATOR
              value: "none"
            # License values are injected from the codetogether-license Secret above.
            - name: CT_SERVER_URL
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_SERVER_URL
            - name: CT_TRUST_ALL_CERTS
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_TRUST_ALL_CERTS
            - name: CT_LICENSEE
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_LICENSEE
            - name: CT_MAXCONNECTIONS
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_MAXCONNECTIONS
            - name: CT_EXPIRATION
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_EXPIRATION
            - name: CT_SIGNATURE
              valueFrom:
                secretKeyRef:
                  name: codetogether-license
                  key: CT_SIGNATURE
      # Registry credentials for hub.edge.codetogether.com.
      imagePullSecrets:
        - name: ctcreds

View File

@@ -1,6 +1,5 @@
FROM debian:stable-slim
FROM cr.lan/debian-stable
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
curl procps && \
apt-get clean -y && \

23
apps/curl/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Builds the apps/curl image via the shared kaniko pipeline and pushes it to cr.lan.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-curl
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
    - name: git-url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: git-revision
      value: master
    - name: path-to-image-context
      value: apps/curl
    - name: path-to-dockerfile
      value: apps/curl/Dockerfile
    - name: image-name
      value: cr.lan/curl
  workspaces:
    - name: git-source
      persistentVolumeClaim:
        claimName: tektoncd-workspaces
      # Keep per-image checkouts separated inside the shared workspace PVC.
      subPath: tekton/curl

View File

@@ -1,25 +0,0 @@
FROM debian:buster-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN dpkg --add-architecture armhf && \
apt-get update && \
apt-get install -y \
multiarch-support \
dpkg-dev \
distcc ccache \
build-essential \
gcc \
cpp \
g++ \
clang \
llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*
# Op port
EXPOSE 3632
# Stats port
EXPOSE 3633
USER distccd
ENTRYPOINT /usr/bin/distccd --no-detach --daemon --stats --log-level error --log-stderr $OPTIONS

View File

@@ -1,9 +1,7 @@
Docker-ui
Build it for arm64:
Build it for arm64 in docker-registry-ui
docker build --platform linux/arm64 -t joxit/docker-registry-ui:static -f static.dockerfile github.com/Joxit/docker-registry-ui
ARCH=arm64; APP=docker-registry-ui ; podman build --arch=$ARCH -t $APP:$ARCH -t cr.lan/$APP:$ARCH -f arm64v8-static.dockerfile
docker tag 1494c11066f5 docker-registry.lan/docker-registry-ui:arm64
docker push docker-registry.lan/docker-registry-ui:arm64
ARCH=arm64; APP=docker-registry-ui ; podman push cr.lan/$APP:$ARCH

View File

@@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCaHN7wa2QK9qD3
ovn7ZZiKQ+E/f54MnHGgdlTcskTuiysbS4rqUC49MzWRZjxzxukbwF0a1yOOJUSM
YgOeDntRU4T49FLxY3YAZ9RV4Lr6qU8Tz45Ez4N7RLa3QLqY2wf3BEy32k8SqHsI
XMt0DV93w6q1eqW95XRNDDJF0xm4Oa4yaew0tNCx8Senv51jZ8lOX8CJljnE2Sil
P0HBFwfJqKk9qZg5WstQZFsr3D1wTpMZ3UmnzDN3EEBLJkvcAJvdo2E8TGb29UcD
OopHCeixdoKJw/BBdDCXDoSs9N+pDmoY7QSQaXP91sybP/zrcvrIFTT39IFrARRh
5X9QvCnJxhHXPhqqSeAE4YzTGHJV3BdpIVMPMWUHL9TfLFJxbUGImE2IUQZxSb2i
Wy8w9mnt4SFARGUIr0+tOmEDQ7smlFUke9yIPnti01OogfDNR4/szpwYvfE5+xG6
Vp0W590HxL6JE3nqaTJu+KIkBcRzroZZghmNEKik2MeRIxHjCpjvNr2INLn30S81
NhdP4uZdCeI5sERFaFOCgA64MPTtPYjQRV7BFwpN3+alUK8zVtXat/n5HyxvqrzG
s7IHA/GyCLjfsh9sWDhsfgsuIZzL+KblYnU1XPhRko4BQ2Y3GwA0QGFvM0+J1z/V
r3ieyio37CbEuVugMQ/VYYl8UYE0TwIDAQABAoICAC+rnopfraJ2h3QSRaEt2/Fo
7dPmdc0Q11T7RWS+//OJuNvIkj/IbYUgwgEnzqtBa/nZlvMmeSkO/hUufE/3ys1t
OESJzt48FdQqSdQGn8/Jb1yBZ1CBn/oRVzN4IkAGAIC4I8L7FFqBIw2DJqvPNyik
rblVJs+GmmL60tImal5B+VA+04G6LJPeNJX+/4AwKmTD2Zq1jUkGozv6RSylIxON
yEv6mcuj+h/z6v+2MIr8wyPM/2uYDpNVw417WxvCVHRKhVlRiMf7NuwYv40Z05CR
R++1XCvi9OTE6OVXGZgBjXAIYNEKzYZHWyLquCFcf5ZEeQ35485llxhxFOC0U3hL
lT8pI6EFnRiTi+Eq+7GOmvKYjNda6UtUVYPFIX0Ff3IkkwJ53rYdrar4xLnpmeUF
LcJhGJdfJSsvO2mdiLEFm/K7dQxDadusYPYFeUK4CGgoIsauf6XzdWbxJgv4qcOJ
dMzt2uLxpq5k7pQ5HU96Pa9g1flR1vaAtZ4htTMbQ6o7nrUoc8+zoo8pBYW6/zi+
OXf/9BvDQ/dQvtAF+gJQMfGDO5J0x5+yr+Jp7LKjlmG5B2bYMYF9/uZQTgY5kla5
uqihCZVZ14uojbXA3eqHvmtRfFqQ4Us3s0BUDm4W5PUe6jwJ8TavP+XJIjcCLU2c
kOrKZ0ZtIXwTUqKE5Z2BAoIBAQDKXleKtzEvvOWihxzuUmQYIT2HzrMG14s1M7wo
YF0ARaQTxX5HH2lYN7znWb/RpcDSj+IBNV4PxEOHVNCTWhev/PnFmm6FuqopJDIZ
sumP3jJg0K2/MFjBsHXNqacqjqMKlWFnuYqDHZSRX1bjC9IWB6HfS9Wjm2XrgBGx
xFTcAZ3kXX4NlVMz/JgWMKLRY+qGtDWG11sT+oAge81La+MRz/R/fAhf3K+0iDaK
F1iX8jXIcRfqk9OLafRcuIkS4q4rV6D9bI9xjbTz2tsm3b/wJezoSC06mTHoUEoG
p3MIPZ6ETDADDlB9hsWS23p2ueuUOCHg19+n30ah6qWx7UzjAoIBAQDC9KBAYr0T
sf7o5FA+Xp/N6ALxarNa1b15TjFtwSfvwZrrg02QQIpQCR70vy6wiczkTcmRCi4P
uiiVQz8abWbOW+aG4ThTpkOZDbCEVghFzGWPZjRsyrlhcegdS5FL4fCBrtUzOs7e
e+YtgyPrvmHamhMvKYWfW/DWfxOoBFoL9GTuC1646Va63u3MmLMflzYhj4dgbsm0
ut70aK3RAFkLVwswmx+OPINeSpEz6iIRArF4aSi8rH2eaMp4QiXz+zXSP+Bm4XTN
C6HrQeyOmiEtXcZemZVnUtkJBdkW+iRiiD3+xLEX11c/kzcyIeNpaGu9LckXuxqY
chu4XOVHLaKlAoIBAFapGfIESyL3UJtOIvyH+ec/bNsYkB/w8+M/mWbtBUaVjBMP
culAMVue2t1z2KoNwkopZY5A7VvxHz33+y3u2c/6lHejj4rjCfV+U5ofvNdoPsio
9I64RHoFeB0vdq/Jz1Y77C+ADCnj4/hxDINET54xfIdkMUPTy0yTVoB65CAm7Reb
Vdy5Qp0zoWl3QHJMyGURDQ8GcDFZB79hZOPUerPpCvoBApESr4evATQXlU/UYGXK
0IQa8+9y2ztNpx2YRx+2cfG0qKTnG0OGSG0XbxeHFjHOntfGPNIQd/LriF5SDOz4
t2LHoX5v1XHzXTk0mwapFxDzQQrhmZzDIFvWlCMCggEANLHORtjpZlNsJSLhFZqZ
8xvM/9fpVpoDNrCN566XztQzvYimBGGNgQiWF209f3YfrW3hF5T60kFtCrs8aTY8
3XY1nyttAB8mkk4C8iIW5lbS9KmZbfZ1mQMizBhK04nkagkJk2lH1RcEJjUWFnhF
FsMigFLmzSYauL9sXrOeazDJvxXPqodXa/cpq21yrQ1AEl4rJ0OKvZDtBn7szFsd
tlT2r1KeeuGcWHYrPS8BujtSIMu7uROeeJy2bT7j50h1Sbj+PJCf83Q7dc1B1WGP
qiV4osU8fssD4s5z2SQPhZpxt1UO0PThnkt6VdCXGTyiMmYXvpRSIfZly7VAO7b4
CQKCAQEAoVcWk9yQ5fD+uQ40duvjpzeNxBjttFLHe1CeOCIPtA3KBak4O+MNwZMz
oVUe2V/vb3kGpngF56d1hrBa4iQhvq4mGfnF/ZsbQHa4BZyaFIFvcOwZsgCjAO65
MpbybhRiOMMtu0Bg/H1hH2dzatugrqfVDYRnt9EgpDl7gkdVvmRu9khMWGHLv9qJ
gVeH5dNlpty3gkpSjJgTpEuKF7Yzw4seHpjkiwzIitgE2F7Xrv+6GtYOs0iziJTx
ZNq3BtxzCGe6MamLkXOj5DREhQMqAxJTUo/AYRNRiOeq+AdYgoAulse7HIO8q77E
i+DOL/C63wFKJddUnKSXCf+iAJraGw==
-----END PRIVATE KEY-----

View File

@@ -1,34 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIF2zCCA8OgAwIBAgIUCvX0FglFpG7UJJe6QruGhfKwglUwDQYJKoZIhvcNAQEL
BQAwfDELMAkGA1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVy
bGluMQ4wDAYDVQQKDAVjaGFvczEcMBoGA1UEAwwTZG9ja2VyLXJlZ2lzdHJ5Lmxh
bTEdMBsGCSqGSIb3DQEJARYOcm9vdEBjaGFvcy5sYW4wIBcNMjAwNjI0MTUxODE5
WhgPMjEyMDA1MzExNTE4MTlaMHwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJs
aW4xDzANBgNVBAcMBkJlcmxpbjEOMAwGA1UECgwFY2hhb3MxHDAaBgNVBAMME2Rv
Y2tlci1yZWdpc3RyeS5sYW0xHTAbBgkqhkiG9w0BCQEWDnJvb3RAY2hhb3MubGFu
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAmhze8GtkCvag96L5+2WY
ikPhP3+eDJxxoHZU3LJE7osrG0uK6lAuPTM1kWY8c8bpG8BdGtcjjiVEjGIDng57
UVOE+PRS8WN2AGfUVeC6+qlPE8+ORM+De0S2t0C6mNsH9wRMt9pPEqh7CFzLdA1f
d8OqtXqlveV0TQwyRdMZuDmuMmnsNLTQsfEnp7+dY2fJTl/AiZY5xNkopT9BwRcH
yaipPamYOVrLUGRbK9w9cE6TGd1Jp8wzdxBASyZL3ACb3aNhPExm9vVHAzqKRwno
sXaCicPwQXQwlw6ErPTfqQ5qGO0EkGlz/dbMmz/863L6yBU09/SBawEUYeV/ULwp
ycYR1z4aqkngBOGM0xhyVdwXaSFTDzFlBy/U3yxScW1BiJhNiFEGcUm9olsvMPZp
7eEhQERlCK9PrTphA0O7JpRVJHvciD57YtNTqIHwzUeP7M6cGL3xOfsRuladFufd
B8S+iRN56mkybviiJAXEc66GWYIZjRCopNjHkSMR4wqY7za9iDS599EvNTYXT+Lm
XQniObBERWhTgoAOuDD07T2I0EVewRcKTd/mpVCvM1bV2rf5+R8sb6q8xrOyBwPx
sgi437IfbFg4bH4LLiGcy/im5WJ1NVz4UZKOAUNmNxsANEBhbzNPidc/1a94nsoq
N+wmxLlboDEP1WGJfFGBNE8CAwEAAaNTMFEwHQYDVR0OBBYEFCtnUlt2y35MUJ0x
YSvt8G3vi0NMMB8GA1UdIwQYMBaAFCtnUlt2y35MUJ0xYSvt8G3vi0NMMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAEXDBh9NNZza6Vjzwcll7uAc
x22ghoDinHOdfNWe9Hgocmj/Ci4M7f8TL35Zlm2PhOfYaol88uVIOiTKrf2USY2J
7RSvpl34voiWR8HBtkIFvmiUE2GR5I8gA21H8xaenIbg1Pj9V+E4SgIN1V9lX6S1
tjNVbhs/mU6YqyNytkjCuwJgCMPgXx4wwPZqaBqGJ5IrJfag0ZahT0IfKSzKtc8M
HBeXTy7Ck7WUOQWRCe289CBkYHZ+ScdnXnJao7uLvpuoUpu6/WPAnMN1t7KUO4tU
Z0SwNpY/Xsq3pjwTk2ZJwhFI1baaOyDZJW0+l2D48q7ADavq72NlPerZFkIN6Uvh
iyb4A/dzZWeZPIJinLtC6Bip5epg03KR0O4D/rYHbn6uVTq894ThIAXt1Q8fFVGb
oX+AK+ERCWc4ost+pr+Dk78bJUEcHCMRIGaWUVfzXvCagrx4eRLwoaLTovPHVvVl
on61w57W8csoj8lh3TX5t0MB4s87twHlErRIALqMd+m5K+2CPeWRd/6ZpmCGuL9s
bT+Rde3Sqw45N3Asw795yA73Av0coq8pB2DyDR5SoHkMD1rzJIVg4lBCwMSR3IJk
hiIO2qV1xNFrnA3ggKZSyDkH8eOR0dAmtthX6nDGvUbFsMFYnXli5wngTuXdHiYo
Lpilp6oWJLkzjfyGR3Um
-----END CERTIFICATE-----

View File

@@ -6,7 +6,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
replicas: 1
selector:
@@ -21,38 +20,36 @@ spec:
spec:
containers:
- name: registry-ui
image: "docker-registry.lan/docker-registry-ui:arm64"
image: docker.io/joxit/docker-registry-ui:latest
imagePullPolicy: Always
env:
- name: URL
value: "http://docker-registry.lan"
- name: NGINX_PROXY_PASS_URL
value: "http://cr.lan"
- name: REGISTRY_TITLE
value: "dReg"
value: "cReg"
- name: DELETE_IMAGES
value: "true"
- name: REGISTRY_URL
value: "http://docker-registry-ui.lan"
- name: PULL_URL
value: "http://docker-registry.lan"
#- name: REGISTRY_URL
# value: "http://cr.lan"
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
#livenessProbe:
# httpGet:
# path: /
# port: http
#readinessProbe:
# httpGet:
# path: /
# port: http
resources:
requests:
memory: "24Mi"
cpu: "50m"
memory: "20Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "100m"
memory: "32Mi"
cpu: "50m"
---
apiVersion: v1
kind: Service
@@ -61,7 +58,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
ports:
- port: 80
@@ -72,16 +68,25 @@ spec:
app: registry-ui
release: docker-registry-ui
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: docker-registry-ui
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-origin: "*"
nginx.ingress.kubernetes.io/cors-expose-headers: "*"
spec:
rules:
- host: docker-registry-ui.lan
- host: cr-ui.lan
http:
paths:
- backend:
serviceName: docker-registry-ui
servicePort: http
path: /
- path: /
pathType: Prefix
backend:
service:
name: docker-registry-ui
port:
number: 80

View File

@@ -1,12 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: docker-registry
spec:
finalizers:
- kubernetes
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: docker-registry
@@ -28,7 +20,7 @@ metadata:
name: registry
labels:
app: registry
namespace: docker-registry
namespace: live-env
spec:
replicas: 1
selector:
@@ -66,7 +58,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: docker-registry-config
namespace: docker-registry
namespace: live-env
labels:
app: registry
data:
@@ -86,7 +78,7 @@ data:
addr: :5000
headers:
X-Content-Type-Options: [nosniff]
Access-Control-Allow-Origin: ['*']
Access-Control-Allow-Origin: ['*', 'http://cr-ui.lan']
Access-Control-Allow-Methods: ['HEAD', 'GET', 'OPTIONS', 'DELETE']
Access-Control-Allow-Headers: ['Authorization', 'Accept']
Access-Control-Max-Age: [1728000]
@@ -97,42 +89,43 @@ kind: Service
apiVersion: v1
metadata:
name: registry
namespace: docker-registry
namespace: live-env
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
#---
#apiVersion: v1
#data:
# proxy-connect-timeout: "30"
# proxy-read-timeout: "1801"
# proxy-send-timeout: "1801"
# proxy-body-size: "0"
# client-max-body-size: "0"
#kind: ConfigMap
#metadata:
# name: ingress-nginx-controller
# namespace: ingress-nginx
---
apiVersion: v1
data:
proxy-connect-timeout: "30"
proxy-read-timeout: "1801"
proxy-send-timeout: "1801"
proxy-body-size: "0"
client-max-body-size: "0"
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: docker-registry
namespace: docker-registry
#annotations:
# nginx.ingress.kubernetes.io/proxyconnecttimeout: 30
# nginx.ingress.kubernetes.io/proxyreadtimeout: 1800
# nginx.ingress.kubernetes.io/proxysendtimeout: 1800
# nginx.ingress.kubernetes.io/proxy-body-size: '5g'
namespace: live-env
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: docker-registry.lan
http:
paths:
- backend:
serviceName: registry
servicePort: 5000
path: /
- path: /
pathType: Prefix
backend:
service:
name: registry
port:
number: 5000

116
apps/dolibarr/Dockerfile Normal file
View File

@@ -0,0 +1,116 @@
# Dolibarr ERP/CRM on the in-house Debian PHP-FPM base image.
# see https://wiki.dolibarr.org/index.php/Dependencies_and_external_libraries
FROM cr.lan/debian-stable-php-fpm

# Build-time only (ARG, not ENV): keeps DEBIAN_FRONTEND out of the runtime
# environment and avoids the deprecated space-separated "ENV key value" form.
ARG DEBIAN_FRONTEND=noninteractive

# Install runtime dependencies, prepare folders, and clean apt caches in the same
# layer — cleanup in a later layer would not shrink the image.
RUN set -ex; \
    apt-get update -q; \
    apt-get install -y --no-install-recommends \
        bzip2 \
        cron \
        default-mysql-client \
        php-soap \
        rsync \
        unzip \
        zip; \
    apt-get autoremove --purge -y; \
    apt-get clean -y; \
    rm -rf /var/lib/apt/lists/* /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*; \
    mkdir -p /var/www/documents; \
    chown -R www-data:root /var/www; \
    chmod -R g=u /var/www

# These paths are populated at runtime by entrypoint.sh (rsync from /usr/src).
VOLUME /var/www/html /var/www/documents /var/www/scripts

# Runtime configuration defaults; all overridable at deploy time.
ENV DOLI_AUTO_CONFIGURE=1 \
    DOLI_DB_TYPE=mysqli \
    DOLI_DB_HOST= \
    DOLI_DB_PORT=3306 \
    DOLI_DB_USER=dolibarr \
    DOLI_DB_PASSWORD='' \
    DOLI_DB_NAME=dolibarr \
    DOLI_DB_PREFIX=llx_ \
    DOLI_DB_CHARACTER_SET=utf8 \
    DOLI_DB_COLLATION=utf8_unicode_ci \
    DOLI_DB_ROOT_LOGIN='' \
    DOLI_DB_ROOT_PASSWORD='' \
    DOLI_ADMIN_LOGIN=admin \
    DOLI_MODULES='' \
    DOLI_URL_ROOT='http://localhost' \
    DOLI_AUTH=dolibarr \
    DOLI_LDAP_HOST= \
    DOLI_LDAP_PORT=389 \
    DOLI_LDAP_VERSION=3 \
    DOLI_LDAP_SERVERTYPE=openldap \
    DOLI_LDAP_LOGIN_ATTRIBUTE=uid \
    DOLI_LDAP_DN='' \
    DOLI_LDAP_FILTER='' \
    DOLI_LDAP_ADMIN_LOGIN='' \
    DOLI_LDAP_ADMIN_PASS='' \
    DOLI_LDAP_DEBUG=false \
    DOLI_HTTPS=0 \
    DOLI_PROD=0 \
    DOLI_NO_CSRF_CHECK=0 \
    WWW_USER_ID=33 \
    WWW_GROUP_ID=33 \
    PHP_INI_DATE_TIMEZONE='UTC' \
    PHP_MEMORY_LIMIT=256M \
    PHP_MAX_UPLOAD=20M \
    PHP_MAX_EXECUTION_TIME=300

# Build time env var
ARG DOLI_VERSION=13.0.4

# Get Dolibarr release archive.
# NOTE(review): prefer ADD --checksum=sha256:... (or curl + verify in RUN) so the
# build is reproducible and tamper-evident.
ADD https://github.com/Dolibarr/dolibarr/archive/${DOLI_VERSION}.zip /tmp/dolibarr.zip

# Unpack into /usr/src/dolibarr; the entrypoint syncs it into the volumes on
# first start and on upgrades (compares .docker-image-version).
RUN set -ex; \
    mkdir -p /tmp/dolibarr; \
    unzip -q /tmp/dolibarr.zip -d /tmp/dolibarr; \
    rm /tmp/dolibarr.zip; \
    mkdir -p /usr/src/dolibarr; \
    cp -r "/tmp/dolibarr/dolibarr-${DOLI_VERSION}"/* /usr/src/dolibarr; \
    rm -rf /tmp/dolibarr; \
    chmod +x /usr/src/dolibarr/scripts/*; \
    echo "${DOLI_VERSION}" > /usr/src/dolibarr/.docker-image-version

COPY entrypoint.sh /
RUN set -ex; \
    chmod 755 /entrypoint.sh; \
    mkdir -p /run/php

ENTRYPOINT ["/entrypoint.sh"]
CMD ["php-fpm7.4", "--nodaemonize", "-c", "/etc/php/7.4/fpm/php.ini", "--fpm-config", "/etc/php/7.4/fpm/php-fpm.conf"]

# Arguments to label built container — declared last because they change every
# build and would otherwise invalidate all following layers.
ARG VCS_REF
ARG BUILD_DATE

# Container labels (http://label-schema.org/)
# Container annotations (https://github.com/opencontainers/image-spec)
LABEL maintainer="Monogramm maintainers <opensource at monogramm dot io>" \
    product="Dolibarr" \
    version=${DOLI_VERSION} \
    org.label-schema.vcs-ref=${VCS_REF} \
    org.label-schema.vcs-url="https://github.com/Monogramm/docker-dolibarr" \
    org.label-schema.build-date=${BUILD_DATE} \
    org.label-schema.name="Dolibarr" \
    org.label-schema.description="Open Source ERP & CRM for Business" \
    org.label-schema.url="https://www.dolibarr.org/" \
    org.label-schema.vendor="Dolibarr" \
    org.label-schema.version=$DOLI_VERSION \
    org.label-schema.schema-version="1.0" \
    org.opencontainers.image.revision=${VCS_REF} \
    org.opencontainers.image.source="https://github.com/Monogramm/docker-dolibarr" \
    org.opencontainers.image.created=${BUILD_DATE} \
    org.opencontainers.image.title="Dolibarr" \
    org.opencontainers.image.description="Open Source ERP & CRM for Business" \
    org.opencontainers.image.url="https://www.dolibarr.org/" \
    org.opencontainers.image.vendor="Dolibarr" \
    org.opencontainers.image.version=${DOLI_VERSION} \
    org.opencontainers.image.authors="Monogramm maintainers <opensource at monogramm dot io>"

3
apps/dolibarr/README.md Normal file
View File

@@ -0,0 +1,3 @@
Create the nginx ConfigMap for the Dolibarr site configuration:
kubectl -n live-env create configmap dolibarr-nginx-site --from-file=nginx-site.configmap.conf

View File

@@ -0,0 +1,104 @@
# Database bootstrap (run once against the cluster PostgreSQL instance):
#   create database dolibarr;
#   create user dolibarr with encrypted password 'secret';
#   grant all privileges on database dolibarr to dolibarr;
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dolibarr
  labels:
    app: dolibarr
    release: latest
spec:
  replicas: 1
  selector:
    matchLabels:
      app: dolibarr
      release: latest
  template:
    metadata:
      labels:
        app: dolibarr
        release: latest
    spec:
      volumes:
        # nginx vhost config, created with:
        #   kubectl -n live-env create configmap dolibarr-nginx-site --from-file=nginx-site.configmap.conf
        - name: dolibarr-nginx-site
          configMap:
            name: dolibarr-nginx-site
        # Shared docroot: the dolibarr container populates it, nginx serves it.
        # NOTE(review): emptyDir — the installed tree is rebuilt on every pod restart.
        - name: www-data
          emptyDir: {}
      containers:
        # Plain nginx sidecar in front of PHP-FPM.
        - name: nginx-proxy
          image: nginx
          volumeMounts:
            - name: dolibarr-nginx-site
              mountPath: /etc/nginx/conf.d
            - name: www-data
              mountPath: /var/www/html
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
        - name: dolibarr
          image: cr.lan/dolibarr:latest
          volumeMounts:
            - name: www-data
              mountPath: /var/www/html
          env:
            - name: TZ
              value: "Europe/Berlin"
            - name: DOLI_DB_HOST
              value: postgres.live-env.svc.cluster.local
            - name: DOLI_DB_PORT
              value: "5432"
            - name: DOLI_DB_NAME
              value: dolibarr
            - name: DOLI_DB_USER
              value: dolibarr
            # NOTE(review): plaintext DB password committed to the repo — move it to a
            # Secret and inject via secretKeyRef (as the codetogether app does), then rotate.
            - name: DOLI_DB_PASSWORD
              value: Vb7yHzmE5HIjfU4hjghjghj6AnMdB
            - name: DOLI_DB_TYPE
              value: pgsql
          ports:
            - name: php-fpm
              containerPort: 9000
              protocol: TCP
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "3000m"
---
apiVersion: v1
kind: Service
metadata:
  name: dolibarr
spec:
  ports:
    # Targets the nginx-proxy container port by number (80).
    - name: http
      port: 80
  selector:
    app: dolibarr
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dolibarr
  annotations:
    kubernetes.io/ingress.class: nginx
    ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
spec:
  rules:
    - host: dolibarr.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: dolibarr
                port:
                  name: http

271
apps/dolibarr/entrypoint.sh Normal file
View File

@@ -0,0 +1,271 @@
#!/bin/sh
set -e
# Emit a log line prefixed with the script name and an ISO-8601 timestamp.
log() {
    _log_ts=$(date +%Y-%m-%dT%H:%M:%S)
    echo "[$0] [${_log_ts}] $*"
}
# version_greater A B returns whether A > B
# Sorts both versions numerically on up to four dot-separated components and
# succeeds iff A is NOT the smallest of the pair. Equal versions return false
# (A sorts first), which the upgrade logic below relies on to skip no-op syncs.
# NOTE(review): assumes purely numeric components (e.g. 13.0.4); suffixes like
# "-rc1" would not compare numerically — confirm inputs stay numeric.
version_greater() {
[ "$(printf '%s\n' "$@" | sort -t '.' -n -k1,1 -k2,2 -k3,3 -k4,4 | head -n 1)" != "$1" ]
}
# Succeed (return 0) iff the given directory contains no entries at all,
# including dotfiles (ls -A lists hidden entries but not "." / "..").
directory_empty() {
    if [ -n "$(ls -A "$1/")" ]; then
        return 1
    fi
    return 0
}
# Run the given command string as the www-data user when this script is executing
# as root; otherwise execute it directly as the current user.
# $1: a single shell command string (passed verbatim to `sh -c` / `su -c`).
# NOTE(review): requires the www-data account to exist in the image when root —
# provided here by the debian base; `su -` also resets the environment, so the
# command must not rely on exported variables.
run_as() {
if [ "$(id -u)" = 0 ]; then
su - www-data -s /bin/sh -c "$1"
else
sh -c "$1"
fi
}
if [ ! -f /usr/local/etc/php/php.ini ]; then
log "Initializing PHP configuration..."
cat <<EOF > /etc/php/7.4/fpm/php.ini
date.timezone = "${PHP_INI_DATE_TIMEZONE}"
memory_limit = ${PHP_MEMORY_LIMIT}
file_uploads = On
upload_max_filesize = ${PHP_MAX_UPLOAD}
post_max_size = ${PHP_MAX_UPLOAD}
max_execution_time = ${PHP_MAX_EXECUTION_TIME}
sendmail_path = /usr/sbin/sendmail -t -i
extension = calendar.so
EOF
fi
if [ ! -d /var/www/documents ]; then
log "Initializing Dolibarr documents directory..."
mkdir -p /var/www/documents
fi
log "Updating Dolibarr users and group..."
usermod -u "$WWW_USER_ID" www-data
groupmod -g "$WWW_GROUP_ID" www-data
log "Updating Dolibarr folder ownership..."
chown -R www-data:www-data /var/www
if [ ! -d /var/www/html/conf/ ]; then
log "Initializing Dolibarr HTML configuration directory..."
mkdir -p /var/www/html/conf/
fi
# Create a default config if autoconfig enabled
if [ -n "$DOLI_AUTO_CONFIGURE" ] && [ ! -f /var/www/html/conf/conf.php ]; then
log "Initializing Dolibarr HTML configuration..."
cat <<EOF > /var/www/html/conf/conf.php
<?php
// Config file for Dolibarr ${DOLI_VERSION} ($(date +%Y-%m-%dT%H:%M:%S%:z))
// ###################
// # Main parameters #
// ###################
\$dolibarr_main_url_root='${DOLI_URL_ROOT}';
\$dolibarr_main_document_root='/var/www/html';
\$dolibarr_main_url_root_alt='/custom';
\$dolibarr_main_document_root_alt='/var/www/html/custom';
\$dolibarr_main_data_root='/var/www/documents';
\$dolibarr_main_db_host='${DOLI_DB_HOST}';
\$dolibarr_main_db_port='${DOLI_DB_PORT}';
\$dolibarr_main_db_name='${DOLI_DB_NAME}';
\$dolibarr_main_db_prefix='${DOLI_DB_PREFIX}';
\$dolibarr_main_db_user='${DOLI_DB_USER}';
\$dolibarr_main_db_pass='${DOLI_DB_PASSWORD}';
\$dolibarr_main_db_type='${DOLI_DB_TYPE}';
\$dolibarr_main_db_character_set='${DOLI_DB_CHARACTER_SET}';
\$dolibarr_main_db_collation='${DOLI_DB_COLLATION}';
// ##################
// # Login #
// ##################
\$dolibarr_main_authentication='${DOLI_AUTH}';
\$dolibarr_main_auth_ldap_host='${DOLI_LDAP_HOST}';
\$dolibarr_main_auth_ldap_port='${DOLI_LDAP_PORT}';
\$dolibarr_main_auth_ldap_version='${DOLI_LDAP_VERSION}';
\$dolibarr_main_auth_ldap_servertype='${DOLI_LDAP_SERVERTYPE}';
\$dolibarr_main_auth_ldap_login_attribute='${DOLI_LDAP_LOGIN_ATTRIBUTE}';
\$dolibarr_main_auth_ldap_dn='${DOLI_LDAP_DN}';
\$dolibarr_main_auth_ldap_filter ='${DOLI_LDAP_FILTER}';
\$dolibarr_main_auth_ldap_admin_login='${DOLI_LDAP_ADMIN_LOGIN}';
\$dolibarr_main_auth_ldap_admin_pass='${DOLI_LDAP_ADMIN_PASS}';
\$dolibarr_main_auth_ldap_debug='${DOLI_LDAP_DEBUG}';
// ##################
// # Security #
// ##################
\$dolibarr_main_prod='${DOLI_PROD}';
\$dolibarr_main_force_https='${DOLI_HTTPS}';
\$dolibarr_main_restrict_os_commands='mysqldump, mysql, pg_dump, pgrestore';
\$dolibarr_nocsrfcheck='${DOLI_NO_CSRF_CHECK}';
\$dolibarr_main_cookie_cryptkey='$(openssl rand -hex 32)';
\$dolibarr_mailing_limit_sendbyweb='0';
EOF
chown www-data:www-data /var/www/html/conf/conf.php
chmod 766 /var/www/html/conf/conf.php
fi
# Detect Docker container version (ie. previous installed version)
installed_version="0.0.0.0"
if [ -f /var/www/documents/.docker-container-version ]; then
# shellcheck disable=SC2016
installed_version="$(cat /var/www/documents/.docker-container-version)"
fi
if [ -f /var/www/documents/install.version ]; then
# shellcheck disable=SC2016
installed_version="$(cat /var/www/documents/install.version)"
mv \
/var/www/documents/install.version \
/var/www/documents/.docker-container-version
fi
# Detect Docker image version (docker specific solution)
# shellcheck disable=SC2016
image_version="${DOLI_VERSION}"
if [ -f /usr/src/dolibarr/.docker-image-version ]; then
# shellcheck disable=SC2016
image_version="$(cat /usr/src/dolibarr/.docker-image-version)"
fi
if version_greater "$installed_version" "$image_version"; then
log "Can't start Dolibarr because the version of the data ($installed_version) is higher than the docker image version ($image_version) and downgrading is not supported. Are you sure you have pulled the newest image version?"
exit 1
fi
# Initialize image
# ---------------------------------------------------------------------------
# Dolibarr source sync / upgrade.
# Runs only when the image ships a newer Dolibarr than the installed volume.
# Relies on helpers defined earlier in this script: version_greater,
# directory_empty, run_as, log.
# ---------------------------------------------------------------------------
if version_greater "$image_version" "$installed_version"; then
    log "Dolibarr initialization..."
    # Only root may chown the synced files to www-data.
    if [ "$(id -u)" = 0 ]; then
        rsync_options="-rvlDog --chown www-data:root"
    else
        rsync_options="-rvlD"
    fi
    mkdir -p /var/www/scripts
    # NOTE: $rsync_options is intentionally unquoted so the option string
    # word-splits into separate arguments.
    rsync $rsync_options /usr/src/dolibarr/scripts/ /var/www/scripts/
    # Sync htdocs; user-managed conf/, custom/ and theme/ are handled below.
    rsync $rsync_options --delete --exclude /conf/ --exclude /custom/ --exclude /theme/ /usr/src/dolibarr/htdocs/ /var/www/html/
    # conf/ and custom/ are seeded only when missing or empty (never overwritten).
    for dir in conf custom; do
        if [ ! -d "/var/www/html/$dir" ] || directory_empty "/var/www/html/$dir"; then
            rsync $rsync_options --include "/$dir/" --exclude '/*' /usr/src/dolibarr/htdocs/ /var/www/html/
        fi
    done
    # The theme folder contains custom and official themes. We must copy even
    # if the folder is not empty, but not delete content either.
    for dir in theme; do
        rsync $rsync_options --include "/$dir/" --exclude '/*' /usr/src/dolibarr/htdocs/ /var/www/html/
    done
    if [ "$installed_version" != "0.0.0.0" ]; then
        # Call upgrade if needed
        # https://wiki.dolibarr.org/index.php/Installation_-_Upgrade#With_Dolibarr_.28standard_.zip_package.29
        log "Dolibarr upgrade from $installed_version to $image_version..."
        # The installer refuses to run while install.lock exists; drop it for
        # the duration of the upgrade and recreate it afterwards.
        if [ -f /var/www/documents/install.lock ]; then
            rm /var/www/documents/install.lock
        fi
        # Reduce x.y.z to x.y: the upgrade scripts take from/to as x.y.0.
        base_version=$(echo "${installed_version}" | sed -e 's|\(.*\..*\)\..*|\1|g')
        target_version=$(echo "${image_version}" | sed -e 's|\(.*\..*\)\..*|\1|g')
        run_as "cd /var/www/html/install/ && php upgrade.php ${base_version}.0 ${target_version}.0"
        run_as "cd /var/www/html/install/ && php upgrade2.php ${base_version}.0 ${target_version}.0"
        run_as "cd /var/www/html/install/ && php step5.php ${base_version}.0 ${target_version}.0"
        # FIX: write the lock file content with echo. The previous version
        # redirected the output of the `log` helper into the file, so the
        # lock file received the logger's formatted output (or nothing at
        # all, if `log` writes to stderr).
        echo 'This is a lock file to prevent use of install pages (generated by container entrypoint)' > /var/www/documents/install.lock
        chown www-data:www-data /var/www/documents/install.lock
        chmod 400 /var/www/documents/install.lock
    elif [ -n "$DOLI_AUTO_CONFIGURE" ] && [ ! -f /var/www/documents/install.lock ]; then
        log "Create forced values for first Dolibarr install..."
        # Pre-seed the web installer. Escaped \$ variables below are written
        # literally into the PHP file; unescaped ${...} expand now.
        cat <<EOF > /var/www/html/install/install.forced.php
<?php
// Forced install config file for Dolibarr ${DOLI_VERSION} ($(date +%Y-%m-%dT%H:%M:%S%:z))
/** @var bool Hide PHP informations */
\$force_install_nophpinfo = true;
/** @var int 1 = Lock and hide environment variables, 2 = Lock all set variables */
\$force_install_noedit = 2;
/** @var string Information message */
\$force_install_message = 'Dolibarr installation (Docker)';
/** @var string Data root absolute path (documents folder) */
\$force_install_main_data_root = '/var/www/documents';
/** @var bool Force HTTPS */
\$force_install_mainforcehttps = !empty('${DOLI_HTTPS}');
/** @var string Database name */
\$force_install_database = '${DOLI_DB_NAME}';
/** @var string Database driver (mysql|mysqli|pgsql|mssql|sqlite|sqlite3) */
\$force_install_type = '${DOLI_DB_TYPE}';
/** @var string Database server host */
\$force_install_dbserver = '${DOLI_DB_HOST}';
/** @var int Database server port */
\$force_install_port = '${DOLI_DB_PORT}';
/** @var string Database tables prefix */
\$force_install_prefix = '${DOLI_DB_PREFIX}';
/** @var string Database username */
\$force_install_databaselogin = '${DOLI_DB_USER}';
/** @var string Database password */
\$force_install_databasepass = '${DOLI_DB_PASSWORD}';
/** @var bool Force database user creation */
\$force_install_createuser = false;
/** @var bool Force database creation */
\$force_install_createdatabase = !empty('${DOLI_DB_ROOT_LOGIN}');
/** @var string Database root username */
\$force_install_databaserootlogin = '${DOLI_DB_ROOT_LOGIN}';
/** @var string Database root password */
\$force_install_databaserootpass = '${DOLI_DB_ROOT_PASSWORD}';
/** @var string Dolibarr super-administrator username */
\$force_install_dolibarrlogin = '${DOLI_ADMIN_LOGIN}';
/** @var bool Force install locking */
\$force_install_lockinstall = true;
/** @var string Enable module(s) (Comma separated class names list) */
\$force_install_module = '${DOLI_MODULES}';
EOF
        log "You shall complete Dolibarr install manually at '${DOLI_URL_ROOT}/install'"
    fi
fi
# Compatibility symlink: some tooling expects the web root at /var/www/htdocs.
if [ ! -d /var/www/htdocs ]; then
    log "Adding a symlink to /var/www/htdocs..."
    ln -s /var/www/html /var/www/htdocs
fi
# Fallback for the case where the version-sync branch above did not run.
if [ ! -d /var/www/scripts ]; then
    log "Initializing Dolibarr scripts directory..."
    # FIX: -r was missing; plain cp cannot copy a directory and would fail,
    # leaving /var/www/scripts absent.
    cp -r /usr/src/dolibarr/scripts /var/www/scripts
fi
# Record the now-installed version so the next start can compare against it.
if [ -f /var/www/documents/install.lock ]; then
    log "Updating Dolibarr installed version..."
    echo "$image_version" > /var/www/documents/.docker-container-version
fi
log "Serving Dolibarr...$@"
# Replace the shell so the CMD becomes PID 1 and receives container signals.
exec "$@"

View File

@@ -0,0 +1,60 @@
server {
    listen 80;
    listen [::]:80;
    add_header Referrer-Policy origin; # make sure outgoing links don't show the URL to the Matomo instance
    root /var/www/html;
    index index.php index.html;
    try_files $uri $uri/ =404;

    ## only allow accessing the following php files
    location ~ \.php$ {
        # regex to split $uri to $fastcgi_script_name and $fastcgi_path
        fastcgi_split_path_info ^(.+\.php)(/.+)$;
        fastcgi_index index.php;
        # Check that the PHP script exists before passing it
        try_files $fastcgi_script_name =404;
        # FIX: proxy_* timeouts belong to the proxy module and are ignored in
        # a fastcgi_pass location; use the fastcgi module's equivalents so
        # long-running PHP requests are actually allowed an hour.
        fastcgi_connect_timeout 3600;
        fastcgi_send_timeout 3600;
        fastcgi_read_timeout 3600;
        send_timeout 3600;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param PATH_INFO $fastcgi_path_info;
        fastcgi_param HTTP_PROXY ""; # prohibit httpoxy: https://httpoxy.org/
        fastcgi_pass 127.0.0.1:9000;
    }

    ## disable all access to the following directories
    location ~ /\.ht {
        deny all;
        return 403;
    }
    location ~ /\.git {
        deny all;
    }

    location ~ \.(gif|ico|jpg|png|svg|js|css|htm|html|mp3|mp4|wav|ogg|avi|ttf|eot|woff|woff2|json)$ {
        allow all;
        ## Cache images,CSS,JS and webfonts for an hour
        ## Increasing the duration may improve the load-time, but may cause old files to show after an Matomo upgrade
        expires 1h;
        add_header Pragma public;
        add_header Cache-Control "public";
    }

    location ~ /(libs|vendor|plugins|misc/user) {
        deny all;
        return 403;
    }

    ## properly display textfiles in root directory
    location ~/(.*\.md|LEGALNOTICE|LICENSE) {
        default_type text/plain;
    }
}
# vim: filetype=nginx

23
apps/dolibarr/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Tekton PipelineRun: build and push the Dolibarr image via the shared
# kaniko pipeline (defined elsewhere in the cluster).
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-dolibarr
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
    # Repository to clone and revision to build.
    - name: git-url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: git-revision
      value: master
    # Build context and Dockerfile inside the checkout.
    - name: path-to-image-context
      value: apps/dolibarr
    - name: path-to-dockerfile
      value: apps/dolibarr/Dockerfile
    # Target image in the local registry.
    - name: image-name
      value: cr.lan/dolibarr
  workspaces:
    - name: git-source
      persistentVolumeClaim:
        claimName: tektoncd-workspaces
      # Keep each app's checkout in its own subdirectory on the shared PVC.
      subPath: tekton/dolibarr

View File

@@ -11,6 +11,8 @@ metadata:
release: latest
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: gitea
@@ -24,7 +26,6 @@ spec:
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
env:
- name: USER_UID
value: "1000"
@@ -32,6 +33,38 @@ spec:
value: "1000"
- name: TZ
value: "Europe/Berlin"
- name: GITEA__lfs__PATH
value: /data/git/lfs
- name: DB_TYPE
value: postgres
- name: DB_HOST
value: postgres.live-env.svc.cluster.local:5432
- name: DB_NAME
value: gitea
- name: DB_USER
value: gitea
- name: DB_PASSWD
value: giteaEu94XSS4gKpheSBoMsIs
#- name: GITEA__indexer__ISSUE_INDEXER
#value: redis
#- name: GITEA__indexer__ISSUE_INDEXER_QUEUE_CONN_STR
#value: addrs=redis-standalone.live-env.svc.cluster.local:6379 db=1
- name: GITEA__packages__ENABLED
value: "true"
- name: GITEA__log__LEVEL
value: warn
- name: GITEA__log__MODE
value: file
- name: GITEA__log__ROUTER
value: file
- name: GITEA__log__MACARON
value: file
#- name: GITEA__queue__TYPE
#value: redis
#- name: GITEA__queue__CONN_STR
#value: redis://redis-standalone.live-env.svc.cluster.local:6397/0
- name: GITEA__server__ROOT_URL
value: http://git-ui.lan/
volumeMounts:
- name: gitea
mountPath: /data
@@ -43,20 +76,24 @@ spec:
containerPort: 22
protocol : TCP
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
port: http
readinessProbe:
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
port: http
resources:
requests:
memory: "256Mi"
cpu: "250m"
memory: "300Mi"
cpu: "150m"
limits:
memory: "1000Mi"
cpu: "500m"
memory: "512Mi"
cpu: "1000m"
volumes:
- name: gitea
persistentVolumeClaim:
@@ -69,7 +106,8 @@ metadata:
labels:
app: gitea
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin02
volumeName: gitea
accessModes:
- ReadWriteOnce
resources:
@@ -77,6 +115,26 @@ spec:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: gitea
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/gitea-data
server: ebin02
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: gitea
namespace: live-env
---
apiVersion: v1
kind: Service
metadata:
name: gitea
@@ -84,6 +142,7 @@ metadata:
app: gitea
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.2
ports:
- port: 3000
targetPort: http
@@ -96,16 +155,24 @@ spec:
app: gitea
release: latest
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
nginx.ingress.kubernetes.io/proxy-body-size: 512m
spec:
rules:
- host: git-ui.lan
http:
paths:
- backend:
serviceName: gitea
servicePort: http
path: /
- path: /
pathType: Prefix
backend:
service:
name: gitea
port:
number: 3000

34
apps/grav/Dockerfile Normal file
View File

@@ -0,0 +1,34 @@
FROM cr.lan/debian-stable-php-fpm

# Build-time only: silence apt prompts without baking the variable into the
# runtime environment (the old `ENV DEBIAN_FRONTEND noninteractive` leaked it
# into every container and used the deprecated space-separated form).
ARG DEBIAN_FRONTEND=noninteractive
# NOTE(review): GRAV_VERSION is currently unused by the build (grav itself is
# fetched by an initContainer at deploy time) -- kept for compatibility.
ARG GRAV_VERSION=1.7.34
ARG DEV_PKGS="zlib1g-dev libpng-dev libjpeg-dev libfreetype6-dev \
    libcurl4-gnutls-dev libxml2-dev libonig-dev"

# Install runtime packages and clean up in the SAME layer: the old Dockerfile
# cleaned up in a separate RUN, which cannot shrink the image because the apt
# cache was already committed in the previous layer.
RUN apt-get update && \
    apt-get install -y git bash procps wget unzip supervisor \
        php-fpm php-gd php-json php-curl php-dom php-xml php-yaml php-apcu \
        php-opcache php-simplexml php-zip php-mbstring cron && \
    apt-get remove -y --purge ${DEV_PKGS} exim4* && \
    apt-get autoremove --purge -y && \
    apt-get clean -y && \
    rm -rf /var/lib/apt/lists/* /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*

# Prepare directories for the www-data runtime user; the pool.d directory is
# replaced by a ConfigMap mount at deploy time.
RUN mkdir -p /var/www && chown www-data:www-data /var/www && \
    mkdir -p /run/php && \
    chown www-data:www-data /var/log /run/php && \
    mkdir -p /etc/php/7.4/fpm/pool.d

# Run the grav scheduler every minute as www-data (supervisord starts cron).
RUN (crontab -l; echo "* * * * * cd /var/www/grav;/usr/bin/php bin/grav scheduler 1>> /dev/null 2>&1") | crontab -u www-data -

# COPY instead of ADD: plain local files, no tar-extraction/URL behavior needed.
COPY docker-entrypoint.sh /
COPY supervisor.conf /etc/supervisor.conf

ENTRYPOINT ["/docker-entrypoint.sh"]
#USER www-data
#CMD ["dumb-init", "/usr/sbin/php-fpm7.3", "--nodaemonize", "--force-stderr"]
CMD ["supervisord", "-c", "/etc/supervisor.conf"]

3
apps/grav/README.md Normal file
View File

@@ -0,0 +1,3 @@
lighttpd is configured in etc_lighttpd
generate a configmap with:
kubectl create configmap grav-lighttpd-config --from-file etc_lighttpd/

111
apps/grav/deployment.yaml Normal file
View File

@@ -0,0 +1,111 @@
---
# grav CMS: php-fpm app container + nginx sidecar sharing one pages volume.
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: grav
spec:
  selector:
    matchLabels:
      app: grav
  strategy:
    # RWO volume: the old pod must release it before the new one can mount it.
    type: Recreate
  template:
    metadata:
      labels:
        app: grav
    spec:
      containers:
        # php-fpm application container (listens on 9000, see pool config).
        - image: cr.lan/grav:arm64
          name: grav
          imagePullPolicy: Always
          ports:
            - containerPort: 9000
              name: php-fpm
          volumeMounts:
            - name: grav-pages
              mountPath: /var/www/grav
            # ConfigMap replaces the image's php-fpm pool directory.
            - name: grav-etc-php-fpm-www-conf
              mountPath: /etc/php/7.4/fpm/pool.d
        # nginx sidecar: serves static files, forwards .php to the fpm
        # container over localhost:9000 (same pod network namespace).
        - image: nginx:alpine
          name: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
              name: http
          volumeMounts:
            - name: grav-nginx-proxy-config
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
            - name: grav-pages
              mountPath: /var/www/grav
      initContainers:
        # Seed/refresh the grav-admin tree on the shared volume. `cp -u` only
        # overwrites older files, and the bundled sample pages are stripped so
        # user content is preserved. 33:33 is www-data on Debian.
        # NOTE(review): assumes the busybox image provides unzip and wget with
        # HTTPS support -- confirm against the actual busybox build in use.
        - name: grav-install
          image: busybox
          command: ["/bin/sh"]
          args:
            - -c
            - >-
              wget -O /grav.zip "https://getgrav.org/download/core/grav-admin/latest" &&
              unzip -q /grav.zip &&
              rm -rf grav-admin/user/pages/* &&
              cp -ru grav-admin/* /workdir/ &&
              rm -rf /grav.zip &&
              rm -rf /grav-admin &&
              chown -R 33:33 /workdir/*
          volumeMounts:
            - name: grav-pages
              mountPath: /workdir
      volumes:
        - name: grav-pages
          persistentVolumeClaim:
            claimName: grav-pages
        - name: grav-nginx-proxy-config
          configMap:
            name: grav-nginx-proxy-config
        - name: grav-etc-php-fpm-www-conf
          configMap:
            name: grav-etc-php-fpm-www-conf
---
# Cluster-internal service in front of the nginx sidecar.
apiVersion: v1
kind: Service
metadata:
  name: grav
spec:
  ports:
    - name: http
      port: 80
  selector:
    app: grav
---
# Ingress: expose grav.lan through the nginx ingress controller.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grav
  annotations:
    kubernetes.io/ingress.class: nginx
    # Large uploads (media) through the admin UI.
    nginx.ingress.kubernetes.io/proxy-body-size: 512m
spec:
  rules:
    - host: grav.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: grav
                port:
                  name: http
---
# Persistent storage for the grav installation and user pages.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grav-pages
spec:
  storageClassName: nfs-ssd-ebin01
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 6Gi

5
apps/grav/docker-entrypoint.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
# Passthrough entrypoint: abort on error, then exec the image CMD so it
# becomes PID 1 and receives container stop signals directly.
set -e
exec "$@"

View File

@@ -0,0 +1,440 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
; listen = /run/php/php7.4-fpm.sock
listen = 127.0.0.1:9000
; Set listen(2) backlog.
; Default Value: 511 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
; or group is different than the master process user. It allows to create process
; core dump and ptrace the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 5
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following informations:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then informations are related to the
; last request the process has served. Otherwise informations are related to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php/7.4/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with embraces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be associated with embraces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by 'request_terminate_timeout' ini option is not engaged after
; application calls 'fastcgi_finish_request' or when application has finished and
; shutdown functions are being called (registered via register_shutdown_function).
; This option will enable timeout limit to be applied unconditionally
; even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environement, this can cause some delay in the page
; process time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with prefix and suffix containing information about
; the child that writes to the log and if stdout or stderr is used as well as
; log level and time. This options is used only if catch_workers_output is yes.
; Settings to "no" will output data as written to the stdout or stderr.
; Default value: yes
;decorate_workers_output = no
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M

68
apps/grav/nginx.conf.yaml Normal file
View File

@@ -0,0 +1,68 @@
# nginx sidecar configuration for the grav pod (mounted over /etc/nginx/nginx.conf).
apiVersion: v1
kind: ConfigMap
metadata:
  name: grav-nginx-proxy-config
data:
  nginx.conf: |-
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
      worker_connections 64;
    }
    http {
      include /etc/nginx/mime.types;
      default_type application/octet-stream;
      log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
      access_log off;
      #access_log /var/log/nginx/access.log main;
      sendfile on;
      keepalive_timeout 65;
      server {
        listen 80;
        server_name _;
        index index.html index.php;
        root /var/www/grav;
        ## Begin - Index
        # for subfolders, simply adjust:
        # `location /subfolder {`
        # and the rewrite to use `/subfolder/index.php`
        location / {
          try_files $uri $uri/ /index.php?$query_string;
        }
        ## End - Index
        ## Begin - Security
        # deny all direct access for these folders
        location ~* /(\.git|cache|bin|logs|backup|tests)/.*$ { return 403; }
        # deny running scripts inside core system folders
        location ~* /(system|vendor)/.*\.(txt|xml|md|html|yaml|yml|php|pl|py|cgi|twig|sh|bat)$ { return 403; }
        # deny running scripts inside user folder
        location ~* /user/.*\.(txt|md|yaml|yml|php|pl|py|cgi|twig|sh|bat)$ { return 403; }
        # deny access to specific files in the root folder
        location ~ /(LICENSE\.txt|composer\.lock|composer\.json|nginx\.conf|web\.config|htaccess\.txt|\.htaccess) { return 403; }
        ## End - Security
        ## Begin - PHP
        location ~ \.php$ {
          # fpm runs in the sibling container of the same pod -> localhost.
          fastcgi_pass 127.0.0.1:9000;
          fastcgi_split_path_info ^(.+\.php)(/.+)$;
          fastcgi_index index.php;
          include fastcgi_params;
          # FIX: no "/" between the variables -- $fastcgi_script_name already
          # starts with a slash, so the old form produced "//" script paths.
          fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        }
        ## End - PHP
      }
    }

14
apps/grav/supervisor.conf Normal file
View File

@@ -0,0 +1,14 @@
; Run cron and php-fpm side by side in one container under supervisord.
[supervisord]
; Stay in the foreground so the container's PID 1 does not exit.
nodaemon=true
[program:cron]
; FIX: -f keeps cron in the foreground. Debian cron daemonizes by default,
; which makes supervisord think the program died and respawn it repeatedly.
command=/usr/sbin/cron -f
; Stop the whole process group so running cron jobs don't linger.
killasgroup=true
stopasgroup=true
redirect_stderr=true
; cron needs root to switch to per-user crontabs (www-data's scheduler job).
user=root
[program:php-fpm]
; NOTE(review): the pool config sets user/group itself; a non-root FPM
; master cannot setuid -- confirm the pool starts cleanly as www-data.
command=/usr/sbin/php-fpm7.4 --nodaemonize --force-stderr
user=www-data

23
apps/grav/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Tekton PipelineRun: build and push the grav image via the shared
# kaniko pipeline (defined elsewhere in the cluster).
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  name: img-grav
spec:
  pipelineRef:
    name: kaniko-pipeline
  params:
    # Repository to clone and revision to build.
    - name: git-url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: git-revision
      value: master
    # Build context and Dockerfile inside the checkout.
    - name: path-to-image-context
      value: apps/grav
    - name: path-to-dockerfile
      value: apps/grav/Dockerfile
    # Target image in the local registry.
    - name: image-name
      value: cr.lan/grav
  workspaces:
    - name: git-source
      persistentVolumeClaim:
        claimName: tektoncd-workspaces
      # Keep each app's checkout in its own subdirectory on the shared PVC.
      subPath: tekton/grav

View File

@@ -1,94 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
replicas: 1
selector:
matchLabels:
app: hassio
release: latest
template:
metadata:
labels:
app: hassio
release: latest
spec:
containers:
- name: hassio
image: "homeassistant/home-assistant:latest"
imagePullPolicy: Always
volumeMounts:
- name: hassio-storage
mountPath: /.storage
ports:
- name: http
containerPort: 8123
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# resources:
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
volumes:
- name: hassio-storage
persistentVolumeClaim:
claimName: hassio-storage
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-storage
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: hassio
spec:
rules:
- host: hassio.lan
http:
paths:
- backend:
serviceName: hassio
servicePort: http
path: /

View File

@@ -11,7 +11,7 @@ spec:
selector:
app: mariadb
type: LoadBalancer
loadBalancerIP: 172.23.255.4
loadBalancerIP: 172.23.255.5
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
@@ -29,7 +29,7 @@ spec:
app: mariadb
spec:
containers:
- image: docker-registry.lan/mariadb:arm64
- image: cr.lan/mariadb
name: mariadb
imagePullPolicy: Always
env:
@@ -49,7 +49,7 @@ spec:
limits:
memory: "1500Mi"
cpu: "2000m"
- image: docker-registry.lan/mariadb-prometheus-exporter:arm64
- image: cr.lan/mariadb-prometheus-exporter
name: mariadb-prometheus-exporter
imagePullPolicy: Always
ports:
@@ -65,18 +65,37 @@ spec:
volumes:
- name: mariadb-persistent-storage
persistentVolumeClaim:
claimName: mariadb-pv-claim
claimName: mariadb-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pv-claim
annotations:
volume.beta.kubernetes.io/storage-class: nfs-ssd
name: mariadb-data
spec:
storageClassName: nfs-ssd
volumeName: mariadb-data
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storage: 40Gi
---
# Statically provisioned NFS PersistentVolume backing the mariadb-data PVC.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-data
spec:
  storageClassName: "nfs-ssd"
  nfs:
    path: /data/raid1-ssd/k8s-data/mariadb-data
    server: ebin01
  capacity:
    storage: 40Gi
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  # Retain so the on-disk data survives deletion of the claim.
  persistentVolumeReclaimPolicy: Retain
  # Reserve this PV for the mariadb-data claim only.
  claimRef:
    kind: PersistentVolumeClaim
    name: mariadb-data
    # fixed typo: was "namspace" — an unknown field, so the namespace
    # restriction was silently lost (or the manifest rejected under
    # strict validation) and the PV could bind to a same-named claim
    # in any namespace.
    namespace: live-env

View File

@@ -1,5 +1,5 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
RUN set -ex; \
apt-get update; \

View File

@@ -1,11 +1,13 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql
# https://bugs.debian.org/830696 (apt uses gpgv by default in newer releases, rather than gpg)
RUN set -ex; \
sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list; \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list; \
apt-get update; \
if ! which gpg; then \
apt-get install -y --no-install-recommends gnupg; \
@@ -93,6 +95,7 @@ RUN set -ex; \
| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#&/'; \
# don't reverse lookup hostnames, they are usually another container
echo '[mysqld]\nskip-host-cache\nskip-name-resolve' > /etc/mysql/conf.d/docker.cnf; \
mkdir -p /run/mysqld; \
apt-get clean -y;
VOLUME /var/lib/mysql

View File

@@ -0,0 +1,23 @@
# PipelineRun: build and push the mariadb prometheus exporter image.
# NOTE(review): this pushes cr.lan/mariadb-prometheus-node-exporter, but the
# mariadb Deployment in this repo pulls cr.lan/mariadb-prometheus-exporter —
# confirm which image name is canonical; as-is the deployment will never
# receive images built by this run.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mariadb-prometheus-node-exporter
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/mariadb/mariadb-prometheus
- name: path-to-dockerfile
value: apps/mariadb/mariadb-prometheus/Dockerfile
- name: image-name
value: cr.lan/mariadb-prometheus-node-exporter
workspaces:
# Shared workspace PVC; isolated per pipeline via subPath.
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mariadb-prometheus-node-exporter

View File

@@ -0,0 +1,23 @@
# PipelineRun: build and push the mariadb image through the shared kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mariadb
spec:
pipelineRef:
name: kaniko-pipeline
params:
# Source repository containing the build context.
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
# Context directory and Dockerfile, relative to the repo root.
- name: path-to-image-context
value: apps/mariadb/mariadb
- name: path-to-dockerfile
value: apps/mariadb/mariadb/Dockerfile
# Destination image reference in the local registry.
- name: image-name
value: cr.lan/mariadb
workspaces:
# Shared workspace PVC; isolated per pipeline via subPath.
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mariadb

View File

@@ -1,19 +1,16 @@
FROM debian:buster-slim
FROM cr.lan/debian-stable
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto mosquitto-clients procps && \
mosquitto procps && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Op port
EXPOSE 1883
# Stats port
#EXPOSE 9090
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/sbin/mosquitto", "-c", "/mosquitto/config/mosquitto.conf"]
CMD ["/usr/sbin/mosquitto", "-v", "-c", "/mosquitto/config/mosquitto.conf"]

View File

@@ -6,7 +6,6 @@ metadata:
app: mosquitto
release: mqtt
name: mqtt-mosquitto
namespace: default
spec:
replicas: 1
selector:
@@ -23,7 +22,7 @@ spec:
spec:
containers:
- name: mqtt-mosquitto
image: docker-registry.lan/mosquitto:arm64
image: cr.lan/mosquitto
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -63,7 +62,8 @@ spec:
name: mosquitto-data
subPath: mosquitto/data
- name: mosquitto-exporter
image: docker-registry.lan/mosquitto-exporter:arm64
image: cr.lan/mosquitto-prometheus-exporter
args: ["--endpoint", "tcp://mqtt.lan:1883"]
imagePullPolicy: Always
ports:
- containerPort: 9234
@@ -96,7 +96,6 @@ metadata:
labels:
app: mosquitto
release: mqtt
namespace: default
name: mqtt-mosquitto
spec:
externalTrafficPolicy: Cluster
@@ -117,13 +116,10 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
#annotations:
# volume.beta.kubernetes.io/storage-provisioner: nfs-storage
labels:
app: mosquitto
release: mqtt
name: mqtt-mosquitto
namespace: default
spec:
accessModes:
- ReadWriteOnce
@@ -137,7 +133,6 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: mqtt-mosquitto
namespace: default
labels:
app: mosquitto
release: mqtt
@@ -148,4 +143,3 @@ data:
port 1883
persistence true
persistence_location /mosquitto/data/

View File

@@ -0,0 +1,10 @@
# Build image for the sapcc mosquitto-exporter binary (arm64 target).
FROM cr.lan/debian-golang-stable
ENV GOARCH=arm64
# Keep module/source and build caches under /usr/src so a mounted workspace can reuse them.
ENV GOPATH=/usr/src/gopath
ENV GOCACHE=/usr/src/gocache
# Dump effective Go settings into the build log for debugging.
RUN go env
WORKDIR /usr/src
# Fetch the exporter sources (GOPATH mode: checked out under $GOPATH/src/...).
RUN go get github.com/sapcc/mosquitto-exporter
# Build from the checked-out source tree — the previous WORKDIR (/usr/src) has no
# Makefile. Fixed "make j4" -> "make -j4": the project has no target named "j4";
# -j4 is parallel make, matching the Tekton build task for this image.
# NOTE(review): assumes the base image's Go runs in GOPATH mode — confirm.
WORKDIR /usr/src/gopath/src/github.com/sapcc/mosquitto-exporter
RUN make -j4 build CGO_ENABLED=0
RUN ls -al

View File

@@ -0,0 +1,93 @@
# Git input resource: upstream sapcc/mosquitto-exporter sources.
# NOTE(review): PipelineResource (tekton.dev/v1alpha1) is deprecated in newer
# Tekton releases — confirm it still exists on the cluster's Tekton version.
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: github-mosquitto-prometheus-exporter
spec:
type: git
params:
- name: revision
value: master
- name: url
value: https://github.com/sapcc/mosquitto-exporter.git
---
# Image output resource: destination in the local registry.
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto-prometheus-exporter
---
# Task: compile the exporter binary, then package it with kaniko.
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto-prometheus-exporter
spec:
params:
# Dockerfile and context default to the root of the cloned git resource.
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
# Step 1: build the Go binary inside the cloned source tree (caches live on
# the /usr/src workspace, so rebuilds reuse GOPATH/GOCACHE).
- name: build-binary
image: cr.lan/debian-golang-stable
script: |
#!/usr/bin/env bash
cd $(resources.inputs.source.path)
ls -al
export GOARCH=arm64
export GOPATH=/usr/src/gopath
export GOCACHE=/usr/src/gocache
go env
go get github.com/sapcc/mosquitto-exporter
make -j4 build CGO_ENABLED=0
# Step 2: build and push the container image with kaniko.
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
# Local registry uses a self-signed/no TLS cert.
- --skip-tls-verify
workspaces:
# Shared scratch space for GOPATH/GOCACHE between runs.
- name: usr-src
mountPath: /usr/src
---
# TaskRun: wires the git input and image output resources into the task.
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-prometheus-exporter
spec:
taskRef:
name: build-mosquitto-prometheus-exporter
params:
# Dockerfile path relative to the kaniko context.
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: github-mosquitto-prometheus-exporter
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto-prometheus-exporter
workspaces:
- name: usr-src
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: usr_src

View File

@@ -0,0 +1,24 @@
# PipelineRun: build and push the mosquitto prometheus exporter image
# through the shared kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mosquitto-prometheus
spec:
pipelineRef:
name: kaniko-pipeline
params:
# Source repository containing the build context.
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
# Context directory and Dockerfile, relative to the repo root.
- name: path-to-image-context
value: apps/mosquitto/prometheus
- name: path-to-dockerfile
value: apps/mosquitto/prometheus/Dockerfile
# Destination image reference in the local registry.
- name: image-name
value: cr.lan/mosquitto-prometheus-exporter
workspaces:
# Shared workspace PVC; isolated per pipeline via subPath.
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mosquitto-prometheus

View File

@@ -0,0 +1,23 @@
# PipelineRun: build and push the mosquitto broker image through the
# shared kaniko pipeline.
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mosquitto
spec:
pipelineRef:
name: kaniko-pipeline
params:
# Source repository containing the build context.
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
# Context directory and Dockerfile, relative to the repo root.
- name: path-to-image-context
value: apps/mosquitto
- name: path-to-dockerfile
value: apps/mosquitto/Dockerfile
# Destination image reference in the local registry.
- name: image-name
value: cr.lan/mosquitto
workspaces:
# Shared workspace PVC; isolated per pipeline via subPath.
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mosquitto

86
apps/nextcloud/Dockerfile Normal file
View File

@@ -0,0 +1,86 @@
# Custom Nextcloud FPM image for the local cluster (nginx terminates HTTP elsewhere).
FROM nextcloud:24-fpm
# Force the entrypoint's update path; needed for some reason
ENV NEXTCLOUD_UPDATE=1
# Route apt through the local cache, install debug/runtime helpers, and clean up
# in the SAME layer — the original ran "apt-get clean" in a separate RUN, which
# cannot shrink the already-committed install layer.
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
    sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
    apt-get update && apt-get install -y \
        procps bash iputils-ping libmagickcore-6.q16-6-extra vim-tiny && \
    apt-get clean -y && \
    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Marker file so Nextcloud accepts the (empty) baked-in data directory as initialised.
RUN touch /usr/src/nextcloud/data/.ocdata
COPY config.php /usr/src/nextcloud/config/
#COPY htaccess-data /usr/src/nextcloud/data/.htaccess
#COPY apache-default-vhost.conf /etc/apache2/sites-available/000-default.conf
# .htaccess files are Apache-only; park them out of the way for the nginx/fpm setup.
RUN mv /usr/src/nextcloud/.htaccess /usr/src/nextcloud/.htaccess.bak
RUN mv /usr/src/nextcloud/config/.htaccess /usr/src/nextcloud/config/.htaccess.bak
#install ca.crt update script to the container
COPY post-start.sh /
RUN chmod +x /post-start.sh
#RUN set -ex; \
#    \
#    apt-get update; \
#    apt-get install -y --no-install-recommends \
#        ffmpeg \
#        libmagickcore-6.q16-6-extra \
#        procps \
#        smbclient \
#        supervisor \
##        libreoffice \
#    ; \
#    rm -rf /var/lib/apt/lists/*
#
#RUN set -ex; \
#    \
#    savedAptMark="$(apt-mark showmanual)"; \
#    \
#    apt-get update; \
#    apt-get install -y --no-install-recommends \
#        libbz2-dev \
#        libc-client-dev \
#        libkrb5-dev \
#        libsmbclient-dev \
#    ; \
#    \
#    docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \
#    docker-php-ext-install \
#        bz2 \
#        imap \
#    ; \
#    pecl install smbclient; \
#    docker-php-ext-enable smbclient; \
#    \
## reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
#    apt-mark auto '.*' > /dev/null; \
#    apt-mark manual $savedAptMark; \
#    ldd "$(php -r 'echo ini_get("extension_dir");')"/*.so \
#        | awk '/=>/ { print $3 }' \
#        | sort -u \
#        | xargs -r dpkg-query -S \
#        | cut -d: -f1 \
#        | sort -u \
#        | xargs -rt apt-mark manual; \
#    \
#    apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
#    apt-get clean -y; \
#    rm -rf /var/cache/apt/*; \
#    rm -rf /var/lib/apt/lists/*
#
#RUN mkdir -p \
#    /var/log/supervisord \
#    /var/run/supervisord \
#;
#RUN chown www-data:www-data \
#    /var/log/supervisord \
#    /var/run/supervisord;
#
#COPY supervisord.conf /
#
#CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]

2
apps/nextcloud/README.md Normal file
View File

@@ -0,0 +1,2 @@
One-time setup — create the required configmaps:

    kubectl -n live-env create configmap nextcloud-config --from-file=config.php
    kubectl -n live-env create configmap nextcloud-nginx-site --from-file=nginx-site.configmap.conf

Some files were not shown because too many files have changed in this diff Show More