160 Commits

Author SHA1 Message Date
688485987f antiaffinities and namespaces 2021-10-16 18:23:19 +02:00
f006923f61 I'm going slightly mad 2021-10-14 19:31:52 +02:00
1eb12be2e8 adapting memory requests 2021-10-07 11:31:38 +02:00
4fb80042a2 nc 21 again 2021-10-04 21:52:13 +02:00
fcbbf57031 postgres 13, forced; gitea with limits adapted; descheduler, still 2021-10-04 21:29:08 +02:00
6b8d34f88c nextcloud 22 2021-10-04 21:10:17 +02:00
82adc0d6ae migrated Ingresses to nginx 1.0.0 (https://blog.hycorve.com/migrating-from-ingress-networking-k8s-io-v1beta1-to-v1/) 2021-09-21 18:53:33 +02:00
90e89bf867 flannel 1.14 upgrade 2021-09-20 19:31:50 +02:00
6c7ba4385c postgres monitoring 2021-08-21 10:43:58 +02:00
7578dca854 refactored postgres 2021-08-21 10:26:07 +02:00
666db73722 adding exporter to postgres 2021-08-21 10:21:24 +02:00
8e3e8c0e45 trusted proxies 2021-08-20 12:36:29 +02:00
3108ceeebc config_is_read_only does not work 2021-08-20 12:13:44 +02:00
bb607f8774 config_is_read_only does not work 2021-08-20 12:13:18 +02:00
928a3a942a no more supervisor 2021-08-20 11:46:31 +02:00
da2c41de37 no more apache 2021-08-20 11:46:08 +02:00
8fc6757dbd nextcloud config from configmap 2021-08-20 11:45:44 +02:00
455f519fca yes, nginx we are 2021-08-19 20:44:29 +02:00
d1d240f4e3 going nginx/fpm 2021-08-19 15:14:00 +02:00
8dd75bef5d using fpm version 2021-08-19 08:31:10 +02:00
ed4cb41728 na, its to early 2021-08-17 10:35:48 +02:00
afb58dfacf running upgrade on init 2021-08-17 10:19:47 +02:00
8f4f586964 loki pv, installed via helm 2021-08-13 19:20:38 +02:00
07e6ec3779 addign magickcore with svg support 2021-08-10 08:56:26 +02:00
68255f12fd descheduler, I don't quite get it 2021-08-06 14:35:46 +02:00
2f0dfffc5c no 'full' nextcloud 2021-06-28 10:24:09 +02:00
b3919708e8 chown to www-data for supervisord dirs 2021-06-28 09:51:58 +02:00
08bbda1278 no access to stderr 2021-06-26 16:42:16 +02:00
f20dbd975f setting user to www-data 2021-06-26 11:48:01 +02:00
66eed92fd2 nextcloud 21 2021-06-25 15:56:53 +02:00
2ffeb004ba supervisord logs to stderr and pidfile in /tmp/ 2021-06-25 15:41:18 +02:00
2b1767271a vim and other pkgs removed 2021-06-25 14:56:21 +02:00
e10cfa28fe nextcloud full with supervisord and cron 2021-06-23 18:25:11 +02:00
c5c5605031 applied new paths 2021-06-23 18:10:55 +02:00
eb93167600 applied new paths 2021-06-23 18:10:13 +02:00
c83fdd990a removed tekton dashboard submod 2021-06-22 09:31:29 +02:00
2e1eb475e9 removed docker-registry-ui submod 2021-06-22 09:29:48 +02:00
20501a36d0 new rompr version 2021-06-21 15:59:03 +02:00
01255383e1 new rompr version 2021-06-21 15:56:00 +02:00
6201447116 refactored, noone needs the web folder 2021-05-28 19:45:35 +02:00
5e07cdc688 bucket update and no apps install yet 2021-05-20 11:36:28 +02:00
8e74a4c0ef no .htaccess but our own apache config 2021-05-15 19:20:24 +02:00
09a4c58638 we're still debugging 2021-05-14 21:39:59 +02:00
b6e45339f1 make firmware, it deploys that shit 2021-05-12 16:13:57 +02:00
cc0b6f92dc do we need the entrypoint and cmd 2021-05-12 15:59:48 +02:00
a15b29ad38 do we need the entrypoint and cmd 2021-05-12 15:46:04 +02:00
522899fd14 do we need the entrypoint and cmd 2021-05-12 15:25:30 +02:00
99185edd98 we need our own image, goddammit 2021-05-11 16:42:40 +02:00
8a7f54cf1b testing, realky. 2021-05-11 15:48:16 +02:00
fa3cd5ea7c debian testing image 2021-05-08 11:59:12 +02:00
60dbeadebc what the freaking f 2021-05-06 10:56:40 +02:00
219c25f8f4 no patch any more, we want the CA.crt 2021-05-05 13:15:24 +02:00
0e13b3446a dockerfile fix 2021-05-05 12:23:09 +02:00
0badcc15c6 adding ca.crt to pod (minio access) 2021-05-05 12:04:17 +02:00
c34600a47b adding ca.crt to pod (minio access) 2021-05-05 11:30:16 +02:00
84833b0e94 yes 2021-04-16 19:49:39 +02:00
4e45f3954c build-essential image 2021-04-11 21:52:18 +02:00
41f36deb08 build-essential image 2021-04-11 21:51:57 +02:00
0a42103c21 touch .ocdata 2021-04-10 21:46:11 +02:00
71157dfa19 hassio limits 2021-04-10 21:26:59 +02:00
437bf9a96f touch .ocdata 2021-04-10 20:53:07 +02:00
f1f0e0b98e debug for docker and our own image 2021-04-10 20:35:28 +02:00
29b7576c83 debug for docker and our own image 2021-04-10 20:34:43 +02:00
bd5a19fcb6 debug for docker and our own image 2021-04-10 20:16:46 +02:00
f47e96617f our own nextcloud image, of course! 2021-04-10 20:12:04 +02:00
ecc1041761 hdd ebin01 archive storagio 2021-04-10 16:17:12 +02:00
13cb0c1929 deschulder stops working, nfs-hdd-ebin01 storage class 2021-04-09 22:16:32 +02:00
741698526f systemd -dev libs 2021-03-24 19:32:02 +01:00
f740ce7ffa python-deps 2021-03-24 10:18:30 +01:00
223729647b sqlcipher includ 2021-03-24 08:45:49 +01:00
5a577afdd5 we actually need to fetch that thang! 2021-03-23 16:29:04 +01:00
c46975c112 we actually need to fetch that thang! 2021-03-23 16:09:04 +01:00
40c9df0bea we actually need to fetch that thang! 2021-03-23 15:46:03 +01:00
8773365477 we actually need to fetch that thang! 2021-03-23 15:29:03 +01:00
08f48796e8 npm install doesn't do a thing? 2021-03-23 15:16:40 +01:00
8f5d268fdc npm install doesn't do a thing? 2021-03-23 15:15:34 +01:00
7a1400e1f1 start.sh copying and starting fix, deployment 2021-03-23 14:38:37 +01:00
3506a17bc7 start.sh copying and starting fix, deployment 2021-03-23 11:10:01 +01:00
07ca7a7833 start.sh copying and starting fix, deployment 2021-03-23 10:19:07 +01:00
8365cebcb9 tensorboard? 2021-03-22 19:18:50 +01:00
7d50c1df5c tensorboard? 2021-03-22 19:03:29 +01:00
65e679cea6 npm, we need ! 2021-03-22 18:46:30 +01:00
deb3c618fe npm, we need ! 2021-03-22 18:43:31 +01:00
03938dc864 use apt-cache, you fool! 2021-03-22 18:36:26 +01:00
bb56ea5b39 tekton, baby! 2021-03-22 18:32:04 +01:00
f1c5493d95 verbose mosquitto 2021-03-19 21:36:00 +01:00
173c2a9d01 faster snapshots 2021-03-19 21:20:27 +01:00
b18dea273f procps 2021-03-19 21:20:08 +01:00
bfc01803a2 fixing some run issues and creating /rompr before volume 2021-03-19 14:23:48 +01:00
a625c7351e fixing some run issues and creating /rompr before volume 2021-03-19 14:04:32 +01:00
6e58e75668 migrated to debian 2021-03-19 12:34:01 +01:00
cc66ee9eae migrated to debian 2021-03-19 12:11:13 +01:00
aa55a0314e deployment adapt and tekton image-build 2021-03-19 12:00:30 +01:00
96ec8b5555 rompr tekton build 2021-03-19 11:04:06 +01:00
9431f73ead Dockerfile again 2021-03-14 10:14:19 +01:00
5912fd84f0 deprecate default StorageClass 2021-03-02 19:40:20 +01:00
f6a0f2af5e migrated to _sys/nfs-... 2021-03-02 19:38:59 +01:00
56fd09b49d nfs-client-provisioner, need tekton git status .! 2021-03-02 19:34:47 +01:00
4325eff624 new namespaces 2021-03-02 19:15:58 +01:00
ec3e530a36 new namespaces 2021-03-02 19:15:24 +01:00
eaa5d94a72 new namespaces 2021-03-02 19:15:11 +01:00
033118eb89 no deployment for debian-stable and descheduler policy update 2021-02-27 14:11:44 +01:00
8bf0fe5f10 removing sleep infinity 2021-02-27 13:24:12 +01:00
d26981cd78 distcc moved to _CI-CD 2021-02-23 21:26:21 +01:00
878359e846 also install g++ gnueabihf 2021-02-23 17:49:44 +01:00
a4b1be1bc2 removing unecessary archs 2021-02-22 19:52:33 +01:00
e9212313d6 debian-stable kept running 2021-02-22 19:20:27 +01:00
6582f5093a removing zeroconf /etc/distcc/hosts 2021-02-22 18:43:27 +01:00
f1fc18a594 building an debian-stable image 2021-02-22 13:45:27 +01:00
aea6550d6e building an debian-stable image 2021-02-22 13:37:55 +01:00
d4cc44a1ea building an debian-stable image 2021-02-22 13:13:10 +01:00
aaaf6fa29f building an debian-stable image 2021-02-22 13:08:48 +01:00
3538f407e8 mariadb and postgres and all of that 2021-02-20 22:40:49 +01:00
276e41fde2 obsolete 2021-02-19 22:35:43 +01:00
b25f6ca608 apt-cacher image doesn't use apt-cache.lan... yes,yes! 2021-02-19 22:19:00 +01:00
b66179023a debug 2021-02-19 22:17:47 +01:00
a397bdc71e debug 2021-02-19 22:06:09 +01:00
4ec401f348 debug 2021-02-19 22:03:29 +01:00
fd569d894a refactor _sys and namespaces 2021-02-19 22:02:19 +01:00
ed3e8cdddc apt-cacher image doesn't use apt-cache.lan... yes,yes! 2021-02-19 21:35:26 +01:00
9cda7c9f76 docker-reg-ui 2021-02-19 20:53:39 +01:00
c094e99451 obsolete 2021-02-19 20:53:25 +01:00
5a16e4cf40 no procps 2021-02-19 20:53:07 +01:00
be069c53bf distcc in tekton 2021-02-19 20:52:44 +01:00
ce329ca353 golang image 2021-02-18 23:52:00 +01:00
b45a4489fc golang image 2021-02-18 23:28:38 +01:00
1ac9cc0b4c stuff 2021-02-18 23:08:00 +01:00
38cac7a57f debian-golang image 2021-02-18 23:04:53 +01:00
9dd3b2b4e0 debian-golang image 2021-02-18 22:57:56 +01:00
0f6c04a0f0 debian-golang image 2021-02-18 22:55:40 +01:00
3a28bebcda mosquitto in tekton 2021-02-18 22:16:26 +01:00
f17dea5dff mosquitto in tekton 2021-02-18 22:16:12 +01:00
1a42071c26 mosquitto in tekton 2021-02-18 21:49:53 +01:00
0e79b36875 tekton pipelines for apps 2021-02-18 21:42:24 +01:00
c918c39e6e using apt-cache.lan 2021-02-18 21:08:56 +01:00
5932220ead Dockerfile again, because kaniko! 2021-02-18 21:03:57 +01:00
36fa98e78b CI-CD Stuff, mainly tekton 2021-02-18 20:55:44 +01:00
6b7b23dd71 tekton local configs 2021-02-18 20:55:20 +01:00
b131b76916 apt-cacher-ng in tekton 2021-02-18 20:54:53 +01:00
fb33950bc8 updated systems: descheduler runs at root.... 2021-02-18 20:54:24 +01:00
3970c20e3a updated systems 2021-02-18 20:54:06 +01:00
1cc1de7ed8 no more dockerfile, we're podmanning now :) 2021-02-10 14:58:10 +01:00
b91ea42a41 updates 2021-02-10 14:49:14 +01:00
f616346ac6 sweet caroline 2021-02-09 20:26:53 +01:00
315520baa6 static pvs for essential services 2021-01-24 00:20:55 +01:00
76c036fa79 static pvs 2021-01-21 12:56:33 +01:00
5ce8a3b5be all subs 2021-01-21 12:56:11 +01:00
0bdd4a2db0 all subs 2021-01-21 12:54:20 +01:00
76e516c7f3 pvs for grafana and prometheus 2021-01-21 12:53:10 +01:00
c7363d513e using flannel now 2021-01-21 12:52:46 +01:00
8d66cb1f66 postgres svc fix 2021-01-21 12:52:18 +01:00
f9269f2c2c persistent grafana/prometheus pvs 2021-01-21 10:06:01 +01:00
9b9b551907 new run 2021-01-20 15:33:26 +01:00
ab96839f50 removed external-storage 2021-01-20 15:32:30 +01:00
a3bd4349e2 nummer5 in wks 2021-01-07 21:50:45 +01:00
4dcb961e81 tekton for the masses 2020-12-08 17:12:45 +01:00
9561cb8d82 grav on php74/bullseye 2020-11-30 19:34:31 +01:00
2e3e37062a new and old scrapes 2020-11-19 18:38:25 +01:00
f85ff91873 doesn;t work yet 2020-11-12 18:03:29 +01:00
62cb2881c2 d-ui needs less resources 2020-11-11 21:00:58 +01:00
129 changed files with 9915 additions and 2030 deletions

.gitmodules

@@ -25,9 +25,6 @@
[submodule "mosquitto/charts"]
path = mosquitto/charts
url = https://github.com/smizy/charts.git
[submodule "external-storage"]
path = external-storage
url = https://github.com/kubernetes-incubator/external-storage.git
[submodule "mosquitto-exporter"]
path = mosquitto-exporter
url = https://github.com/sapcc/mosquitto-exporter.git
@@ -43,6 +40,12 @@
[submodule "csi-s3/node-driver-registrar"]
path = csi-s3/node-driver-registrar
url = https://github.com/kubernetes-csi/node-driver-registrar.git
[submodule "apps/postgresql/postgres_exporter"]
path = apps/postgresql/postgres_exporter
url = https://github.com/wrouesnel/postgres_exporter.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git
[submodule "_sys/haproxy-ingress"]
path = _sys/haproxy-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "nfs-subdir-external-provisioner"]
path = nfs-subdir-external-provisioner
url = https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
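After dropping a submodule from .gitmodules (external-storage above), existing checkouts keep stale metadata around. A sketch of the usual cleanup, assuming the module had been initialized before:

git submodule sync
git submodule deinit -f external-storage   # drop the stale working copy, if any
git rm --cached external-storage           # remove the leftover gitlink
git submodule update --init nfs-subdir-external-provisioner   # fetch the newly added module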


@@ -0,0 +1,9 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
golang make git && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
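The two sed lines route apt through the LAN cache (apt-cache.lan) before pulling in the Go toolchain. A quick local smoke test, assuming the podman workflow mentioned in the commit log (the tag mirrors the PipelineResource below):

podman build -t cr.lan/debian-golang-stable .
podman run --rm cr.lan/debian-golang-stable go version   # prints the Debian-packaged Go release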


@@ -0,0 +1,84 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-debian-golang-stable
spec:
type: image
params:
- name: url
value: cr.lan/debian-golang-stable
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-debian-golang
spec:
params:
- name: pathToContainerFile
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-golang/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-golang
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToContainerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
#workspaces:
# - name: workspace
# mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-debian-golang
spec:
taskRef:
name: build-debian-golang
params:
- name: pathToContainerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-debian-golang-stable
# workspaces:
# - name: workspace
# persistentVolumeClaim:
# claimName: tektoncd-workspaces
# subPath: workspaces
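PipelineResources are still v1alpha1 here and a TaskRun is a one-shot object, so rebuilding means deleting and re-applying it. A minimal sketch, assuming this manifest is saved as debian-golang.yaml and the tkn CLI is installed:

kubectl apply -f debian-golang.yaml       # the TaskRun starts immediately
tkn taskrun logs img-debian-golang -f     # follow the kaniko build-and-push step
# to rebuild: TaskRuns cannot be restarted in place
kubectl delete taskrun img-debian-golang && kubectl apply -f debian-golang.yaml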


@@ -0,0 +1,13 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping \
build-essential make ccache distcc-pump distcc g++ \
libncursesw5-dev && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]


@@ -0,0 +1,85 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-debian-stable-build-essential
spec:
type: image
params:
- name: url
value: cr.lan/debian-stable-build-essential
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-debian-stable-build-essential
spec:
params:
- name: pathToContainerFile
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-stable-build-essential/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-stable-build-essential
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToContainerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
#workspaces:
# - name: workspace
# mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-debian-stable-build-essential
spec:
taskRef:
name: build-debian-stable-build-essential
params:
- name: pathToContainerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-debian-stable-build-essential
# workspaces:
# - name: workspace
# persistentVolumeClaim:
# claimName: tektoncd-workspaces
# subPath: workspaces


@@ -0,0 +1,11 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]


@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"


@@ -0,0 +1,85 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-debian-stable
spec:
type: image
params:
- name: url
value: cr.lan/debian-stable
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-debian-stable
spec:
params:
- name: pathToContainerFile
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-stable/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-stable
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToContainerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
#workspaces:
# - name: workspace
# mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-debian-stable
spec:
taskRef:
name: build-debian-stable
params:
- name: pathToContainerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-debian-stable
# workspaces:
# - name: workspace
# persistentVolumeClaim:
# claimName: tektoncd-workspaces
# subPath: workspaces


@@ -0,0 +1,11 @@
FROM debian:testing-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
dnsutils procps nmap bash iputils-ping && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]


@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"


@@ -0,0 +1,85 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-debian-testing
spec:
type: image
params:
- name: url
value: cr.lan/debian-testing
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-debian-testing
spec:
params:
- name: pathToContainerFile
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-testing/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-testing
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToContainerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
#workspaces:
# - name: workspace
# mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-debian-testing
spec:
taskRef:
name: build-debian-testing
params:
- name: pathToContainerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-debian-testing
# workspaces:
# - name: workspace
# persistentVolumeClaim:
# claimName: tektoncd-workspaces
# subPath: workspaces

_CI-CD/distcc/Dockerfile

@@ -0,0 +1,22 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y \
gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf \
multiarch-support dpkg-dev distcc ccache \
build-essential gcc cpp g++ clang llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*; \
rm -rf /var/cache/apt/*; \
#removing distcc conf, no zeroconf
rm -fv /etc/distcc/hosts
# Op port
EXPOSE 3632
# Stats port
EXPOSE 3633
USER distccd
ENTRYPOINT /usr/bin/distccd --no-detach --daemon --stats --log-level error --log-stderr $OPTIONS
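With distccd listening on 3632 (jobs) and 3633 (stats), an in-cluster client only needs DISTCC_HOSTS pointed at the Service defined in the next file. A sketch, assuming the distcc Service lives in the default namespace (job count illustrative):

export DISTCC_HOSTS="distcc.default.svc.cluster.local"
make -j8 CC="distcc gcc" CXX="distcc g++"
curl -s http://distcc.default.svc.cluster.local:3633/   # plain-text counters from --stats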


@@ -1,27 +1,25 @@
apiVersion: apps/v1
kind: Deployment
kind: StatefulSet
metadata:
labels:
app: distcc
release: buster
release: stable
name: distcc
namespace: default
spec:
replicas: 3
serviceName: distcc
replicas: 4
selector:
matchLabels:
app: distcc
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: distcc
release: buster
release: stable
spec:
containers:
- name: distcc
image: docker-registry.lan/distcc:armhf
image: cr.lan/distcc
imagePullPolicy: Always
#env:
#- name: OPTIONS
@@ -35,12 +33,11 @@ spec:
protocol: TCP
resources:
limits:
cpu: 1
cpu: 4
memory: 128Mi
requests:
cpu: 1
cpu: 50m
memory: 64Mi
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
@@ -61,11 +58,9 @@ kind: Service
metadata:
labels:
app: distcc
release: buster
namespace: default
release: stable
name: distcc
spec:
externalTrafficPolicy: Cluster
ports:
- name: distcc-data
port: 3632
@@ -77,4 +72,3 @@ spec:
protocol: TCP
selector:
app: distcc
type: LoadBalancer


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-distcc
spec:
type: image
params:
- name: url
value: cr.lan/distcc
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-distcc
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/distcc/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/distcc
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-distcc
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-distcc
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-distcc


@@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: git-secret
type: Opaque
data:
token: Nzk1YTFhMGQxMWQ0MDJiY2FiOGM3MjkyZDk5ODIyMzg2NDNkM2U3OQo=
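The data value is plain base64; note the committed token ends in an encoded newline, the usual artifact of piping through echo instead of echo -n. Creating and inspecting it with kubectl avoids that (token placeholder hypothetical):

kubectl create secret generic git-secret --from-literal=token=<token> --dry-run=client -o yaml
kubectl get secret git-secret -o jsonpath='{.data.token}' | base64 -d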

_sys/README.md

@@ -0,0 +1,6 @@
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml


@@ -0,0 +1,47 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/15 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap
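The schedule fires every 15 minutes; for an immediate one-off eviction pass, the same pod template can be launched by hand (job name illustrative):

kubectl create job --from=cronjob/descheduler-cronjob descheduler-manual -n kube-system
kubectl logs -n kube-system job/descheduler-manual -f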


@@ -0,0 +1,35 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: descheduler-policy-configmap
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 30
"memory": 40
"pods": 10
targetThresholds:
"cpu": 50
"memory": 60
"pods": 20
nodeFit: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false
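Roughly, LowNodeUtilization treats a node as underutilized only when it sits below all three thresholds values (under 30% CPU, 40% memory, 10% pods) and as overutilized when it exceeds any targetThresholds value; pods are then evicted from overutilized nodes until they drop back under the targets, and nodeFit: true makes the descheduler first check that an evicted pod actually fits on another node.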


@@ -1,204 +0,0 @@
#https://raw.githubusercontent.com/haproxytech/kubernetes-ingress/master/deploy/haproxy-ingress.yaml
#https://www.haproxy.com/documentation/kubernetes/latest/installation/community/kubernetes/
#
# NOTES: Images are not from haproxytech, no arm64 imgs
---
apiVersion: v1
kind: Namespace
metadata:
name: haproxy-controller
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: haproxy-ingress-service-account
namespace: haproxy-controller
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: haproxy-ingress-cluster-role
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- services
- namespaces
- events
- serviceaccounts
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: haproxy-ingress-cluster-role-binding
namespace: haproxy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: haproxy-ingress-cluster-role
subjects:
- kind: ServiceAccount
name: haproxy-ingress-service-account
namespace: haproxy-controller
---
apiVersion: v1
kind: ConfigMap
metadata:
name: haproxy
namespace: haproxy-controller
data:
forwarded-for: "true"
load-balance: "leastconn"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: ingress-default-backend
name: ingress-default-backend
namespace: haproxy-controller
spec:
replicas: 1
selector:
matchLabels:
run: ingress-default-backend
template:
metadata:
labels:
run: ingress-default-backend
spec:
containers:
- name: ingress-default-backend
#image: gcr.io/google_containers/defaultbackend:1.4
image: starlingx4arm/defaultbackend:1.5-aarch64
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
labels:
run: ingress-default-backend
name: ingress-default-backend
namespace: haproxy-controller
spec:
selector:
run: ingress-default-backend
ports:
- name: port-1
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
namespace: haproxy-controller
spec:
replicas: 1
selector:
matchLabels:
run: haproxy-ingress
template:
metadata:
labels:
run: haproxy-ingress
spec:
serviceAccountName: haproxy-ingress-service-account
containers:
- name: haproxy-ingress
#image: haproxytech/kubernetes-ingress
image: bmanojlovic/kubernetes-ingress:latest
args:
- --configmap=haproxy-controller/haproxy
- --default-backend-service=haproxy-controller/ingress-default-backend
resources:
requests:
cpu: "500m"
memory: "50Mi"
livenessProbe:
httpGet:
path: /healthz
port: 1042
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
- name: stat
containerPort: 1024
env:
- name: TZ
value: "Etc/UTC"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: v1
kind: Service
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
namespace: haproxy-controller
spec:
selector:
run: haproxy-ingress
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
- name: stat
port: 1024
protocol: TCP
targetPort: 1024


@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
#namespace: nginx-ingress
namespace: default
data:
proxy-connect-timeout: "10s"
proxy-read-timeout: "10s"
client-max-body-size: "0"

_sys/ingress-nginx.yaml

@@ -0,0 +1,674 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.1
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
namespace: ingress-nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
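Note that the controller Service keeps loadBalancerIP 172.23.255.1, the address the now-deleted traefik-ingress-service further down was using, so external clients see the same entry point across the migration.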

_sys/kube-flannel.yml

@@ -0,0 +1,223 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.23.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.14.0
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
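net-conf.json pins the pod network to 172.23.0.0/16 with the vxlan backend; this has to match the pod CIDR the cluster was initialized with, and the loadBalancerIP 172.23.255.1 used by the ingress Service above appears to be drawn from the same range.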


@@ -1,53 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-router
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
rules:
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- nodes
- endpoints
verbs:
- list
- get
- watch
- apiGroups:
- "networking.k8s.io"
resources:
- networkpolicies
verbs:
- list
- get
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-router
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-router
subjects:
- kind: ServiceAccount
name: kube-router
namespace: kube-system


@@ -1,137 +0,0 @@
#https://gist.github.com/jjo/8c616aaf795284bb5b85d02143745f63
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-router-cfg
namespace: kube-system
labels:
tier: node
k8s-app: kube-router
data:
cni-conf.json: |
{
"cniVersion":"0.3.0",
"name":"mynet",
"plugins":[
{
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"isDefaultGateway":true,
"hairpinMode":true,
"ipam":{
"type":"host-local"
}
}
]
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-router
namespace: kube-system
labels:
k8s-app: kube-router
spec:
selector:
matchLabels:
k8s-app: kube-router
template:
metadata:
labels:
k8s-app: kube-router
spec:
priorityClassName: system-node-critical
containers:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
args:
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"
- "--bgp-graceful-restart=true"
- "--hairpin-mode=true"
- "--enable-cni=true"
- "--advertise-cluster-ip=true"
- "--advertise-external-ip=true"
- "--advertise-loadbalancer-ip=true"
- "--kubeconfig=/var/lib/kube-router/kubeconfig"
#- "--master=https://192.168.10.13:6443"
securityContext:
privileged: true
imagePullPolicy: Always
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: KUBE_ROUTER_CNI_CONF_FILE
value: /etc/cni/net.d/10-kuberouter.conflist
livenessProbe:
httpGet:
path: /healthz
port: 20244
initialDelaySeconds: 10
periodSeconds: 3
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kubeconfig
mountPath: /var/lib/kube-router/kubeconfig
readOnly: true
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
initContainers:
- name: install-cni
image: docker.io/cloudnativelabs/kube-router
imagePullPolicy: Always
command:
- /bin/sh
- -c
- set -e -x;
if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
if [ -f /etc/cni/net.d/*.conf ]; then
rm -f /etc/cni/net.d/*.conf;
fi;
TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
cp /etc/kube-router/cni-conf.json ${TMP};
mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
fi
volumeMounts:
- name: cni-conf-dir
mountPath: /etc/cni/net.d
- name: kube-router-cfg
mountPath: /etc/kube-router
hostNetwork: true
serviceAccountName: kube-router
serviceAccount: kube-router
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: cni-conf-dir
hostPath:
path: /etc/cni/net.d
- name: kube-router-cfg
configMap:
name: kube-router-cfg
- name: kubeconfig
hostPath:
path: /var/lib/kube-router/kubeconfig
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate


@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: loki-data
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/loki-data
server: ebin02
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: storage-loki-0
namespace: monitoring
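The claimRef pre-binds this volume to the PVC the Loki Helm chart creates for its StatefulSet (storage-loki-0 in monitoring), so the statically provisioned NFS export is used instead of leaving the claim to dynamic provisioning.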


@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-openwrt
type: Opaque
data:
username: b3BlbndydAo=
password: ZUZWbmVnOEkwOE1zRTN0Q2VCRFB4c011OU0yVjJGdnkK
endpoint: aHR0cHM6Ly9taW5pby5saXZlLWluZnJhLnN2Yy5jbHVzdGVyLmxvY2FsOjk0NDMK


@@ -0,0 +1,36 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd
provisioner: nfs-ssd # or choose another name; must match the deployment's PROVISIONER_NAME env
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin01
provisioner: nfs-ssd-ebin01 # or choose another name; must match the deployment's PROVISIONER_NAME env
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-hdd-ebin01
provisioner: nfs-hdd-ebin01 # or choose another name; must match the deployment's PROVISIONER_NAME env
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin02
provisioner: nfs-ssd-ebin02 # or choose another name; must match the deployment's PROVISIONER_NAME env
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
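A claim opts into one of these classes by name; a minimal sketch against nfs-ssd-ebin02 (claim name and size illustrative):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-data
spec:
  storageClassName: nfs-ssd-ebin02
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF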


@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-hdd-ebin01
namespace: live-infra
labels:
app: nfs-hdd-ebin01
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-hdd-ebin01
template:
metadata:
labels:
app: nfs-hdd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-hdd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-hdd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-hdd/k8s-data
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-hdd/k8s-data


@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin01
namespace: live-infra
labels:
app: nfs-ssd-ebin01
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin01
template:
metadata:
labels:
app: nfs-ssd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-ssd/k8s-data


@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin02
namespace: live-infra
labels:
app: nfs-ssd-ebin02
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin02
template:
metadata:
labels:
app: nfs-ssd-ebin02
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin02
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin02
- name: NFS_SERVER
value: ebin02
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin02
path: /data/raid1-ssd/k8s-data
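All three provisioner Deployments carry the label service: nfs plus a required podAntiAffinity on kubernetes.io/hostname, so no two NFS provisioners schedule onto the same node; replicas: 1 with strategy: Recreate also forces a clean stop-then-start on updates.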


@@ -0,0 +1,65 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: live-infra
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: live-infra
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: live-infra
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: live-infra
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: live-infra
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,19 @@
apiVersion: v1
kind: Namespace
metadata:
  name: live-env
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-env
---
apiVersion: v1
kind: Namespace
metadata:
  name: live-infra
---
apiVersion: v1
kind: Namespace
metadata:
  name: test-infra


@@ -1,59 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
        - image: traefik:v1.7
          name: traefik-ingress-lb
          ports:
            - name: http
              containerPort: 80
            - name: admin
              containerPort: 8080
          args:
            - --api
            - --kubernetes
            - --loglevel=ERROR
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
  annotations:
    kuber-router.io/service.hairpin: ""
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
    - protocol: TCP
      port: 80
      name: web
    - protocol: TCP
      port: 8080
      name: admin
  type: LoadBalancer
  loadBalancerIP: 172.23.255.1


@@ -0,0 +1,37 @@
FROM debian:bullseye
ENV DEBIAN_FRONTEND noninteractive
ARG DEVPKGS="git make cmake gcc g++ python-dev libsqlcipher-dev"
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get -y install ${DEVPKGS} python3-pip python3-torch python3-dateutil python3-filelock python3-tqdm python3-pyparsing python3-joblib \
python3-portalocker python3-click python3-packaging python3-regex python3-docopt python3-systemd \
libsystemd-dev graphicsmagick zip unzip bubblewrap sqlcipher gettext nodejs npm && \
pip3 install tensorboardX && \
pip3 install 'git+https://github.com/stanford-oval/genienlp@0969c6ea74376b20982c0c8bea9a4732547b15cb#egg=genienlp' && \
git clone --depth=1 --branch v1.99.0 https://github.com/stanford-oval/almond-cloud.git /opt/almond-cloud
# Setup
RUN useradd -ms /bin/bash -r almond-cloud && id almond-cloud
WORKDIR /opt/almond-cloud/
RUN chown -R almond-cloud:almond-cloud /opt/almond-cloud && \
echo "build_from_source = true" > ~almond-cloud/.npmrc && \
echo "sqlite = external" >> ~almond-cloud/.npmrc && \
echo "sqlite_libname = sqlcipher" >> ~almond-cloud/.npmrc && \
echo "======== package.json ============="; cat package.json && \
su almond-cloud -c 'CPLUS_INCLUDE_PATH=/usr/include/sqlcipher npm install' && \
chown -R root:root /opt/almond-cloud
COPY --chown=almond-cloud:almond-cloud start.sh /opt/almond-cloud/
# Cleanup
RUN apt-get remove -y --purge ${DEVPKGS} && \
apt-get autoremove --purge -y && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/* /root/.cache
USER almond-cloud
WORKDIR /home/almond-cloud
ENTRYPOINT ["/opt/almond-cloud/start.sh"]


@@ -0,0 +1,67 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: almond-cloud
spec:
  selector:
    matchLabels:
      app: almond-cloud
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: almond-cloud
    spec:
      containers:
        - image: cr.lan/almond-cloud
          name: almond-cloud
          imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
          volumeMounts:
            - name: almond-cloud-data
              mountPath: /home/almond-cloud
      volumes:
        - name: almond-cloud-data
          persistentVolumeClaim:
            claimName: almond-cloud-data
---
apiVersion: v1
kind: Service
metadata:
  name: almond-cloud
spec:
  ports:
    - name: http
      port: 3000
  selector:
    app: almond-cloud
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: almond-cloud
spec:
  rules:
    - host: almond.lan
      http:
        paths:
          - backend:
              serviceName: almond-cloud
              servicePort: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: almond-cloud-data
spec:
  storageClassName: nfs-ssd-ebin01
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 6Gi

apps/almond-cloud/start.sh (new executable file)

@@ -0,0 +1,3 @@
#!/bin/bash
NODE_MAX_OLD_SPACE_SIZE=${NODE_MAX_OLD_SPACE_SIZE:-500}
exec node --max_old_space_size=${NODE_MAX_OLD_SPACE_SIZE} /opt/almond-cloud/main.js "$@"
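Since start.sh reads NODE_MAX_OLD_SPACE_SIZE from the environment, the Node heap cap can be raised per deployment without rebuilding the image; a sketch (the value is an assumption, tune it to the pod's memory limit):

# added to the almond-cloud container spec in the Deployment above
env:
  - name: NODE_MAX_OLD_SPACE_SIZE
    value: "900"   # assumed value; the script defaults to 500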


@@ -0,0 +1,77 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-almond-cloud
spec:
  type: image
  params:
    - name: url
      value: cr.lan/almond-cloud
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-almond-cloud
spec:
  params:
    - name: pathToDockerFile
      type: string
      default: $(resources.inputs.source.path)/apps/almond-cloud/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/apps/almond-cloud
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToDockerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --snapshotMode=redo
        - --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-almond-cloud-taskrun
spec:
  #serviceAccountName: dockerhub-service
  taskRef:
    name: build-almond-cloud
  params:
    - name: pathToDockerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-almond-cloud
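A TaskRun with a fixed name runs exactly once; re-applying the same manifest does not trigger a rebuild. A rerunnable sketch (assumption: submitted with 'kubectl create -f' so each run gets a fresh name):

apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  generateName: img-almond-cloud-taskrun-   # new name per submission
spec:
  taskRef:
    name: build-almond-cloud
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-almond-cloud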


@@ -1,17 +1,12 @@
FROM debian:stable-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
apt-cacher-ng procps && \
apt-cacher-ng && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
RUN echo 'PassThroughPattern: .*' >> /etc/apt-cacher-ng/acng.conf
CMD chown apt-cacher-ng:apt-cacher-ng /var/cache/apt-cacher-ng
EXPOSE 3142
USER apt-cacher-ng
#CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
CMD /usr/sbin/apt-cacher-ng -c /etc/apt-cacher-ng pidfile=/var/run/apt-cacher-ng/pid SocketPath=/var/run/apt-cacher-ng/socket foreground=1


@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: apt-cacher-ng
image: docker-registry.lan/apt-cacher-ng:arm64
image: cr.lan/apt-cacher-ng:latest
ports:
- containerPort: 3142
protocol: TCP
@@ -27,10 +27,10 @@ spec:
name: data
resources:
requests:
memory: "24Mi"
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
memory: "192Mi"
cpu: "100m"
volumes:
- name: data
@@ -52,25 +52,29 @@ spec:
selector:
app: apt-cacher-ng
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apt-cacher-ng
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: apt-cache.lan
http:
paths:
- backend:
serviceName: apt-cacher-ng
servicePort: 3142
- path: /
pathType: Prefix
backend:
service:
name: apt-cacher-ng
port:
number: 3142
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume
#annotations:
# volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: nfs-ssd
accessModes:


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-apt-cacher-ng
spec:
  type: image
  params:
    - name: url
      value: cr.lan/apt-cacher-ng
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-apt-cacher-ng
spec:
  params:
    - name: pathToDockerFile
      type: string
      default: $(resources.inputs.source.path)/apps/apt-cacher-ng/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/apps/apt-cacher-ng
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToDockerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-apt-cacher-ng
spec:
  #serviceAccountName: dockerhub-service
  taskRef:
    name: build-apt-cacher-ng
  params:
    - name: pathToDockerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-apt-cacher-ng


@@ -1,464 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: argo
---
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterworkflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: ClusterWorkflowTemplate
listKind: ClusterWorkflowTemplateList
plural: clusterworkflowtemplates
shortNames:
- clusterwftmpl
- cwft
singular: clusterworkflowtemplate
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cronworkflows.argoproj.io
spec:
group: argoproj.io
names:
kind: CronWorkflow
listKind: CronWorkflowList
plural: cronworkflows
shortNames:
- cwf
- cronwf
singular: cronworkflow
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflows.argoproj.io
spec:
additionalPrinterColumns:
- JSONPath: .status.phase
description: Status of the workflow
name: Status
type: string
- JSONPath: .status.startedAt
description: When the workflow was started
format: date-time
name: Age
type: date
group: argoproj.io
names:
kind: Workflow
listKind: WorkflowList
plural: workflows
shortNames:
- wf
singular: workflow
scope: Namespaced
subresources: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowTemplate
listKind: WorkflowTemplateList
plural: workflowtemplates
shortNames:
- wftmpl
singular: workflowtemplate
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: argo-aggregate-to-admin
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: argo-aggregate-to-edit
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: argo-aggregate-to-view
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argo-cluster-role
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- create
- apiGroups:
- argoproj.io
resources:
- workflowtemplates
- workflowtemplates/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- argoproj.io
resources:
- cronworkflows
- cronworkflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argo-server-cluster-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
- workflowtemplates
- cronworkflows
- clusterworkflowtemplates
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argo-cluster-role
subjects:
- kind: ServiceAccount
name: argo
namespace: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argo-server-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argo-server-cluster-role
subjects:
- kind: ServiceAccount
name: argo-server
namespace: argo
---
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
---
apiVersion: v1
kind: Service
metadata:
name: argo-server
spec:
ports:
- name: web
port: 2746
targetPort: 2746
selector:
app: argo-server
---
apiVersion: v1
kind: Service
metadata:
name: workflow-controller-metrics
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: 9090
selector:
app: workflow-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argo-server
spec:
selector:
matchLabels:
app: argo-server
template:
metadata:
labels:
app: argo-server
spec:
containers:
- args:
- server
image: argoproj/argocli:latest
name: argo-server
ports:
- containerPort: 2746
name: web
readinessProbe:
httpGet:
path: /
port: 2746
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 20
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: argo-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: workflow-controller
spec:
selector:
matchLabels:
app: workflow-controller
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
- --executor-image
- argoproj/argoexec:latest
command:
- workflow-controller
image: argoproj/workflow-controller:latest
name: workflow-controller
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: argo

apps/argocd/README.md (new file)

@@ -0,0 +1,7 @@
Source: https://tanzu.vmware.com/developer/guides/ci-cd/argocd-gs/
# kubectl apply -f namespace.yaml
# superseded by the local copy: kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# kubectl apply -n argocd -f install.yaml (needs changes for ARM builds)
# kubectl apply -n argocd -f ingress.yaml

apps/argocd/ingress.yaml (new file)

@@ -0,0 +1,18 @@
#https://argoproj.github.io/argo-cd/operator-manual/ingress/#kubernetesingress-nginx
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: argocd-server
  namespace: argocd
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
  rules:
    - host: argocd.lan
      http:
        paths:
          - backend:
              serviceName: argocd-server
              servicePort: https
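This file still uses the old extensions/v1beta1 API; a v1 equivalent, matching the migration applied to the other Ingresses in this commit, would look roughly like:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server
  namespace: argocd
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
  rules:
    - host: argocd.lan
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: argocd-server
                port:
                  name: https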

apps/argocd/install.yaml (new file; diff suppressed because it is too large)


@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: test
+  name: argocd


@@ -0,0 +1,675 @@
# yamllint disable rule:comments-indentation
---
###############################################################################
# Authelia Configuration #
###############################################################################
## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to
## the system certificates store.
## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem.
# certificates_directory: /config/certificates
## The theme to display: light, dark, grey, auto.
theme: dark
## The secret used to generate JWT tokens when validating user identity by email confirmation. JWT Secret can also be
## set using a secret: https://www.authelia.com/docs/configuration/secrets.html
jwt_secret: a_very_important_secret2
## Default redirection URL
##
## If user tries to authenticate without any referer, Authelia does not know where to redirect the user to at the end
## of the authentication process. This parameter allows you to specify the default redirection URL Authelia will use
## in such a case.
##
## Note: this parameter is optional. If not provided, user won't be redirected upon successful authentication.
default_redirection_url: http://nc.lan
##
## Server Configuration
##
server:
## The address to listen on.
host: 0.0.0.0
## The port to listen on.
port: 9091
## Set the single level path Authelia listens on.
## Must be alphanumeric chars and should not contain any slashes.
path: ""
## Buffers usually should be configured to be the same value.
## Explanation at https://www.authelia.com/docs/configuration/server.html
## Read buffer size adjusts the server's max incoming request size in bytes.
## Write buffer size does the same for outgoing responses.
read_buffer_size: 4096
write_buffer_size: 4096
## Enables the pprof endpoint.
enable_pprof: false
## Enables the expvars endpoint.
enable_expvars: false
## Disables writing the health check vars to /app/.healthcheck.env which makes healthcheck.sh return exit code 0.
## This is disabled by default if either /app/.healthcheck.env or /app/healthcheck.sh do not exist.
disable_healthcheck: false
## Authelia by default doesn't accept TLS communication on the server port. This section overrides this behaviour.
tls:
## The path to the DER base64/PEM format private key.
key: ""
## The path to the DER base64/PEM format public certificate.
certificate: ""
##
## Log Configuration
##
log:
## Level of verbosity for logs: info, debug, trace.
level: debug
## Format the logs are written as: json, text.
format: text
## File path where the logs will be written. If not set logs are written to stdout.
file_path: /config-nfs/authelia.log
## Whether to also log to stdout when a log_file_path is defined.
# keep_stdout: false
##
## TOTP Configuration
##
## Parameters used for TOTP generation.
totp:
## The issuer name displayed in the Authenticator application of your choice
## See: https://github.com/google/google-authenticator/wiki/Key-Uri-Format for more info on issuer names
issuer: auth.lan
## The period in seconds a one-time password is current for. Changing this will require all users to register
## their TOTP applications again. Warning: before changing period read the docs link below.
period: 30
## The skew controls the number of one-time passwords either side of the current one that are valid.
## Warning: before changing skew read the docs link below.
skew: 1
## See: https://www.authelia.com/docs/configuration/one-time-password.html#period-and-skew to read the documentation.
##
## Duo Push API Configuration
##
## Parameters used to contact the Duo API. Those are generated when you protect an application of type
## "Partner Auth API" in the management panel.
duo_api:
hostname: api.auth.lan
integration_key: AUTHELIA
## Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
secret_key: 8Jp5e822KP
##
## Authentication Backend Provider Configuration
##
## Used for verifying user passwords and retrieve information such as email address and groups users belong to.
##
## The available providers are: `file`, `ldap`. You must use only one of these providers.
authentication_backend:
## Disable both the HTML element and the API for reset password functionality.
disable_reset_password: false
## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation.
## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will
## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP.
## To force update on every request you can set this to '0' or 'always', this will increase processor demand.
## See the below documentation for more information.
## Duration Notation docs: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
## Refresh Interval docs: https://www.authelia.com/docs/configuration/authentication/ldap.html#refresh-interval
refresh_interval: 5m
##
## LDAP (Authentication Provider)
##
## This is the recommended Authentication Provider in production
## because it allows Authelia to offload the stateful operations
## onto the LDAP service.
# ldap:
# ## The LDAP implementation, this affects elements like the attribute utilised for resetting a password.
# ## Acceptable options are as follows:
# ## - 'activedirectory' - For Microsoft Active Directory.
# ## - 'custom' - For custom specifications of attributes and filters.
# ## This currently defaults to 'custom' to maintain existing behaviour.
# ##
# ## Depending on the option here certain other values in this section have a default value, notably all of the
# ## attribute mappings have a default value that this config overrides, you can read more about these default values
# ## at https://www.authelia.com/docs/configuration/authentication/ldap.html#defaults
# implementation: custom
#
# ## The url to the ldap server. Format: <scheme>://<address>[:<port>].
# ## Scheme can be ldap or ldaps in the format (port optional).
# url: ldap://127.0.0.1
#
# ## The dial timeout for LDAP.
# timeout: 5s
#
# ## Use StartTLS with the LDAP connection.
# start_tls: false
#
# tls:
# ## Server Name for certificate validation (in case it's not set correctly in the URL).
# # server_name: ldap.example.com
#
# ## Skip verifying the server certificate (to allow a self-signed certificate).
# ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
# ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
# skip_verify: false
#
# ## Minimum TLS version for either Secure LDAP or LDAP StartTLS.
# minimum_version: TLS1.2
#
# ## The distinguished name of the container searched for objects in the directory information tree.
# ## See also: additional_users_dn, additional_groups_dn.
# base_dn: dc=example,dc=com
#
# ## The attribute holding the username of the user. This attribute is used to populate the username in the session
# ## information. It was introduced due to #561 to handle case insensitive search queries. For your information,
# ## Microsoft Active Directory usually uses 'sAMAccountName' and OpenLDAP usually uses 'uid'. Beware that this
# ## attribute holds the unique identifiers for the users binding the user and the configuration stored in database.
# ## Therefore only single value attributes are allowed and the value must never be changed once attributed to a user
# ## otherwise it would break the configuration for that user. Technically, non-unique attributes like 'mail' can also
# ## be used but we don't recommend using them, we instead advise to use the attributes mentioned above
# ## (sAMAccountName and uid) to follow https://www.ietf.org/rfc/rfc2307.txt.
# # username_attribute: uid
#
# ## The additional_users_dn is prefixed to base_dn and delimited by a comma when searching for users.
# ## i.e. with this set to OU=Users and base_dn set to DC=a,DC=com; OU=Users,DC=a,DC=com is searched for users.
# additional_users_dn: ou=users
#
# ## The users filter used in search queries to find the user profile based on input filled in login form.
# ## Various placeholders are available in the user filter which you can read about in the documentation which can
# ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#users-filter-replacements
# ##
# ## Recommended settings are as follows:
# ## - Microsoft Active Directory: (&({username_attribute}={input})(objectCategory=person)(objectClass=user))
# ## - OpenLDAP:
# ## - (&({username_attribute}={input})(objectClass=person))
# ## - (&({username_attribute}={input})(objectClass=inetOrgPerson))
# ##
# ## To allow sign in both with username and email, one can use a filter like
# ## (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))
# users_filter: (&({username_attribute}={input})(objectClass=person))
#
# ## The additional_groups_dn is prefixed to base_dn and delimited by a comma when searching for groups.
# ## i.e. with this set to OU=Groups and base_dn set to DC=a,DC=com; OU=Groups,DC=a,DC=com is searched for groups.
# additional_groups_dn: ou=groups
#
# ## The groups filter used in search queries to find the groups based on relevant authenticated user.
# ## Various placeholders are available in the groups filter which you can read about in the documentation which can
# ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#groups-filter-replacements
# ##
# ## If your groups use the `groupOfUniqueNames` structure use this instead:
# ## (&(uniqueMember={dn})(objectClass=groupOfUniqueNames))
# groups_filter: (&(member={dn})(objectClass=groupOfNames))
#
# ## The attribute holding the name of the group.
# # group_name_attribute: cn
#
# ## The attribute holding the mail address of the user. If multiple email addresses are defined for a user, only the
# ## first one returned by the LDAP server is used.
# # mail_attribute: mail
#
# ## The attribute holding the display name of the user. This will be used to greet an authenticated user.
# # display_name_attribute: displayName
#
# ## The username and password of the admin user.
# user: cn=admin,dc=example,dc=com
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: password
#
##
## File (Authentication Provider)
##
## With this backend, the users database is stored in a file which is updated when users reset their passwords.
## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia
## from being scaled to more than one instance. The options under 'password' have sane defaults, and as it has security
## implications it is highly recommended you leave the default values. Before considering changing these settings
## please read the docs page below:
## https://www.authelia.com/docs/configuration/authentication/file.html#password-hash-algorithm-tuning
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
file:
path: /config-nfs/users_database.yml
password:
algorithm: argon2id
iterations: 1
key_length: 32
salt_length: 16
memory: 1024
parallelism: 8
##
## Access Control Configuration
##
## Access control is a list of rules defining the authorizations applied for one resource to users or group of users.
##
## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed
## to anyone. Otherwise restrictions follow the rules defined.
##
## Note: One can use the wildcard * to match any subdomain.
## It must stand at the beginning of the pattern. (example: *.mydomain.com)
##
## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct.
##
## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'.
##
## - 'domain' defines which domain or set of domains the rule applies to.
##
## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matches any user if not
## provided. If provided, the parameter represents either a user or a group. It should be of the form
## 'user:<username>' or 'group:<groupname>'.
##
## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'.
##
## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter
## is optional and matches any resource if not provided.
##
## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies.
access_control:
## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any
## resource if there is no policy to be applied to the user.
default_policy: deny
networks:
- name: internal
networks:
- 10.10.0.0/16
- 172.23.0.0/16
- 172.16.23.0/24
- 192.168.10.0/24
- name: VPN
networks:
- 10.14.0.0/27
rules:
## Rules applied to everyone
- domain: public.auth.lan
policy: bypass
- domain: secure.auth.lan
policy: one_factor
## Network based rule, if not provided any network matches.
networks:
- internal
- VPN
- domain:
- secure.auth.lan
- private.auth.lan
policy: two_factor
- domain: singlefactor.auth.lan
policy: one_factor
## Rules applied to 'admins' group
- domain: "mx2.mail.example.com"
subject: "group:admins"
policy: deny
- domain: "*.auth.lan"
subject:
- "group:admins"
- "group:moderators"
policy: two_factor
## Rules applied to 'dev' group
- domain: dev.auth.lan
resources:
- "^/groups/dev/.*$"
subject: "group:dev"
policy: two_factor
## Rules applied to user 'john'
- domain: dev.auth.lan
resources:
- "^/users/john/.*$"
subject: "user:john"
policy: two_factor
## Rules applied to user 'harry'
- domain: dev.auth.lan
resources:
- "^/users/harry/.*$"
subject: "user:harry"
policy: two_factor
## Rules applied to user 'bob'
- domain: "*.mail.auth.lan"
subject: "user:bob"
policy: two_factor
- domain: "dev.auth.lan"
resources:
- "^/users/bob/.*$"
subject: "user:bob"
policy: two_factor
##
## Session Provider Configuration
##
## The session cookies identify the user once logged in.
## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined.
session:
## The name of the session cookie.
name: authelia_session
## The domain to protect.
## Note: the authenticator must also be in that domain.
## If empty, the cookie is restricted to the subdomain of the issuer.
domain: lan
## Sets the Cookie SameSite value. Possible options are none, lax, or strict.
## Please read https://www.authelia.com/docs/configuration/session.html#same_site
same_site: lax
## The secret to encrypt the session data. This is only used with Redis / Redis Sentinel.
## Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
secret: insecure_session_secret
## The value for expiration, inactivity, and remember_me_duration are in seconds or the duration notation format.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
## All three of these values affect the cookie/session validity period. Longer periods are considered less secure
## because a stolen cookie will last longer giving attackers more time to spy or attack.
## The time before the cookie expires and the session is destroyed if remember me IS NOT selected.
expiration: 1h
## The inactivity time before the session is reset. If expiration is set to 1h, and this is set to 5m, if the user
## does not select the remember me option their session will get destroyed after 1h, or after 5m since the last time
## Authelia detected user activity.
inactivity: 5m
## The time before the cookie expires and the session is destroyed if remember me IS selected.
## Value of 0 disables remember me.
remember_me_duration: 1M
##
## Redis Provider
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
#redis:
# host: 127.0.0.1
# port: 6379
# ## Use a unix socket instead
# # host: /var/run/redis/redis.sock
#
# ## Username used for redis authentication. This is optional and a new feature in redis 6.0.
# # username: authelia
#
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: authelia
#
# ## This is the Redis DB Index https://redis.io/commands/select (sometimes referred to as database number, DB, etc).
# database_index: 0
#
# ## The maximum number of concurrent active connections to Redis.
# maximum_active_connections: 8
#
# ## The target number of idle connections to have open ready for work. Useful when opening connections is slow.
# minimum_idle_connections: 0
#
# ## The Redis TLS configuration. If defined will require a TLS connection to the Redis instance(s).
# # tls:
# ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option).
# # server_name: myredis.example.com
#
# ## Skip verifying the server certificate (to allow a self-signed certificate).
# ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
# ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
# # skip_verify: false
#
# ## Minimum TLS version for the connection.
# # minimum_version: TLS1.2
#
# ## The Redis HA configuration options.
# ## This provides specific options to Redis Sentinel, sentinel_name must be defined (Master Name).
# # high_availability:
# ## Sentinel Name / Master Name.
# # sentinel_name: mysentinel
#
# ## Specific password for Redis Sentinel. The node username and password is configured above.
# # sentinel_password: sentinel_specific_pass
#
# ## The additional nodes to pre-seed the redis provider with (for sentinel).
# ## If the host in the above section is defined, it will be combined with this list to connect to sentinel.
# ## For high availability to be used you must have either defined; the host above or at least one node below.
# # nodes:
# # - host: sentinel-node1
# # port: 6379
# # - host: sentinel-node2
# # port: 6379
#
# ## Choose the host with the lowest latency.
# # route_by_latency: false
#
# ## Choose the host randomly.
# # route_randomly: false
##
## Regulation Configuration
##
## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are made
## in a short period of time.
regulation:
## The number of failed login attempts before user is banned. Set it to 0 to disable regulation.
max_retries: 3
## The time range during which the user can attempt login before being banned. The user is banned if the
## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
find_time: 2m
## The length of time before a banned user can login again. Ban Time accepts duration notation.
## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format
ban_time: 5m
##
## Storage Provider Configuration
##
## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers.
storage:
##
## Local (Storage Provider)
##
## This stores the data in a SQLite3 Database.
## This is only recommended for lightweight non-stateful installations.
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
# local:
# path: /config/db.sqlite3
##
## MySQL / MariaDB (Storage Provider)
##
#mysql:
# host: 127.0.0.1
# port: 3306
# database: authelia
# username: authelia
# ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: mypassword
# timeout: 5s
#
##
## PostgreSQL (Storage Provider)
##
postgres:
host: postgres.live-env.svc.cluster.local
port: 5432
database: authelia
username: authelia
## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
password: auth2021
timeout: 5s
sslmode: disable
##
## Notification Provider
##
## Notifications are sent to users when they require a password reset, a U2F registration or a TOTP registration.
## The available providers are: filesystem, smtp. You must use only one of these providers.
notifier:
## You can disable the notifier startup check by setting this to true.
disable_startup_check: false
##
## File System (Notification Provider)
##
## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html
##
filesystem:
filename: /config-nfs/notification.txt
##
## SMTP (Notification Provider)
##
## Use an SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate.
## [Security] By default Authelia will:
## - force all SMTP connections over TLS including unauthenticated connections
## - use the disable_require_tls boolean value to disable this requirement
## (only works for unauthenticated connections)
## - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates
## (configure in tls section)
#smtp:
# ## The SMTP host to connect to.
# host: 127.0.0.1
#
# ## The port to connect to the SMTP host on.
# port: 1025
#
# ## The connection timeout.
# timeout: 5s
#
# ## The username used for SMTP authentication.
# username: test
#
# ## The password used for SMTP authentication.
# ## Can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# password: password
#
# ## The address to send the email FROM.
# sender: admin@example.com
#
# ## HELO/EHLO Identifier. Some SMTP Servers may reject the default of localhost.
# identifier: localhost
#
# ## Subject configuration of the emails sent. {title} is replaced by the text from the notifier.
# subject: "[Authelia] {title}"
#
# ## This address is used during the startup check to verify the email configuration is correct.
# ## It's not important what it is except if your email server only allows local delivery.
# startup_check_address: test@authelia.com
#
# ## By default we require some form of TLS. This disables this check though is not advised.
# disable_require_tls: false
#
# ## Disables sending HTML formatted emails.
# disable_html_emails: false
#
# tls:
# ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option).
# # server_name: smtp.example.com
#
# ## Skip verifying the server certificate (to allow a self-signed certificate).
# ## In preference to setting this we strongly recommend you add the public portion of the certificate to the
# ## certificates directory which is defined by the `certificates_directory` option at the top of the config.
# skip_verify: false
#
# ## Minimum TLS version for either StartTLS or SMTPS.
# minimum_version: TLS1.2
##
## Identity Providers
##
# identity_providers:
##
## OpenID Connect (Identity Provider)
##
## It's recommended you read the documentation before configuration of this section:
## https://www.authelia.com/docs/configuration/identity-providers/oidc.html
# oidc:
## The hmac_secret is used to sign OAuth2 tokens (authorization code, access tokens and refresh tokens).
## HMAC Secret can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html
# hmac_secret: this_is_a_secret_abc123abc123abc
## The issuer_private_key is used to sign the JWT forged by OpenID Connect.
## Issuer Private Key can also be set using a secret: https://docs.authelia.com/configuration/secrets.html
# issuer_private_key: |
# --- KEY START
# --- KEY END
## The lifespans configure the expiration for these token types.
# access_token_lifespan: 1h
# authorize_code_lifespan: 1m
# id_token_lifespan: 1h
# refresh_token_lifespan: 90m
## Enables additional debug messages.
# enable_client_debug_messages: false
## SECURITY NOTICE: It's not recommended changing this option, and highly discouraged to have it below 8 for
## security reasons.
# minimum_parameter_entropy: 8
## Clients is a list of known clients and their configuration.
# clients:
# -
## The ID is the OpenID Connect ClientID which is used to link an application to a configuration.
# id: myapp
## The description to show to users when they end up on the consent screen. Defaults to the ID above.
# description: My Application
## The client secret is a shared secret between Authelia and the consumer of this client.
# secret: this_is_a_secret
## Sets the client to public. This should typically not be set, please see the documentation for usage.
# public: false
## The policy to require for this client; one_factor or two_factor.
# authorization_policy: two_factor
## Audience this client is allowed to request.
# audience: []
## Scopes this client is allowed to request.
# scopes:
# - openid
# - groups
# - email
# - profile
## Redirect URI's specifies a list of valid case-sensitive callbacks for this client.
# redirect_uris:
# - https://oidc.example.com:8080/oauth2/callback
## Grant Types configures which grants this client can obtain.
## It's not recommended to define this unless you know what you're doing.
# grant_types:
# - refresh_token
# - authorization_code
## Response Types configures which responses this client can be sent.
## It's not recommended to define this unless you know what you're doing.
# response_types:
# - code
## Response Modes configures which response modes this client supports.
# response_modes:
# - form_post
# - query
# - fragment
## The algorithm used to sign userinfo endpoint responses for this client, either none or RS256.
# userinfo_signing_algorithm: none
...
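The file backend above reads /config-nfs/users_database.yml; a minimal sketch of that file (user, mail and hash are placeholders — the hash parameters should match the argon2id settings configured above):

users:
  john:                        # hypothetical user
    displayname: "John Doe"
    password: "$argon2id$v=19$m=1048576,t=1,p=8$<salt>$<hash>"   # placeholder hash
    email: john@example.com
    groups:
      - admins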


@@ -0,0 +1,164 @@
# We use PostgreSQL:
#   create database authelia;
#   create user authelia with encrypted password 'secret';
#   grant all privileges on database authelia to authelia;
apiVersion: apps/v1
kind: Deployment
metadata:
  name: authelia
  labels:
    app: authelia
    release: latest
spec:
  replicas: 1
  selector:
    matchLabels:
      app: authelia
      release: latest
  template:
    metadata:
      labels:
        app: authelia
        release: latest
    spec:
      containers:
        - name: authelia
          image: authelia/authelia:latest
          imagePullPolicy: IfNotPresent
          env:
            #- name: AUTHELIA_SERVER_PORT
            #  value: "9091"
            - name: TZ
              value: "Europe/Berlin"
          volumeMounts:
            - name: authelia
              mountPath: /config-nfs
            - name: authelia-config
              mountPath: /config
          ports:
            - name: http
              containerPort: 9091
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: http
          readinessProbe:
            httpGet:
              path: /
              port: http
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "1000Mi"
              cpu: "1500m"
      enableServiceLinks: false
      volumes:
        - name: authelia
          persistentVolumeClaim:
            claimName: authelia
        - name: authelia-config
          configMap:
            name: authelia-config
            items:
              - key: configuration.yml
                path: configuration.yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: authelia
  labels:
    app: authelia
spec:
  storageClassName: nfs-ssd-ebin02
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: authelia
spec:
  storageClassName: "nfs-ssd-ebin02"
  nfs:
    path: /data/raid1-ssd/k8s-data/authelia
    server: ebin02
  capacity:
    storage: 100Mi
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    kind: PersistentVolumeClaim
    name: authelia
    namespace: live-infra
---
apiVersion: v1
kind: Service
metadata:
  name: authelia
  labels:
    app: authelia
spec:
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app: authelia
    release: latest
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: authelia
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/auth-url: http://authelia.live-infra.svc.cluster.local/api/verify
    nginx.ingress.kubernetes.io/auth-signin: http://auth.lan
    nginx.ingress.kubernetes.io/auth-response-headers: Remote-User,Remote-Name,Remote-Groups,Remote-Email
    nginx.ingress.kubernetes.io/auth-snippet: |
      proxy_set_header X-Forwarded-Method $request_method;
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header X-Forwarded-Method $request_method;
spec:
  rules:
    - host: auth.lan
      http:
        paths:
          - backend:
              service:
                name: authelia
                port:
                  name: http
            path: /
            pathType: Prefix
    - host: secure.auth.lan
      http:
        paths:
          - backend:
              service:
                name: authelia
                port:
                  name: http
            path: /
            pathType: Prefix
    - host: public.auth.lan
      http:
        paths:
          - backend:
              service:
                name: authelia
                port:
                  name: http
            path: /
            pathType: Prefix
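Any other Ingress can then be put behind Authelia by pointing nginx's auth subrequest at the verify endpoint; a sketch for a hypothetical app (host and service names are placeholders and must be covered by an access_control rule):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-app            # hypothetical Ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/auth-url: http://authelia.live-infra.svc.cluster.local/api/verify
    nginx.ingress.kubernetes.io/auth-signin: http://auth.lan
    nginx.ingress.kubernetes.io/auth-response-headers: Remote-User,Remote-Groups
spec:
  rules:
    - host: app.lan            # placeholder host
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-app
                port:
                  number: 80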


@@ -1,6 +1,5 @@
FROM debian:stable-slim
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
curl procps && \
apt-get clean -y && \


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: chaos-kubernetes-git
spec:
  type: git
  params:
    - name: revision
      value: master
    - name: url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: submodules
      value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
  name: img-curl
spec:
  type: image
  params:
    - name: url
      value: cr.lan/curl
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-curl
spec:
  params:
    - name: pathToDockerFile
      type: string
      default: $(resources.inputs.source.path)/apps/curl/Dockerfile
    - name: pathToContext
      type: string
      default: $(resources.inputs.source.path)/apps/curl
  resources:
    inputs:
      - name: source
        type: git
    outputs:
      - name: builtImage
        type: image
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(params.pathToDockerFile)
        - --destination=$(resources.outputs.builtImage.url)
        - --context=$(params.pathToContext)
        - --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
  name: img-curl-taskrun
spec:
  #serviceAccountName: dockerhub-service
  taskRef:
    name: build-curl
  params:
    - name: pathToDockerFile
      value: Dockerfile
  resources:
    inputs:
      - name: source
        resourceRef:
          name: chaos-kubernetes-git
    outputs:
      - name: builtImage
        resourceRef:
          name: img-curl


@@ -1,25 +0,0 @@
FROM debian:buster-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN dpkg --add-architecture armhf && \
apt-get update && \
apt-get install -y \
multiarch-support \
dpkg-dev \
distcc ccache \
build-essential \
gcc \
cpp \
g++ \
clang \
llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*
# Op port
EXPOSE 3632
# Stats port
EXPOSE 3633
USER distccd
ENTRYPOINT /usr/bin/distccd --no-detach --daemon --stats --log-level error --log-stderr $OPTIONS


@@ -1,9 +1,7 @@
Docker-ui
Build it for arm64:
Build it for arm64 in docker-registry-ui
docker build --platform linux/arm64 -t joxit/docker-registry-ui:static -f static.dockerfile github.com/Joxit/docker-registry-ui
ARCH=arm64; APP=docker-registry-ui ; podman build --arch=$ARCH -t $APP:$ARCH -t cr.lan/$APP:$ARCH -f arm64v8-static.dockerfile
docker tag 1494c11066f5 docker-registry.lan/docker-registry-ui:arm64
docker push docker-registry.lan/docker-registry-ui:arm64
ARCH=arm64; APP=docker-registry-ui ; podman push cr.lan/$APP:$ARCH


@@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCaHN7wa2QK9qD3
ovn7ZZiKQ+E/f54MnHGgdlTcskTuiysbS4rqUC49MzWRZjxzxukbwF0a1yOOJUSM
YgOeDntRU4T49FLxY3YAZ9RV4Lr6qU8Tz45Ez4N7RLa3QLqY2wf3BEy32k8SqHsI
XMt0DV93w6q1eqW95XRNDDJF0xm4Oa4yaew0tNCx8Senv51jZ8lOX8CJljnE2Sil
P0HBFwfJqKk9qZg5WstQZFsr3D1wTpMZ3UmnzDN3EEBLJkvcAJvdo2E8TGb29UcD
OopHCeixdoKJw/BBdDCXDoSs9N+pDmoY7QSQaXP91sybP/zrcvrIFTT39IFrARRh
5X9QvCnJxhHXPhqqSeAE4YzTGHJV3BdpIVMPMWUHL9TfLFJxbUGImE2IUQZxSb2i
Wy8w9mnt4SFARGUIr0+tOmEDQ7smlFUke9yIPnti01OogfDNR4/szpwYvfE5+xG6
Vp0W590HxL6JE3nqaTJu+KIkBcRzroZZghmNEKik2MeRIxHjCpjvNr2INLn30S81
NhdP4uZdCeI5sERFaFOCgA64MPTtPYjQRV7BFwpN3+alUK8zVtXat/n5HyxvqrzG
s7IHA/GyCLjfsh9sWDhsfgsuIZzL+KblYnU1XPhRko4BQ2Y3GwA0QGFvM0+J1z/V
r3ieyio37CbEuVugMQ/VYYl8UYE0TwIDAQABAoICAC+rnopfraJ2h3QSRaEt2/Fo
7dPmdc0Q11T7RWS+//OJuNvIkj/IbYUgwgEnzqtBa/nZlvMmeSkO/hUufE/3ys1t
OESJzt48FdQqSdQGn8/Jb1yBZ1CBn/oRVzN4IkAGAIC4I8L7FFqBIw2DJqvPNyik
rblVJs+GmmL60tImal5B+VA+04G6LJPeNJX+/4AwKmTD2Zq1jUkGozv6RSylIxON
yEv6mcuj+h/z6v+2MIr8wyPM/2uYDpNVw417WxvCVHRKhVlRiMf7NuwYv40Z05CR
R++1XCvi9OTE6OVXGZgBjXAIYNEKzYZHWyLquCFcf5ZEeQ35485llxhxFOC0U3hL
lT8pI6EFnRiTi+Eq+7GOmvKYjNda6UtUVYPFIX0Ff3IkkwJ53rYdrar4xLnpmeUF
LcJhGJdfJSsvO2mdiLEFm/K7dQxDadusYPYFeUK4CGgoIsauf6XzdWbxJgv4qcOJ
dMzt2uLxpq5k7pQ5HU96Pa9g1flR1vaAtZ4htTMbQ6o7nrUoc8+zoo8pBYW6/zi+
OXf/9BvDQ/dQvtAF+gJQMfGDO5J0x5+yr+Jp7LKjlmG5B2bYMYF9/uZQTgY5kla5
uqihCZVZ14uojbXA3eqHvmtRfFqQ4Us3s0BUDm4W5PUe6jwJ8TavP+XJIjcCLU2c
kOrKZ0ZtIXwTUqKE5Z2BAoIBAQDKXleKtzEvvOWihxzuUmQYIT2HzrMG14s1M7wo
YF0ARaQTxX5HH2lYN7znWb/RpcDSj+IBNV4PxEOHVNCTWhev/PnFmm6FuqopJDIZ
sumP3jJg0K2/MFjBsHXNqacqjqMKlWFnuYqDHZSRX1bjC9IWB6HfS9Wjm2XrgBGx
xFTcAZ3kXX4NlVMz/JgWMKLRY+qGtDWG11sT+oAge81La+MRz/R/fAhf3K+0iDaK
F1iX8jXIcRfqk9OLafRcuIkS4q4rV6D9bI9xjbTz2tsm3b/wJezoSC06mTHoUEoG
p3MIPZ6ETDADDlB9hsWS23p2ueuUOCHg19+n30ah6qWx7UzjAoIBAQDC9KBAYr0T
sf7o5FA+Xp/N6ALxarNa1b15TjFtwSfvwZrrg02QQIpQCR70vy6wiczkTcmRCi4P
uiiVQz8abWbOW+aG4ThTpkOZDbCEVghFzGWPZjRsyrlhcegdS5FL4fCBrtUzOs7e
e+YtgyPrvmHamhMvKYWfW/DWfxOoBFoL9GTuC1646Va63u3MmLMflzYhj4dgbsm0
ut70aK3RAFkLVwswmx+OPINeSpEz6iIRArF4aSi8rH2eaMp4QiXz+zXSP+Bm4XTN
C6HrQeyOmiEtXcZemZVnUtkJBdkW+iRiiD3+xLEX11c/kzcyIeNpaGu9LckXuxqY
chu4XOVHLaKlAoIBAFapGfIESyL3UJtOIvyH+ec/bNsYkB/w8+M/mWbtBUaVjBMP
culAMVue2t1z2KoNwkopZY5A7VvxHz33+y3u2c/6lHejj4rjCfV+U5ofvNdoPsio
9I64RHoFeB0vdq/Jz1Y77C+ADCnj4/hxDINET54xfIdkMUPTy0yTVoB65CAm7Reb
Vdy5Qp0zoWl3QHJMyGURDQ8GcDFZB79hZOPUerPpCvoBApESr4evATQXlU/UYGXK
0IQa8+9y2ztNpx2YRx+2cfG0qKTnG0OGSG0XbxeHFjHOntfGPNIQd/LriF5SDOz4
t2LHoX5v1XHzXTk0mwapFxDzQQrhmZzDIFvWlCMCggEANLHORtjpZlNsJSLhFZqZ
8xvM/9fpVpoDNrCN566XztQzvYimBGGNgQiWF209f3YfrW3hF5T60kFtCrs8aTY8
3XY1nyttAB8mkk4C8iIW5lbS9KmZbfZ1mQMizBhK04nkagkJk2lH1RcEJjUWFnhF
FsMigFLmzSYauL9sXrOeazDJvxXPqodXa/cpq21yrQ1AEl4rJ0OKvZDtBn7szFsd
tlT2r1KeeuGcWHYrPS8BujtSIMu7uROeeJy2bT7j50h1Sbj+PJCf83Q7dc1B1WGP
qiV4osU8fssD4s5z2SQPhZpxt1UO0PThnkt6VdCXGTyiMmYXvpRSIfZly7VAO7b4
CQKCAQEAoVcWk9yQ5fD+uQ40duvjpzeNxBjttFLHe1CeOCIPtA3KBak4O+MNwZMz
oVUe2V/vb3kGpngF56d1hrBa4iQhvq4mGfnF/ZsbQHa4BZyaFIFvcOwZsgCjAO65
MpbybhRiOMMtu0Bg/H1hH2dzatugrqfVDYRnt9EgpDl7gkdVvmRu9khMWGHLv9qJ
gVeH5dNlpty3gkpSjJgTpEuKF7Yzw4seHpjkiwzIitgE2F7Xrv+6GtYOs0iziJTx
ZNq3BtxzCGe6MamLkXOj5DREhQMqAxJTUo/AYRNRiOeq+AdYgoAulse7HIO8q77E
i+DOL/C63wFKJddUnKSXCf+iAJraGw==
-----END PRIVATE KEY-----


@@ -1,34 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIF2zCCA8OgAwIBAgIUCvX0FglFpG7UJJe6QruGhfKwglUwDQYJKoZIhvcNAQEL
BQAwfDELMAkGA1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVy
bGluMQ4wDAYDVQQKDAVjaGFvczEcMBoGA1UEAwwTZG9ja2VyLXJlZ2lzdHJ5Lmxh
bTEdMBsGCSqGSIb3DQEJARYOcm9vdEBjaGFvcy5sYW4wIBcNMjAwNjI0MTUxODE5
WhgPMjEyMDA1MzExNTE4MTlaMHwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJs
aW4xDzANBgNVBAcMBkJlcmxpbjEOMAwGA1UECgwFY2hhb3MxHDAaBgNVBAMME2Rv
Y2tlci1yZWdpc3RyeS5sYW0xHTAbBgkqhkiG9w0BCQEWDnJvb3RAY2hhb3MubGFu
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAmhze8GtkCvag96L5+2WY
ikPhP3+eDJxxoHZU3LJE7osrG0uK6lAuPTM1kWY8c8bpG8BdGtcjjiVEjGIDng57
UVOE+PRS8WN2AGfUVeC6+qlPE8+ORM+De0S2t0C6mNsH9wRMt9pPEqh7CFzLdA1f
d8OqtXqlveV0TQwyRdMZuDmuMmnsNLTQsfEnp7+dY2fJTl/AiZY5xNkopT9BwRcH
yaipPamYOVrLUGRbK9w9cE6TGd1Jp8wzdxBASyZL3ACb3aNhPExm9vVHAzqKRwno
sXaCicPwQXQwlw6ErPTfqQ5qGO0EkGlz/dbMmz/863L6yBU09/SBawEUYeV/ULwp
ycYR1z4aqkngBOGM0xhyVdwXaSFTDzFlBy/U3yxScW1BiJhNiFEGcUm9olsvMPZp
7eEhQERlCK9PrTphA0O7JpRVJHvciD57YtNTqIHwzUeP7M6cGL3xOfsRuladFufd
B8S+iRN56mkybviiJAXEc66GWYIZjRCopNjHkSMR4wqY7za9iDS599EvNTYXT+Lm
XQniObBERWhTgoAOuDD07T2I0EVewRcKTd/mpVCvM1bV2rf5+R8sb6q8xrOyBwPx
sgi437IfbFg4bH4LLiGcy/im5WJ1NVz4UZKOAUNmNxsANEBhbzNPidc/1a94nsoq
N+wmxLlboDEP1WGJfFGBNE8CAwEAAaNTMFEwHQYDVR0OBBYEFCtnUlt2y35MUJ0x
YSvt8G3vi0NMMB8GA1UdIwQYMBaAFCtnUlt2y35MUJ0xYSvt8G3vi0NMMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAEXDBh9NNZza6Vjzwcll7uAc
x22ghoDinHOdfNWe9Hgocmj/Ci4M7f8TL35Zlm2PhOfYaol88uVIOiTKrf2USY2J
7RSvpl34voiWR8HBtkIFvmiUE2GR5I8gA21H8xaenIbg1Pj9V+E4SgIN1V9lX6S1
tjNVbhs/mU6YqyNytkjCuwJgCMPgXx4wwPZqaBqGJ5IrJfag0ZahT0IfKSzKtc8M
HBeXTy7Ck7WUOQWRCe289CBkYHZ+ScdnXnJao7uLvpuoUpu6/WPAnMN1t7KUO4tU
Z0SwNpY/Xsq3pjwTk2ZJwhFI1baaOyDZJW0+l2D48q7ADavq72NlPerZFkIN6Uvh
iyb4A/dzZWeZPIJinLtC6Bip5epg03KR0O4D/rYHbn6uVTq894ThIAXt1Q8fFVGb
oX+AK+ERCWc4ost+pr+Dk78bJUEcHCMRIGaWUVfzXvCagrx4eRLwoaLTovPHVvVl
on61w57W8csoj8lh3TX5t0MB4s87twHlErRIALqMd+m5K+2CPeWRd/6ZpmCGuL9s
bT+Rde3Sqw45N3Asw795yA73Av0coq8pB2DyDR5SoHkMD1rzJIVg4lBCwMSR3IJk
hiIO2qV1xNFrnA3ggKZSyDkH8eOR0dAmtthX6nDGvUbFsMFYnXli5wngTuXdHiYo
Lpilp6oWJLkzjfyGR3Um
-----END CERTIFICATE-----


@@ -6,7 +6,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
replicas: 1
selector:
@@ -21,38 +20,37 @@ spec:
spec:
containers:
- name: registry-ui
image: "docker-registry.lan/docker-registry-ui:arm64"
#image: cr.lan/docker-registry-ui:arm64
image: docker.io/joxit/docker-registry-ui:main-debian
imagePullPolicy: Always
env:
- name: URL
value: "http://docker-registry.lan"
- name: NGINX_PROXY_PASS_URL
value: "http://cr.lan"
- name: REGISTRY_TITLE
value: "dReg"
value: "cReg"
- name: DELETE_IMAGES
value: "true"
- name: REGISTRY_URL
value: "http://docker-registry-ui.lan"
- name: PULL_URL
value: "http://docker-registry.lan"
#- name: REGISTRY_URL
# value: "http://cr.lan"
ports:
- name: http
containerPort: 80
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
#livenessProbe:
# httpGet:
# path: /
# port: http
#readinessProbe:
# httpGet:
# path: /
# port: http
resources:
requests:
memory: "24Mi"
cpu: "50m"
memory: "20Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "100m"
memory: "32Mi"
cpu: "50m"
---
apiVersion: v1
kind: Service
@@ -61,7 +59,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
ports:
- port: 80
@@ -72,16 +69,25 @@ spec:
app: registry-ui
release: docker-registry-ui
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: docker-registry-ui
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-origin: "*"
nginx.ingress.kubernetes.io/cors-expose-headers: "*"
spec:
rules:
- host: docker-registry-ui.lan
- host: cr-ui.lan
http:
paths:
- backend:
serviceName: docker-registry-ui
servicePort: http
path: /
- path: /
pathType: Prefix
backend:
service:
name: docker-registry-ui
port:
number: 80
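
The v1beta1-to-v1 Ingress migration above repeats through the rest of this diff, so the shape of the change is worth spelling out once: the flat serviceName/servicePort backend becomes a nested service object, and every path now needs an explicit pathType. A minimal before/after sketch (example-svc is a placeholder name):

# networking.k8s.io/v1beta1 (old)
- path: /
  backend:
    serviceName: example-svc
    servicePort: http
# networking.k8s.io/v1 (new)
- path: /
  pathType: Prefix
  backend:
    service:
      name: example-svc
      port:
        name: http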

View File

@@ -1,12 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: docker-registry
spec:
finalizers:
- kubernetes
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: docker-registry
@@ -28,7 +20,7 @@ metadata:
name: registry
labels:
app: registry
namespace: docker-registry
namespace: live-env
spec:
replicas: 1
selector:
@@ -66,7 +58,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: docker-registry-config
namespace: docker-registry
namespace: live-env
labels:
app: registry
data:
@@ -86,7 +78,7 @@ data:
addr: :5000
headers:
X-Content-Type-Options: [nosniff]
Access-Control-Allow-Origin: ['*']
Access-Control-Allow-Origin: ['*', 'http://cr-ui.lan']
Access-Control-Allow-Methods: ['HEAD', 'GET', 'OPTIONS', 'DELETE']
Access-Control-Allow-Headers: ['Authorization', 'Accept']
Access-Control-Max-Age: [1728000]
@@ -97,42 +89,43 @@ kind: Service
apiVersion: v1
metadata:
name: registry
namespace: docker-registry
namespace: live-env
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
#---
#apiVersion: v1
#data:
# proxy-connect-timeout: "30"
# proxy-read-timeout: "1801"
# proxy-send-timeout: "1801"
# proxy-body-size: "0"
# client-max-body-size: "0"
#kind: ConfigMap
#metadata:
# name: ingress-nginx-controller
# namespace: ingress-nginx
---
apiVersion: v1
data:
proxy-connect-timeout: "30"
proxy-read-timeout: "1801"
proxy-send-timeout: "1801"
proxy-body-size: "0"
client-max-body-size: "0"
kind: ConfigMap
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: docker-registry
namespace: docker-registry
#annotations:
# nginx.ingress.kubernetes.io/proxyconnecttimeout: 30
# nginx.ingress.kubernetes.io/proxyreadtimeout: 1800
# nginx.ingress.kubernetes.io/proxysendtimeout: 1800
# nginx.ingress.kubernetes.io/proxy-body-size: '5g'
namespace: live-env
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: docker-registry.lan
http:
paths:
- backend:
serviceName: registry
servicePort: 5000
path: /
- path: /
pathType: Prefix
backend:
service:
name: registry
port:
number: 5000
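
One caveat about the CORS change above, where 'http://cr-ui.lan' was added next to '*': the registry emits one Access-Control-Allow-Origin header per listed value, and browsers reject responses carrying more than one (the CORS spec allows a single origin or a lone '*'). If the UI still reports CORS errors, a sketch of the stricter form, keeping only the exact origin:

headers:
  X-Content-Type-Options: [nosniff]
  # a single value: either the exact UI origin or '*', never both
  Access-Control-Allow-Origin: ['http://cr-ui.lan']
  Access-Control-Allow-Methods: ['HEAD', 'GET', 'OPTIONS', 'DELETE']
  Access-Control-Allow-Headers: ['Authorization', 'Accept']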

View File

@@ -24,7 +24,7 @@ spec:
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
imagePullPolicy: Always
env:
- name: USER_UID
value: "1000"
@@ -32,6 +32,16 @@ spec:
value: "1000"
- name: TZ
value: "Europe/Berlin"
- name: DB_TYPE
value: postgres
- name: DB_HOST
value: postgres.live-env.svc.cluster.local:5432
- name: DB_NAME
value: gitea
- name: DB_USER
value: gitea
- name: DB_PASSWD
value: giteaEu94XSS4gKpheSBoMsIs
volumeMounts:
- name: gitea
mountPath: /data
@@ -52,10 +62,10 @@ spec:
port: http
resources:
requests:
memory: "256Mi"
cpu: "250m"
memory: "200Mi"
cpu: "150m"
limits:
memory: "1000Mi"
memory: "312Mi"
cpu: "500m"
volumes:
- name: gitea
@@ -84,6 +94,7 @@ metadata:
app: gitea
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.2
ports:
- port: 3000
targetPort: http
@@ -96,18 +107,24 @@ spec:
app: gitea
release: latest
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
nginx.ingress.kubernetes.io/proxy-body-size: 512m
spec:
rules:
- host: git-ui.lan
http:
paths:
- backend:
serviceName: gitea
servicePort: http
path: /
- path: /
pathType: Prefix
backend:
service:
name: gitea
port:
number: 3000

View File

@@ -1,4 +1,4 @@
FROM debian:buster-slim
FROM debian:bullseye-slim
ENV DEBIAN_FRONTEND noninteractive
ARG GRAV_VERSION=1.6.28
@@ -22,7 +22,7 @@ RUN apt-get remove -y --purge ${DEV_PKGS} exim4* && \
RUN mkdir /run/php && \
chown www-data:www-data /var/log /run/php && \
mkdir -p /etc/php/7.3/fpm/pool.d
mkdir -p /etc/php/7.4/fpm/pool.d
ADD docker-entrypoint.sh /
ADD supervisor.conf /etc/supervisor.conf

View File

@@ -15,7 +15,7 @@ spec:
app: grav
spec:
containers:
- image: docker-registry.lan/grav:arm64
- image: cr.lan/grav:arm64
name: grav
imagePullPolicy: Always
ports:
@@ -25,7 +25,7 @@ spec:
- name: grav-pages
mountPath: /var/www/grav
- name: grav-etc-php-fpm-www-conf
mountPath: /etc/php/7.3/fpm/pool.d
mountPath: /etc/php/7.4/fpm/pool.d
- image: nginx:alpine
name: nginx
imagePullPolicy: IfNotPresent
@@ -77,25 +77,32 @@ spec:
selector:
app: grav
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grav
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: 512m
spec:
rules:
- host: grav.lan
http:
paths:
- backend:
serviceName: grav
servicePort: http
- path: /
pathType: Prefix
backend:
service:
name: grav
port:
name: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grav-pages
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin01
accessModes:
- ReadWriteOnce
resources:

apps/grav/docker-entrypoint.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
set -e
exec "$@"

View File

@@ -10,5 +10,5 @@ user=root
[program:php-fpm]
command=/usr/sbin/php-fpm7.3 --nodaemonize --force-stderr
command=/usr/sbin/php-fpm7.4 --nodaemonize --force-stderr
user=www-data

View File

@@ -11,7 +11,7 @@ spec:
selector:
app: mariadb
type: LoadBalancer
loadBalancerIP: 172.23.255.4
loadBalancerIP: 172.23.255.5
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
@@ -29,7 +29,7 @@ spec:
app: mariadb
spec:
containers:
- image: docker-registry.lan/mariadb:arm64
- image: cr.lan/mariadb
name: mariadb
imagePullPolicy: Always
env:
@@ -49,7 +49,7 @@ spec:
limits:
memory: "1500Mi"
cpu: "2000m"
- image: docker-registry.lan/mariadb-prometheus-exporter:arm64
- image: cr.lan/mariadb-prometheus-exporter
name: mariadb-prometheus-exporter
imagePullPolicy: Always
ports:
@@ -65,18 +65,37 @@ spec:
volumes:
- name: mariadb-persistent-storage
persistentVolumeClaim:
claimName: mariadb-pv-claim
claimName: mariadb-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pv-claim
annotations:
volume.beta.kubernetes.io/storage-class: nfs-ssd
name: mariadb-data
spec:
storageClassName: nfs-ssd
volumeName: mariadb-data
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-data
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/mariadb-data
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: mariadb-data
namespace: live-env
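
The PV/PVC pair above (repeated for postgres further down) uses static binding: volumeName on the claim and claimRef on the volume point at each other, so the retained NFS volume can never be handed to a different claim. Stripped to just the two binding fields (example-data is a placeholder name):

# PVC side
spec:
  volumeName: example-data        # bind to exactly this PV
# PV side
spec:
  persistentVolumeReclaimPolicy: Retain
  claimRef:                       # reserve the PV for that one claim
    kind: PersistentVolumeClaim
    name: example-data
    namespace: live-env           # must match the claim's namespace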

View File

@@ -6,6 +6,8 @@ RUN groupadd -r mysql && useradd -r -g mysql mysql
# https://bugs.debian.org/830696 (apt uses gpgv by default in newer releases, rather than gpg)
RUN set -ex; \
sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list; \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list; \
apt-get update; \
if ! which gpg; then \
apt-get install -y --no-install-recommends gnupg; \
@@ -93,6 +95,7 @@ RUN set -ex; \
| xargs -rt -0 sed -Ei 's/^(bind-address|log)/#&/'; \
# don't reverse lookup hostnames, they are usually another container
echo '[mysqld]\nskip-host-cache\nskip-name-resolve' > /etc/mysql/conf.d/docker.cnf; \
mkdir -p /run/mysqld; \
apt-get clean -y;
VOLUME /var/lib/mysql

View File

@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mariadb-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mariadb-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-prometheus-exporter-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb-prometheus-exporter
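
A side note on the Tekton objects above (and the similar files that follow): PipelineResource never left v1alpha1 and was later deprecated upstream, so these builds would eventually need the params-plus-workspace form instead. A hedged sketch of the same kaniko build without resources (build-image and the workspace wiring are assumptions, not what this repo uses):

apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: build-image
spec:
  params:
    - name: image                # destination, e.g. cr.lan/mariadb
      type: string
  workspaces:
    - name: source               # filled by a git-clone task beforehand
  steps:
    - name: build-and-push
      image: gcr.io/kaniko-project/executor:arm64
      command:
        - /kaniko/executor
      args:
        - --dockerfile=$(workspaces.source.path)/Dockerfile
        - --context=$(workspaces.source.path)
        - --destination=$(params.image)
        - --skip-tls-verify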

View File

@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mariadb
spec:
type: image
params:
- name: url
value: cr.lan/mariadb
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb

View File

@@ -1,19 +1,18 @@
FROM debian:buster-slim
FROM debian:stable-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && \
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto mosquitto-clients procps && \
mosquitto procps && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Op port
EXPOSE 1883
# Stats port
#EXPOSE 9090
ADD docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/sbin/mosquitto", "-c", "/mosquitto/config/mosquitto.conf"]
CMD ["/usr/sbin/mosquitto", "-v", "-c", "/mosquitto/config/mosquitto.conf"]

View File

@@ -6,7 +6,6 @@ metadata:
app: mosquitto
release: mqtt
name: mqtt-mosquitto
namespace: default
spec:
replicas: 1
selector:
@@ -23,7 +22,7 @@ spec:
spec:
containers:
- name: mqtt-mosquitto
image: docker-registry.lan/mosquitto:arm64
image: cr.lan/mosquitto
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -63,7 +62,7 @@ spec:
name: mosquitto-data
subPath: mosquitto/data
- name: mosquitto-exporter
image: docker-registry.lan/mosquitto-exporter:arm64
image: cr.lan/mosquitto-exporter
imagePullPolicy: Always
ports:
- containerPort: 9234
@@ -96,7 +95,6 @@ metadata:
labels:
app: mosquitto
release: mqtt
namespace: default
name: mqtt-mosquitto
spec:
externalTrafficPolicy: Cluster
@@ -117,13 +115,10 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
#annotations:
# volume.beta.kubernetes.io/storage-provisioner: nfs-storage
labels:
app: mosquitto
release: mqtt
name: mqtt-mosquitto
namespace: default
spec:
accessModes:
- ReadWriteOnce
@@ -137,7 +132,6 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: mqtt-mosquitto
namespace: default
labels:
app: mosquitto
release: mqtt

View File

@@ -0,0 +1,93 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: github-mosquitto-prometheus-exporter
spec:
type: git
params:
- name: revision
value: master
- name: url
value: https://github.com/sapcc/mosquitto-exporter.git
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-binary
image: cr.lan/debian-golang-stable
script: |
#!/usr/bin/env bash
cd $(resources.inputs.source.path)
ls -al
export GOARCH=arm64
export GOPATH=/usr/src/gopath
export GOCACHE=/usr/src/gocache
go env
go get github.com/sapcc/mosquitto-exporter
make -j4 build CGO_ENABLED=0
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
workspaces:
- name: usr-src
mountPath: /usr/src
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-prometheus-exporter
spec:
taskRef:
name: build-mosquitto-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: github-mosquitto-prometheus-exporter
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto-prometheus-exporter
workspaces:
- name: usr-src
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: usr_src

View File

@@ -0,0 +1,77 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mosquitto/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mosquitto
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mosquitto
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto

apps/nextcloud/Dockerfile Normal file
View File

@@ -0,0 +1,86 @@
FROM nextcloud:21-fpm
#needed for some reason
ENV NEXTCLOUD_UPDATE=1
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
procps bash iputils-ping libmagickcore-6.q16-6-extra
RUN apt-get clean -y && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN touch /usr/src/nextcloud/data/.ocdata
COPY config.php /usr/src/nextcloud/config/
#COPY htaccess-data /usr/src/nextcloud/data/.htaccess
#COPY apache-default-vhost.conf /etc/apache2/sites-available/000-default.conf
RUN mv /usr/src/nextcloud/.htaccess /usr/src/nextcloud/.htaccess.bak
RUN mv /usr/src/nextcloud/config/.htaccess /usr/src/nextcloud/config/.htaccess.bak
#install ca.crt update script to the container
COPY post-start.sh /
RUN chmod +x /post-start.sh
#RUN set -ex; \
# \
# apt-get update; \
# apt-get install -y --no-install-recommends \
# ffmpeg \
# libmagickcore-6.q16-6-extra \
# procps \
# smbclient \
# supervisor \
## libreoffice \
# ; \
# rm -rf /var/lib/apt/lists/*
#
#RUN set -ex; \
# \
# savedAptMark="$(apt-mark showmanual)"; \
# \
# apt-get update; \
# apt-get install -y --no-install-recommends \
# libbz2-dev \
# libc-client-dev \
# libkrb5-dev \
# libsmbclient-dev \
# ; \
# \
# docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \
# docker-php-ext-install \
# bz2 \
# imap \
# ; \
# pecl install smbclient; \
# docker-php-ext-enable smbclient; \
# \
## reset apt-mark's "manual" list so that "purge --auto-remove" will remove all build dependencies
# apt-mark auto '.*' > /dev/null; \
# apt-mark manual $savedAptMark; \
# ldd "$(php -r 'echo ini_get("extension_dir");')"/*.so \
# | awk '/=>/ { print $3 }' \
# | sort -u \
# | xargs -r dpkg-query -S \
# | cut -d: -f1 \
# | sort -u \
# | xargs -rt apt-mark manual; \
# \
# apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \
# apt-get clean -y; \
# rm -rf /var/cache/apt/*; \
# rm -rf /var/lib/apt/lists/*
#
#RUN mkdir -p \
# /var/log/supervisord \
# /var/run/supervisord \
#;
#RUN chown www-data:www-data \
# /var/log/supervisord \
# /var/run/supervisord;
#
#COPY supervisord.conf /
#
#CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]

apps/nextcloud/README.md Normal file
View File

@@ -0,0 +1,2 @@
# kubectl -n live-env create configmap nextcloud-config --from-file=config.php
# kubectl -n live-env create configmap nextcloud-nginx-site --from-file=nginx-site.configmap.conf

apps/nextcloud/config.php Normal file
View File

@@ -0,0 +1,60 @@
<?php
//
// Manually deployed by yourself
//
$CONFIG = array(
'config_is_read_only' => true,
'htaccess.RewriteBase' => '/',
'memcache.local' => '\\OC\\Memcache\\APCu',
'apps_paths' => array(
0 => array(
'path' => '/var/www/html/apps',
'url' => '/apps',
'writable' => false
),
1 => array(
'path' => '/var/www/html/custom_apps',
'url' => '/custom_apps',
'writable' => true
)
),
'objectstore' => array(
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' => array(
'bucket' => 'nextcloud',
'key' => 'nextcloud',
'secret' => 'tWnc3zdxcDUvcX5f9uY7RRYvKLcWI1KY',
'region' => '',
'hostname' => 'minio.live-infra.svc.cluster.local',
'port' => '443',
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
'use_path_style' => true,
'legacy_auth' => false
)
),
'instanceid' => 'ocsxqijfvpf7',
'passwordsalt' => 'OTjmXJP0VKlw+OLja6wUxbHlZk4Txw',
'secret' => '0g94SdF7A2k/LHTKUM+8HwEDFgF1zz7I/sMauap02/d8G677',
'trusted_domains' => array(
0 => 'nc.lan'
),
'trusted_proxies' => array(
0 => '172.23.255.1',
1 => '127.0.0.1'
),
'datadirectory' => '/var/www/html/data',
'dbtype' => 'pgsql',
'version' => '20.0.9.1',
'overwrite.cli.url' => 'http://nc.lan',
'dbname' => 'nextcloud',
'dbhost' => 'postgres.live-env.svc.cluster.local:5432',
'dbport' => '',
'dbtableprefix' => 'oc_',
'dbuser' => 'nextcloud',
'dbpassword' => 'Vb7yHzmE5HIjfU4hf89aXAmEEmxAnMdB',
'installed' => true,
'default_phone_region' => 'DE',
'updater.release.channel' => 'stable',
);

View File

@@ -0,0 +1,140 @@
#we use postgresql:
#create database nextcloud;
#create user nextcloud with encrypted password 'secret';
#grant all privileges on database nextcloud to nextcloud;
apiVersion: apps/v1
kind: Deployment
metadata:
name: nextcloud
labels:
app: nextcloud
release: latest
spec:
replicas: 1
selector:
matchLabels:
app: nextcloud
release: latest
template:
metadata:
labels:
app: nextcloud
release: latest
spec:
volumes:
- name: nextcloud-nginx-site
configMap:
name: nextcloud-nginx-site
- name: nextcloud-config
configMap:
name: nextcloud-config
- name: www-data
emptyDir: {}
containers:
- name: nginx-proxy
image: nginx
volumeMounts:
- name: nextcloud-nginx-site
mountPath: /etc/nginx/conf.d
- name: www-data
mountPath: /var/www/html
ports:
- name: http
containerPort: 80
protocol: TCP
- name: nextcloud
image: cr.lan/nextcloud:latest
lifecycle:
postStart:
exec:
command:
- /post-start.sh
volumeMounts:
- name: www-data
mountPath: /var/www/html
#- name: nextcloud-config
# mountPath: /var/www/html/config/config.php
# subPath: config.php
env:
- name: TZ
value: "Europe/Berlin"
- name: POSTGRES_HOST
value: postgres.live-env.svc.cluster.local:5432
- name: POSTGRES_DB
value: nextcloud
- name: POSTGRES_USER
value: nextcloud
- name: POSTGRES_PASSWORD
value: Vb7yHzmE5HIjfU4hf89aXAmEEmxAnMdB
- name: NEXTCLOUD_TRUSTED_DOMAINS
value: nc nc.lan 172.23.255.1
- name: OBJECTSTORE_S3_HOST
value: minio.live-infra.svc.cluster.local
- name: OBJECTSTORE_S3_BUCKET
value: nextcloud
- name: OBJECTSTORE_S3_KEY
value: nextcloud
- name: OBJECTSTORE_S3_SECRET
value: tWnc3zdxcDUvcX5f9uY7RRYvKLcWI1KY
- name: OBJECTSTORE_S3_PORT
value: "443"
- name: OBJECTSTORE_S3_USEPATH_STYLE
value: "true"
- name: OBJECTSTORE_S3_SSL
value: "true"
ports:
- name: php-fpm
containerPort: 9000
protocol: TCP
# startupProbe:
# httpGet:
# path: /
# port: http
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "768Mi"
cpu: "3000m"
---
apiVersion: v1
kind: Service
metadata:
name: nextcloud
spec:
ports:
- name: http
port: 80
selector:
app: nextcloud
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: nextcloud
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
nginx.ingress.kubernetes.io/proxy-body-size: 512m
spec:
rules:
- host: nc.lan
http:
paths:
- backend:
service:
name: nextcloud
port:
name: http
path: /
pathType: Prefix

View File

@@ -0,0 +1,146 @@
upstream php-handler {
server 127.0.0.1:9000;
#server unix:/var/run/php/php7.4-fpm.sock;
}
#server {
# listen 80;
# listen [::]:80;
# server_name cloud.example.com;
#
# # Enforce HTTPS
# return 301 https://$server_name$request_uri;
#}
server {
listen 80;
listen [::]:80;
server_name _;
# Use Mozilla's guidelines for SSL/TLS settings
# https://mozilla.github.io/server-side-tls/ssl-config-generator/
#ssl_certificate /etc/ssl/nginx/cloud.example.com.crt;
#ssl_certificate_key /etc/ssl/nginx/cloud.example.com.key;
# HSTS settings
# WARNING: Only add the preload option once you read about
# the consequences in https://hstspreload.org/. This option
# will add the domain to a hardcoded list that is shipped
# in all major browsers and getting removed from this list
# could take several months.
#add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;" always;
# set max upload size
client_max_body_size 512M;
fastcgi_buffers 64 4K;
# Enable gzip but do not remove ETag headers
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
# Pagespeed is not supported by Nextcloud, so if your server is built
# with the `ngx_pagespeed` module, uncomment this line to disable it.
#pagespeed off;
# HTTP response headers borrowed from Nextcloud `.htaccess`
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
# Remove X-Powered-By, which is an information leak
fastcgi_hide_header X-Powered-By;
# Path to the root of your installation
root /var/www/html;
# Specify how to handle directories -- specifying `/index.php$request_uri`
# here as the fallback means that Nginx always exhibits the desired behaviour
# when a client requests a path that corresponds to a directory that exists
# on the server. In particular, if that directory contains an index.php file,
# that file is correctly served; if it doesn't, then the request is passed to
# the front-end controller. This consistent behaviour means that we don't need
# to specify custom rules for certain paths (e.g. images and other assets,
# `/updater`, `/ocm-provider`, `/ocs-provider`), and thus
# `try_files $uri $uri/ /index.php$request_uri`
# always provides the desired behaviour.
index index.php index.html /index.php$request_uri;
# Rule borrowed from `.htaccess` to handle Microsoft DAV clients
location = / {
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# Make a regex exception for `/.well-known` so that clients can still
# access it despite the existence of the regex rule
# `location ~ /(\.|autotest|...)` which would otherwise handle requests
# for `/.well-known`.
location ^~ /.well-known {
# The following rules are borrowed from `.htaccess`
location = /.well-known/carddav { return 301 /remote.php/dav/; }
location = /.well-known/caldav { return 301 /remote.php/dav/; }
# Anything else is dynamically handled by Nextcloud
location ^~ /.well-known { return 301 /index.php$uri; }
try_files $uri $uri/ =404;
}
# Rules borrowed from `.htaccess` to hide certain paths from clients
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
# Ensure this block, which passes PHP files to the PHP process, is above the blocks
# which handle static assets (as seen below). If this block is not declared first,
# then Nginx will encounter an infinite rewriting loop when it prepends `/index.php`
to the URI, resulting in an HTTP 500 error response.
location ~ \.php(?:$|/) {
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS off;
fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
fastcgi_param front_controller_active true; # Enable pretty urls
fastcgi_pass php-handler;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
}
location ~ \.(?:css|js|svg|gif)$ {
try_files $uri /index.php$request_uri;
expires 6M; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
location ~ \.woff2?$ {
try_files $uri /index.php$request_uri;
expires 7d; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
location / {
try_files $uri $uri/ /index.php$request_uri;
}
}

apps/nextcloud/post-start.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
ln -s /var/run/secrets/kubernetes.io/serviceaccount/ca.crt /usr/local/share/ca-certificates/
/usr/sbin/update-ca-certificates
#su - www-data --shell=/bin/bash --command="cd /var/www/html && php -d memory_limit=512M ./occ upgrade"
# reinstall/activate apps
#DIS_APP=( accessibility admin_audit contactsinteraction dashboard files_external
# files_rightclick firstrunwizard logreader nextcloud_announcements
# serverinfo sharebymail survey_client systemtags ser_ldap weather_status )
#
#EN_APP=( activity cloud_federation_api comments dav encryption federatedfilesharing
# federation files files_pdfviewer files_sharing files_trashbin files_videoplayer
# lookup_server_connector notes notifications oauth2 password_policy photos
# privacy provisioning_api recommendations settings support text theming
# twofactor_backupcodes updatenotification user_status viewer workflowengine
# files_versions timetracker tasks deck files_3d )
#
#for APP in ${DIS_APP[@]}; do echo "+${APP}+"; done
#echo "ENABLED"
#
#for APP in ${EN_APP[@]}; do echo "+${APP}+"; done

View File

@@ -0,0 +1,77 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-nextcloud
spec:
type: image
params:
- name: url
value: cr.lan/nextcloud
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-nextcloud
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/nextcloud/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/nextcloud
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-nextcloud
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-nextcloud
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-nextcloud

View File

@@ -25,6 +25,13 @@ spec:
volumeMounts:
- mountPath: /data
name: data
resources:
limits:
cpu: "1"
memory: "200Mi"
requests:
memory: "64Mi"
cpu: "50m"
volumes:
- name: data
persistentVolumeClaim:
@@ -46,29 +53,32 @@ spec:
selector:
app: node-red
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: node-red
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: node-red.lan
- host: nodered.lan
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: node-red
servicePort: 1880
service:
name: node-red
port:
number: 1880
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: node-red
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin01
accessModes:
- ReadWriteOnce
resources:

View File

@@ -3,7 +3,7 @@ apiVersion: v1
kind: Secret
metadata:
name: pihole-password
namespace: default
namespace: live-env
type: Opaque
data:
password: YWRtaW4yMDIw
@@ -33,6 +33,8 @@ spec:
spec:
containers:
- env:
- name: TZ
value: Europe/Berlin
- name: WEB_PORT
value: "80"
- name: VIRTUAL_HOST
@@ -46,7 +48,7 @@ spec:
value: 208.67.222.222
- name: DNS2
value: 208.67.220.220
image: pihole/pihole:v5.1.2
image: pihole/pihole:latest
imagePullPolicy: Always
livenessProbe:
failureThreshold: 10
@@ -123,7 +125,7 @@ spec:
name: pihole-custom-dnsmasq
name: custom-dnsmasq
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pihole
@@ -132,10 +134,14 @@ spec:
- host: pihole.lan
http:
paths:
- backend:
serviceName: pihole-tcp
servicePort: http
- path: /
pathType: ImplementationSpecific
backend:
service:
name: pihole-tcp
port:
name: http
---
apiVersion: v1
kind: PersistentVolumeClaim
@@ -170,7 +176,7 @@ metadata:
labels:
app: pihole
name: pihole-tcp
namespace: default
namespace: live-env
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.253
@@ -202,7 +208,7 @@ metadata:
labels:
app: pihole
name: pihole-udp
namespace: default
namespace: live-env
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.253

View File

@@ -17,9 +17,8 @@ spec:
app: piwigo
spec:
containers:
- image: linuxserver/piwigo
- image: linuxserver/piwigo:latest
name: piwigo
imagePullPolicy: IfNotPresent
env:
# Use secret in real usage
- name: TZ
@@ -74,20 +73,26 @@ spec:
selector:
app: piwigo
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: piwigo
labels:
app: piwigo
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: foto.lan
http:
paths:
- backend:
serviceName: piwigo
servicePort: http
- path: /
pathType: Prefix
backend:
service:
name: piwigo
port:
name: http
---
apiVersion: batch/v1beta1
kind: CronJob

View File

@@ -19,8 +19,20 @@ spec:
env: live
spec:
containers:
- name: postgres-exporter
image: quay.io/prometheuscommunity/postgres-exporter
ports:
- containerPort: 9187
protocol: TCP
env:
- name: DATA_SOURCE_NAME
value: "postgresql://postgres:pg2020@localhost:5432/postgres?sslmode=disable"
#value: "port=5432 host=127.0.0.1"
- name: postgres
image: postgres
image: postgres:13
ports:
- containerPort: 5432
protocol: TCP
volumeMounts:
- name: postgres-disk
mountPath: /var/lib/postgresql/data
@@ -37,7 +49,7 @@ spec:
volumes:
- name: postgres-disk
persistentVolumeClaim:
claimName: postgres
claimName: postgres-data
# volumeClaimTemplates:
# - metadata:
# name: postgres-disk
@@ -51,17 +63,37 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres
name: postgres-data
labels:
app: postgres
spec:
storageClassName: nfs-ssd
volumeName: postgres-data
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
# service.yml
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-data
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/postgres-data
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: postgres-data
namespace: live-env
---
apiVersion: v1
kind: Service
@@ -74,6 +106,12 @@ spec:
selector:
env: live
type: LoadBalancer
loadBalancerIP: 172.23.255.4
ports:
- port: 5432
targetPort: 5432
- name: postgres
port: 5432
targetPort: 5432
- name: exporter
port: 9187
targetPort: 9187

apps/rompr/Dockerfile Normal file
View File

@@ -0,0 +1,55 @@
FROM debian:buster-slim
ARG ROMPR_VERSION=1.58
# Install packages
ENV DEBIAN_FRONTEND noninteractive
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get -y install \
nginx \
php-fpm \
curl \
php-mysql \
php-curl \
php-gd \
unzip \
imagemagick \
php-json \
php-xml \
php-mbstring \
php-sqlite3 \
php-intl
# Cleanup
RUN apt-get remove -y --purge ${DEV_PKGS} && \
apt-get autoremove --purge -y && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*
RUN curl -k -L -o rompr.zip https://github.com/fatg3erman/RompR/releases/download/${ROMPR_VERSION}/rompr-${ROMPR_VERSION}.zip
RUN mkdir -p /app
RUN unzip -d /app rompr.zip && rm rompr.zip
RUN mkdir -p /rompr
RUN ln -sf /rompr/prefs /app/rompr/prefs
RUN ln -sf /rompr/albumart /app/rompr/albumart
RUN chown -R www-data:www-data /app/rompr /rompr
COPY nginx_default /etc/nginx/sites-available/default
RUN mkdir -p /run/php/
#Environment variables to configure php
RUN sed -ri -e 's/^allow_url_fopen =.*/allow_url_fopen = On/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^memory_limit =.*/memory_limit = 128M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^max_execution_time =.*/max_execution_time = 1800/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^post_max_size =.*/post_max_size = 256M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^upload_max_filesize =.*/upload_max_filesize = 8M/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^max_file_uploads =.*/max_file_uploads = 50/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^display_errors =.*/display_errors = On/g' /etc/php/7.3/fpm/php.ini
RUN sed -ri -e 's/^display_startup_errors =.*/display_startup_errors = On/g' /etc/php/7.3/fpm/php.ini
RUN echo "<?php phpinfo(); ?>" > /app/rompr/phpinfo.php
RUN update-rc.d php7.3-fpm defaults
COPY run-httpd /usr/local/bin/
RUN chmod 755 /usr/local/bin/run-httpd
EXPOSE 80
VOLUME ["/rompr"]
CMD ["/usr/local/bin/run-httpd"]

View File

@@ -15,33 +15,19 @@ spec:
app: rompr
spec:
containers:
- image: docker-registry.lan/rompr:arm64
- image: cr.lan/rompr
name: rompr
imagePullPolicy: Always
ports:
- containerPort: 9000
name: php-fpm
volumeMounts:
- name: rompr-data
mountPath: /rompr
- image: sebp/lighttpd:latest
name: lighttpd
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: http
volumeMounts:
- name: rompr-data
mountPath: /rompr
- name: rompr-lighttpd-config
mountPath: /etc/lighttpd
volumes:
- name: rompr-data
persistentVolumeClaim:
claimName: rompr-data
- name: rompr-lighttpd-config
configMap:
name: rompr-lighttpd-config
---
apiVersion: v1
kind: Service
@@ -54,18 +40,24 @@ spec:
selector:
app: rompr
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rompr
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: musik.lan
http:
paths:
- backend:
serviceName: rompr
servicePort: http
- path: /
pathType: Prefix
backend:
service:
name: rompr
port:
name: http
---
apiVersion: v1
kind: PersistentVolumeClaim

apps/rompr/nginx_default Normal file
View File

@@ -0,0 +1,36 @@
# Default server configuration
#
server {
listen 80;
listen [::]:80;
root /app/rompr;
# Add index.php to the list if you are using PHP
index index.php index.html index.htm;
server_name _;
client_max_body_size 256M;
# This section can be copied into an existing default setup
location / {
allow all;
access_log off;
index index.php;
location ~ \.php {
try_files $uri index.php =404;
fastcgi_pass unix:/var/run/php/php7.3-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $request_filename;
include /etc/nginx/fastcgi_params;
fastcgi_read_timeout 1800;
}
error_page 404 = /404.php;
try_files $uri $uri/ =404;
location ~ /albumart/* {
expires -1s;
}
}
}

apps/rompr/run-httpd Normal file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
rm -f /var/run/nginx.pid
mkdir -p /var/log/nginx
set -e
/etc/init.d/php7.3-fpm restart
exec /usr/sbin/nginx -g 'daemon off;'

View File

@@ -0,0 +1,77 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-rompr
spec:
type: image
params:
- name: url
value: cr.lan/rompr
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-rompr
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/rompr/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/rompr
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-rompr-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-rompr
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-rompr

View File

@@ -0,0 +1,50 @@
FROM node:current-buster
# Set the commit of Zwave2Mqtt to checkout when cloning the repo
ENV Z2M_VERSION=9cc3740740b57f1e896139b5ffdb25be7576ad58
ENV DEBIAN_FRONTEND noninteractive
#setup local apt cache
#RUN sed -i 's@http://@http://apt-cache.lan/@g' /etc/apt/sources.list
#/apt-cache
# Install required dependencies
RUN apt update -y
RUN apt full-upgrade -y
# Packages we need
RUN apt install -y \
socat libopenzwave1.5 npm git
# Clone Zwave2Mqtt build pkg files and move them to /dist/pkg
RUN npm config set unsafe-perm true && npm install -g pkg
RUN cd /root \
&& git clone https://github.com/OpenZWave/Zwave2Mqtt.git \
&& cd Zwave2Mqtt \
&& git checkout ${Z2M_VERSION} \
&& npm install \
&& npm run build
# Clean up
RUN apt autoremove -y
RUN apt clean -y
RUN rm -rf /root/*
RUN apt-get clean -y
RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /dist/lib/ /lib/
COPY --from=build /dist/pkg /usr/src/app
# supervisor base configuration
ADD supervisor.conf /etc/supervisor.conf
LABEL maintainer="zoide"
# Set environment
ENV LD_LIBRARY_PATH /lib
EXPOSE 8091
CMD ["supervisord", "-c", "/etc/supervisor.conf"]
#CMD ["/usr/src/app/zwave2mqtt"]

View File

@@ -1,98 +0,0 @@
# ----------------
# STEP 1:
# https://lobradov.github.io/Building-docker-multiarch-images/
# Build Openzwave and Zwave2Mqtt pkg
# All result files will be put in /dist folder
FROM node:8.15.1-alpine AS build
# Set the commit of Zwave2Mqtt to checkout when cloning the repo
ENV Z2M_VERSION=9cc3740740b57f1e896139b5ffdb25be7576ad58
# Install required dependencies
RUN apk update && apk --no-cache add \
gnutls \
gnutls-dev \
libusb \
eudev \
# Install build dependencies
&& apk --no-cache --virtual .build-deps add \
coreutils \
eudev-dev \
build-base \
git \
python \
bash \
libusb-dev \
linux-headers \
wget \
tar \
openssl \
make \
socat
# Build binaries and move them to /dist/lib
RUN cd /root \
&& wget http://old.openzwave.com/downloads/openzwave-1.4.1.tar.gz \
&& tar zxvf openzwave-*.gz \
&& cd openzwave-* && make && make install \
&& mkdir -p /dist/lib \
&& mv libopenzwave.so* /dist/lib/
COPY bin/package.sh /root/package.sh
# Clone Zwave2Mqtt build pkg files and move them to /dist/pkg
RUN npm config set unsafe-perm true && npm install -g pkg \
&& cd /root \
&& git clone https://github.com/OpenZWave/Zwave2Mqtt.git \
&& cd Zwave2Mqtt \
&& git checkout ${Z2M_VERSION} \
&& npm install \
&& npm run build
RUN cd /root \
&& chmod +x package.sh && ./package.sh \
&& mkdir -p /dist/pkg \
&& mv /root/Zwave2Mqtt/pkg/* /dist/pkg
# Get last config DB from main repo and move files to /dist/db
RUN cd /root \
&& git clone https://github.com/OpenZWave/open-zwave.git \
&& cd open-zwave \
&& mkdir -p /dist/db \
&& mv config/* /dist/db
# Clean up
RUN rm -R /root/* && apk del .build-deps
# ----------------
# STEP 2:
# Run a minimal alpine image
FROM alpine:latest
LABEL maintainer="zoide"
RUN apk update && apk add --no-cache \
libstdc++ \
libgcc \
libusb \
eudev
# Copy files from previous build stage
COPY --from=build /dist/lib/ /lib/
COPY --from=build /dist/db/ /usr/local/etc/openzwave/
COPY --from=build /dist/pkg /usr/src/app
# supervisor base configuration
ADD supervisor.conf /etc/supervisor.conf
# Set environment
ENV LD_LIBRARY_PATH /lib
EXPOSE 8091
CMD ["supervisord", "-c", "/etc/supervisor.conf"]
#CMD ["/usr/src/app/zwave2mqtt"]

View File

@@ -19,7 +19,7 @@ spec:
spec:
containers:
- name: hassio
image: "homeassistant/home-assistant:latest"
image: homeassistant/home-assistant:latest
imagePullPolicy: Always
env:
- name: TZ
@@ -35,21 +35,57 @@ spec:
httpGet:
path: /
port: http
# resources:
initialDelaySeconds: 300
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: http
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
initialDelaySeconds: 120
periodSeconds: 5
resources:
requests:
memory: "200Mi"
cpu: "250m"
limits:
memory: "256Mi"
cpu: "500m"
- name: configurator
image: "causticlab/hass-configurator-docker:arm"
imagePullPolicy: Always
env:
- name: HC_HASS_API
value: http://127.0.0.1:8123/api/
- name: HC_HASS_API_PASSWORD
value: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4
ports:
- name: adm
containerPort: 3218
protocol: TCP
#livenessProbe:
# httpGet:
# path: /
# port: 3218
# initialDelaySeconds: 60
# periodSeconds: 3
#readinessProbe:
# httpGet:
# path: /
# port: 3218
# initialDelaySeconds: 60
# periodSeconds: 5
volumeMounts:
- name: hassio-storage
mountPath: /hass-config
- name: hassio-conf-storage
mountPath: /config
volumes:
- name: hassio-storage
persistentVolumeClaim:
claimName: hassio-storage
- name: hassio-conf-storage
persistentVolumeClaim:
claimName: hassio-configurator
---
apiVersion: v1
kind: PersistentVolumeClaim
@@ -66,6 +102,20 @@ spec:
storage: 20Mi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-configurator
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: hassio
@@ -82,15 +132,48 @@ spec:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1beta1
apiVersion: v1
kind: Service
metadata:
name: hassio-conf
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: adm
protocol: TCP
name: adm
selector:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hassio
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: hassio.lan
http:
paths:
- backend:
serviceName: hassio
servicePort: http
- path: /
pathType: Prefix
backend:
service:
name: hassio
port:
name: http
- host: hassio-conf.lan
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: hassio-conf
port:
name: adm

Some files were not shown because too many files have changed in this diff.