19 Commits

SHA1 Message Date
1cc1de7ed8 no more dockerfile, we're podmanning now :) 2021-02-10 14:58:10 +01:00
b91ea42a41 updates 2021-02-10 14:49:14 +01:00
f616346ac6 sweet caroline 2021-02-09 20:26:53 +01:00
315520baa6 static pvs for essential services 2021-01-24 00:20:55 +01:00
76c036fa79 static pvs 2021-01-21 12:56:33 +01:00
5ce8a3b5be all subs 2021-01-21 12:56:11 +01:00
0bdd4a2db0 all subs 2021-01-21 12:54:20 +01:00
76e516c7f3 pvs for grafana and prometheus 2021-01-21 12:53:10 +01:00
c7363d513e using flannel now 2021-01-21 12:52:46 +01:00
8d66cb1f66 postgres svc fix 2021-01-21 12:52:18 +01:00
f9269f2c2c persistent grafana/prometheus pvs 2021-01-21 10:06:01 +01:00
9b9b551907 new run 2021-01-20 15:33:26 +01:00
ab96839f50 removed external-storage 2021-01-20 15:32:30 +01:00
a3bd4349e2 nummer5 in wks 2021-01-07 21:50:45 +01:00
4dcb961e81 tekton for the masses 2020-12-08 17:12:45 +01:00
9561cb8d82 grav on php74/bullseye 2020-11-30 19:34:31 +01:00
2e3e37062a new and old scrapes 2020-11-19 18:38:25 +01:00
f85ff91873 doesn;t work yet 2020-11-12 18:03:29 +01:00
62cb2881c2 d-ui needs less resources 2020-11-11 21:00:58 +01:00
50 changed files with 1753 additions and 249 deletions

12
.gitmodules vendored
View File

@@ -25,9 +25,6 @@
[submodule "mosquitto/charts"]
path = mosquitto/charts
url = https://github.com/smizy/charts.git
[submodule "external-storage"]
path = external-storage
url = https://github.com/kubernetes-incubator/external-storage.git
[submodule "mosquitto-exporter"]
path = mosquitto-exporter
url = https://github.com/sapcc/mosquitto-exporter.git
@@ -46,3 +43,12 @@
[submodule "apps/postgresql/postgres_exporter"]
path = apps/postgresql/postgres_exporter
url = https://github.com/wrouesnel/postgres_exporter.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git
[submodule "_sys/haproxy-ingress"]
path = _sys/haproxy-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "nfs-subdir-external-provisioner"]
path = nfs-subdir-external-provisioner
url = https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git

6
_sys/README.md Normal file
View File

@@ -0,0 +1,6 @@
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml
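For reference, the configmap.yaml applied above carries the descheduler policy; a minimal sketch of such a ConfigMap (strategy names and thresholds are assumptions taken from the upstream descheduler docs, not from this repo):

apiVersion: v1
kind: ConfigMap
metadata:
  name: descheduler-policy-configmap   # assumed upstream default name
  namespace: kube-system
data:
  policy.yaml: |
    apiVersion: "descheduler/v1alpha1"
    kind: "DeschedulerPolicy"
    strategies:
      # evict duplicate pods of the same owner from one node
      "RemoveDuplicates":
        enabled: true
      # rebalance pods away from overcommitted nodes
      "LowNodeUtilization":
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            thresholds:
              "memory": 20
            targetThresholds:
              "memory": 70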

View File

@@ -167,7 +167,7 @@ spec:
containerPort: 1024
env:
- name: TZ
value: "Etc/UTC"
value: "Europe/Berlin"
- name: POD_NAME
valueFrom:
fieldRef:

View File

@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
#namespace: nginx-ingress
namespace: default
data:
proxy-connect-timeout: "10s"
proxy-read-timeout: "10s"
client-max-body-size: "0"

663
_sys/ingress-nginx.yaml Normal file
View File

@@ -0,0 +1,663 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- update
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader-nginx
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- endpoints
verbs:
- create
- get
- update
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.1
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: k8s.gcr.io/ingress-nginx/controller:v0.43.0@sha256:9bba603b99bf25f6d117cf1235b6598c16033ad027b143c90fa5b3cc583c5713
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1beta1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: docker.io/jettech/kube-webhook-certgen:v1.5.0
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-3.19.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.43.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: docker.io/jettech/kube-webhook-certgen:v1.5.0
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000

223
_sys/kube-flannel.yml Normal file
View File

@@ -0,0 +1,223 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.23.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.13.1-rc1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.13.1-rc1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

View File

@@ -17,6 +17,7 @@ data:
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"mtu":1420,
"isDefaultGateway":true,
"hairpinMode":true,
"ipam":{
@@ -47,6 +48,7 @@ spec:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
args:
- "--auto-mtu=false"
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"

View File

@@ -1,8 +1,6 @@
FROM debian:buster-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN dpkg --add-architecture armhf && \
apt-get update && \
RUN apt-get update && \
apt-get install -y \
multiarch-support \
dpkg-dev \

View File

@@ -7,7 +7,7 @@ metadata:
name: distcc
namespace: default
spec:
replicas: 3
replicas: 5
selector:
matchLabels:
app: distcc
@@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: distcc
image: docker-registry.lan/distcc:armhf
image: docker-registry.lan/distcc:aarch64
imagePullPolicy: Always
#env:
#- name: OPTIONS

View File

@@ -2,8 +2,6 @@ Docker-ui
Build it for arm64:
docker build --platform linux/arm64 -t joxit/docker-registry-ui:static -f static.dockerfile github.com/Joxit/docker-registry-ui
docker build --platform linux/arm64 -t docker-registry.lan/docker-registry-ui:arm64 -f static.dockerfile github.com/Joxit/docker-registry-ui
docker tag 1494c11066f5 docker-registry.lan/docker-registry-ui:arm64
docker push docker-registry.lan/docker-registry-ui:arm64

View File

@@ -6,7 +6,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
replicas: 1
selector:
@@ -30,8 +29,8 @@ spec:
value: "dReg"
- name: DELETE_IMAGES
value: "true"
- name: REGISTRY_URL
value: "http://docker-registry-ui.lan"
#- name: REGISTRY_URL
# value: "http://docker-registry.lan"
- name: PULL_URL
value: "http://docker-registry.lan"
ports:
@@ -48,11 +47,11 @@ spec:
port: http
resources:
requests:
memory: "24Mi"
cpu: "50m"
memory: "20Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "100m"
memory: "32Mi"
cpu: "50m"
---
apiVersion: v1
kind: Service
@@ -61,7 +60,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
ports:
- port: 80

View File

@@ -24,7 +24,7 @@ spec:
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
imagePullPolicy: Always
env:
- name: USER_UID
value: "1000"
@@ -56,7 +56,7 @@ spec:
cpu: "250m"
limits:
memory: "1000Mi"
cpu: "500m"
cpu: "1500m"
volumes:
- name: gitea
persistentVolumeClaim:

View File

@@ -11,7 +11,7 @@ spec:
selector:
app: mariadb
type: LoadBalancer
loadBalancerIP: 172.23.255.4
loadBalancerIP: 172.23.255.5
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
@@ -65,18 +65,37 @@ spec:
volumes:
- name: mariadb-persistent-storage
persistentVolumeClaim:
claimName: mariadb-pv-claim
claimName: mariadb-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pv-claim
annotations:
volume.beta.kubernetes.io/storage-class: nfs-ssd
name: mariadb-data
spec:
storageClassName: nfs-ssd
volumeName: mariadb-data
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-data
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/mariadb-data
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: mariadb-data
namespace: default

View File

@@ -1,6 +1,6 @@
FROM debian:buster-slim
FROM debian:stable-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto mosquitto-clients procps && \

View File

@@ -117,8 +117,6 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
#annotations:
# volume.beta.kubernetes.io/storage-provisioner: nfs-storage
labels:
app: mosquitto
release: mqtt

View File

@@ -25,6 +25,13 @@ spec:
volumeMounts:
- mountPath: /data
name: data
resources:
limits:
cpu: "1"
memory: "200Mi"
requests:
memory: "64Mi"
cpu: "50m"
volumes:
- name: data
persistentVolumeClaim:
@@ -54,7 +61,7 @@ metadata:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: node-red.lan
- host: nodered.lan
http:
paths:
- path: /

View File

@@ -33,6 +33,8 @@ spec:
spec:
containers:
- env:
- name: TZ
value: Europe/Berlin
- name: WEB_PORT
value: "80"
- name: VIRTUAL_HOST
@@ -46,7 +48,7 @@ spec:
value: 208.67.222.222
- name: DNS2
value: 208.67.220.220
image: pihole/pihole:v5.1.2
image: pihole/pihole:latest
imagePullPolicy: Always
livenessProbe:
failureThreshold: 10

View File

@@ -21,6 +21,9 @@ spec:
containers:
- name: postgres
image: postgres
ports:
- containerPort: 5432
protocol: TCP
volumeMounts:
- name: postgres-disk
mountPath: /var/lib/postgresql/data

View File

@@ -0,0 +1,50 @@
FROM node:current-buster
# Set the commit of Zwave2Mqtt to checkout when cloning the repo
ENV Z2M_VERSION=9cc3740740b57f1e896139b5ffdb25be7576ad58
ENV DEBIAN_FRONTEND noninteractive
#setup local apt cache
#RUN sed -i 's@http://@http://apt-cache.lan/@g' /etc/apt/sources.list
#/apt-cache
# Install required dependencies
RUN apt update -y
RUN apt full-upgrade -y
# Packages we need
RUN apt install -y \
socat libopenzwave1.5 npm git
# Clone Zwave2Mqtt build pkg files and move them to /dist/pkg
RUN npm config set unsafe-perm true && npm install -g pkg
RUN cd /root \
&& git clone https://github.com/OpenZWave/Zwave2Mqtt.git \
&& cd Zwave2Mqtt \
&& git checkout ${Z2M_VERSION} \
&& npm install \
&& npm run build
# Clean up
RUN apt autoremove -y
RUN apt clean -y
RUN rm -rf /root/*
RUN apt-get clean -y
RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /dist/lib/ /lib/
COPY --from=build /dist/pkg /usr/src/app
# supervisor base configuration
ADD supervisor.conf /etc/supervisor.conf
LABEL maintainer="zoide"
# Set environment
ENV LD_LIBRARY_PATH /lib
EXPOSE 8091
CMD ["supervisord", "-c", "/etc/supervisor.conf"]
#CMD ["/usr/src/app/zwave2mqtt"]

View File

@@ -1,98 +0,0 @@
# ----------------
# STEP 1:
# https://lobradov.github.io/Building-docker-multiarch-images/
# Build Openzwave and Zwave2Mqtt pkg
# All result files will be put in /dist folder
FROM node:8.15.1-alpine AS build
# Set the commit of Zwave2Mqtt to checkout when cloning the repo
ENV Z2M_VERSION=9cc3740740b57f1e896139b5ffdb25be7576ad58
# Install required dependencies
RUN apk update && apk --no-cache add \
gnutls \
gnutls-dev \
libusb \
eudev \
# Install build dependencies
&& apk --no-cache --virtual .build-deps add \
coreutils \
eudev-dev \
build-base \
git \
python \
bash \
libusb-dev \
linux-headers \
wget \
tar \
openssl \
make \
socat
# Build binaries and move them to /dist/lib
RUN cd /root \
&& wget http://old.openzwave.com/downloads/openzwave-1.4.1.tar.gz \
&& tar zxvf openzwave-*.gz \
&& cd openzwave-* && make && make install \
&& mkdir -p /dist/lib \
&& mv libopenzwave.so* /dist/lib/
COPY bin/package.sh /root/package.sh
# Clone Zwave2Mqtt build pkg files and move them to /dist/pkg
RUN npm config set unsafe-perm true && npm install -g pkg \
&& cd /root \
&& git clone https://github.com/OpenZWave/Zwave2Mqtt.git \
&& cd Zwave2Mqtt \
&& git checkout ${Z2M_VERSION} \
&& npm install \
&& npm run build
RUN cd /root \
&& chmod +x package.sh && ./package.sh \
&& mkdir -p /dist/pkg \
&& mv /root/Zwave2Mqtt/pkg/* /dist/pkg
# Get last config DB from main repo and move files to /dist/db
RUN cd /root \
&& git clone https://github.com/OpenZWave/open-zwave.git \
&& cd open-zwave \
&& mkdir -p /dist/db \
&& mv config/* /dist/db
# Clean up
RUN rm -R /root/* && apk del .build-deps
# ----------------
# STEP 2:
# Run a minimal alpine image
FROM alpine:latest
LABEL maintainer="zoide"
RUN apk update && apk add --no-cache \
libstdc++ \
libgcc \
libusb \
eudev
# Copy files from previous build stage
COPY --from=build /dist/lib/ /lib/
COPY --from=build /dist/db/ /usr/local/etc/openzwave/
COPY --from=build /dist/pkg /usr/src/app
# supervisor base configuration
ADD supervisor.conf /etc/supervisor.conf
# Set enviroment
ENV LD_LIBRARY_PATH /lib
EXPOSE 8091
CMD ["supervisord", "-c", "/etc/supervisor.conf"]
#CMD ["/usr/src/app/zwave2mqtt"]

View File

@@ -35,21 +35,56 @@ spec:
httpGet:
path: /
port: http
# resources:
initialDelaySeconds: 60
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 60
periodSeconds: 5
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
- name: configurator
image: "causticlab/hass-configurator-docker:arm"
imagePullPolicy: Always
env:
- name: HC_HASS_API
value: http://127.0.0.1:8123/api/
- name: HC_HASS_API_PASSWORD
value: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4
ports:
- name: adm
containerPort: 3218
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 3218
initialDelaySeconds: 3
periodSeconds: 3
readinessProbe:
httpGet:
path: /
port: 3218
initialDelaySeconds: 10
periodSeconds: 5
volumeMounts:
- name: hassio-storage
mountPath: /hass-config
- name: hassio-conf-storage
mountPath: /config
volumes:
- name: hassio-storage
persistentVolumeClaim:
claimName: hassio-storage
- name: hassio-conf-storage
persistentVolumeClaim:
claimName: hassio-configurator
---
apiVersion: v1
kind: PersistentVolumeClaim
@@ -66,6 +101,20 @@ spec:
storage: 20Mi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-configurator
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: hassio
@@ -82,6 +131,23 @@ spec:
app: hassio
release: latest
---
apiVersion: v1
kind: Service
metadata:
name: hassio-conf
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: adm
protocol: TCP
name: adm
selector:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
@@ -94,3 +160,9 @@ spec:
- backend:
serviceName: hassio
servicePort: http
- host: hassio-conf.lan
http:
paths:
- backend:
serviceName: hassio-conf
servicePort: adm

8
apps/tekton/README.md Normal file
View File

@@ -0,0 +1,8 @@
Install:
# Pipelines: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml@
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard:
## update submodule in ./dashboard
## Build: @docker build -t tekton-dashboard:arm64 -t docker-registry.lan/tekton-dashboard:arm64 --platform linux/arm64 --build-arg GOARCH=arm64 .@
## apply deployment.yaml

1
apps/tekton/dashboard Submodule

Submodule apps/tekton/dashboard added at c881ad4d3a

View File

@@ -0,0 +1,12 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: hello
spec:
steps:
- name: hello
image: debian
command:
- echo
args:
- "Hello World!"

View File

@@ -0,0 +1,526 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: extensions.dashboard.tekton.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.apiVersion
name: API version
type: string
- JSONPath: .spec.name
name: Kind
type: string
- JSONPath: .spec.displayname
name: Display name
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: dashboard.tekton.dev
names:
categories:
- tekton
- tekton-dashboard
kind: Extension
plural: extensions
shortNames:
- ext
- exts
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- create
- update
- delete
- patch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.dashboard.tekton.dev/aggregate-to-dashboard: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
- taskruns/finalizers
- pipelineruns/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- taskruns/finalizers
- pipelineruns/finalizers
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-backend
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
ports:
- name: http
port: 9097
protocol: TCP
targetPort: 9097
selector:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
template:
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
name: tekton-dashboard
spec:
containers:
- args:
- --port=9097
- --logout-url=
- --pipelines-namespace=tekton-pipelines
- --triggers-namespace=tekton-pipelines
- --read-only=false
- --csrf-secure-cookie=false
- --log-level=info
- --log-format=json
- --namespace=
- --openshift=false
- --stream-logs=false
- --external-logs=
env:
- name: INSTALLED_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
- name: TEKTON_PIPELINES_WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
#image: gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard@sha256:744eb92d7d0365bbfb2405df4ba4d2a66c01edc26028c362bd5675e2bc1b9626
image: docker-registry.lan/tekton-dashboard:arm64
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /health
port: 9097
name: tekton-dashboard
ports:
- containerPort: 9097
readinessProbe:
httpGet:
path: /readiness
port: 9097
securityContext:
runAsNonRoot: true
runAsUser: 65532
serviceAccountName: tekton-dashboard
volumes: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-pipelines
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-dashboard
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-triggers
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-tenant
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-extensions
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
spec:
rules:
- host: tekton.lan
http:
paths:
- backend:
serviceName: tekton-dashboard
servicePort: 9097

View File

@@ -1,4 +1,4 @@
FROM debian:buster-slim
FROM debian:bullseye-slim
ENV DEBIAN_FRONTEND noninteractive
ARG GRAV_VERSION=1.6.28
@@ -22,7 +22,7 @@ RUN apt-get remove -y --purge ${DEV_PKGS} exim4* && \
RUN mkdir /run/php && \
chown www-data:www-data /var/log /run/php && \
mkdir -p /etc/php/7.3/fpm/pool.d
mkdir -p /etc/php/7.4/fpm/pool.d
ADD docker-entrypoint.sh /
ADD supervisor.conf /etc/supervisor.conf

View File

@@ -25,7 +25,7 @@ spec:
- name: grav-pages
mountPath: /var/www/grav
- name: grav-etc-php-fpm-www-conf
mountPath: /etc/php/7.3/fpm/pool.d
mountPath: /etc/php/7.4/fpm/pool.d
- image: nginx:alpine
name: nginx
imagePullPolicy: IfNotPresent

View File

@@ -10,5 +10,5 @@ user=root
[program:php-fpm]
command=/usr/sbin/php-fpm7.3 --nodaemonize --force-stderr
command=/usr/sbin/php-fpm7.4 --nodaemonize --force-stderr
user=www-data

View File

@@ -17,9 +17,8 @@ spec:
app: piwigo
spec:
containers:
- image: linuxserver/piwigo
- image: linuxserver/piwigo:latest
name: piwigo
imagePullPolicy: IfNotPresent
env:
# Use secret in real usage
- name: TZ

View File

@@ -0,0 +1,7 @@
apiVersion: v1
data:
prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0Ci0gam9iX25hbWU6IGhhcHJveHkKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS53a3M6OTEwMQogICAgLSBhdXRvMDIuY2hhb3M6OTEwMQotIGpvYl9uYW1lOiBrbGlwcGVyCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjM5MDMKLSBqb2JfbmFtZTogb2N0b3ByaW50CiAgbWV0cmljc19wYXRoOiAvcGx1Z2luL3Byb21ldGhldXNfZXhwb3J0ZXIvbWV0cmljcwogIHBhcmFtczoKICAgIGFwaWtleToKICAgIC0gMzBFOEIwMUJGRDY3NEU1QkJENDQ2RDA4QzQ3MzBERjQKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS53a3M6ODAKLSBqb2JfbmFtZTogaGFzc2lvCiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUpoTXpCbVlqVTFaamN5WkdFMFl6YzJZbVUyTm1ZME5qbGpOVEF5TWpkalpDSXNJbWxoZENJNk1UWXhNamc0TXpJNU55d2laWGh3SWpveE9USTRNalF6TWprM2ZRLjFJQ3NIbGlVWFIwQ0c0SDh2UVJZSjVqVnFGd21xS1NCMGZTY1NpdEMtUTQnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gaGFzc2lvLmxhbjo4MAotIGpvYl9uYW1lOiBoYXNzaW9fcmluZzg2CiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUkwT0dGalpUSmlObVEzT1RnME1qYzNZV0ZtTW1ObVptVTFZemM0TlRFME5DSXNJbWxoZENJNk1UWXhNakU1TWprME1Dd2laWGh3SWpveE9USTNOVFV5T1RRd2ZRLkJiSUFYbTlScTBqYjZvdXFnVkhOZDZLZWV6M05QM3loLTd3eWZ1b0I4WWsnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gYXV0by5jaGFvczo4MAotIGpvYl9uYW1lOiBub2RlCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhZG0wMS53a3M6OTEwMAogICAgLSBkdW1vbnQud2tzOjkxMDAKICAgIC0gZHJ1Y2tpLndrczo5MTAwCiAgICAtIGViaW4wMS53a3M6OTEwMAogICAgLSBlYmluMDIud2tzOjkxMDAKICAgIC0gcmlvdDAxLndrczo5MTAwCiAgICAtIHRydWhlLmNoYW9zOjkxMDAKICAgIC0gYXV0bzAxLmNoYW9zOjkxMDAKICAgIC0gYXV0bzAyLmNoYW9zOjkxMDAKICAgIC0gZHVtb250LmNoYW9zOjkxMDAKICAgIC0gdHVtb3IuY2hhb3M6OTEwMAogICAgLSB3b2huei5jaGFvczo5MTAwCg==
kind: Secret
metadata:
creationTimestamp: null
name: additional-scrape-configs

View File

@@ -0,0 +1,56 @@
- job_name: gitea
static_configs:
- targets:
- git-ui.lan
- job_name: mysqld
static_configs:
- targets:
- mariadb.lan:9104
- job_name: mqtt.mosquitto
static_configs:
- targets:
- mqtt.lan:9234
- job_name: haproxy
static_configs:
- targets:
- drucki.wks:9101
- auto02.chaos:9101
- job_name: klipper
static_configs:
- targets:
- drucki.wks:3903
- job_name: octoprint
metrics_path: /plugin/prometheus_exporter/metrics
params:
apikey:
- 30E8B01BFD674E5BBD446D08C4730DF4
static_configs:
- targets:
- drucki.wks:80
- job_name: hassio
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4'
static_configs:
- targets:
- hassio.lan:80
- job_name: hassio_ring86
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiI0OGFjZTJiNmQ3OTg0Mjc3YWFmMmNmZmU1Yzc4NTE0NCIsImlhdCI6MTYxMjE5Mjk0MCwiZXhwIjoxOTI3NTUyOTQwfQ.BbIAXm9Rq0jb6ouqgVHNd6Keez3NP3yh-7wyfuoB8Yk'
static_configs:
- targets:
- auto.chaos:80
- job_name: node
static_configs:
- targets:
- adm01.wks:9100
- dumont.wks:9100
- drucki.wks:9100
- ebin01.wks:9100
- ebin02.wks:9100
- riot01.wks:9100
- truhe.chaos:9100
- auto01.chaos:9100
- auto02.chaos:9100
- dumont.chaos:9100
- tumor.chaos:9100
- wohnz.chaos:9100
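The Secret above is consumed by the prometheus-operator through spec.additionalScrapeConfigs; a sketch of the wiring, assuming the kube-prometheus default Prometheus object k8s in the monitoring namespace (the namespace matches the PVs below, the object name is an assumption):

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s                 # assumed kube-prometheus default
  namespace: monitoring
spec:
  additionalScrapeConfigs:
    name: additional-scrape-configs   # the Secret defined above
    key: prometheus-additional.yaml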

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: prometheus-db
annotations:
pv.kubernetes.io/provisioned-by: nfs-ssd
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/prometheus-db
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: prometheus-k8s-db-prometheus-k8s-0
namespace: monitoring
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-conf
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/grafana-conf
server: ebin01
capacity:
storage: 40Mi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: grafana-conf
namespace: monitoring
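Both PVs are pre-bound through claimRef, so the consuming claim has to request the same volume, mirroring the mariadb-data pattern earlier in this change; a sketch of the matching PVC for grafana-conf:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-conf
  namespace: monitoring
spec:
  storageClassName: nfs-ssd
  volumeName: grafana-conf      # binds to the static PV above
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 40Mi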

Submodule external-storage deleted from ea9eda7019

View File

@@ -1,7 +0,0 @@
apiVersion: v1
data:
prometheus-additional.yaml: LSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0Ci0gam9iX25hbWU6IGhhcHJveHkKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS53a3M6OTEwMQogICAgLSByaW90MDEuY2hhb3M6OTEwMQogICAgLSBhdXRvOjkxMDEKLSBqb2JfbmFtZToga2xpcHBlcgogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHJ1Y2tpLndrczozOTAzCi0gam9iX25hbWU6IG9jdG9wcmludAogIG1ldHJpY3NfcGF0aDogL3BsdWdpbi9wcm9tZXRoZXVzX2V4cG9ydGVyL21ldHJpY3MKICBwYXJhbXM6CiAgICBhcGlrZXk6CiAgICAtIDMwRThCMDFCRkQ2NzRFNUJCRDQ0NkQwOEM0NzMwREY0CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjgwCi0gam9iX25hbWU6IG9wZW5oYWIyCiAgbWV0cmljc19wYXRoOiAvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhdXRvLmNoYW9zOjk5OTkKLSBqb2JfbmFtZTogbm9kZQogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHVtb250LmNoYW9zOjkxMDAKICAgIC0gZHVtb250Lndrczo5MTAwCiAgICAtIGF1dG8wMTo5MTAwCiAgICAtIGRydWNraS53a3M6OTEwMAogICAgLSBlYmluMDEuY2hhb3M6OTEwMAogICAgLSBlYmluMDIuY2hhb3M6OTEwMAogICAgLSByaW90MDEuY2hhb3M6OTEwMAogICAgLSB0cnVoZTo5MTAwCiAgICAtIHR1bW9yLmNoYW9zOjkxMDAKICAgIC0gd29obno6OTEwMAogICAgLSB5b3JpLmNoYW9zOjkxMDAK
kind: Secret
metadata:
creationTimestamp: null
name: additional-scrape-configs

View File

@@ -1,45 +0,0 @@
- job_name: mysqld
static_configs:
- targets:
- mariadb.lan:9104
- job_name: mqtt.mosquitto
static_configs:
- targets:
- mqtt.lan:9234
- job_name: haproxy
static_configs:
- targets:
- drucki.wks:9101
- riot01.chaos:9101
- auto:9101
- job_name: klipper
static_configs:
- targets:
- drucki.wks:3903
- job_name: octoprint
metrics_path: /plugin/prometheus_exporter/metrics
params:
apikey:
- 30E8B01BFD674E5BBD446D08C4730DF4
static_configs:
- targets:
- drucki.wks:80
- job_name: openhab2
metrics_path: /
static_configs:
- targets:
- auto.chaos:9999
- job_name: node
static_configs:
- targets:
- dumont.chaos:9100
- dumont.wks:9100
- auto01:9100
- drucki.wks:9100
- ebin01.chaos:9100
- ebin02.chaos:9100
- riot01.chaos:9100
- truhe:9100
- tumor.chaos:9100
- wohnz:9100
- yori.chaos:9100

View File

@@ -1,42 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-storage
spec:
capacity:
storage: 5Gi # Doesn't really matter, as EFS does not enforce it anyway
volumeMode: Filesystem
accessModes:
- ReadWriteMany
mountOptions:
- hard
- nfsvers=4.1
- rsize=1048576
- wsize=1048576
- timeo=300
- retrans=2
nfs:
path: /k8s-data-fast/grafana
server: ebin01.chaos
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: prometheus-k8s-db-prometheus-k8s-0
spec:
capacity:
storage: 50Gi # Doesn't really matter, as EFS does not enforce it anyway
volumeMode: Filesystem
accessModes:
- ReadWriteMany
mountOptions:
- hard
- nfsvers=4.1
- rsize=1048576
- wsize=1048576
- timeo=300
- retrans=2
nfs:
path: /k8s-data-fast/prometheus
server: ebin02.chaos

View File

@@ -1,9 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: fast
annotations:
storageclass.kubernetes.io/is-default-class: true
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Retain