33 Commits

SHA1 Message Date
2e3bb35f86 coredns update 2023-10-15 19:17:51 +02:00
47cbd88587 coredns / cluster upgrade 2023-01-16 18:57:58 +01:00
dd74762778 tekton PVC? required? 2023-01-12 20:54:31 +01:00
07d7f45e64 other stings 2023-01-12 20:53:46 +01:00
536c0c4ddc flannel 0.20 upgrade 2023-01-12 20:53:23 +01:00
fcb2e69615 upgrade galore from 1.23 to 1.26. and cluster is still at 1.25? See: Readme.md 2023-01-12 20:52:46 +01:00
e2e032ac94 another nfs -client provisioner 2022-12-08 17:51:23 +01:00
4bbf79569c another nfs -client provisioner 2022-12-08 17:47:14 +01:00
273fb0e252 more updates 2022-12-08 17:09:38 +01:00
62f5788742 changing output dir 2022-12-08 16:47:19 +01:00
9b2d2a9d95 php-fpm 2022-12-08 16:43:36 +01:00
b5ff289f66 stuff 2022-12-08 16:39:52 +01:00
7cb8d572e7 stuff 2022-12-08 14:03:01 +01:00
14aceae467 new version and create dirs on run 2022-12-08 13:57:10 +01:00
604d065252 new version and create dirs on run 2022-12-08 13:09:24 +01:00
b50d6de8f7 cleanup 2022-11-18 10:26:13 +01:00
79c4e5e0c7 tekton stuff and install 2022-11-18 10:24:39 +01:00
d7241c7563 removed obsolete submods 2022-11-18 10:21:37 +01:00
8fbf07efdf removed descheduler, helm is on its way 2022-10-25 14:03:10 +02:00
beb1bfe0da nginx ingress is installed via helm now 2022-10-25 14:01:34 +02:00
8b62746bcc cleanup 2022-10-12 13:20:42 +02:00
94b39a804b merged 2022-09-19 16:58:14 +02:00
43d17581b3 gitea and apt-cacher 2022-09-19 16:56:40 +02:00
180d28fe80 Merge branch 'master' of git.lan:chaos/kubernetes 2022-09-19 16:54:53 +02:00
30ba290918 don't know why this shit doesn't run anymore 2022-09-10 13:32:34 +02:00
b111463cf5 Merge branch 'master' of git.lan:chaos/kubernetes 2022-08-24 19:17:10 +02:00
c2f6c546eb gitea uses ebin02 2022-08-24 19:16:24 +02:00
748b94f069 local changes 2022-07-30 12:54:52 +02:00
59c019727d rompr version 1.61 2022-07-30 12:51:17 +02:00
17f8b2f5cb mosquitto and prometheus 2022-07-30 12:43:56 +02:00
105e051d64 grav and tekton 2022-07-30 12:33:26 +02:00
9b92cf35e0 Merge branch 'master' of git.lan:chaos/kubernetes 2022-07-30 12:29:55 +02:00
41a2ba8c82 Dockerfile using our debian image 2022-07-30 12:29:43 +02:00
40 changed files with 528 additions and 3407 deletions

2
.gitmodules vendored
View File

@@ -3,7 +3,7 @@
url = https://github.com/coreos/kube-prometheus.git
[submodule "cluster-monitoring"]
path = cluster-monitoring
url = https://github.com/carlosedp/cluster-monitoring.git
url = git@git.lan:chaos/k8s-cluster-monitoring.git
[submodule "gluster-kubernetes"]
path = gluster-kubernetes
url = https://github.com/jayflory/gluster-kubernetes.git

9
TODO.md Normal file
View File

@@ -0,0 +1,9 @@
root@pine01:/etc/kubernetes# kubeadm upgrade apply v1.24.9 --ignore-preflight-errors=CoreDNSUnsupportedPlugins
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0112 18:28:48.533830 21616 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
CoreDNS v1.8.6 v1.9.3
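The CoreDNSUnsupportedPlugins preflight warning usually means the running Corefile still uses plugins dropped in newer CoreDNS releases. A quick pre-upgrade check, sketched here assuming the standard kube-system ConfigMap/Deployment names:
```
# dump the live Corefile and look for plugins removed in newer CoreDNS releases
kubectl -n kube-system get cm coredns -o jsonpath='{.data.Corefile}'
# image/version currently running
kubectl -n kube-system get deploy coredns -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
```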

33
_CI-CD/tekton-pvc.yaml Normal file
View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tektoncd-workspaces
namespace: default
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi
storageClassName: nfs-ssd-ebin02
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: tektoncd-workspaces
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/tektoncd-workspaces
server: ebin02
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: tektoncd-workspaces
namespace: default
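Since the PV is pre-bound via claimRef, it is worth confirming the claim actually binds; note the PVC above asks for ReadWriteMany while the PV only lists ReadWriteOnce, which can block binding. A minimal check:
```
kubectl -n default get pvc tektoncd-workspaces                     # STATUS should be Bound
kubectl get pv tektoncd-workspaces -o jsonpath='{.status.phase}{"\n"}'
```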

View File

@@ -22,8 +22,10 @@ spec:
value: $(params.git-revision)
- name: submodules
value: false
- name: subdirectory
value: "source"
workspaces:
- name: output
- name: source
workspace: git-source
- name: build-image
taskRef:

View File

@@ -33,8 +33,7 @@ spec:
default: []
- name: BUILDER_IMAGE
description: The image on which builds will run (default is v1.5.1)
default: gcr.io/kaniko-project/executor:v1.8.0
#default: gcr.io/kaniko-project/executor:v1.5.1@sha256:c6166717f7fe0b7da44908c986137ecfeab21f31ec3992f6e128fff8a94be8a5
default: gcr.io/kaniko-project/executor:v1.9.1
workspaces:
- name: source
description: Holds the context and docker file
@@ -47,12 +46,21 @@ spec:
description: Digest of the image just built.
steps:
- name: debug
workingDir: $(workspaces.source.path)
image: bash
script: |
#!/usr/bin/env bash
export
pwd
mount
ls -al
- name: build-and-push
workingDir: $(workspaces.source.path)
image: $(params.BUILDER_IMAGE)
args:
- $(params.EXTRA_ARGS[*])
- --dockerfile=$(workspaces.source.path)/$(params.DOCKERFILE)
- --dockerfile=$(params.DOCKERFILE)
- --context=$(params.CONTEXT) # The user does not need to care the workspace and the source.
- --destination=$(params.IMAGE)
- --digest-file=/tekton/results/IMAGE-DIGEST
@@ -60,6 +68,9 @@ spec:
- --single-snapshot
- --use-new-run
- --skip-tls-verify
- --cache
- --cache-copy-layers
- --cache-dir=/workspace/cache
# kaniko assumes it is running as root, which means this example fails on platforms
# that default to run containers as random uid (like OpenShift). Adding this securityContext
# makes it explicit that it needs to run as root.

View File

@@ -1,3 +1,87 @@
Upgrade:
```
export KV=1.26.0-00;
apt-mark unhold kubeadm=$KV kubectl=$KV kubelet=$KV;
apt install -y kubeadm=$KV;
```
```
kubeadm upgrade node #Other pines in the wood
```
```
#pine01
kubeadm upgrade plan --ignore-preflight-errors=CoreDNSUnsupportedPlugins;
kubeadm config images pull;
kubeadm upgrade apply ${KV/\-*/} --ignore-preflight-errors=CoreDNSUnsupportedPlugins --certificate-renewal=false; #sometimes true
```
```
apt install kubectl=$KV kubelet=$KV;
systemctl daemon-reload && systemctl restart kubelet;
apt-mark hold kubeadm=$KV kubectl=$KV kubelet=$KV;
echo 'You can now uncordon, der Geraet';
```
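The final echo assumes the node was cordoned/drained before the upgrade; a sketch of the surrounding drain/uncordon steps run from a machine with kubectl access (pine0X is a placeholder node name):
```
kubectl drain pine0X --ignore-daemonsets --delete-emptydir-data   # before the kubeadm/apt steps on that node
# ...run the upgrade steps above on the node...
kubectl uncordon pine0X
```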
# Infos:
```
$ kubectl -n kube-system get cm kubeadm-config -o yaml
apiVersion: v1
data:
ClusterConfiguration: |
apiServer:
extraArgs:
authorization-mode: Node,RBAC
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: v1.23.15
networking:
dnsDomain: cluster.local
podSubnet: 172.23.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
ClusterStatus: |
apiEndpoints:
pine01:
advertiseAddress: 172.16.23.21
bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus
kind: ConfigMap
metadata:
creationTimestamp: "2021-01-20T14:55:12Z"
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:data:
.: {}
f:ClusterConfiguration: {}
f:ClusterStatus: {}
manager: kubeadm
operation: Update
time: "2021-01-20T14:55:12Z"
name: kubeadm-config
namespace: kube-system
resourceVersion: "441685033"
uid: c70fefd3-02c3-44c8-a37d-7b17ec445455
```
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
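The upstream descheduler repo also documents a kustomize-based CronJob install; a hedged sketch along those lines (the ref tag is an assumption, pick one matching the cluster version):
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.25.0' | kubectl apply -f -
```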

122
_sys/coredns-1.26-x.yaml Normal file
View File

@@ -0,0 +1,122 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
uid: 7bf78c83-68ac-4dee-95f6-52964e38e2d3
resourceVersion: '441765420'
generation: 37
creationTimestamp: '2021-01-20T14:55:14Z'
labels:
k8s-app: kube-dns
annotations:
deployment.kubernetes.io/revision: '34'
spec:
replicas: 2
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
spec:
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
defaultMode: 420
containers:
- name: coredns
image: registry.k8s.io/coredns/coredns:v1.9.3
args:
- '-conf'
- /etc/coredns/Corefile
ports:
- name: dns
containerPort: 53
protocol: UDP
- name: dns-tcp
containerPort: 53
protocol: TCP
- name: metrics
containerPort: 9153
protocol: TCP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- name: config-volume
readOnly: true
mountPath: /etc/coredns
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: coredns
serviceAccount: coredns
securityContext: {}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
priorityClassName: system-cluster-critical
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
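Note this manifest is a kubectl dump that still carries server-managed fields (uid, resourceVersion, generation), which may need stripping before a clean apply. Once the v1.9.3 image is rolled out, a quick verification sketch:
```
kubectl -n kube-system rollout status deployment/coredns
kubectl -n kube-system logs deploy/coredns --tail=20   # startup banner should show CoreDNS-1.9.3, no plugin errors
```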

View File

@@ -26,23 +26,45 @@ data:
reload
loadbalance
}
lan.db: "; lan. zone file\n$ORIGIN lan.\n@ 3600 IN SOA sns.dns.icann.org.
noc.dns.icann.org. 2021102006 7200 3600 1209600 3600\n 3600 IN NS 172.23.255.252\n\nns
\ IN A 172.23.255.252\nsalt IN A 192.168.10.2 \nmqtt
\ IN A 172.16.23.1\nwww-proxy IN A 172.23.255.1\ngit IN
\ A 172.23.255.2\npostgresql IN A 172.23.255.4\nmariadb IN A
\ 172.23.255.5\npihole IN A 172.23.255.253\nadm IN CNAME
adm01.wks.\n\nprometheus IN CNAME www-proxy \nalertmanager IN CNAME
www-proxy\nstats IN CNAME www-proxy\ncr-ui IN CNAME
www-proxy\napt IN CNAME www-proxy\napt-cache IN CNAME
www-proxy\nnodered IN CNAME www-proxy\nfoto IN CNAME
www-proxy\nmusik IN CNAME www-proxy\nhassio IN CNAME
www-proxy\nhassio-conf IN CNAME www-proxy \ngit-ui IN CNAME
www-proxy\ngrav IN CNAME www-proxy\ntekton IN CNAME
www-proxy\nnc IN CNAME www-proxy\nauth IN CNAME
www-proxy\npublic.auth IN CNAME www-proxy \nsecure.auth IN CNAME
www-proxy\ndocker-registry IN CNAME adm\ncr IN CNAME adm\ndr-mirror
\ IN CNAME adm\nlog IN CNAME adm\n"
lan.db: |
;lan. zone file
$ORIGIN lan.
@ 600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2022032201 7200 600 1209600 600
3600 IN NS 172.23.255.252
ns IN A 172.23.255.252
salt IN A 192.168.10.2
mqtt IN A 172.16.23.1
www-proxy IN A 172.23.255.1
git IN A 172.23.255.2
postgresql IN A 172.23.255.4
mariadb IN A 172.23.255.5
redis IN A 172.23.255.6
pihole IN A 172.23.255.253
adm IN CNAME adm01.wks.
prometheus IN CNAME www-proxy
alertmanager IN CNAME www-proxy
stats IN CNAME www-proxy
cr-ui IN CNAME www-proxy
apt IN CNAME www-proxy
apt-cache IN CNAME www-proxy
nodered IN CNAME www-proxy
foto IN CNAME www-proxy
musik IN CNAME www-proxy
hassio IN CNAME www-proxy
hassio-conf IN CNAME www-proxy
git-ui IN CNAME www-proxy
grav IN CNAME www-proxy
tekton IN CNAME www-proxy
nc IN CNAME www-proxy
dolibarr IN CNAME www-proxy
auth IN CNAME www-proxy
public.auth IN CNAME www-proxy
secure.auth IN CNAME www-proxy
docker-registry IN CNAME adm
cr IN CNAME adm
dr-mirror IN CNAME adm
log IN CNAME adm
---
apiVersion: v1
kind: Service
@@ -63,6 +85,7 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
labels:
k8s-app: kube-dns
spec:
@@ -86,7 +109,7 @@ spec:
- args:
- -conf
- /etc/coredns/Corefile
image: k8s.gcr.io/coredns/coredns:v1.8.4
image: registry.k8s.io/coredns/coredns:v1.9.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
@@ -109,6 +132,16 @@ spec:
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 5
readinessProbe:
failureThreshold: 3
httpGet:
@@ -132,6 +165,7 @@ spec:
drop:
- all
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
@@ -164,4 +198,5 @@ spec:
- key: lan.db
path: lan.db
name: coredns
name: config-volume
name: config-volume
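With lan.db now a readable block scalar, resolution through the cluster DNS can be spot-checked from a throwaway pod; a sketch (image and pod name are illustrative):
```
kubectl run -it --rm dnstest --image=busybox:1.36 --restart=Never -- nslookup git.lan
kubectl run -it --rm dnstest --image=busybox:1.36 --restart=Never -- nslookup prometheus.lan   # CNAME via www-proxy
```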

View File

@@ -5,7 +5,7 @@ metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/15 * * * *"
schedule: "*/50 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:latest
image: k8s.gcr.io/descheduler/descheduler:v0.25.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume

View File

@@ -7,7 +7,7 @@ data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 3
maxNoOfPodsToEvictPerNode: 1
strategies:
"RemoveDuplicates":
enabled: true

View File

@@ -1,674 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.1
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
namespace: ingress-nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.1
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.0.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
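This static manifest is removed because, per the commit history, ingress-nginx is installed via Helm now. A hedged sketch of an equivalent Helm install, with the loadBalancerIP taken from the removed Service:
```
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.loadBalancerIP=172.23.255.1   # IP from the old controller Service
```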

View File

@@ -1,60 +1,16 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
kind: Namespace
apiVersion: v1
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
@@ -66,6 +22,7 @@ rules:
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
@@ -86,19 +43,19 @@ roleRef:
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
namespace: kube-flannel
labels:
tier: node
app: flannel
@@ -135,7 +92,7 @@ apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
namespace: kube-flannel
labels:
tier: node
app: flannel
@@ -165,8 +122,21 @@ spec:
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
image: quay.io/coreos/flannel:v0.14.0
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- cp
args:
@@ -180,7 +150,8 @@ spec:
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- /opt/bin/flanneld
args:
@@ -206,18 +177,29 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
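The flannel manifest moves everything from kube-system into the new kube-flannel namespace and bumps the images to v0.20.2; a quick post-apply check (a sketch):
```
kubectl -n kube-flannel get daemonset kube-flannel-ds
kubectl -n kube-flannel get pods -o wide               # one Running pod per node
kubectl -n kube-system get pods -l app=flannel         # should be empty once the old objects are gone
```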

View File

@@ -21,7 +21,7 @@ spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-hdd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
@@ -31,7 +31,7 @@ spec:
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-hdd/k8s-data
value: /data/k8s-data-hdd
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -46,4 +46,4 @@ spec:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-hdd/k8s-data
path: /data/k8s-data-hdd

View File

@@ -21,7 +21,7 @@ spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin01
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes

View File

@@ -21,7 +21,7 @@ spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin02
image: quay.io/external_storage/nfs-client-provisioner-arm:latest
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
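All three nfs-client provisioners now run the upstream nfs-subdir-external-provisioner image; dynamic provisioning can be smoke-tested with a throwaway claim. The StorageClass name below is an assumption — use whatever class the provisioner deployments actually register:
```
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-provisioner-test
spec:
  storageClassName: nfs-ssd-ebin02   # assumed class name; adjust to the deployed StorageClass
  accessModes: [ReadWriteOnce]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc nfs-provisioner-test -w   # should turn Bound once the provisioner creates a PV
kubectl delete pvc nfs-provisioner-test
```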

View File

@@ -76,9 +76,30 @@ kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin02
volumeName: apt-cacher-ng
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: apt-cacher-ng
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/apt-cacher-ng
server: ebin02
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: apt-cacher-volume
namespace: live-infra

View File

@@ -20,7 +20,6 @@ spec:
spec:
containers:
- name: registry-ui
#image: cr.lan/docker-registry-ui:arm64
image: docker.io/joxit/docker-registry-ui:latest
imagePullPolicy: Always
env:

View File

@@ -45,16 +45,26 @@ spec:
value: gitea
- name: DB_PASSWD
value: giteaEu94XSS4gKpheSBoMsIs
- name: GITEA__indexer__ISSUE_INDEXER_TYPE
value: db
#- name: GITEA__indexer__ISSUE_INDEXER
#value: redis
#- name: GITEA__indexer__ISSUE_INDEXER_QUEUE_CONN_STR
#value: addrs=redis-standalone.live-env.svc.cluster.local:6379 db=1
- name: GITEA__packages__ENABLED
value: "true"
- name: GITEA__log__LEVEL
value: warn
- name: GITEA__log__MODE
value: console
- name: GITEA__queue__TYPE
value: persistable-channel
value: file
- name: GITEA__log__ROUTER
value: file
- name: GITEA__log__MACARON
value: file
#- name: GITEA__queue__TYPE
#value: redis
#- name: GITEA__queue__CONN_STR
#value: redis://redis-standalone.live-env.svc.cluster.local:6397/0
- name: GITEA__server__ROOT_URL
value: http://git-ui.lan/
volumeMounts:
- name: gitea
mountPath: /data
@@ -66,13 +76,13 @@ spec:
containerPort: 22
protocol : TCP
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 10
httpGet:
path: /
port: http
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
port: http
readinessProbe:
initialDelaySeconds: 30
initialDelaySeconds: 300
periodSeconds: 10
httpGet:
path: /
@@ -96,7 +106,8 @@ metadata:
labels:
app: gitea
spec:
storageClassName: nfs-ssd
storageClassName: nfs-ssd-ebin02
volumeName: gitea
accessModes:
- ReadWriteOnce
resources:
@@ -104,6 +115,26 @@ spec:
storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: gitea
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/gitea-data
server: ebin02
capacity:
storage: 20Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: gitea
namespace: live-env
---
apiVersion: v1
kind: Service
metadata:
name: gitea

View File

@@ -1,6 +1,6 @@
FROM cr.lan/debian-stable-php-fpm
ENV DEBIAN_FRONTEND noninteractive
ARG GRAV_VERSION=1.6.28
ARG GRAV_VERSION=1.7.34
ARG DEV_PKGS="zlib1g-dev libpng-dev libjpeg-dev libfreetype6-dev \
libcurl4-gnutls-dev libxml2-dev libonig-dev"

23
apps/grav/tekton.yaml Normal file
View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-grav
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/grav
- name: path-to-dockerfile
value: apps/grav/Dockerfile
- name: image-name
value: cr.lan/grav
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/grav
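The PipelineRun wires the grav Dockerfile into the shared kaniko-pipeline via a subPath on the tektoncd-workspaces claim. A sketch of kicking it off and following it (the tkn call assumes the Tekton CLI is installed):
```
kubectl create -f apps/grav/tekton.yaml        # PipelineRun names are fixed, so delete before re-running
tkn pipelinerun logs img-grav -f
# without tkn:
kubectl get pipelinerun img-grav -o jsonpath='{.status.conditions[0].message}{"\n"}'
```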

View File

@@ -1,5 +1,5 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
RUN set -ex; \
apt-get update; \

View File

@@ -1,5 +1,5 @@
# vim:set ft=dockerfile:
FROM debian:buster-slim
FROM cr.lan/debian-stable
# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added
RUN groupadd -r mysql && useradd -r -g mysql mysql

View File

@@ -1,8 +1,6 @@
FROM debian:stable-slim
FROM cr.lan/debian-stable
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
RUN apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto procps && \
apt-get clean -y && \

View File

@@ -0,0 +1,10 @@
FROM cr.lan/debian-golang-stable
ENV GOARCH=arm64
ENV GOPATH=/usr/src/gopath
ENV GOCACHE=/usr/src/gocache
RUN go env
WORKDIR /usr/src
RUN go get github.com/sapcc/mosquitto-exporter
RUN make -j4 build CGO_ENABLED=0
RUN ls -al

View File

@@ -1,77 +0,0 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mosquitto/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mosquitto
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --snapshotMode=redo
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mosquitto
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto

View File

@@ -0,0 +1,24 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mosquitto-prometheus
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/mosquitto/prometheus
- name: path-to-dockerfile
value: apps/mosquitto/prometheus/Dockerfile
- name: image-name
value: cr.lan/mosquitto-prometheus-exporter
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mosquitto-prometheus

View File

@@ -0,0 +1,23 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: img-mosquitto
spec:
pipelineRef:
name: kaniko-pipeline
params:
- name: git-url
value: http://git-ui.lan/chaos/kubernetes.git
- name: git-revision
value: master
- name: path-to-image-context
value: apps/mosquitto
- name: path-to-dockerfile
value: apps/mosquitto/Dockerfile
- name: image-name
value: cr.lan/mosquitto
workspaces:
- name: git-source
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: tekton/mosquitto

View File

@@ -1,4 +1,4 @@
FROM debian:stable-slim
FROM cr.lan/debian-stable
#RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \

View File

@@ -49,7 +49,7 @@ spec:
- key: app
operator: In
values:
- promtheus
- prometheus
- loki
topologyKey: kubernetes.io/hostname
# - name: prometheus-exporter

View File

@@ -5,6 +5,7 @@ metadata:
namespace: live-env
data:
redis.conf: |-
bind * -::*
appendonly yes
maxmemory 5mb
---

View File

@@ -1,6 +1,6 @@
FROM cr.lan/debian-stable-php-fpm
ARG ROMPR_VERSION=1.60.1
ARG ROMPR_VERSION=1.61
# Install packages
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && \

View File

@@ -2,6 +2,7 @@
rm -f /var/run/nginx.pid
mkdir -p /var/log/nginx
set -e
mkdir -p /rompr/albumart /rompr/prefs
chown www-data:www-data -R /rompr/albumart /rompr/prefs
/etc/init.d/php7.4-fpm restart
exec /usr/sbin/nginx -g 'daemon off;'

View File

@@ -13,7 +13,7 @@ spec:
- name: path-to-image-context
value: apps/rompr
- name: path-to-dockerfile
value: apps/rompr/Dockerfile
value: ./Dockerfile
- name: image-name
value: cr.lan/rompr
workspaces:

View File

@@ -1,8 +1,6 @@
Install:
# Pipelines: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml@
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard:
## update submodule in ./dashboard
## Build: @docker build -t tekton-dashboard:arm64 -t docker-registry.lan/tekton-dashboard:arm64 --platform linux/arm64 --build-arg GOARCH=arm64 .@
## apply deployment.yaml
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@
@kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/tekton-dashboard-release.yaml@
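After applying the release manifests, a quick way to confirm pipelines, triggers and the dashboard came up (a sketch):
```
kubectl get pods -n tekton-pipelines
kubectl -n tekton-pipelines get svc tekton-dashboard   # dashboard listens on 9097
```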

View File

@@ -1,526 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: extensions.dashboard.tekton.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.apiVersion
name: API version
type: string
- JSONPath: .spec.name
name: Kind
type: string
- JSONPath: .spec.displayname
name: Display name
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: dashboard.tekton.dev
names:
categories:
- tekton
- tekton-dashboard
kind: Extension
plural: extensions
shortNames:
- ext
- exts
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- create
- update
- delete
- patch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.dashboard.tekton.dev/aggregate-to-dashboard: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
- taskruns/finalizers
- pipelineruns/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- taskruns/finalizers
- pipelineruns/finalizers
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- create
- update
- delete
- patch
- add
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-backend
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
ports:
- name: http
port: 9097
protocol: TCP
targetPort: 9097
selector:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
template:
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
name: tekton-dashboard
spec:
containers:
- args:
- --port=9097
- --logout-url=
- --pipelines-namespace=tekton-pipelines
- --triggers-namespace=tekton-pipelines
- --read-only=false
- --csrf-secure-cookie=false
- --log-level=info
- --log-format=json
- --namespace=
- --openshift=false
- --stream-logs=false
- --external-logs=
env:
- name: INSTALLED_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
- name: TEKTON_PIPELINES_WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
#image: gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard@sha256:744eb92d7d0365bbfb2405df4ba4d2a66c01edc26028c362bd5675e2bc1b9626
image: docker-registry.lan/tekton-dashboard:arm64
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /health
port: 9097
name: tekton-dashboard
ports:
- containerPort: 9097
readinessProbe:
httpGet:
path: /readiness
port: 9097
securityContext:
runAsNonRoot: true
runAsUser: 65532
serviceAccountName: tekton-dashboard
volumes: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-pipelines
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-dashboard
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-triggers
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-tenant
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-extensions
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
spec:
rules:
- host: tekton.lan
http:
paths:
- backend:
serviceName: tekton-dashboard
servicePort: 9097

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
apiVersion: v1
data:
prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0CiAgICAtIG1xdHQuY2hhb3M6OTIzNAotIGpvYl9uYW1lOiBoYXByb3h5CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhZG0wMS53a3M6OTEwMQogICAgLSBkcnVja2kud2tzOjkxMDEKICAgIC0gYXV0bzAxLmNoYW9zOjkxMDEKICAgIC0gYXV0bzAyLmNoYW9zOjkxMDEKLSBqb2JfbmFtZToga2xpcHBlcgogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHJ1Y2tpLndrczozOTAzCi0gam9iX25hbWU6IG9jdG9wcmludAogIG1ldHJpY3NfcGF0aDogL3BsdWdpbi9wcm9tZXRoZXVzX2V4cG9ydGVyL21ldHJpY3MKICBwYXJhbXM6CiAgICBhcGlrZXk6CiAgICAtIDMwRThCMDFCRkQ2NzRFNUJCRDQ0NkQwOEM0NzMwREY0CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjgwCi0gam9iX25hbWU6IGhhc3NpbwogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lKaE16Qm1ZalUxWmpjeVpHRTBZemMyWW1VMk5tWTBOamxqTlRBeU1qZGpaQ0lzSW1saGRDSTZNVFl4TWpnNE16STVOeXdpWlhod0lqb3hPVEk0TWpRek1qazNmUS4xSUNzSGxpVVhSMENHNEg4dlFSWUo1alZxRndtcUtTQjBmU2NTaXRDLVE0JwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGhhc3Npby5sYW46ODAKLSBqb2JfbmFtZTogaGFzc2lvX3Jpbmc4NgogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lJME9HRmpaVEppTm1RM09UZzBNamMzWVdGbU1tTm1abVUxWXpjNE5URTBOQ0lzSW1saGRDSTZNVFl4TWpFNU1qazBNQ3dpWlhod0lqb3hPVEkzTlRVeU9UUXdmUS5CYklBWG05UnEwamI2b3VxZ1ZITmQ2S2VlejNOUDN5aC03d3lmdW9COFlrJwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGF1dG8uY2hhb3M6ODAKLSBqb2JfbmFtZTogcG9zdGdyZXMKICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgICAgLSBwb3N0Z3Jlcy5saXZlLWVudi5zdmMuY2x1c3Rlci5sb2NhbDo5MTg3Ci0gam9iX25hbWU6IG5vZGUKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGFkbTAxLndrczo5MTAwCiAgICAtIGR1bW9udC13a3Mud2tzOjkxMDAKICAgIC0gZHJ1Y2tpLndrczo5MTAwCiAgICAtIGViaW4wMS53a3M6OTEwMAogICAgLSBlYmluMDIud2tzOjkxMDAKICAgIC0gb3NtYy53a3M6OTEwMAogICAgLSByaW90MDEud2tzOjkxMDAKICAgIC0gdHJ1aGUuY2hhb3M6OTEwMAogICAgLSBhdXRvMDEuY2hhb3M6OTEwMAogICAgLSBhdXRvMDIuY2hhb3M6OTEwMAogICAgLSBkdW1vbnQuY2hhb3M6OTEwMAogICAgLSB0dW1vci5jaGFvczo5MTAwCiAgICAtIHdvaG56LmNoYW9zOjkxMDAKICAgIC0geW9yaS5jaGFvczo5MTAwCg==
prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0CiAgICAtIG1xdHQuY2hhb3M6OTIzNAotIGpvYl9uYW1lOiBoYXByb3h5CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhZG0wMS53a3M6OTEwMQogICAgLSBkcnVja2kud2tzOjkxMDEKICAgIC0gYXV0bzAyLmNoYW9zOjkxMDEKLSBqb2JfbmFtZToga2xpcHBlcgogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHJ1Y2tpLndrczozOTAzCi0gam9iX25hbWU6IG9jdG9wcmludAogIG1ldHJpY3NfcGF0aDogL3BsdWdpbi9wcm9tZXRoZXVzX2V4cG9ydGVyL21ldHJpY3MKICBwYXJhbXM6CiAgICBhcGlrZXk6CiAgICAtIDMwRThCMDFCRkQ2NzRFNUJCRDQ0NkQwOEM0NzMwREY0CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjgwCi0gam9iX25hbWU6IGhhc3NpbwogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lKaE16Qm1ZalUxWmpjeVpHRTBZemMyWW1VMk5tWTBOamxqTlRBeU1qZGpaQ0lzSW1saGRDSTZNVFl4TWpnNE16STVOeXdpWlhod0lqb3hPVEk0TWpRek1qazNmUS4xSUNzSGxpVVhSMENHNEg4dlFSWUo1alZxRndtcUtTQjBmU2NTaXRDLVE0JwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGhhc3Npby5sYW46ODAKLSBqb2JfbmFtZTogaGFzc2lvX3Jpbmc4NgogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lJME9HRmpaVEppTm1RM09UZzBNamMzWVdGbU1tTm1abVUxWXpjNE5URTBOQ0lzSW1saGRDSTZNVFl4TWpFNU1qazBNQ3dpWlhod0lqb3hPVEkzTlRVeU9UUXdmUS5CYklBWG05UnEwamI2b3VxZ1ZITmQ2S2VlejNOUDN5aC03d3lmdW9COFlrJwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGF1dG8uY2hhb3M6ODAKLSBqb2JfbmFtZTogcG9zdGdyZXMKICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgICAgLSBwb3N0Z3Jlcy5saXZlLWVudi5zdmMuY2x1c3Rlci5sb2NhbDo5MTg3Ci0gam9iX25hbWU6IG5vZGUKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGFkbTAxLndrczo5MTAwCiAgICAtIGR1bW9udC13a3Mud2tzOjkxMDAKICAgIC0gZHJ1Y2tpLndrczo5MTAwCiAgICAtIGViaW4wMS53a3M6OTEwMAogICAgLSBlYmluMDIud2tzOjkxMDAKICAgIC0gb3NtYy53a3M6OTEwMAogICAgLSByaW90MDEud2tzOjkxMDAKICAgIC0gdHJ1aGUuY2hhb3M6OTEwMAogICAgLSBhdXRvMDIuY2hhb3M6OTEwMAogICAgLSBkdW1vbnQuY2hhb3M6OTEwMAogICAgLSB0dW1vcjAxLmNoYW9zOjkxMDAKICAgIC0gd29obnouY2hhb3M6OTEwMAogICAgLSB5b3JpLmNoYW9zOjkxMDAK
kind: Secret
metadata:
creationTimestamp: null
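The additional scrape configs live base64-encoded in this Secret; to review or diff the decoded content, something like the following works (the secret name and namespace are assumptions based on the usual kube-prometheus layout):
```
kubectl -n monitoring get secret additional-scrape-configs \
  -o jsonpath='{.data.prometheus-additional\.yaml}' | base64 -d
```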

View File

@@ -16,7 +16,6 @@
- targets:
- adm01.wks:9101
- drucki.wks:9101
- auto01.chaos:9101
- auto02.chaos:9101
- job_name: klipper
static_configs:
@@ -57,9 +56,8 @@
- osmc.wks:9100
- riot01.wks:9100
- truhe.chaos:9100
- auto01.chaos:9100
- auto02.chaos:9100
- dumont.chaos:9100
- tumor.chaos:9100
- tumor01.chaos:9100
- wohnz.chaos:9100
- yori.chaos:9100

Submodule descheduler deleted from b2418ef481