60 Commits

SHA1 Message Date
9cda7c9f76 docker-reg-ui 2021-02-19 20:53:39 +01:00
c094e99451 obsolete 2021-02-19 20:53:25 +01:00
5a16e4cf40 no procps 2021-02-19 20:53:07 +01:00
be069c53bf distcc in tekton 2021-02-19 20:52:44 +01:00
ce329ca353 golang image 2021-02-18 23:52:00 +01:00
b45a4489fc golang image 2021-02-18 23:28:38 +01:00
1ac9cc0b4c stuff 2021-02-18 23:08:00 +01:00
38cac7a57f debian-golang image 2021-02-18 23:04:53 +01:00
9dd3b2b4e0 debian-golang image 2021-02-18 22:57:56 +01:00
0f6c04a0f0 debian-golang image 2021-02-18 22:55:40 +01:00
3a28bebcda mosquitto in tekton 2021-02-18 22:16:26 +01:00
f17dea5dff mosquitto in tekton 2021-02-18 22:16:12 +01:00
1a42071c26 mosquitto in tekton 2021-02-18 21:49:53 +01:00
0e79b36875 tekton pipelines for apps 2021-02-18 21:42:24 +01:00
c918c39e6e using apt-cache.lan 2021-02-18 21:08:56 +01:00
5932220ead Dockerfile again, because kaniko! 2021-02-18 21:03:57 +01:00
36fa98e78b CI-CD Stuff, mainly tekton 2021-02-18 20:55:44 +01:00
6b7b23dd71 tekton local configs 2021-02-18 20:55:20 +01:00
b131b76916 apt-cacher-ng in tekton 2021-02-18 20:54:53 +01:00
fb33950bc8 updated systems: descheduler runs at root.... 2021-02-18 20:54:24 +01:00
3970c20e3a updated systems 2021-02-18 20:54:06 +01:00
1cc1de7ed8 no more dockerfile, we're podmanning now :) 2021-02-10 14:58:10 +01:00
b91ea42a41 updates 2021-02-10 14:49:14 +01:00
f616346ac6 sweet caroline 2021-02-09 20:26:53 +01:00
315520baa6 static pvs for essential services 2021-01-24 00:20:55 +01:00
76c036fa79 static pvs 2021-01-21 12:56:33 +01:00
5ce8a3b5be all subs 2021-01-21 12:56:11 +01:00
0bdd4a2db0 all subs 2021-01-21 12:54:20 +01:00
76e516c7f3 pvs for grafana and prometheus 2021-01-21 12:53:10 +01:00
c7363d513e using flannel now 2021-01-21 12:52:46 +01:00
8d66cb1f66 postgres svc fix 2021-01-21 12:52:18 +01:00
f9269f2c2c persistent grafana/prometheus pvs 2021-01-21 10:06:01 +01:00
9b9b551907 new run 2021-01-20 15:33:26 +01:00
ab96839f50 removed external-storage 2021-01-20 15:32:30 +01:00
a3bd4349e2 nummer5 in wks 2021-01-07 21:50:45 +01:00
4dcb961e81 tekton for the masses 2020-12-08 17:12:45 +01:00
9561cb8d82 grav on php74/bullseye 2020-11-30 19:34:31 +01:00
2e3e37062a new and old scrapes 2020-11-19 18:38:25 +01:00
f85ff91873 doesn;t work yet 2020-11-12 18:03:29 +01:00
62cb2881c2 d-ui needs less resources 2020-11-11 21:00:58 +01:00
bb4fb01b7c hack: running socat and z2mqtt in one container with supervisor 2020-11-10 22:32:33 +01:00
e8c1fa3cef hassio 2020-11-09 19:33:15 +01:00
5efad7226b migrated to stats.lan 2020-11-09 13:59:58 +01:00
a5ca5799c7 hairpin mode does the trick 2020-11-09 13:59:42 +01:00
2224fc50c8 grav 2020-11-08 10:45:46 +01:00
54a80d25d9 grav+nginx+phpfpm 2020-11-02 16:22:38 +01:00
c4d78c6805 webapps refactoring 2020-10-31 23:47:09 +01:00
3bb2b5072a webapps 2020-10-31 23:45:24 +01:00
fef8a517ee entrypoint 2020-10-31 22:43:47 +01:00
6419eec2af urubu python CMS 2020-10-30 21:16:40 +01:00
c7bb0632d1 we-re traefik now 2020-10-30 21:16:03 +01:00
879a375a8c removed ingress-nginx submod 2020-10-30 21:15:26 +01:00
1fb381f2db removed ingress-nginx submod 2020-10-30 21:14:38 +01:00
163792a913 haproxy replaces nginx? 2020-10-29 12:13:59 +01:00
76318f92bd codetogether original - never mind... 2020-10-23 23:05:49 +02:00
1489869898 codetogether original 2020-10-23 23:00:44 +02:00
e0df26962a no connection to mqqt.chaos 2020-10-22 18:19:45 +02:00
f0729d9055 tumor is the master, after all! 2020-10-20 23:10:41 +02:00
cad7c23dac we're suddenly a python thing :) 2020-10-18 23:16:38 +02:00
bdd139b34a gitea and git-ui.lan and git.lan 2020-10-17 00:32:10 +02:00
98 changed files with 8962 additions and 925 deletions

21
.gitmodules vendored

@@ -10,9 +10,6 @@
[submodule "kubernetes-ingress"]
path = kubernetes-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "ingress-nginx"]
path = ingress-nginx
url = https://github.com/kubernetes/ingress-nginx.git
[submodule "pihole-kubernetes"]
path = pihole-kubernetes
url = https://github.com/MoJo2600/pihole-kubernetes.git
@@ -28,9 +25,6 @@
[submodule "mosquitto/charts"]
path = mosquitto/charts
url = https://github.com/smizy/charts.git
[submodule "external-storage"]
path = external-storage
url = https://github.com/kubernetes-incubator/external-storage.git
[submodule "mosquitto-exporter"]
path = mosquitto-exporter
url = https://github.com/sapcc/mosquitto-exporter.git
@@ -46,3 +40,18 @@
[submodule "csi-s3/node-driver-registrar"]
path = csi-s3/node-driver-registrar
url = https://github.com/kubernetes-csi/node-driver-registrar.git
[submodule "apps/postgresql/postgres_exporter"]
path = apps/postgresql/postgres_exporter
url = https://github.com/wrouesnel/postgres_exporter.git
[submodule "apps/tekton/dashboard"]
path = apps/tekton/dashboard
url = https://github.com/tektoncd/dashboard.git
[submodule "_sys/haproxy-ingress"]
path = _sys/haproxy-ingress
url = https://github.com/haproxytech/kubernetes-ingress.git
[submodule "nfs-subdir-external-provisioner"]
path = nfs-subdir-external-provisioner
url = https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
[submodule "apps/docker-registry/docker-registry-ui"]
path = apps/docker-registry/docker-registry-ui
url = https://github.com/Joxit/docker-registry-ui.git
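For reference, a minimal sketch of the git commands behind these entries; the Joxit path matches the diff above, the rest is stock git usage:
# register the new UI submodule (writes the entry above)
git submodule add https://github.com/Joxit/docker-registry-ui.git apps/docker-registry/docker-registry-ui
# dropping one, as done here for ingress-nginx and external-storage
git submodule deinit -f ingress-nginx && git rm -f ingress-nginx
git submodule update --init --recursive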

.project

@@ -5,7 +5,13 @@
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

5
.pydevproject Normal file

@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python interpreter</pydev_property>
</pydev_project>

_CI-CD/debian-golang/Dockerfile Normal file

@@ -0,0 +1,9 @@
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
golang make git && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
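A hedged local equivalent of the Tekton build below, in the podman style used by the docker-registry-ui README later in this diff; the cr.lan registry name comes from the pipeline resources, the tag is an assumption:
ARCH=arm64; APP=debian-golang-stable
podman build --arch=$ARCH -t $APP:$ARCH -t cr.lan/$APP:$ARCH .
podman push --tls-verify=false cr.lan/$APP:$ARCH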


@@ -0,0 +1,84 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-debian-golang-stable
spec:
type: image
params:
- name: url
value: cr.lan/debian-golang-stable
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-debian-golang
spec:
params:
- name: pathToContainerFile
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-golang/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/_CI-CD/debian-golang
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToContainerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
#workspaces:
# - name: workspace
# mountPath: /workspace
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-debian-golang
spec:
taskRef:
name: build-debian-golang
params:
- name: pathToContainerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-debian-golang-stable
# workspaces:
# - name: workspace
# persistentVolumeClaim:
# claimName: tektoncd-workspaces
# subPath: workspaces
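Assuming the manifest above is saved as debian-golang-tekton.yaml (the file name is a guess), a sketch of kicking off and following the build; tkn is the optional Tekton CLI:
kubectl apply -f debian-golang-tekton.yaml
tkn taskrun logs img-debian-golang -f
# TaskRuns are one-shot: delete and re-apply the TaskRun to rebuild
kubectl get taskrun img-debian-golang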

73
_scripts/get_resources.py Executable file

@@ -0,0 +1,73 @@
#!/usr/bin/python3
import kubernetes as k8s
from pint import UnitRegistry
from collections import defaultdict
__all__ = ["compute_allocated_resources"]
def compute_allocated_resources():
ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')
Q_ = ureg.Quantity
data = {}
# load the local kubeconfig; inside a pod, k8s.config.load_incluster_config() would be used instead
k8s.config.load_kube_config()
core_v1 = k8s.client.CoreV1Api()
# print("Listing pods with their IPs:")
# ret = core_v1.list_pod_for_all_namespaces(watch=False)
# for i in ret.items:
# print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
for node in core_v1.list_node().items:
stats = {}
node_name = node.metadata.name
allocatable = node.status.allocatable
max_pods = int(int(allocatable["pods"]) * 1.5)
# print("{} ALLOC: {} MAX_PODS: {}".format(node_name,allocatable,max_pods))
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
stats["cpu_alloc"] = Q_(allocatable["cpu"])
stats["mem_alloc"] = Q_(allocatable["memory"])
pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).items
# compute the allocated resources
cpureqs, cpulmts, memreqs, memlmts = [], [], [], []
for pod in pods:
for container in pod.spec.containers:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
lmts = defaultdict(lambda: 0, res.limits or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
data[node_name] = stats
return data
if __name__ == "__main__":
# execute only if run as a script
print(compute_allocated_resources())
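A sketch of running it from a workstation with a reachable kubeconfig; the two third-party dependencies are exactly the ones imported at the top:
pip3 install kubernetes pint
./get_resources.py   # prints a per-node dict of cpu/mem requests, limits and percentages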

_scripts/kubernetes_units.txt Normal file

@@ -0,0 +1,20 @@
# memory units
kmemunits = 1 = [kmemunits]
Ki = 1024 * kmemunits
Mi = Ki^2
Gi = Ki^3
Ti = Ki^4
Pi = Ki^5
Ei = Ki^6
# cpu units
kcpuunits = 1 = [kcpuunits]
m = 1/1000 * kcpuunits
k = 1000 * kcpuunits
M = k^2
G = k^3
T = k^4
P = k^5
E = k^6
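These definitions let pint parse the raw quantity strings the API server reports ("500m", "64Mi", ...). A quick hedged sanity check from a shell, assuming the file sits in the current directory:
python3 -c 'from pint import UnitRegistry; u = UnitRegistry(); u.load_definitions("kubernetes_units.txt"); print(u.Quantity("1536Mi").to("Gi")); print(u.Quantity("500m").to("kcpuunits"))'
# expected: 1.5 Gi and 0.5 kcpuunits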

6
_sys/README.md Normal file

@@ -0,0 +1,6 @@
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml


@@ -0,0 +1,47 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/2 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.20.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap
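A minimal roll-out check, assuming the RBAC from the README above is already applied and the policy ConfigMap in the next file sits alongside this CronJob (file names are guesses):
kubectl apply -n kube-system -f descheduler-configmap.yaml -f descheduler-cronjob.yaml
kubectl -n kube-system get cronjob descheduler-cronjob
# every run is a Job; grep its pods for eviction decisions
kubectl -n kube-system logs -l job-name --tail=100 | grep -i evict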


@@ -0,0 +1,27 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: descheduler-policy-configmap
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: false
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 30
"memory": 50
"pods": 15
targetThresholds:
"cpu": 70
"memory": 70
"pods": 16

204
_sys/haproxy-ingress.yaml Normal file

@@ -0,0 +1,204 @@
#https://raw.githubusercontent.com/haproxytech/kubernetes-ingress/master/deploy/haproxy-ingress.yaml
#https://www.haproxy.com/documentation/kubernetes/latest/installation/community/kubernetes/
#
# NOTES: images are not the official haproxytech ones, since upstream provides no arm64 images
---
apiVersion: v1
kind: Namespace
metadata:
name: haproxy-controller
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: haproxy-ingress-service-account
namespace: haproxy-controller
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: haproxy-ingress-cluster-role
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- services
- namespaces
- events
- serviceaccounts
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- patch
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: haproxy-ingress-cluster-role-binding
namespace: haproxy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: haproxy-ingress-cluster-role
subjects:
- kind: ServiceAccount
name: haproxy-ingress-service-account
namespace: haproxy-controller
---
apiVersion: v1
kind: ConfigMap
metadata:
name: haproxy
namespace: haproxy-controller
data:
forwarded-for: "true"
load-balance: "leastconn"
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: ingress-default-backend
name: ingress-default-backend
namespace: haproxy-controller
spec:
replicas: 1
selector:
matchLabels:
run: ingress-default-backend
template:
metadata:
labels:
run: ingress-default-backend
spec:
containers:
- name: ingress-default-backend
#image: gcr.io/google_containers/defaultbackend:1.4
image: starlingx4arm/defaultbackend:1.5-aarch64
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
labels:
run: ingress-default-backend
name: ingress-default-backend
namespace: haproxy-controller
spec:
selector:
run: ingress-default-backend
ports:
- name: port-1
port: 8080
protocol: TCP
targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
namespace: haproxy-controller
spec:
replicas: 1
selector:
matchLabels:
run: haproxy-ingress
template:
metadata:
labels:
run: haproxy-ingress
spec:
serviceAccountName: haproxy-ingress-service-account
containers:
- name: haproxy-ingress
#image: haproxytech/kubernetes-ingress
image: bmanojlovic/kubernetes-ingress:latest
args:
- --configmap=haproxy-controller/haproxy
- --default-backend-service=haproxy-controller/ingress-default-backend
resources:
requests:
cpu: "500m"
memory: "50Mi"
livenessProbe:
httpGet:
path: /healthz
port: 1042
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
- name: stat
containerPort: 1024
env:
- name: TZ
value: "Europe/Berlin"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: v1
kind: Service
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
namespace: haproxy-controller
spec:
selector:
run: haproxy-ingress
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
- name: stat
port: 1024
protocol: TCP
targetPort: 1024
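A hedged smoke test once this is applied; namespace, service name and the stats listener on 1024 come from the manifests above, the NodePort value will differ per cluster:
kubectl -n haproxy-controller get pods,svc
kubectl -n haproxy-controller get svc haproxy-ingress -o jsonpath='{.spec.ports[?(@.name=="stat")].nodePort}'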


@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
#namespace: nginx-ingress
namespace: default
data:
proxy-connect-timeout: "10s"
proxy-read-timeout: "10s"
client-max-body-size: "0"

654
_sys/ingress-nginx.yaml Normal file

@@ -0,0 +1,654 @@
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader-nginx
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: LoadBalancer
loadBalancerIP: 172.23.255.1
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: k8s.gcr.io/ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --enable-ssl-passthrough
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1beta1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: docker.io/jettech/kube-webhook-certgen:v1.5.1
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-3.23.0
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 0.44.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: docker.io/jettech/kube-webhook-certgen:v1.5.1
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000
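A quick check that the controller answers on the pinned address; the IP is the loadBalancerIP from the Service above, and on a cluster with no Ingress rules a 404 from the default backend is the expected answer:
kubectl -n ingress-nginx get pods,svc
curl -si http://172.23.255.1/ | head -n1   # HTTP/1.1 404 Not Found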

223
_sys/kube-flannel.yml Normal file

@@ -0,0 +1,223 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.23.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.13.1-rc1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.13.1-rc1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
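To verify the DaemonSet and the lease flanneld writes for the 172.23.0.0/16 network (paths from the manifest; the last command runs on a node):
kubectl -n kube-system get ds kube-flannel-ds
kubectl -n kube-system logs -l app=flannel --tail=20
cat /run/flannel/subnet.env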


@@ -17,6 +17,7 @@ data:
"name":"kubernetes",
"type":"bridge",
"bridge":"kube-bridge",
"mtu":1420,
"isDefaultGateway":true,
"hairpinMode":true,
"ipam":{
@@ -47,6 +48,7 @@ spec:
- name: kube-router
image: docker.io/cloudnativelabs/kube-router
args:
- "--auto-mtu=false"
- "--run-router=true"
- "--run-firewall=true"
- "--run-service-proxy=true"


@@ -0,0 +1,59 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
replicas: 1
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v1.7
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
- name: admin
containerPort: 8080
args:
- --api
- --kubernetes
- --loglevel=ERROR
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
annotations:
kube-router.io/service.hairpin: ""
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
- protocol: TCP
port: 8080
name: admin
type: LoadBalancer
loadBalancerIP: 172.23.255.1


@@ -1,8 +1,9 @@
FROM debian:stable-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && apt-get install -y \
apt-cacher-ng procps && \
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && apt-get install -y \
apt-cacher-ng && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
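The image now reaches Debian mirrors through apt-cacher-ng's URL-rewrite form (cache-host/origin-host) instead of an APT proxy setting. A hedged reachability check, assuming apt-cache.lan fronts the cacher's port 3142:
curl -sI http://apt-cache.lan/deb.debian.org/debian/dists/stable/Release | head -n1   # expect HTTP/1.1 200 OK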


@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: apt-cacher-ng
image: docker-registry.lan/apt-cacher-ng:arm64
image: cr.lan/apt-cacher-ng:arm64
ports:
- containerPort: 3142
protocol: TCP
@@ -27,10 +27,10 @@ spec:
name: data
resources:
requests:
memory: "24Mi"
memory: "64Mi"
cpu: "50m"
limits:
memory: "256Mi"
memory: "192Mi"
cpu: "100m"
volumes:
- name: data
@@ -69,8 +69,6 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apt-cacher-volume
#annotations:
# volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: nfs-ssd
accessModes:


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-apt-cacher-ng
spec:
type: image
params:
- name: url
value: cr.lan/apt-cacher-ng
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-apt-cacher-ng
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/apt-cacher-ng/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/apt-cacher-ng
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-apt-cacher-ng-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-apt-cacher-ng
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-apt-cacher-ng


@@ -1,464 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: argo
---
# This is an auto-generated file. DO NOT EDIT
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterworkflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: ClusterWorkflowTemplate
listKind: ClusterWorkflowTemplateList
plural: clusterworkflowtemplates
shortNames:
- clusterwftmpl
- cwft
singular: clusterworkflowtemplate
scope: Cluster
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: cronworkflows.argoproj.io
spec:
group: argoproj.io
names:
kind: CronWorkflow
listKind: CronWorkflowList
plural: cronworkflows
shortNames:
- cwf
- cronwf
singular: cronworkflow
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflows.argoproj.io
spec:
additionalPrinterColumns:
- JSONPath: .status.phase
description: Status of the workflow
name: Status
type: string
- JSONPath: .status.startedAt
description: When the workflow was started
format: date-time
name: Age
type: date
group: argoproj.io
names:
kind: Workflow
listKind: WorkflowList
plural: workflows
shortNames:
- wf
singular: workflow
scope: Namespaced
subresources: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: workflowtemplates.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowTemplate
listKind: WorkflowTemplateList
plural: workflowtemplates
shortNames:
- wftmpl
singular: workflowtemplate
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-server
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: argo-aggregate-to-admin
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: argo-aggregate-to-edit
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: argo-aggregate-to-view
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
- workflowtemplates
- workflowtemplates/finalizers
- cronworkflows
- cronworkflows/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argo-cluster-role
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
- workflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- create
- apiGroups:
- argoproj.io
resources:
- workflowtemplates
- workflowtemplates/finalizers
- clusterworkflowtemplates
- clusterworkflowtemplates/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- list
- apiGroups:
- argoproj.io
resources:
- cronworkflows
- cronworkflows/finalizers
verbs:
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argo-server-cluster-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
- workflowtemplates
- cronworkflows
- clusterworkflowtemplates
verbs:
- create
- get
- list
- watch
- update
- patch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argo-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argo-cluster-role
subjects:
- kind: ServiceAccount
name: argo
namespace: argo
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argo-server-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: argo-server-cluster-role
subjects:
- kind: ServiceAccount
name: argo-server
namespace: argo
---
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
---
apiVersion: v1
kind: Service
metadata:
name: argo-server
spec:
ports:
- name: web
port: 2746
targetPort: 2746
selector:
app: argo-server
---
apiVersion: v1
kind: Service
metadata:
name: workflow-controller-metrics
spec:
ports:
- name: metrics
port: 9090
protocol: TCP
targetPort: 9090
selector:
app: workflow-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: argo-server
spec:
selector:
matchLabels:
app: argo-server
template:
metadata:
labels:
app: argo-server
spec:
containers:
- args:
- server
image: argoproj/argocli:latest
name: argo-server
ports:
- containerPort: 2746
name: web
readinessProbe:
httpGet:
path: /
port: 2746
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 20
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: argo-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: workflow-controller
spec:
selector:
matchLabels:
app: workflow-controller
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
- --executor-image
- argoproj/argoexec:latest
command:
- workflow-controller
image: argoproj/workflow-controller:latest
name: workflow-controller
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: argo

7
apps/argocd/README.md Normal file

@@ -0,0 +1,7 @@
FROM: https://tanzu.vmware.com/developer/guides/ci-cd/argocd-gs/
# kubectl apply -f namespace.yaml
# -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml-
# kubectl apply -n argocd -f install.yaml (needs changes for ARM builds)
# kubectl apply -n argocd -f ingress.yaml

18
apps/argocd/ingress.yaml Normal file

@@ -0,0 +1,18 @@
#https://argoproj.github.io/argo-cd/operator-manual/ingress/#kubernetesingress-nginx
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server
namespace: argocd
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: argocd.lan
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
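ssl-passthrough only works when the controller enables it; the ingress-nginx Deployment earlier in this diff already passes --enable-ssl-passthrough, so a hedged end-to-end check is:
curl -vk https://argocd.lan 2>&1 | grep -i 'subject:'   # should print argocd-server's own certificate subject, not the ingress default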

2726
apps/argocd/install.yaml Normal file

File diff suppressed because it is too large


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd

133
apps/codetogether.yaml Normal file

@@ -0,0 +1,133 @@
# ========================================================================
# Secret: CodeTogether License Values
# ========================================================================
apiVersion: v1
kind: Secret
metadata:
name: codetogether-license
namespace: default
type: Opaque
stringData:
# Configure as needed for your deployment, should match your SSL certificate
CT_SERVER_URL: "https://cd.lan"
CT_TRUST_ALL_CERTS: "true"
# Provided by your Genuitec Sales Representative
# *values must match exactly
CT_LICENSEE: "Werkstatt"
CT_MAXCONNECTIONS: "0"
CT_EXPIRATION: "2022/10/01"
CT_SIGNATURE: "xXM4cwzG...619bef4"
---
# ========================================================================
# Secret: SSL Key and Certificate for SSL used by Ingress
# ========================================================================
apiVersion: v1
kind: Secret
metadata:
name: codetogether-sslsecret
namespace: default
type: kubernetes.io/tls
data:
# value from "cat ssl.crt | base64 -w 0"
tls.crt: "LS0tLS1CRUdJTi...UZJQ0FURS0tLS0tDQo="
# value from "cat ssl.key | base64 -w 0"
tls.key: "LS0tLS1CRUdJTi...EUgS0VZLS0tLS0NCg=="
---
# ========================================================================
# Ingress: Expose the HTTPS service to the network
# ========================================================================
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: codetogether
spec:
tls:
- hosts:
- SERVERFQDN
secretName: codetogether-sslsecret
rules:
- host: SERVERFQDN
http:
paths:
- path: /
backend:
serviceName: codetogether
servicePort: 80
---
# ========================================================================
# Service: Map the HTTP port from the container
# ========================================================================
apiVersion: v1
kind: Service
metadata:
name: codetogether
labels:
run: codetogether
spec:
ports:
- port: 80
name: http
targetPort: 1080
protocol: TCP
selector:
run: codetogether
---
# ========================================================================
# Deployment: Configure the Container Deployment
# ========================================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: codetogether
namespace: default
spec:
selector:
matchLabels:
run: codetogether
replicas: 1
template:
metadata:
labels:
run: codetogether
spec:
containers:
- name: codetogether
image: hub.edge.codetogether.com/latest/codetogether:latest
imagePullPolicy: Always
ports:
- containerPort: 1080
env:
- name: CT_LOCATOR
value: "none"
- name: CT_SERVER_URL
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_SERVER_URL
- name: CT_TRUST_ALL_CERTS
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_TRUST_ALL_CERTS
- name: CT_LICENSEE
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_LICENSEE
- name: CT_MAXCONNECTIONS
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_MAXCONNECTIONS
- name: CT_EXPIRATION
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_EXPIRATION
- name: CT_SIGNATURE
valueFrom:
secretKeyRef:
name: codetogether-license
key: CT_SIGNATURE
imagePullSecrets:
- name: ctcreds
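Rather than hand-base64ing the key pair into the manifest, the TLS secret can be created directly, and the SERVERFQDN placeholders in the Ingress still need substituting; cd.lan is assumed from CT_SERVER_URL above, the file name is a guess:
kubectl create secret tls codetogether-sslsecret --cert=ssl.crt --key=ssl.key
sed -i 's/SERVERFQDN/cd.lan/g' codetogether.yaml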


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-curl
spec:
type: image
params:
- name: url
value: cr.lan/curl
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-curl
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/curl/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/curl
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-curl-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-curl
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-curl


@@ -1,21 +1,16 @@
FROM debian:buster-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN dpkg --add-architecture armhf && \
apt-get update && \
apt-get install -y \
multiarch-support \
dpkg-dev \
distcc ccache \
build-essential \
gcc \
cpp \
g++ \
clang \
llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/*
FROM debian:stable-slim
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y \
gcc-arm-linux-gnueabihf gcc-arm-none-eabi gcc-aarch64-linux-gnu \
multiarch-support dpkg-dev distcc ccache \
build-essential gcc cpp g++ clang llvm && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/*
# Op port
EXPOSE 3632
# Stats port


@@ -3,11 +3,11 @@ kind: Deployment
metadata:
labels:
app: distcc
release: buster
release: stable
name: distcc
namespace: default
spec:
replicas: 3
replicas: 5
selector:
matchLabels:
app: distcc
@@ -17,11 +17,11 @@ spec:
metadata:
labels:
app: distcc
release: buster
release: stable
spec:
containers:
- name: distcc
image: docker-registry.lan/distcc:armhf
image: cr.lan/distcc:aarch64
imagePullPolicy: Always
#env:
#- name: OPTIONS
@@ -61,7 +61,7 @@ kind: Service
metadata:
labels:
app: distcc
release: buster
release: stable
namespace: default
name: distcc
spec:
@@ -77,4 +77,4 @@ spec:
protocol: TCP
selector:
app: distcc
type: LoadBalancer
type: LoadBalancer
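With the Service above exposing the daemons behind a load balancer, a build client only needs DISTCC_HOSTS; the distcc.lan name and job counts are assumptions:
export DISTCC_HOSTS="distcc.lan/8 localhost/2"
make -j10 CC="distcc gcc" CXX="distcc g++"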


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-distcc
spec:
type: image
params:
- name: url
value: cr.lan/distcc
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-distcc
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/distcc/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/distcc
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-distcc-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-distcc
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-distcc


@@ -1,9 +1,7 @@
Docker-ui
Build it for arm64:
Build it for arm64 in docker-registry-ui
docker build --platform linux/arm64 -t joxit/docker-registry-ui:static -f static.dockerfile github.com/Joxit/docker-registry-ui
ARCH=arm64; APP=docker-registry-ui ; podman build --arch=$ARCH -t $APP:$ARCH -t cr.lan/$APP:$ARCH -f arm64v8-static.dockerfile
docker tag 1494c11066f5 docker-registry.lan/docker-registry-ui:arm64
docker push docker-registry.lan/docker-registry-ui:arm64
ARCH=arm64; APP=docker-registry-ui ; podman push cr.lan/$APP:$ARCH


@@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCaHN7wa2QK9qD3
ovn7ZZiKQ+E/f54MnHGgdlTcskTuiysbS4rqUC49MzWRZjxzxukbwF0a1yOOJUSM
YgOeDntRU4T49FLxY3YAZ9RV4Lr6qU8Tz45Ez4N7RLa3QLqY2wf3BEy32k8SqHsI
XMt0DV93w6q1eqW95XRNDDJF0xm4Oa4yaew0tNCx8Senv51jZ8lOX8CJljnE2Sil
P0HBFwfJqKk9qZg5WstQZFsr3D1wTpMZ3UmnzDN3EEBLJkvcAJvdo2E8TGb29UcD
OopHCeixdoKJw/BBdDCXDoSs9N+pDmoY7QSQaXP91sybP/zrcvrIFTT39IFrARRh
5X9QvCnJxhHXPhqqSeAE4YzTGHJV3BdpIVMPMWUHL9TfLFJxbUGImE2IUQZxSb2i
Wy8w9mnt4SFARGUIr0+tOmEDQ7smlFUke9yIPnti01OogfDNR4/szpwYvfE5+xG6
Vp0W590HxL6JE3nqaTJu+KIkBcRzroZZghmNEKik2MeRIxHjCpjvNr2INLn30S81
NhdP4uZdCeI5sERFaFOCgA64MPTtPYjQRV7BFwpN3+alUK8zVtXat/n5HyxvqrzG
s7IHA/GyCLjfsh9sWDhsfgsuIZzL+KblYnU1XPhRko4BQ2Y3GwA0QGFvM0+J1z/V
r3ieyio37CbEuVugMQ/VYYl8UYE0TwIDAQABAoICAC+rnopfraJ2h3QSRaEt2/Fo
7dPmdc0Q11T7RWS+//OJuNvIkj/IbYUgwgEnzqtBa/nZlvMmeSkO/hUufE/3ys1t
OESJzt48FdQqSdQGn8/Jb1yBZ1CBn/oRVzN4IkAGAIC4I8L7FFqBIw2DJqvPNyik
rblVJs+GmmL60tImal5B+VA+04G6LJPeNJX+/4AwKmTD2Zq1jUkGozv6RSylIxON
yEv6mcuj+h/z6v+2MIr8wyPM/2uYDpNVw417WxvCVHRKhVlRiMf7NuwYv40Z05CR
R++1XCvi9OTE6OVXGZgBjXAIYNEKzYZHWyLquCFcf5ZEeQ35485llxhxFOC0U3hL
lT8pI6EFnRiTi+Eq+7GOmvKYjNda6UtUVYPFIX0Ff3IkkwJ53rYdrar4xLnpmeUF
LcJhGJdfJSsvO2mdiLEFm/K7dQxDadusYPYFeUK4CGgoIsauf6XzdWbxJgv4qcOJ
dMzt2uLxpq5k7pQ5HU96Pa9g1flR1vaAtZ4htTMbQ6o7nrUoc8+zoo8pBYW6/zi+
OXf/9BvDQ/dQvtAF+gJQMfGDO5J0x5+yr+Jp7LKjlmG5B2bYMYF9/uZQTgY5kla5
uqihCZVZ14uojbXA3eqHvmtRfFqQ4Us3s0BUDm4W5PUe6jwJ8TavP+XJIjcCLU2c
kOrKZ0ZtIXwTUqKE5Z2BAoIBAQDKXleKtzEvvOWihxzuUmQYIT2HzrMG14s1M7wo
YF0ARaQTxX5HH2lYN7znWb/RpcDSj+IBNV4PxEOHVNCTWhev/PnFmm6FuqopJDIZ
sumP3jJg0K2/MFjBsHXNqacqjqMKlWFnuYqDHZSRX1bjC9IWB6HfS9Wjm2XrgBGx
xFTcAZ3kXX4NlVMz/JgWMKLRY+qGtDWG11sT+oAge81La+MRz/R/fAhf3K+0iDaK
F1iX8jXIcRfqk9OLafRcuIkS4q4rV6D9bI9xjbTz2tsm3b/wJezoSC06mTHoUEoG
p3MIPZ6ETDADDlB9hsWS23p2ueuUOCHg19+n30ah6qWx7UzjAoIBAQDC9KBAYr0T
sf7o5FA+Xp/N6ALxarNa1b15TjFtwSfvwZrrg02QQIpQCR70vy6wiczkTcmRCi4P
uiiVQz8abWbOW+aG4ThTpkOZDbCEVghFzGWPZjRsyrlhcegdS5FL4fCBrtUzOs7e
e+YtgyPrvmHamhMvKYWfW/DWfxOoBFoL9GTuC1646Va63u3MmLMflzYhj4dgbsm0
ut70aK3RAFkLVwswmx+OPINeSpEz6iIRArF4aSi8rH2eaMp4QiXz+zXSP+Bm4XTN
C6HrQeyOmiEtXcZemZVnUtkJBdkW+iRiiD3+xLEX11c/kzcyIeNpaGu9LckXuxqY
chu4XOVHLaKlAoIBAFapGfIESyL3UJtOIvyH+ec/bNsYkB/w8+M/mWbtBUaVjBMP
culAMVue2t1z2KoNwkopZY5A7VvxHz33+y3u2c/6lHejj4rjCfV+U5ofvNdoPsio
9I64RHoFeB0vdq/Jz1Y77C+ADCnj4/hxDINET54xfIdkMUPTy0yTVoB65CAm7Reb
Vdy5Qp0zoWl3QHJMyGURDQ8GcDFZB79hZOPUerPpCvoBApESr4evATQXlU/UYGXK
0IQa8+9y2ztNpx2YRx+2cfG0qKTnG0OGSG0XbxeHFjHOntfGPNIQd/LriF5SDOz4
t2LHoX5v1XHzXTk0mwapFxDzQQrhmZzDIFvWlCMCggEANLHORtjpZlNsJSLhFZqZ
8xvM/9fpVpoDNrCN566XztQzvYimBGGNgQiWF209f3YfrW3hF5T60kFtCrs8aTY8
3XY1nyttAB8mkk4C8iIW5lbS9KmZbfZ1mQMizBhK04nkagkJk2lH1RcEJjUWFnhF
FsMigFLmzSYauL9sXrOeazDJvxXPqodXa/cpq21yrQ1AEl4rJ0OKvZDtBn7szFsd
tlT2r1KeeuGcWHYrPS8BujtSIMu7uROeeJy2bT7j50h1Sbj+PJCf83Q7dc1B1WGP
qiV4osU8fssD4s5z2SQPhZpxt1UO0PThnkt6VdCXGTyiMmYXvpRSIfZly7VAO7b4
CQKCAQEAoVcWk9yQ5fD+uQ40duvjpzeNxBjttFLHe1CeOCIPtA3KBak4O+MNwZMz
oVUe2V/vb3kGpngF56d1hrBa4iQhvq4mGfnF/ZsbQHa4BZyaFIFvcOwZsgCjAO65
MpbybhRiOMMtu0Bg/H1hH2dzatugrqfVDYRnt9EgpDl7gkdVvmRu9khMWGHLv9qJ
gVeH5dNlpty3gkpSjJgTpEuKF7Yzw4seHpjkiwzIitgE2F7Xrv+6GtYOs0iziJTx
ZNq3BtxzCGe6MamLkXOj5DREhQMqAxJTUo/AYRNRiOeq+AdYgoAulse7HIO8q77E
i+DOL/C63wFKJddUnKSXCf+iAJraGw==
-----END PRIVATE KEY-----


@@ -1,34 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIF2zCCA8OgAwIBAgIUCvX0FglFpG7UJJe6QruGhfKwglUwDQYJKoZIhvcNAQEL
BQAwfDELMAkGA1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVy
bGluMQ4wDAYDVQQKDAVjaGFvczEcMBoGA1UEAwwTZG9ja2VyLXJlZ2lzdHJ5Lmxh
bTEdMBsGCSqGSIb3DQEJARYOcm9vdEBjaGFvcy5sYW4wIBcNMjAwNjI0MTUxODE5
WhgPMjEyMDA1MzExNTE4MTlaMHwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJs
aW4xDzANBgNVBAcMBkJlcmxpbjEOMAwGA1UECgwFY2hhb3MxHDAaBgNVBAMME2Rv
Y2tlci1yZWdpc3RyeS5sYW0xHTAbBgkqhkiG9w0BCQEWDnJvb3RAY2hhb3MubGFu
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAmhze8GtkCvag96L5+2WY
ikPhP3+eDJxxoHZU3LJE7osrG0uK6lAuPTM1kWY8c8bpG8BdGtcjjiVEjGIDng57
UVOE+PRS8WN2AGfUVeC6+qlPE8+ORM+De0S2t0C6mNsH9wRMt9pPEqh7CFzLdA1f
d8OqtXqlveV0TQwyRdMZuDmuMmnsNLTQsfEnp7+dY2fJTl/AiZY5xNkopT9BwRcH
yaipPamYOVrLUGRbK9w9cE6TGd1Jp8wzdxBASyZL3ACb3aNhPExm9vVHAzqKRwno
sXaCicPwQXQwlw6ErPTfqQ5qGO0EkGlz/dbMmz/863L6yBU09/SBawEUYeV/ULwp
ycYR1z4aqkngBOGM0xhyVdwXaSFTDzFlBy/U3yxScW1BiJhNiFEGcUm9olsvMPZp
7eEhQERlCK9PrTphA0O7JpRVJHvciD57YtNTqIHwzUeP7M6cGL3xOfsRuladFufd
B8S+iRN56mkybviiJAXEc66GWYIZjRCopNjHkSMR4wqY7za9iDS599EvNTYXT+Lm
XQniObBERWhTgoAOuDD07T2I0EVewRcKTd/mpVCvM1bV2rf5+R8sb6q8xrOyBwPx
sgi437IfbFg4bH4LLiGcy/im5WJ1NVz4UZKOAUNmNxsANEBhbzNPidc/1a94nsoq
N+wmxLlboDEP1WGJfFGBNE8CAwEAAaNTMFEwHQYDVR0OBBYEFCtnUlt2y35MUJ0x
YSvt8G3vi0NMMB8GA1UdIwQYMBaAFCtnUlt2y35MUJ0xYSvt8G3vi0NMMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAEXDBh9NNZza6Vjzwcll7uAc
x22ghoDinHOdfNWe9Hgocmj/Ci4M7f8TL35Zlm2PhOfYaol88uVIOiTKrf2USY2J
7RSvpl34voiWR8HBtkIFvmiUE2GR5I8gA21H8xaenIbg1Pj9V+E4SgIN1V9lX6S1
tjNVbhs/mU6YqyNytkjCuwJgCMPgXx4wwPZqaBqGJ5IrJfag0ZahT0IfKSzKtc8M
HBeXTy7Ck7WUOQWRCe289CBkYHZ+ScdnXnJao7uLvpuoUpu6/WPAnMN1t7KUO4tU
Z0SwNpY/Xsq3pjwTk2ZJwhFI1baaOyDZJW0+l2D48q7ADavq72NlPerZFkIN6Uvh
iyb4A/dzZWeZPIJinLtC6Bip5epg03KR0O4D/rYHbn6uVTq894ThIAXt1Q8fFVGb
oX+AK+ERCWc4ost+pr+Dk78bJUEcHCMRIGaWUVfzXvCagrx4eRLwoaLTovPHVvVl
on61w57W8csoj8lh3TX5t0MB4s87twHlErRIALqMd+m5K+2CPeWRd/6ZpmCGuL9s
bT+Rde3Sqw45N3Asw795yA73Av0coq8pB2DyDR5SoHkMD1rzJIVg4lBCwMSR3IJk
hiIO2qV1xNFrnA3ggKZSyDkH8eOR0dAmtthX6nDGvUbFsMFYnXli5wngTuXdHiYo
Lpilp6oWJLkzjfyGR3Um
-----END CERTIFICATE-----


@@ -6,7 +6,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
replicas: 1
selector:
@@ -21,19 +20,19 @@ spec:
spec:
containers:
- name: registry-ui
image: "docker-registry.lan/docker-registry-ui:arm64"
image: "cr.lan/docker-registry-ui:arm64"
imagePullPolicy: Always
env:
- name: URL
value: "http://docker-registry.lan"
#- name: URL
# value: "https://cr.lan"
- name: REGISTRY_TITLE
value: "dReg"
value: "cReg"
- name: DELETE_IMAGES
value: "true"
- name: REGISTRY_URL
value: "http://docker-registry-ui.lan"
- name: PULL_URL
value: "http://docker-registry.lan"
value: "https://cr.lan"
#- name: PULL_URL
# value: "https://cr.lan"
ports:
- name: http
containerPort: 80
@@ -48,11 +47,11 @@ spec:
port: http
resources:
requests:
memory: "24Mi"
cpu: "50m"
memory: "20Mi"
cpu: "10m"
limits:
memory: "64Mi"
cpu: "100m"
memory: "32Mi"
cpu: "50m"
---
apiVersion: v1
kind: Service
@@ -61,7 +60,6 @@ metadata:
labels:
app: registry-ui
release: docker-registry-ui
app/version: "1.2.1"
spec:
ports:
- port: 80
@@ -76,9 +74,13 @@ apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: docker-registry-ui
annotations:
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-origin: "http://cr-ui.lan"
nginx.ingress.kubernetes.io/cors-expose-headers: "*"
spec:
rules:
- host: docker-registry-ui.lan
- host: cr-ui.lan
http:
paths:
- backend:


@@ -1,3 +1,7 @@
#we use postgresql:
#create database gitea;
#create user gitea with encrypted password 'secret';
#grant all privileges on database gitea to gitea;
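#e.g., a sketch assuming the postgres StatefulSet defined elsewhere in this repo (pod postgres-0):
#  kubectl exec -it postgres-0 -- psql -U postgres -c "create database gitea;"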
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -20,7 +24,7 @@ spec:
containers:
- name: gitea
image: gitea/gitea:latest
imagePullPolicy: IfNotPresent
imagePullPolicy: Always
env:
- name: USER_UID
value: "1000"
@@ -46,13 +50,13 @@ spec:
httpGet:
path: /
port: http
# resources:
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "1000Mi"
cpu: "1500m"
volumes:
- name: gitea
persistentVolumeClaim:
@@ -78,14 +82,14 @@ metadata:
name: gitea
labels:
app: gitea
release: latest
spec:
type: LoadBalancer
ports:
- port: 3000
targetPort: http
protocol: TCP
name: http
- port: 2222
- port: 22
targetPort: 22
name: ssh
selector:
@@ -96,9 +100,11 @@ apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: gitea
annotations:
ingress.kubernetes.io/whitelist-x-forwarded-for: "true"
spec:
rules:
- host: git.lan
- host: git-ui.lan
http:
paths:
- backend:


@@ -1,94 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
replicas: 1
selector:
matchLabels:
app: hassio
release: latest
template:
metadata:
labels:
app: hassio
release: latest
spec:
containers:
- name: hassio
image: "homeassistant/home-assistant:latest"
imagePullPolicy: Always
volumeMounts:
- name: hassio-storage
mountPath: /.storage
ports:
- name: http
containerPort: 8123
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
# resources:
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
volumes:
- name: hassio-storage
persistentVolumeClaim:
claimName: hassio-storage
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-storage
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: hassio
spec:
rules:
- host: hassio.lan
http:
paths:
- backend:
serviceName: hassio
servicePort: http
path: /


@@ -11,7 +11,7 @@ spec:
selector:
app: mariadb
type: LoadBalancer
loadBalancerIP: 172.23.255.4
loadBalancerIP: 172.23.255.5
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
@@ -29,7 +29,7 @@ spec:
app: mariadb
spec:
containers:
- image: docker-registry.lan/mariadb:arm64
- image: cr.lan/mariadb
name: mariadb
imagePullPolicy: Always
env:
@@ -49,7 +49,7 @@ spec:
limits:
memory: "1500Mi"
cpu: "2000m"
- image: docker-registry.lan/mariadb-prometheus-exporter:arm64
- image: cr.lan/mariadb-prometheus-exporter
name: mariadb-prometheus-exporter
imagePullPolicy: Always
ports:
@@ -65,18 +65,37 @@ spec:
volumes:
- name: mariadb-persistent-storage
persistentVolumeClaim:
claimName: mariadb-pv-claim
claimName: mariadb-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mariadb-pv-claim
annotations:
volume.beta.kubernetes.io/storage-class: nfs-ssd
name: mariadb-data
spec:
storageClassName: nfs-ssd
volumeName: mariadb-data
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storage: 40Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mariadb-data
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/mariadb-data
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: mariadb-data
namespace: default
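# The claimRef above pre-binds this PV to the mariadb-data PVC, so both objects
# should report Bound right after being applied (a sketch):
#   kubectl get pv mariadb-data && kubectl get pvc mariadb-data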


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mariadb-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mariadb-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb-prometheus
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-prometheus-exporter-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb-prometheus-exporter


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mariadb
spec:
type: image
params:
- name: url
value: cr.lan/mariadb
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mariadb
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mariadb/mariadb
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mariadb-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mariadb
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mariadb


@@ -1,16 +1,15 @@
FROM debian:buster-slim
FROM debian:stable-slim
RUN echo 'Acquire::http::proxy "http://172.23.255.1:3142";' >/etc/apt/apt.conf.d/proxy
RUN apt-get update && \
RUN sed -i 's@deb.debian.org@apt-cache.lan/deb.debian.org@g' /etc/apt/sources.list && \
sed -i 's@security.debian.org@apt-cache.lan/security.debian.org@g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y --no-install-recommends \
mosquitto mosquitto-clients procps && \
mosquitto && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# MQTT port
EXPOSE 1883
# Stats port
#EXPOSE 9090
ADD docker-entrypoint.sh /


@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: mqtt-mosquitto
image: docker-registry.lan/mosquitto:arm64
image: cr.lan/mosquitto
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
@@ -63,7 +63,7 @@ spec:
name: mosquitto-data
subPath: mosquitto/data
- name: mosquitto-exporter
image: docker-registry.lan/mosquitto-exporter:arm64
image: cr.lan/mosquitto-exporter
imagePullPolicy: Always
ports:
- containerPort: 9234
@@ -117,8 +117,6 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
#annotations:
# volume.beta.kubernetes.io/storage-provisioner: nfs-storage
labels:
app: mosquitto
release: mqtt
@@ -148,4 +146,3 @@ data:
port 1883
persistence true
persistence_location /mosquitto/data/


@@ -0,0 +1,98 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: github-mosquitto-prometheus-exporter
spec:
type: git
params:
- name: revision
value: master
- name: url
value: https://github.com/sapcc/mosquitto-exporter.git
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto-prometheus-exporter
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto-prometheus-exporter
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto-prometheus-exporter
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-binary
image: cr.lan/debian-golang-stable
script: |
#!/usr/bin/env bash
cd $(resources.inputs.source.path)
pwd
ls -la
GO_PATH=/usr/src/gopath
PKG_NAME=github.com/sapcc/mosquitto-exporter
BUILD_DIR=bin
MOSQUITTO_EXPORTER_BINARY=${BUILD_DIR}/mosquitto_exporter
IMAGE=sapcc/mosquitto-exporter
VERSION=0.6.0
LDFLAGS="-s -w -X main.Version=${VERSION} -X main.GITCOMMIT=$(git rev-parse --short HEAD)"
# exported so they actually reach the go toolchain
export CGO_ENABLED=0
export GOOS=linux
mkdir -p ${BUILD_DIR} ${GO_PATH}
go build -o ${MOSQUITTO_EXPORTER_BINARY} -ldflags="${LDFLAGS}" ${PKG_NAME}
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
workspaces:
- name: usr-src
mountPath: /usr/src
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-prometheus-exporter
spec:
taskRef:
name: build-mosquitto-prometheus-exporter
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: github-mosquitto-prometheus-exporter
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto-prometheus-exporter
workspaces:
- name: usr-src
persistentVolumeClaim:
claimName: tektoncd-workspaces
subPath: usr_src
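# The usr-src workspace keeps the Go build tree on the shared tektoncd-workspaces
# PVC between runs; to inspect a finished run (a sketch, assuming the Tekton CLI
# is installed):
#   tkn taskrun describe img-mosquitto-prometheus-exporter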


@@ -0,0 +1,76 @@
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: chaos-kubernetes-git
spec:
type: git
params:
- name: revision
value: master
- name: url
value: http://git-ui.lan/chaos/kubernetes.git
- name: submodules
value: "false"
---
apiVersion: tekton.dev/v1alpha1
kind: PipelineResource
metadata:
name: img-mosquitto
spec:
type: image
params:
- name: url
value: cr.lan/mosquitto
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-mosquitto
spec:
params:
- name: pathToDockerFile
type: string
default: $(resources.inputs.source.path)/apps/mosquitto/Dockerfile
- name: pathToContext
type: string
default: $(resources.inputs.source.path)/apps/mosquitto
resources:
inputs:
- name: source
type: git
outputs:
- name: builtImage
type: image
steps:
- name: build-and-push
image: gcr.io/kaniko-project/executor:arm64
command:
- /kaniko/executor
args:
- --dockerfile=$(params.pathToDockerFile)
- --destination=$(resources.outputs.builtImage.url)
- --context=$(params.pathToContext)
- --skip-tls-verify
---
apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: img-mosquitto-taskrun
spec:
#serviceAccountName: dockerhub-service
taskRef:
name: build-mosquitto
params:
- name: pathToDockerFile
value: Dockerfile
resources:
inputs:
- name: source
resourceRef:
name: chaos-kubernetes-git
outputs:
- name: builtImage
resourceRef:
name: img-mosquitto


@@ -25,6 +25,13 @@ spec:
volumeMounts:
- mountPath: /data
name: data
resources:
limits:
cpu: "1"
memory: "200Mi"
requests:
memory: "64Mi"
cpu: "50m"
volumes:
- name: data
persistentVolumeClaim:
@@ -54,7 +61,7 @@ metadata:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: node-red.lan
- host: nodered.lan
http:
paths:
- path: /


@@ -33,6 +33,8 @@ spec:
spec:
containers:
- env:
- name: TZ
value: Europe/Berlin
- name: WEB_PORT
value: "80"
- name: VIRTUAL_HOST
@@ -46,7 +48,7 @@ spec:
value: 208.67.222.222
- name: DNS2
value: 208.67.220.220
image: pihole/pihole:v5.1.2
image: pihole/pihole:latest
imagePullPolicy: Always
livenessProbe:
failureThreshold: 10
@@ -127,8 +129,6 @@ apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: pihole
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /admin/$1
spec:
rules:
- host: pihole.lan
@@ -137,7 +137,6 @@ spec:
- backend:
serviceName: pihole-tcp
servicePort: http
path: /(.*)
pathType: ImplementationSpecific
---
apiVersion: v1


@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
labels:
app: postgres
env: live
spec:
selector:
matchLabels:
app: postgres
env: live
serviceName: postgres-service
replicas: 1
template:
metadata:
labels:
app: postgres
env: live
spec:
containers:
- name: postgres
image: postgres
ports:
- containerPort: 5432
protocol: TCP
volumeMounts:
- name: postgres-disk
mountPath: /var/lib/postgresql/data
env:
- name: POSTGRES_PASSWORD
value: pg2020
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
# - name: prometheus-exporter
# image: wrouesnel/postgres_exporter
# env:
# - name: DATA_SOURCE_NAME
# value: postgresql://postgres:pg2020@localhost:5432/postgres?sslmode=disable
volumes:
- name: postgres-disk
persistentVolumeClaim:
claimName: postgres
# volumeClaimTemplates:
# - metadata:
# name: postgres-disk
# spec:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: postgres
labels:
app: postgres
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
# service.yml
---
apiVersion: v1
kind: Service
metadata:
name: postgres
labels:
app: postgres
env: live
spec:
selector:
env: live
type: LoadBalancer
ports:
- port: 5432
targetPort: 5432


@@ -1,81 +0,0 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
name: rompr
spec:
selector:
matchLabels:
app: rompr
strategy:
type: Recreate
template:
metadata:
labels:
app: rompr
spec:
containers:
- image: docker-registry.lan/rompr:arm64
name: rompr
imagePullPolicy: Always
ports:
- containerPort: 9000
name: php-fpm
volumeMounts:
- name: rompr-data
mountPath: /rompr
- image: sebp/lighttpd:latest
name: lighttpd
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: http
volumeMounts:
- name: rompr-data
mountPath: /rompr
- name: rompr-lighttpd-config
mountPath: /etc/lighttpd
volumes:
- name: rompr-data
persistentVolumeClaim:
claimName: rompr-data
- name: rompr-lighttpd-config
configMap:
name: rompr-lighttpd-config
---
apiVersion: v1
kind: Service
metadata:
name: rompr
spec:
ports:
- name: http
port: 80
selector:
app: rompr
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: rompr
spec:
rules:
- host: musik.lan
http:
paths:
- backend:
serviceName: rompr
servicePort: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rompr-data
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 6Gi


@@ -0,0 +1,50 @@
FROM node:current-buster
# Set the commit of Zwave2Mqtt to check out when cloning the repo
ENV Z2M_VERSION=9cc3740740b57f1e896139b5ffdb25be7576ad58
ENV DEBIAN_FRONTEND noninteractive
#setup local apt cache
#RUN sed -i 's@http://@http://apt-cache.lan/@g' /etc/apt/sources.list
#/apt-cache
# Install required dependencies
RUN apt update -y
RUN apt full-upgrade -y
# Packages we need
RUN apt install -y \
socat libopenzwave1.5 npm git
# Clone Zwave2Mqtt build pkg files and move them to /dist/pkg
RUN npm config set unsafe-perm true && npm install -g pkg
RUN cd /root \
&& git clone https://github.com/OpenZWave/Zwave2Mqtt.git \
&& cd Zwave2Mqtt \
&& git checkout ${Z2M_VERSION} \
&& npm install \
&& npm run build
# Clean up
RUN apt autoremove -y
RUN apt clean -y
RUN rm -rf /root/*
RUN apt-get clean -y
RUN rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /dist/lib/ /lib/
COPY --from=build /dist/pkg /usr/src/app
# supervisor base configuration
ADD supervisor.conf /etc/supervisor.conf
LABEL maintainer="zoide"
# Set environment
ENV LD_LIBRARY_PATH /lib
EXPOSE 8091
CMD ["supervisord", "-c", "/etc/supervisor.conf"]
#CMD ["/usr/src/app/zwave2mqtt"]


@@ -0,0 +1,168 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
replicas: 1
selector:
matchLabels:
app: hassio
release: latest
template:
metadata:
labels:
app: hassio
release: latest
spec:
containers:
- name: hassio
image: "homeassistant/home-assistant:latest"
imagePullPolicy: Always
env:
- name: TZ
value: Europe/Berlin
volumeMounts:
- name: hassio-storage
mountPath: /config
ports:
- name: http
containerPort: 8123
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 60
periodSeconds: 10
readinessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 60
periodSeconds: 5
# resources:
# requests:
# memory: "256Mi"
# cpu: "250m"
# limits:
# memory: "1000Mi"
# cpu: "500m"
- name: configurator
image: "causticlab/hass-configurator-docker:arm"
imagePullPolicy: Always
env:
- name: HC_HASS_API
value: http://127.0.0.1:8123/api/
- name: HC_HASS_API_PASSWORD
value: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4
ports:
- name: adm
containerPort: 3218
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 3218
initialDelaySeconds: 3
periodSeconds: 3
readinessProbe:
httpGet:
path: /
port: 3218
initialDelaySeconds: 10
periodSeconds: 5
volumeMounts:
- name: hassio-storage
mountPath: /hass-config
- name: hassio-conf-storage
mountPath: /config
volumes:
- name: hassio-storage
persistentVolumeClaim:
claimName: hassio-storage
- name: hassio-conf-storage
persistentVolumeClaim:
claimName: hassio-configurator
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-storage
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hassio-configurator
labels:
app: hassio
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: hassio
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app: hassio
release: latest
---
apiVersion: v1
kind: Service
metadata:
name: hassio-conf
labels:
app: hassio
release: latest
spec:
ports:
- port: 80
targetPort: adm
protocol: TCP
name: adm
selector:
app: hassio
release: latest
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: hassio
spec:
rules:
- host: hassio.lan
http:
paths:
- backend:
serviceName: hassio
servicePort: http
- host: hassio-conf.lan
http:
paths:
- backend:
serviceName: hassio-conf
servicePort: adm


@@ -0,0 +1,8 @@
FROM alpine:latest
ARG VERSION=1.7.3.2
RUN apk --no-cache add socat
# pin if needed: apk --no-cache add socat=${VERSION}
ENTRYPOINT ["socat"]


@@ -0,0 +1,13 @@
[supervisord]
nodaemon=true
[program:socat]
command=/usr/bin/socat -d -d -d pty,link=/dev/ttySER2NET0,raw,user=root,group=root,mode=660 tcp:auto:3333
killasgroup=true
stopasgroup=true
redirect_stderr=true
[program:zwave2mqtt]
directory=/usr/src/app
command=/usr/src/app/zwave2mqtt
redirect_stderr=true
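; socat links the TCP serial bridge (port 3333) to the local pty
; /dev/ttySER2NET0, which zwave2mqtt then opens as its controller device;
; running both under one supervisord keeps a single foreground process in the container.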


@@ -0,0 +1,120 @@
## FROM: https://github.com/OpenZWave/Zwave2Mqtt
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zwave2mqtt
spec:
replicas: 1
selector:
matchLabels:
name: zwave2mqtt
template:
metadata:
labels:
name: zwave2mqtt
spec:
containers:
- name: zwave2mqtt
image: docker-registry.lan/zwave2mqtt:arm64
livenessProbe:
failureThreshold: 12
httpGet:
httpHeaders:
- name: Accept
value: text/plain
path: /
port: http
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
ports:
- containerPort: 8091
name: http
protocol: TCP
resources:
limits:
cpu: '1'
memory: 512Mi
requests:
cpu: '1'
memory: 200Mi
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /usr/src/app/store
name: data
# - mountPath: /usr/local/etc/openzwave
# name: ozwdatabase
# - mountPath: /usr/src/app/store/settings.json <-- if putting your settings.json in a secret
# name: config
# readOnly: true
# subPath: settings.json
# nodeSelector:
# kubernetes.io/hostname: stick1 #<--- the name of your cluster node that the zwave usb stick in
# - name: socat
# image: docker-registry.lan/socat:arm64
# args:
# - pty,link=/dev/ttySER2NET0,raw,user=root,group=root,mode=660
# - tcp:auto:3333
# securityContext:
# allowPrivilegeEscalation: true
# privileged: true
volumes:
# - name: config <-- if putting your settings.json in a secret
# secret:
# defaultMode: 420
# secretName: zwave2mqtt
#- name: zwavestick
# hostPath:
# path: /dev/ttyACM0
# type: File
- name: data
persistentVolumeClaim:
claimName: zwave2mqtt-storage
# - name: ozwdatabase
# hostPath:
# path: /zwave2mqtt/database
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: zwave2mqtt-storage
labels:
app: zwave2mqtt
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Mi
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: zwave2mqtt
spec:
rules:
- host: zwave.lan
http:
paths:
- backend:
serviceName: zwave2mqtt
servicePort: http
---
apiVersion: v1
kind: Service
metadata:
name: zwave2mqtt
spec:
ports:
- name: http
port: 80
targetPort: http
selector:
name: zwave2mqtt

apps/tekton/README.md

@@ -0,0 +1,8 @@
Install:
# Pipelines: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml@
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard:
## update submodule in ./dashboard
## Build: @docker build -t tekton-dashboard:arm64 -t docker-registry.lan/tekton-dashboard:arm64 --platform linux/arm64 --build-arg GOARCH=arm64 .@
## apply deployment.yaml
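# Sanity check (a sketch): @kubectl get pods --namespace tekton-pipelines@ should list the pipelines, triggers and dashboard pods as Running.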


@@ -0,0 +1,60 @@
# Copyright 2020 Tekton Authors LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: config-registry-cert
namespace: tekton-pipelines
labels:
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-pipelines
data:
# Registry's self-signed certificate
# TODO: somehow automate this with salt
cert: |
-----BEGIN CERTIFICATE-----
MIIFujCCA6KgAwIBAgIEYsvT+zANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE
RTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFDASBgNVBAMMC3R1
bW9yLmNoYW9zMB4XDTIxMDIxMjE4MzAzM1oXDTIyMDIxMjE4MzAzM1owLzELMAkG
A1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMIICIjAN
BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAog4t352wKHS4pflQK4NlWH6yv1FK
MnqNJiNnIgkWrNABTu9ES3cmUwdEhf+Um7MJYvQivOZFIH65wBBmOxfnYWB+NPwn
XAi/o3BcePIdbwEGs0cxgIEKbmL9fY0SCXq0pXRu8Y7WAhqdTNp6/HY2fTMx7ghX
RNQPoeNlcfAZgpsJlZdkSzMYoFpGIW+Tvj3INNuIuHo1pagckWW/hGUIqY0NuUV9
Aj8LOHhHB+vKtjbq5DMVAob4kKOPJFmq/1D6fmRh3W1YAGikowVv3V45jAmnkcBj
Z8BIEiOnBy1AyW9o8Tc5000MAGNrm9IGpRfBBTptSAApZmK1V6zKreqCiCpgOBbh
6U1Bf1L39u8aLVRxeyzQbxqBM1VTbjKxygFSIR/7rVd9BEhx6VA95EG+EdPLpKDp
mymElCcVgv2ZhKBRxtne4CAQD5ng2SoEqLdjvZdC44QNapnj+6jlaNvKRJ1q63kq
B5Y4shJxYOc6QDQp2+Eh2d7qQNiTE3FJC/aeXDNQ+dqeV7chU+PbcbMQoxnIN6ou
Zc2IdtNL87+Apgh6vqZX9pELBXUN1Nu3NI88T8tw1CdqfFfh4Z2EEBBCsPD0yZPV
UrHZsAMiHh5prRkwsBVzDBIaLYd6glf/w9W8sWxe5wceDNhxD8VAfq/ZXeuE1Pme
cTVYsBNj8idC9tECAwEAAaOBxzCBxDAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF
4DAdBgNVHQ4EFgQUa7ADNR68XrDsLtLtngmdJQ9UtOswcAYDVR0jBGkwZ4AU9l9v
D1+dukLLV/uDnP3eB4i6ZyihSaRHMEUxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZC
ZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEUMBIGA1UEAwwLdHVtb3IuY2hhb3OCBBKa
C88wFgYDVR0RBA8wDYILdHVtb3IuY2hhb3MwDQYJKoZIhvcNAQELBQADggIBAKK3
S8qKrsarBflGrDI4diG+QOcMG3/y6juARp3vxQf3fDqC6HZCl+kWAp+Cq3Sp/hU7
GKM7qraWpvGxgmDyaevAirLdFlYQBgcIl9frPI8yfLWbZHWvx3PFXNqg2Ckm98xX
vSUacPTPp/tKFBquJ5+j+/YS2U4qWWNIYYtDEI+3lswfoeh0CIEPSxDk0wHDAyfZ
Vh30ZuZhsf3F63xMggw/RpEHeTTCr0YGOAmzpb7jItcbP/EER1qTQ4T+3ExuC40C
EdOAeL377O2rr7zjcmJWk8B5FaQ8K8UdE/iQGM7tP5ieMNTVACe21KFpqIIXaIka
HqRTyvRmJGUrVf1NeXE16yKirIqAjEV/B/4S244wxYcwqweZObbI0PnbnEMn3PMF
TV+e1CUmVOKyGIxfHH7j/VKQfmH/W0jOlGWI7OkbdU5GckoX4Knjrv2MmT9i2ENy
6dID3BJVm6hK2SjJLc7SxbPXMG3I6BrlA5/3LaXzl+2fWAk5OA1jnGZz0P4XcdOO
iAulB4I3PdmNRdSYAXVRdo5OLoq/7iBcqSrCXRw1IbgJm0VlS2AI6hGEXDQvjQwP
38ijZUV/ch2lGyUZOfQymI7Ylh+Airn8ctqyMS8FeZBAyny4/t7xrhWuGO1awUzp
4p/sEjg6kqp3oLai5yhaz9S+y7Ao5XmGDdzfalWH
-----END CERTIFICATE-----


Submodule apps/tekton/dashboard added at c881ad4d3a


@@ -0,0 +1,13 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
spec:
rules:
- host: tekton.lan
http:
paths:
- backend:
serviceName: tekton-dashboard
servicePort: 9097


@@ -0,0 +1,526 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: extensions.dashboard.tekton.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.apiVersion
name: API version
type: string
- JSONPath: .spec.name
name: Kind
type: string
- JSONPath: .spec.displayname
name: Display name
type: string
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: dashboard.tekton.dev
names:
categories:
- tekton
- tekton-dashboard
kind: Extension
plural: extensions
shortNames:
- ext
- exts
preserveUnknownFields: false
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
type: object
x-kubernetes-preserve-unknown-fields: true
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
rules:
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- use
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- create
- update
- delete
- patch
- apiGroups:
- tekton.dev
resources:
- clustertasks
- clustertasks/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- clustertriggerbindings
verbs:
- create
- update
- delete
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.dashboard.tekton.dev/aggregate-to-dashboard: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- dashboard.tekton.dev
resources:
- extensions
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
- pods/log
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
- taskruns/finalizers
- pipelineruns/finalizers
verbs:
- get
- list
- watch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- delete
- apiGroups:
- tekton.dev
resources:
- tasks
- taskruns
- pipelines
- pipelineruns
- pipelineresources
- conditions
- taskruns/finalizers
- pipelineruns/finalizers
- tasks/status
- taskruns/status
- pipelines/status
- pipelineruns/status
verbs:
- create
- update
- delete
- patch
- apiGroups:
- triggers.tekton.dev
resources:
- eventlisteners
- triggerbindings
- triggertemplates
verbs:
- create
- update
- delete
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-backend
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-backend
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: v1
kind: Service
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
ports:
- name: http
port: 9097
protocol: TCP
targetPort: 9097
selector:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
dashboard.tekton.dev/release: v0.11.1
version: v0.11.1
name: tekton-dashboard
namespace: tekton-pipelines
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
template:
metadata:
labels:
app: tekton-dashboard
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/name: dashboard
app.kubernetes.io/part-of: tekton-dashboard
app.kubernetes.io/version: v0.11.1
name: tekton-dashboard
spec:
containers:
- args:
- --port=9097
- --logout-url=
- --pipelines-namespace=tekton-pipelines
- --triggers-namespace=tekton-pipelines
- --read-only=false
- --csrf-secure-cookie=false
- --log-level=info
- --log-format=json
- --namespace=
- --openshift=false
- --stream-logs=false
- --external-logs=
env:
- name: INSTALLED_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
- name: TEKTON_PIPELINES_WEB_RESOURCES_DIR
value: /go/src/github.com/tektoncd/dashboard/web
#image: gcr.io/tekton-releases/github.com/tektoncd/dashboard/cmd/dashboard@sha256:744eb92d7d0365bbfb2405df4ba4d2a66c01edc26028c362bd5675e2bc1b9626
image: docker-registry.lan/tekton-dashboard:arm64
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /health
port: 9097
name: tekton-dashboard
ports:
- containerPort: 9097
readinessProbe:
httpGet:
path: /readiness
port: 9097
securityContext:
runAsNonRoot: true
runAsUser: 65532
serviceAccountName: tekton-dashboard
volumes: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-pipelines
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-pipelines
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-dashboard
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-dashboard
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-triggers
namespace: tekton-pipelines
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-triggers
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-tenant
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-tenant
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
name: tekton-dashboard-extensions
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: tekton-dashboard-extensions
subjects:
- kind: ServiceAccount
name: tekton-dashboard
namespace: tekton-pipelines
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
labels:
app.kubernetes.io/component: dashboard
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-dashboard
spec:
rules:
- host: tekton.lan
http:
paths:
- backend:
serviceName: tekton-dashboard
servicePort: 9097

File diff suppressed because it is too large


@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tektoncd-workspaces
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi
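# Shared ReadWriteMany scratch space for Tekton workspaces; TaskRuns mount
# subPaths of this claim (e.g. the usr-src workspace of the mosquitto-exporter
# build uses subPath usr_src).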


@@ -0,0 +1,34 @@
FROM debian:bullseye-slim
ENV DEBIAN_FRONTEND noninteractive
ARG GRAV_VERSION=1.6.28
ARG DEV_PKGS="zlib1g-dev libpng-dev libjpeg-dev libfreetype6-dev \
libcurl4-gnutls-dev libxml2-dev libonig-dev"
RUN apt-get update && \
apt-get install -y git bash procps wget unzip supervisor \
php-fpm php-gd php-json php-curl php-dom php-xml php-yaml php-apcu \
php-opcache php-simplexml php-zip php-mbstring cron \
&& mkdir /var/www \
&& chown www-data:www-data /var/www \
&& cd /var/www
# Cleanup
RUN apt-get remove -y --purge ${DEV_PKGS} exim4* && \
apt-get autoremove --purge -y && \
apt-get clean -y && \
rm -rf /var/lib/apt/lists/* && \
rm -rf /var/cache/apt/* /tmp/* /var/tmp/* /var/log/*
RUN mkdir /run/php && \
chown www-data:www-data /var/log /run/php && \
mkdir -p /etc/php/7.4/fpm/pool.d
ADD docker-entrypoint.sh /
ADD supervisor.conf /etc/supervisor.conf
ENTRYPOINT ["/docker-entrypoint.sh"]
#USER www-data
RUN (crontab -l; echo "* * * * * cd /var/www/grav;/usr/bin/php bin/grav scheduler 1>> /dev/null 2>&1") | crontab -u www-data -
#CMD ["dumb-init", "/usr/sbin/php-fpm7.3", "--nodaemonize", "--force-stderr"]
CMD ["supervisord", "-c", "/etc/supervisor.conf"]

apps/web/grav/README.md

@@ -0,0 +1,3 @@
lighttpd is configured in etc_lighttpd
generate a configmap with:
kubectl create configmap grav-lighttpd-config --from-file etc_lighttpd/
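To make this repeatable (a sketch), pipe a client-side dry run through apply:
kubectl create configmap grav-lighttpd-config --from-file etc_lighttpd/ --dry-run=client -o yaml | kubectl apply -f -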


@@ -0,0 +1,104 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
name: grav
spec:
selector:
matchLabels:
app: grav
strategy:
type: Recreate
template:
metadata:
labels:
app: grav
spec:
containers:
- image: docker-registry.lan/grav:arm64
name: grav
imagePullPolicy: Always
ports:
- containerPort: 9000
name: php-fpm
volumeMounts:
- name: grav-pages
mountPath: /var/www/grav
- name: grav-etc-php-fpm-www-conf
mountPath: /etc/php/7.4/fpm/pool.d
- image: nginx:alpine
name: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: http
volumeMounts:
- name: grav-nginx-proxy-config
mountPath: /etc/nginx/nginx.conf
subPath: nginx.conf
- name: grav-pages
mountPath: /var/www/grav
initContainers:
- name: grav-install
image: busybox
command: ["/bin/sh"]
args:
- -c
- >-
wget -O /grav.zip "https://getgrav.org/download/core/grav-admin/latest" &&
unzip -q /grav.zip &&
rm -rf grav-admin/user/pages/* &&
cp -ru grav-admin/* /workdir/ &&
rm -rf /grav.zip &&
rm -rf /grav-admin &&
chown -R 33:33 /workdir/*
volumeMounts:
- name: grav-pages
mountPath: /workdir
volumes:
- name: grav-pages
persistentVolumeClaim:
claimName: grav-pages
- name: grav-nginx-proxy-config
configMap:
name: grav-nginx-proxy-config
- name: grav-etc-php-fpm-www-conf
configMap:
name: grav-etc-php-fpm-www-conf
---
apiVersion: v1
kind: Service
metadata:
name: grav
spec:
ports:
- name: http
port: 80
selector:
app: grav
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: grav
spec:
rules:
- host: grav.lan
http:
paths:
- backend:
serviceName: grav
servicePort: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grav-pages
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 6Gi


@@ -0,0 +1,5 @@
#!/bin/sh
set -e
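# hand PID 1 to the image CMD (supervisord in this image) so signals reach it directly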
exec "$@"


@@ -0,0 +1,440 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or /usr) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of processes
; Note: The user is mandatory. If the group is not set, the default user's group
; will be used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
; listen = /run/php/php7.4-fpm.sock
listen = 127.0.0.1:9000
; Set listen(2) backlog.
; Default Value: 511 (-1 on FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: user and group are set as the running user
; mode is set to 0660
listen.owner = www-data
listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless specified otherwise
; Default Value: not set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
; or group is different from the master process user. It allows creating a process
; core dump and ptracing the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 5
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
pm.start_servers = 2
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 1
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 3
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then the information relates to the
; last request the process has served. Otherwise the information relates to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/share/php/7.4/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{miliseconds}d
; - %{mili}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be associated with embraces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be associated with embraces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
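; Under the default format above, a logged request looks roughly like this
; (values illustrative):
;   192.168.1.23 - - 18/Feb/2021:21:08:56 +0100 "GET /index.php" 200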
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
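; A sketch combining the three directives above: trace any request slower
; than five seconds into a per-pool slowlog (path illustrative):
;   slowlog = /var/log/php7.4-fpm.$pool.slow.log
;   request_slowlog_timeout = 5s
;   request_slowlog_trace_depth = 20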
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by the 'request_terminate_timeout' option is not engaged
; after the application calls 'fastcgi_finish_request', or when the
; application has finished and shutdown functions are being called
; (registered via register_shutdown_function). This option enables the
; timeout limit to be applied unconditionally even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
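; Sketch: hard-kill workers stuck for more than a minute, even after
; fastcgi_finish_request() has handed the response to the client:
;   request_terminate_timeout = 60s
;   request_terminate_timeout_track_finished = yes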
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, session.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: a relative path can be used.
; Default Value: current directory, or / when chrooted
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: in high-load environments this can add some delay to page
; processing time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with a prefix and suffix containing information
; about the child that writes to the log, whether stdout or stderr is used,
; and the log level and time. This option is used only if
; catch_workers_output is yes. Setting this to "no" will output data as
; written to stdout or stderr.
; Default value: yes
;decorate_workers_output = no
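; Sketch for a containerized pool (assumed values, not stock defaults):
; forward worker output to the main error log and leave it undecorated so
; multi-line stack traces arrive intact on stderr:
;   catch_workers_output = yes
;   decorate_workers_output = no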
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users from using other
; extensions to execute PHP code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
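; Sketch: with clear_env left at its default (yes), only an explicit
; whitelist reaches the workers; TZ is an assumed example here:
;   env[TZ] = Europe/Berlin
;   env[PATH] = /usr/local/bin:/usr/bin:/bin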
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten by a PHP call to 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten
; by a PHP call to 'ini_set'.
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M
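; Sketch: a 'disable_functions' value set here is appended to the php.ini
; value rather than replacing it (function list illustrative):
;   php_admin_value[disable_functions] = exec,passthru,shell_exec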

View File

@@ -0,0 +1,68 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: grav-nginx-proxy-config
data:
  nginx.conf: |-
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
        worker_connections 64;
    }
    http {
        include /etc/nginx/mime.types;
        default_type application/octet-stream;
        log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
        access_log off;
        #access_log /var/log/nginx/access.log main;
        sendfile on;
        keepalive_timeout 65;
        server {
            listen 80;
            server_name _;
            index index.html index.php;
            root /var/www/grav;
            ## Begin - Index
            # for subfolders, simply adjust:
            # `location /subfolder {`
            # and the rewrite to use `/subfolder/index.php`
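            # A hypothetical subfolder variant of the location below
            # (sketch only; /blog is an assumed install path):
            # location /blog {
            #     try_files $uri $uri/ /blog/index.php?$query_string;
            # }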
            location / {
                try_files $uri $uri/ /index.php?$query_string;
            }
            ## End - Index
            ## Begin - Security
            # deny all direct access for these folders
            location ~* /(\.git|cache|bin|logs|backup|tests)/.*$ { return 403; }
            # deny running scripts inside core system folders
            location ~* /(system|vendor)/.*\.(txt|xml|md|html|yaml|yml|php|pl|py|cgi|twig|sh|bat)$ { return 403; }
            # deny running scripts inside user folder
            location ~* /user/.*\.(txt|md|yaml|yml|php|pl|py|cgi|twig|sh|bat)$ { return 403; }
            # deny access to specific files in the root folder
            location ~ /(LICENSE\.txt|composer\.lock|composer\.json|nginx\.conf|web\.config|htaccess\.txt|\.htaccess) { return 403; }
            ## End - Security
            ## Begin - PHP
            location ~ \.php$ {
                # Choose either a socket or TCP/IP address
                fastcgi_pass 127.0.0.1:9000;
                fastcgi_split_path_info ^(.+\.php)(/.+)$;
                fastcgi_index index.php;
                include fastcgi_params;
                fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;
            }
            ## End - PHP
        }
    }

View File

@@ -0,0 +1,14 @@
[supervisord]
nodaemon=true
[program:cron]
command=/usr/sbin/cron
killasgroup=true
stopasgroup=true
redirect_stderr=true
user=root
[program:php-fpm]
command=/usr/sbin/php-fpm7.4 --nodaemonize --force-stderr
user=www-data

View File

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      run: nginx-deployment
  template:
    metadata:
      labels:
        run: nginx-deployment
    spec:
      containers:
      - image: nginx
        name: nginx-webserver
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    run: nginx-deployment
  ports:
  - port: 80
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-test
spec:
  rules:
  - host: nginx-test.lan
    http:
      paths:
      - backend:
          serviceName: nginx-service
          servicePort: 80

View File

@@ -17,9 +17,8 @@ spec:
        app: piwigo
    spec:
      containers:
      - image: linuxserver/piwigo
      - image: linuxserver/piwigo:latest
        name: piwigo
        imagePullPolicy: IfNotPresent
        env:
        # Use secret in real usage
        - name: TZ

View File

@@ -0,0 +1,81 @@
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: rompr
spec:
  selector:
    matchLabels:
      app: rompr
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rompr
    spec:
      containers:
      - image: docker-registry.lan/rompr:arm64
        name: rompr
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          name: php-fpm
        volumeMounts:
        - name: rompr-data
          mountPath: /rompr
      - image: sebp/lighttpd:latest
        name: lighttpd
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: http
        volumeMounts:
        - name: rompr-data
          mountPath: /rompr
        - name: rompr-lighttpd-config
          mountPath: /etc/lighttpd
      volumes:
      - name: rompr-data
        persistentVolumeClaim:
          claimName: rompr-data
      - name: rompr-lighttpd-config
        configMap:
          name: rompr-lighttpd-config
---
apiVersion: v1
kind: Service
metadata:
  name: rompr
spec:
  ports:
  - name: http
    port: 80
  selector:
    app: rompr
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rompr
spec:
  rules:
  - host: musik.lan
    http:
      paths:
      - backend:
          serviceName: rompr
          servicePort: http
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rompr-data
spec:
  storageClassName: nfs-ssd
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 6Gi

View File

@@ -0,0 +1,7 @@
apiVersion: v1
data:
  prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0Ci0gam9iX25hbWU6IGhhcHJveHkKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGFkbTAxLndrczo5MTAxCiAgICAtIGRydWNraS53a3M6OTEwMQogICAgLSBhdXRvMDIuY2hhb3M6OTEwMQotIGpvYl9uYW1lOiBrbGlwcGVyCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjM5MDMKLSBqb2JfbmFtZTogb2N0b3ByaW50CiAgbWV0cmljc19wYXRoOiAvcGx1Z2luL3Byb21ldGhldXNfZXhwb3J0ZXIvbWV0cmljcwogIHBhcmFtczoKICAgIGFwaWtleToKICAgIC0gMzBFOEIwMUJGRDY3NEU1QkJENDQ2RDA4QzQ3MzBERjQKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS53a3M6ODAKLSBqb2JfbmFtZTogaGFzc2lvCiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUpoTXpCbVlqVTFaamN5WkdFMFl6YzJZbVUyTm1ZME5qbGpOVEF5TWpkalpDSXNJbWxoZENJNk1UWXhNamc4TXpJNU55d2laWGh3SWpveE9USTRNalF6TWprM2ZRLjFJQ3NIbGlVWFIwQ0c0SDh2UVJZSjVqVnFGd21xS1NCMGZTY1NpdEMtUTQnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gaGFzc2lvLmxhbjo4MAotIGpvYl9uYW1lOiBoYXNzaW9fcmluZzg2CiAgbWV0cmljc19wYXRoOiAvYXBpL3Byb21ldGhldXMKICBiZWFyZXJfdG9rZW46ICdleUowZVhBaU9pSktWMVFpTENKaGJHY2lPaUpJVXpJMU5pSjkuZXlKcGMzTWlPaUkwT0dGalpUSmlObVEzT1RnME1qYzNZV0ZtTW1ObVptVTFZemM0TlRFME5DSXNJbWxoZENJNk1UWXhNakU1TWprME1Dd2laWGh3SWpveE9USTNOVFV5T1RRd2ZRLkJiSUFYbTlScTBqYjZvdXFnVkhOZDZLZWV6M05QM3loLTd3eWZ1b0I4WWsnCiAgc3RhdGljX2NvbmZpZ3M6CiAgICAtIHRhcmdldHM6CiAgICAgIC0gYXV0by5jaGFvczo4MAotIGpvYl9uYW1lOiBub2RlCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhZG0wMS53a3M6OTEwMAogICAgLSBkdW1vbnQud2tzOjkxMDAKICAgIC0gZHJ1Y2tpLndrczo5MTAwCiAgICAtIGViaW4wMS53a3M6OTEwMAogICAgLSBlYmluMDIud2tzOjkxMDAKICAgIC0gb3NtYy53a3M6OTEwMAogICAgLSByaW90MDEud2tzOjkxMDAKICAgIC0gdHJ1aGUuY2hhb3M6OTEwMAogICAgLSBhdXRvMDEuY2hhb3M6OTEwMAogICAgLSBhdXRvMDIuY2hhb3M6OTEwMAogICAgLSBkdW1vbnQuY2hhb3M6OTEwMAogICAgLSB0dW1vci5jaGFvczo5MTAwCiAgICAtIHdvaG56LmNoYW9zOjkxMDAK
kind: Secret
metadata:
  creationTimestamp: null
  name: additional-scrape-configs
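The data blob above is simply the base64 form of the plain scrape-config
file that follows; a sketch of regenerating the Secret from it (file and
secret names as used in this repo):

    kubectl create secret generic additional-scrape-configs \
        --from-file=prometheus-additional.yaml --dry-run=client -o yaml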

View File

@@ -0,0 +1,58 @@
- job_name: gitea
  static_configs:
  - targets:
    - git-ui.lan
- job_name: mysqld
  static_configs:
  - targets:
    - mariadb.lan:9104
- job_name: mqtt.mosquitto
  static_configs:
  - targets:
    - mqtt.lan:9234
- job_name: haproxy
  static_configs:
  - targets:
    - adm01.wks:9101
    - drucki.wks:9101
    - auto02.chaos:9101
- job_name: klipper
  static_configs:
  - targets:
    - drucki.wks:3903
- job_name: octoprint
  metrics_path: /plugin/prometheus_exporter/metrics
  params:
    apikey:
    - 30E8B01BFD674E5BBD446D08C4730DF4
  static_configs:
  - targets:
    - drucki.wks:80
- job_name: hassio
  metrics_path: /api/prometheus
  bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4'
  static_configs:
    - targets:
      - hassio.lan:80
- job_name: hassio_ring86
  metrics_path: /api/prometheus
  bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiI0OGFjZTJiNmQ3OTg0Mjc3YWFmMmNmZmU1Yzc4NTE0NCIsImlhdCI6MTYxMjE5Mjk0MCwiZXhwIjoxOTI3NTUyOTQwfQ.BbIAXm9Rq0jb6ouqgVHNd6Keez3NP3yh-7wyfuoB8Yk'
  static_configs:
    - targets:
      - auto.chaos:80
- job_name: node
  static_configs:
  - targets:
    - adm01.wks:9100
    - dumont.wks:9100
    - drucki.wks:9100
    - ebin01.wks:9100
    - ebin02.wks:9100
    - osmc.wks:9100
    - riot01.wks:9100
    - truhe.chaos:9100
    - auto01.chaos:9100
    - auto02.chaos:9100
    - dumont.chaos:9100
    - tumor.chaos:9100
    - wohnz.chaos:9100

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus-db
  annotations:
    pv.kubernetes.io/provisioned-by: nfs-ssd
spec:
  storageClassName: "nfs-ssd"
  nfs:
    path: /data/raid1-ssd/k8s-data/prometheus-db
    server: ebin01
  capacity:
    storage: 40Gi
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    kind: PersistentVolumeClaim
    name: prometheus-k8s-db-prometheus-k8s-0
    namespace: monitoring
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-conf
spec:
  storageClassName: "nfs-ssd"
  nfs:
    path: /data/raid1-ssd/k8s-data/grafana-conf
    server: ebin01
  capacity:
    storage: 40Mi
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    kind: PersistentVolumeClaim
    name: grafana-conf
    namespace: monitoring
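Both volumes are pre-bound via claimRef, so only the named PVCs in the
monitoring namespace can claim them; a quick sketch to verify the binding
took effect:

    kubectl -n monitoring get pvc prometheus-k8s-db-prometheus-k8s-0 grafana-conf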

Submodule external-storage deleted from ea9eda7019

Submodule ingress-nginx deleted from f7f3815bc7

View File

@@ -1,7 +0,0 @@
apiVersion: v1
data:
  prometheus-additional.yaml: LSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0Ci0gam9iX25hbWU6IGhhcHJveHkKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGRydWNraS5jaGFvczo5MTAxCiAgICAtIHJpb3QwMS5jaGFvczo5MTAxCiAgICAtIGF1dG86OTEwMQotIGpvYl9uYW1lOiBrbGlwcGVyCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kuY2hhb3M6MzkwMwotIGpvYl9uYW1lOiBvY3RvcHJpbnQKICBtZXRyaWNzX3BhdGg6IC9wbHVnaW4vcHJvbWV0aGV1c19leHBvcnRlci9tZXRyaWNzCiAgcGFyYW1zOgogICAgYXBpa2V5OgogICAgLSAzMEU4QjAxQkZENjc0RTVCQkQ0NDZEMDhDNDczMERGNAogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHJ1Y2tpLmNoYW9zOjgwCi0gam9iX25hbWU6IG9wZW5oYWIyCiAgbWV0cmljc19wYXRoOiAvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhdXRvLmNoYW9zOjk5OTkKLSBqb2JfbmFtZTogbm9kZQogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHVtb250LmNoYW9zOjkxMDAKICAgIC0gYXV0bzAxOjkxMDAKICAgIC0gZHJ1Y2tpLmNoYW9zOjkxMDAKICAgIC0gZWJpbjAxLmNoYW9zOjkxMDAKICAgIC0gZWJpbjAyLmNoYW9zOjkxMDAKICAgIC0gbGVubnkuY2hhb3M6OTEwMAogICAgLSByaW90MDEuY2hhb3M6OTEwMAogICAgLSB0cnVoZTo5MTAwCiAgICAtIHR1bW9yLmNoYW9zOjkxMDAKICAgIC0gd29obno6OTEwMAogICAgLSB5b3JpLmNoYW9zOjkxMDAK
kind: Secret
metadata:
  creationTimestamp: null
  name: additional-scrape-configs

View File

@@ -1,45 +0,0 @@
- job_name: mysqld
  static_configs:
  - targets:
    - mariadb.lan:9104
- job_name: mqtt.mosquitto
  static_configs:
  - targets:
    - mqtt.lan:9234
- job_name: haproxy
  static_configs:
  - targets:
    - drucki.chaos:9101
    - riot01.chaos:9101
    - auto:9101
- job_name: klipper
  static_configs:
  - targets:
    - drucki.chaos:3903
- job_name: octoprint
  metrics_path: /plugin/prometheus_exporter/metrics
  params:
    apikey:
    - 30E8B01BFD674E5BBD446D08C4730DF4
  static_configs:
  - targets:
    - drucki.chaos:80
- job_name: openhab2
  metrics_path: /
  static_configs:
  - targets:
    - auto.chaos:9999
- job_name: node
  static_configs:
  - targets:
    - dumont.chaos:9100
    - auto01:9100
    - drucki.chaos:9100
    - ebin01.chaos:9100
    - ebin02.chaos:9100
    - lenny.chaos:9100
    - riot01.chaos:9100
    - truhe:9100
    - tumor.chaos:9100
    - wohnz:9100
    - yori.chaos:9100

View File

@@ -1,42 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: grafana-storage
spec:
  capacity:
    storage: 5Gi # Doesn't really matter, as EFS does not enforce it anyway
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  mountOptions:
  - hard
  - nfsvers=4.1
  - rsize=1048576
  - wsize=1048576
  - timeo=300
  - retrans=2
  nfs:
    path: /k8s-data-fast/grafana
    server: ebin01.chaos
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: prometheus-k8s-db-prometheus-k8s-0
spec:
  capacity:
    storage: 50Gi # Doesn't really matter, as EFS does not enforce it anyway
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  mountOptions:
  - hard
  - nfsvers=4.1
  - rsize=1048576
  - wsize=1048576
  - timeo=300
  - retrans=2
  nfs:
    path: /k8s-data-fast/prometheus
    server: ebin02.chaos

View File

@@ -1,9 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Retain