removing kubernetes stuff

2023-12-13 18:09:45 +01:00
parent 757ab5a092
commit e60be3ab70
42 changed files with 0 additions and 7014 deletions


@@ -1,9 +0,0 @@
root@pine01:/etc/kubernetes# kubeadm upgrade apply v1.24.9 --ignore-preflight-errors=CoreDNSUnsupportedPlugins
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0112 18:28:48.533830 21616 initconfiguration.go:120] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
CoreDNS v1.8.6 v1.9.3


@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: git-secret
type: Opaque
data:
token: Nzk1YTFhMGQxMWQ0MDJiY2FiOGM3MjkyZDk5ODIyMzg2NDNkM2U3OQo=
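For reference, the same Secret can also be written with `stringData`, which lets the API server handle the base64 encoding; the token below is a placeholder, not the real credential:

```
apiVersion: v1
kind: Secret
metadata:
  name: git-secret
type: Opaque
stringData:
  token: <personal-access-token>  # placeholder; stored base64-encoded under .data.token
```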


@@ -1,33 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tektoncd-workspaces
namespace: default
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi
storageClassName: nfs-ssd-ebin02
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: tektoncd-workspaces
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/tektoncd-workspaces
server: ebin02
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: tektoncd-workspaces
namespace: default


@@ -1,101 +0,0 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: git-clone
spec:
workspaces:
- name: output
description: The git repo will be cloned onto the volume backing this workspace
params:
- name: url
description: git url to clone
type: string
default: http://git-ui.lan/chaos/kubernetes.git
- name: revision
description: git revision to checkout (branch, tag, sha, ref…)
type: string
default: master
- name: refspec
description: (optional) git refspec to fetch before checking out revision
default: ""
- name: submodules
description: defines if the resource should initialize and fetch the submodules
type: string
default: "true"
- name: depth
description: performs a shallow clone where only the most recent commit(s) will be fetched
type: string
default: "1"
- name: sslVerify
description: defines if http.sslVerify should be set to true or false in the global git config
type: string
default: "true"
- name: subdirectory
description: subdirectory inside the "output" workspace to clone the git repo into
type: string
default: ""
- name: deleteExisting
description: clean out the contents of the repo's destination directory (if it already exists) before trying to clone the repo there
type: string
default: "true"
- name: httpProxy
description: git HTTP proxy server for non-SSL requests
type: string
default: ""
- name: httpsProxy
description: git HTTPS proxy server for SSL requests
type: string
default: ""
- name: noProxy
description: git no proxy - opt out of proxying HTTP/HTTPS requests
type: string
default: ""
results:
- name: commit
description: The precise commit SHA that was fetched by this Task
steps:
- name: clone
image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:v0.30.2
script: |
CHECKOUT_DIR="$(workspaces.output.path)/$(params.subdirectory)"
cleandir() {
# Delete any existing contents of the repo directory if it exists.
#
# We don't just "rm -rf $CHECKOUT_DIR" because $CHECKOUT_DIR might be "/"
# or the root of a mounted volume.
if [[ -d "$CHECKOUT_DIR" ]] ; then
# Delete non-hidden files and directories
rm -rf "$CHECKOUT_DIR"/*
# Delete files and directories starting with . but excluding ..
rm -rf "$CHECKOUT_DIR"/.[!.]*
# Delete files and directories starting with .. plus any other character
rm -rf "$CHECKOUT_DIR"/..?*
fi
}
if [[ "$(params.deleteExisting)" == "true" ]] ; then
cleandir
fi
test -z "$(params.httpProxy)" || export HTTP_PROXY=$(params.httpProxy)
test -z "$(params.httpsProxy)" || export HTTPS_PROXY=$(params.httpsProxy)
test -z "$(params.noProxy)" || export NO_PROXY=$(params.noProxy)
/ko-app/git-init \
-url "$(params.url)" \
-revision "$(params.revision)" \
-refspec "$(params.refspec)" \
-path "$CHECKOUT_DIR" \
-sslVerify="$(params.sslVerify)" \
-submodules="$(params.submodules)" \
-depth "$(params.depth)"
cd "$CHECKOUT_DIR"
RESULT_SHA="$(git rev-parse HEAD | tr -d '\n')"
EXIT_CODE="$?"
if [ "$EXIT_CODE" != 0 ]
then
exit $EXIT_CODE
fi
# Make sure we don't add a trailing newline to the result!
echo -n "$RESULT_SHA" > $(results.commit.path)


@@ -1,45 +0,0 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: kaniko
spec:
params:
- name: git-url
- name: git-revision
- name: image-name
- name: path-to-image-context
- name: path-to-dockerfile
workspaces:
- name: git-source
tasks:
- name: fetch-from-git
taskRef:
name: git-clone
params:
- name: url
value: $(params.git-url)
- name: revision
value: $(params.git-revision)
- name: submodules
value: "false"
- name: subdirectory
value: "source"
workspaces:
- name: output
workspace: git-source
- name: build-image
taskRef:
name: kaniko
params:
- name: IMAGE
value: $(params.image-name)
- name: CONTEXT
value: $(params.path-to-image-context)
- name: DOCKERFILE
value: $(params.path-to-dockerfile)
workspaces:
- name: source
workspace: git-source
# If you want you can add a Task that uses the IMAGE_DIGEST from the kaniko task
# via $(tasks.build-image.results.IMAGE_DIGEST) - this was a feature we hadn't been
# able to fully deliver with the Image PipelineResource!
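A PipelineRun along these lines would exercise the pipeline end to end. This is only a sketch: the image reference, revision, and context/Dockerfile paths are assumptions, while the pipeline name, parameter names, workspace name, and the `tektoncd-workspaces` claim come from the manifests in this repo:

```
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
  generateName: kaniko-run-
spec:
  pipelineRef:
    name: kaniko
  params:
    - name: git-url
      value: http://git-ui.lan/chaos/kubernetes.git
    - name: git-revision
      value: master
    - name: image-name
      value: docker-registry.lan/example/app:latest  # assumed image reference
    - name: path-to-image-context
      value: source                                  # assumed: the subdirectory the clone step checks out into
    - name: path-to-dockerfile
      value: source/Dockerfile                       # assumed Dockerfile location
  workspaces:
    - name: git-source
      persistentVolumeClaim:
        claimName: tektoncd-workspaces
```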


@@ -1,78 +0,0 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: kaniko
labels:
app.kubernetes.io/version: "0.5"
annotations:
tekton.dev/pipelines.minVersion: "0.17.0"
tekton.dev/categories: Image Build
tekton.dev/tags: image-build
tekton.dev/displayName: "Build and upload container image using Kaniko"
tekton.dev/platforms: "linux/arm64"
spec:
description: >-
This Task builds source into a container image using Google's kaniko tool.
Kaniko doesn't depend on a Docker daemon and executes each
command within a Dockerfile completely in userspace. This enables
building container images in environments that can't easily or
securely run a Docker daemon, such as a standard Kubernetes cluster.
params:
- name: IMAGE
description: Name (reference) of the image to build.
- name: DOCKERFILE
description: Path to the Dockerfile to build.
default: ./Dockerfile
- name: CONTEXT
description: The build context used by Kaniko.
default: ./
- name: EXTRA_ARGS
type: array
default: []
- name: BUILDER_IMAGE
description: The image on which builds will run (default is v1.9.1)
default: gcr.io/kaniko-project/executor:v1.9.1
workspaces:
- name: source
description: Holds the context and docker file
- name: dockerconfig
description: Includes a docker `config.json`
optional: true
mountPath: /kaniko/.docker
results:
- name: IMAGE-DIGEST
description: Digest of the image just built.
steps:
- name: debug
workingDir: $(workspaces.source.path)
image: bash
script: |
#!/usr/bin/env bash
export
pwd
mount
ls -al
- name: build-and-push
workingDir: $(workspaces.source.path)
image: $(params.BUILDER_IMAGE)
args:
- $(params.EXTRA_ARGS[*])
- --dockerfile=$(params.DOCKERFILE)
- --context=$(params.CONTEXT) # The user does not need to care about the workspace and the source.
- --destination=$(params.IMAGE)
- --digest-file=/tekton/results/IMAGE-DIGEST
- --snapshotMode=redo
- --single-snapshot
- --use-new-run
- --skip-tls-verify
- --cache
- --cache-copy-layers
- --cache-dir=/workspace/cache
# kaniko assumes it is running as root, which means this example fails on platforms
# that default to run containers as random uid (like OpenShift). Adding this securityContext
# makes it explicit that it needs to run as root.
securityContext:
runAsUser: 0


@@ -1,73 +0,0 @@
#!/usr/bin/python3
import kubernetes as k8s
from pint import UnitRegistry
from collections import defaultdict
__all__ = ["compute_allocated_resources"]
def compute_allocated_resources():
ureg = UnitRegistry()
ureg.load_definitions('kubernetes_units.txt')
Q_ = ureg.Quantity
data = {}
# load credentials from the local kubeconfig; use k8s.config.load_incluster_config() when running inside the cluster
k8s.config.load_kube_config()
core_v1 = k8s.client.CoreV1Api()
# print("Listing pods with their IPs:")
# ret = core_v1.list_pod_for_all_namespaces(watch=False)
# for i in ret.items:
# print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name))
for node in core_v1.list_node().items:
stats = {}
node_name = node.metadata.name
allocatable = node.status.allocatable
max_pods = int(int(allocatable["pods"]) * 1.5)
# print("{} ALLOC: {} MAX_PODS: {}".format(node_name,allocatable,max_pods))
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
stats["cpu_alloc"] = Q_(allocatable["cpu"])
stats["mem_alloc"] = Q_(allocatable["memory"])
pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).items
# compute the allocated resources
cpureqs, cpulmts, memreqs, memlmts = [], [], [], []
for pod in pods:
for container in pod.spec.containers:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
lmts = defaultdict(lambda: 0, res.limits or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
cpulmts.append(Q_(lmts["cpu"]))
memlmts.append(Q_(lmts["memory"]))
stats["cpu_req"] = sum(cpureqs)
stats["cpu_lmt"] = sum(cpulmts)
stats["cpu_req_per"] = (stats["cpu_req"] / stats["cpu_alloc"] * 100)
stats["cpu_lmt_per"] = (stats["cpu_lmt"] / stats["cpu_alloc"] * 100)
stats["mem_req"] = sum(memreqs)
stats["mem_lmt"] = sum(memlmts)
stats["mem_req_per"] = (stats["mem_req"] / stats["mem_alloc"] * 100)
stats["mem_lmt_per"] = (stats["mem_lmt"] / stats["mem_alloc"] * 100)
data[node_name] = stats
return data
if __name__ == "__main__":
# execute only if run as a script
print(compute_allocated_resources())


@@ -1,20 +0,0 @@
# memory units
kmemunits = 1 = [kmemunits]
Ki = 1024 * kmemunits
Mi = Ki^2
Gi = Ki^3
Ti = Ki^4
Pi = Ki^5
Ei = Ki^6
# cpu units
kcpuunits = 1 = [kcpuunits]
m = 1/1000 * kcpuunits
k = 1000 * kcpuunits
M = k^2
G = k^3
T = k^4
P = k^5
E = k^6


@@ -1,90 +0,0 @@
Upgrade:
```
export KV=1.26.0-00;
apt-mark unhold kubeadm=$KV kubectl=$KV kubelet=$KV;
apt install -y kubeadm=$KV;
```
```
kubeadm upgrade node #Other pines in the wood
```
```
#pine01
kubeadm upgrade plan --ignore-preflight-errors=CoreDNSUnsupportedPlugins;
kubeadm config images pull;
kubeadm upgrade apply ${KV/\-*/} --ignore-preflight-errors=CoreDNSUnsupportedPlugins --certificate-renewal=false; # sometimes set to true to renew certificates
```
```
apt install kubectl=$KV kubelet=$KV;
systemctl daemon-reload && systemctl restart kubelet;
apt-mark hold kubeadm=$KV kubectl=$KV kubelet=$KV;
echo 'You can now uncordon the node';
```
# Info:
```
$ kubectl -n kube-system get cm kubeadm-config -o yaml
apiVersion: v1
data:
ClusterConfiguration: |
apiServer:
extraArgs:
authorization-mode: Node,RBAC
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: v1.23.15
networking:
dnsDomain: cluster.local
podSubnet: 172.23.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
ClusterStatus: |
apiEndpoints:
pine01:
advertiseAddress: 172.16.23.21
bindPort: 6443
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterStatus
kind: ConfigMap
metadata:
creationTimestamp: "2021-01-20T14:55:12Z"
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:data:
.: {}
f:ClusterConfiguration: {}
f:ClusterStatus: {}
manager: kubeadm
operation: Update
time: "2021-01-20T14:55:12Z"
name: kubeadm-config
namespace: kube-system
resourceVersion: "441685033"
uid: c70fefd3-02c3-44c8-a37d-7b17ec445455
```
Descheduler (reschedule pods)
# https://github.com/kubernetes-sigs/descheduler
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/rbac.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/base/configmap.yaml
# kubectl apply -n kube-system -f https://raw.githubusercontent.com/kubernetes-sigs/descheduler/master/kubernetes/job/job.yaml

File diff suppressed because it is too large


@@ -1,122 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
uid: 7bf78c83-68ac-4dee-95f6-52964e38e2d3
resourceVersion: '441765420'
generation: 37
creationTimestamp: '2021-01-20T14:55:14Z'
labels:
k8s-app: kube-dns
annotations:
deployment.kubernetes.io/revision: '34'
spec:
replicas: 2
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
spec:
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
defaultMode: 420
containers:
- name: coredns
image: registry.k8s.io/coredns/coredns:v1.9.3
args:
- '-conf'
- /etc/coredns/Corefile
ports:
- name: dns
containerPort: 53
protocol: UDP
- name: dns-tcp
containerPort: 53
protocol: TCP
- name: metrics
containerPort: 9153
protocol: TCP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- name: config-volume
readOnly: true
mountPath: /etc/coredns
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: coredns
serviceAccount: coredns
securityContext: {}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
priorityClassName: system-cluster-critical
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600


@@ -1,202 +0,0 @@
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
apiVersion: v1
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
file /etc/coredns/lan.db lan
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
lan.db: |
;lan. zone file
$ORIGIN lan.
@ 600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2022032201 7200 600 1209600 600
3600 IN NS 172.23.255.252
ns IN A 172.23.255.252
salt IN A 192.168.10.2
mqtt IN A 172.16.23.1
www-proxy IN A 172.23.255.1
git IN A 172.23.255.2
postgresql IN A 172.23.255.4
mariadb IN A 172.23.255.5
redis IN A 172.23.255.6
pihole IN A 172.23.255.253
adm IN CNAME adm01.wks.
prometheus IN CNAME www-proxy
alertmanager IN CNAME www-proxy
stats IN CNAME www-proxy
cr-ui IN CNAME www-proxy
apt IN CNAME www-proxy
apt-cache IN CNAME www-proxy
nodered IN CNAME www-proxy
foto IN CNAME www-proxy
musik IN CNAME www-proxy
hassio IN CNAME www-proxy
hassio-conf IN CNAME www-proxy
git-ui IN CNAME www-proxy
grav IN CNAME www-proxy
tekton IN CNAME www-proxy
nc IN CNAME www-proxy
dolibarr IN CNAME www-proxy
auth IN CNAME www-proxy
public.auth IN CNAME www-proxy
secure.auth IN CNAME www-proxy
docker-registry IN CNAME adm
cr IN CNAME adm
dr-mirror IN CNAME adm
log IN CNAME adm
---
apiVersion: v1
kind: Service
metadata:
name: dns-ext
namespace: kube-system
spec:
ports:
- name: dns-udp
protocol: UDP
port: 53
targetPort: 53
selector:
k8s-app: kube-dns
type: LoadBalancer
loadBalancerIP: 172.23.255.252
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
labels:
k8s-app: kube-dns
spec:
progressDeadlineSeconds: 600
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-dns
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.9.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 8181
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
- key: lan.db
path: lan.db
name: coredns
name: config-volume


@@ -1,47 +0,0 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
name: descheduler-cronjob
namespace: kube-system
spec:
schedule: "*/50 * * * *"
concurrencyPolicy: "Forbid"
jobTemplate:
spec:
template:
metadata:
name: descheduler-pod
spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.25.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--v"
- "3"
resources:
requests:
cpu: "500m"
memory: "256Mi"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: false
restartPolicy: "Never"
serviceAccountName: descheduler-sa
volumes:
- name: policy-volume
configMap:
name: descheduler-policy-configmap


@@ -1,34 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: descheduler-policy-configmap
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 1
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 20
"memory": 40
"pods": 20
targetThresholds:
"cpu": 50
"memory": 60
"pods": 20
#nodeFit: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false


@@ -1,10 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-config
#namespace: nginx-ingress
namespace: default
data:
proxy-connect-timeout: "10s"
proxy-read-timeout: "10s"
client-max-body-size: "0"


@@ -1,205 +0,0 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: kube-flannel
labels:
pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-flannel
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "172.23.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-flannel
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
#image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
command:
- cp
args:
- -f
- /flannel
- /opt/cni/bin/flannel
volumeMounts:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
#image: flannelcni/flannel:v0.20.2 for ppc64le and mips64le (dockerhub limitations may apply)
image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: xtables-lock
mountPath: /run/xtables.lock
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni-plugin
hostPath:
path: /opt/cni/bin
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate


@@ -1,21 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: loki-data
spec:
storageClassName: "nfs-ssd-ebin02"
nfs:
path: /data/raid1-ssd/k8s-data/loki-data
server: ebin02
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: storage-loki-0
namespace: monitoring


@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 172.23.255.1-172.23.255.254


@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 172.23.255.1-172.23.255.254


@@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: minio-openwrt
type: Opaque
data:
username: b3BlbndydAo=
password: ZUZWbmVnOEkwOE1zRTN0Q2VCRFB4c011OU0yVjJGdnkK
endpoint: aHR0cHM6Ly9taW5pby5saXZlLWluZnJhLnN2Yy5jbHVzdGVyLmxvY2FsOjk0NDMK

View File

@@ -1,36 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd
provisioner: nfs-ssd # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin01
provisioner: nfs-ssd-ebin01 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-hdd-ebin01
provisioner: nfs-hdd-ebin01 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-ssd-ebin02
provisioner: nfs-ssd-ebin02 # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
archiveOnDelete: "false"
reclaimPolicy: Retain
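Claims against these classes are served by the matching nfs-subdir-external-provisioner deployments below; a minimal sketch of such a claim (name, namespace, and size are made up):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data       # made-up name
  namespace: default
spec:
  storageClassName: nfs-ssd-ebin02
  accessModes:
    - ReadWriteMany        # NFS-backed volumes can be mounted by several pods
  resources:
    requests:
      storage: 5Gi
```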


@@ -1,49 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-hdd-ebin01
namespace: live-infra
labels:
app: nfs-hdd-ebin01
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-hdd-ebin01
template:
metadata:
labels:
app: nfs-hdd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-hdd-ebin01
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-hdd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/k8s-data-hdd
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/k8s-data-hdd


@@ -1,49 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin01
namespace: live-infra
labels:
app: nfs-ssd-ebin01
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin01
template:
metadata:
labels:
app: nfs-ssd-ebin01
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin01
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin01
- name: NFS_SERVER
value: ebin01
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin01
path: /data/raid1-ssd/k8s-data


@@ -1,49 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-ssd-ebin02
namespace: live-infra
labels:
app: nfs-ssd-ebin02
service: nfs
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: nfs-ssd-ebin02
template:
metadata:
labels:
app: nfs-ssd-ebin02
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-ssd-ebin02
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-ssd-ebin02
- name: NFS_SERVER
value: ebin02
- name: NFS_PATH
value: /data/raid1-ssd/k8s-data
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: service
operator: In
values:
- nfs
topologyKey: kubernetes.io/hostname
volumes:
- name: nfs-client-root
nfs:
server: ebin02
path: /data/raid1-ssd/k8s-data


@@ -1,65 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
# replace with namespace where provisioner is deployed
namespace: live-infra
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io


@@ -1,19 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: live-env
---
apiVersion: v1
kind: Namespace
metadata:
name: test-env
---
apiVersion: v1
kind: Namespace
metadata:
name: live-infra
---
apiVersion: v1
kind: Namespace
metadata:
name: test-infra


@@ -1,7 +0,0 @@
FROM: https://tanzu.vmware.com/developer/guides/ci-cd/argocd-gs/
# kubectl apply -f namespace.yaml
# -kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml-
# kubectl apply -n argocd -f install.yaml (needs changes for ARM builds)
# kubectl apply -n argocd -f ingress.yaml


@@ -1,18 +0,0 @@
#https://argoproj.github.io/argo-cd/operator-manual/ingress/#kubernetesingress-nginx
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argocd-server
namespace: argocd
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: argocd.lan
http:
paths:
- backend:
serviceName: argocd-server
servicePort: https
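`extensions/v1beta1` Ingress objects were removed in Kubernetes 1.22, so on the 1.24+ cluster described elsewhere in this repo the same object would need the `networking.k8s.io/v1` schema; a minimal equivalent sketch:

```
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-server
  namespace: argocd
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
  rules:
    - host: argocd.lan
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: argocd-server
                port:
                  name: https
```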

File diff suppressed because it is too large


@@ -1,4 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: argocd


@@ -1,42 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
selector:
matchLabels:
run: nginx-deployment
template:
metadata:
labels:
run: nginx-deployment
spec:
containers:
- image: nginx
name: nginx-webserver
---
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
type: NodePort
selector:
run: nginx-deployment
ports:
- port: 80
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: nginx-test
spec:
rules:
- host: nginx-test.lan
http:
paths:
- backend:
serviceName: nginx-service
servicePort: 80


@@ -1,6 +0,0 @@
Install:
# Pipelines: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml@
# Triggers: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml@
@kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml@ #https://github.com/tektoncd/triggers/blob/master/docs/install.md
# Dashboard: @kubectl apply --filename https://storage.googleapis.com/tekton-releases/dashboard/latest/tekton-dashboard-release.yaml@


@@ -1,60 +0,0 @@
# Copyright 2020 Tekton Authors LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: config-registry-cert
namespace: tekton-pipelines
labels:
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-pipelines
data:
# Registry's self-signed certificate
# TODO: somehow automate this with salt
cert: |
-----BEGIN CERTIFICATE-----
MIIFujCCA6KgAwIBAgIEYsvT+zANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE
RTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFDASBgNVBAMMC3R1
bW9yLmNoYW9zMB4XDTIxMDIxMjE4MzAzM1oXDTIyMDIxMjE4MzAzM1owLzELMAkG
A1UEBhMCREUxDzANBgNVBAgMBkJlcmxpbjEPMA0GA1UEBwwGQmVybGluMIICIjAN
BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAog4t352wKHS4pflQK4NlWH6yv1FK
MnqNJiNnIgkWrNABTu9ES3cmUwdEhf+Um7MJYvQivOZFIH65wBBmOxfnYWB+NPwn
XAi/o3BcePIdbwEGs0cxgIEKbmL9fY0SCXq0pXRu8Y7WAhqdTNp6/HY2fTMx7ghX
RNQPoeNlcfAZgpsJlZdkSzMYoFpGIW+Tvj3INNuIuHo1pagckWW/hGUIqY0NuUV9
Aj8LOHhHB+vKtjbq5DMVAob4kKOPJFmq/1D6fmRh3W1YAGikowVv3V45jAmnkcBj
Z8BIEiOnBy1AyW9o8Tc5000MAGNrm9IGpRfBBTptSAApZmK1V6zKreqCiCpgOBbh
6U1Bf1L39u8aLVRxeyzQbxqBM1VTbjKxygFSIR/7rVd9BEhx6VA95EG+EdPLpKDp
mymElCcVgv2ZhKBRxtne4CAQD5ng2SoEqLdjvZdC44QNapnj+6jlaNvKRJ1q63kq
B5Y4shJxYOc6QDQp2+Eh2d7qQNiTE3FJC/aeXDNQ+dqeV7chU+PbcbMQoxnIN6ou
Zc2IdtNL87+Apgh6vqZX9pELBXUN1Nu3NI88T8tw1CdqfFfh4Z2EEBBCsPD0yZPV
UrHZsAMiHh5prRkwsBVzDBIaLYd6glf/w9W8sWxe5wceDNhxD8VAfq/ZXeuE1Pme
cTVYsBNj8idC9tECAwEAAaOBxzCBxDAMBgNVHRMBAf8EAjAAMAsGA1UdDwQEAwIF
4DAdBgNVHQ4EFgQUa7ADNR68XrDsLtLtngmdJQ9UtOswcAYDVR0jBGkwZ4AU9l9v
D1+dukLLV/uDnP3eB4i6ZyihSaRHMEUxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZC
ZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEUMBIGA1UEAwwLdHVtb3IuY2hhb3OCBBKa
C88wFgYDVR0RBA8wDYILdHVtb3IuY2hhb3MwDQYJKoZIhvcNAQELBQADggIBAKK3
S8qKrsarBflGrDI4diG+QOcMG3/y6juARp3vxQf3fDqC6HZCl+kWAp+Cq3Sp/hU7
GKM7qraWpvGxgmDyaevAirLdFlYQBgcIl9frPI8yfLWbZHWvx3PFXNqg2Ckm98xX
vSUacPTPp/tKFBquJ5+j+/YS2U4qWWNIYYtDEI+3lswfoeh0CIEPSxDk0wHDAyfZ
Vh30ZuZhsf3F63xMggw/RpEHeTTCr0YGOAmzpb7jItcbP/EER1qTQ4T+3ExuC40C
EdOAeL377O2rr7zjcmJWk8B5FaQ8K8UdE/iQGM7tP5ieMNTVACe21KFpqIIXaIka
HqRTyvRmJGUrVf1NeXE16yKirIqAjEV/B/4S244wxYcwqweZObbI0PnbnEMn3PMF
TV+e1CUmVOKyGIxfHH7j/VKQfmH/W0jOlGWI7OkbdU5GckoX4Knjrv2MmT9i2ENy
6dID3BJVm6hK2SjJLc7SxbPXMG3I6BrlA5/3LaXzl+2fWAk5OA1jnGZz0P4XcdOO
iAulB4I3PdmNRdSYAXVRdo5OLoq/7iBcqSrCXRw1IbgJm0VlS2AI6hGEXDQvjQwP
38ijZUV/ch2lGyUZOfQymI7Ylh+Airn8ctqyMS8FeZBAyny4/t7xrhWuGO1awUzp
4p/sEjg6kqp3oLai5yhaz9S+y7Ao5XmGDdzfalWH
-----END CERTIFICATE-----


@@ -1,19 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: tekton-dashboard
namespace: tekton-pipelines
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: tekton.lan
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: tekton-dashboard
port:
number: 9097


@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: tektoncd-workspaces
spec:
storageClassName: nfs-ssd
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi


@@ -1,5 +0,0 @@
From: https://github.com/coreos/prometheus-operator/blob/master/Documentation/additional-scrape-config.md
# create new secret:
kubectl create secret generic additional-scrape-configs --from-file=prometheus-additional.yaml --dry-run -oyaml > additional-scrape-configs.yaml
# add "namespace: monitoring"
# apply
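The resulting Secret is referenced from the Prometheus custom resource through `spec.additionalScrapeConfigs`; a sketch, assuming the kube-prometheus default resource name `k8s` in the `monitoring` namespace:

```
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s               # assumed kube-prometheus default
  namespace: monitoring
spec:
  additionalScrapeConfigs:
    name: additional-scrape-configs
    key: prometheus-additional.yaml
```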


@@ -1,7 +0,0 @@
apiVersion: v1
data:
prometheus-additional.yaml: LSBqb2JfbmFtZTogZ2l0ZWEKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGdpdC11aS5sYW4KLSBqb2JfbmFtZTogbXlzcWxkCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtYXJpYWRiLmxhbjo5MTA0Ci0gam9iX25hbWU6IG1xdHQubW9zcXVpdHRvCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBtcXR0Lmxhbjo5MjM0CiAgICAtIG1xdHQuY2hhb3M6OTIzNAotIGpvYl9uYW1lOiBoYXByb3h5CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBhZG0wMS53a3M6OTEwMQogICAgLSBkcnVja2kud2tzOjkxMDEKICAgIC0gYXV0bzAyLmNoYW9zOjkxMDEKLSBqb2JfbmFtZToga2xpcHBlcgogIHN0YXRpY19jb25maWdzOgogIC0gdGFyZ2V0czoKICAgIC0gZHJ1Y2tpLndrczozOTAzCi0gam9iX25hbWU6IG9jdG9wcmludAogIG1ldHJpY3NfcGF0aDogL3BsdWdpbi9wcm9tZXRoZXVzX2V4cG9ydGVyL21ldHJpY3MKICBwYXJhbXM6CiAgICBhcGlrZXk6CiAgICAtIDMwRThCMDFCRkQ2NzRFNUJCRDQ0NkQwOEM0NzMwREY0CiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOgogICAgLSBkcnVja2kud2tzOjgwCi0gam9iX25hbWU6IGhhc3NpbwogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lKaE16Qm1ZalUxWmpjeVpHRTBZemMyWW1VMk5tWTBOamxqTlRBeU1qZGpaQ0lzSW1saGRDSTZNVFl4TWpnNE16STVOeXdpWlhod0lqb3hPVEk0TWpRek1qazNmUS4xSUNzSGxpVVhSMENHNEg4dlFSWUo1alZxRndtcUtTQjBmU2NTaXRDLVE0JwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGhhc3Npby5sYW46ODAKLSBqb2JfbmFtZTogaGFzc2lvX3Jpbmc4NgogIG1ldHJpY3NfcGF0aDogL2FwaS9wcm9tZXRoZXVzCiAgYmVhcmVyX3Rva2VuOiAnZXlKMGVYQWlPaUpLVjFRaUxDSmhiR2NpT2lKSVV6STFOaUo5LmV5SnBjM01pT2lJME9HRmpaVEppTm1RM09UZzBNamMzWVdGbU1tTm1abVUxWXpjNE5URTBOQ0lzSW1saGRDSTZNVFl4TWpFNU1qazBNQ3dpWlhod0lqb3hPVEkzTlRVeU9UUXdmUS5CYklBWG05UnEwamI2b3VxZ1ZITmQ2S2VlejNOUDN5aC03d3lmdW9COFlrJwogIHN0YXRpY19jb25maWdzOgogICAgLSB0YXJnZXRzOgogICAgICAtIGF1dG8uY2hhb3M6ODAKLSBqb2JfbmFtZTogcG9zdGdyZXMKICBzdGF0aWNfY29uZmlnczoKICAgIC0gdGFyZ2V0czoKICAgICAgLSBwb3N0Z3Jlcy5saXZlLWVudi5zdmMuY2x1c3Rlci5sb2NhbDo5MTg3Ci0gam9iX25hbWU6IG5vZGUKICBzdGF0aWNfY29uZmlnczoKICAtIHRhcmdldHM6CiAgICAtIGFkbTAxLndrczo5MTAwCiAgICAtIGR1bW9udC13a3Mud2tzOjkxMDAKICAgIC0gZHJ1Y2tpLndrczo5MTAwCiAgICAtIGViaW4wMS53a3M6OTEwMAogICAgLSBlYmluMDIud2tzOjkxMDAKICAgIC0gb3NtYy53a3M6OTEwMAogICAgLSByaW90MDEud2tzOjkxMDAKICAgIC0gdHJ1aGUuY2hhb3M6OTEwMAogICAgLSBhdXRvMDIuY2hhb3M6OTEwMAogICAgLSBkdW1vbnQuY2hhb3M6OTEwMAogICAgLSB0dW1vcjAxLmNoYW9zOjkxMDAKICAgIC0gd29obnouY2hhb3M6OTEwMAogICAgLSB5b3JpLmNoYW9zOjkxMDAK
kind: Secret
metadata:
creationTimestamp: null
name: additional-scrape-configs


@@ -1,30 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: prometheus-k8s
namespace: metallb-system
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: prometheus-k8s
namespace: metallb-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: prometheus-k8s
subjects:
- kind: ServiceAccount
name: prometheus-k8s
namespace: monitoring


@@ -1,63 +0,0 @@
- job_name: gitea
static_configs:
- targets:
- git-ui.lan
- job_name: mysqld
static_configs:
- targets:
- mariadb.lan:9104
- job_name: mqtt.mosquitto
static_configs:
- targets:
- mqtt.lan:9234
- mqtt.chaos:9234
- job_name: haproxy
static_configs:
- targets:
- adm01.wks:9101
- drucki.wks:9101
- auto02.chaos:9101
- job_name: klipper
static_configs:
- targets:
- drucki.wks:3903
- job_name: octoprint
metrics_path: /plugin/prometheus_exporter/metrics
params:
apikey:
- 30E8B01BFD674E5BBD446D08C4730DF4
static_configs:
- targets:
- drucki.wks:80
- job_name: hassio
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJhMzBmYjU1ZjcyZGE0Yzc2YmU2NmY0NjljNTAyMjdjZCIsImlhdCI6MTYxMjg4MzI5NywiZXhwIjoxOTI4MjQzMjk3fQ.1ICsHliUXR0CG4H8vQRYJ5jVqFwmqKSB0fScSitC-Q4'
static_configs:
- targets:
- hassio.lan:80
- job_name: hassio_ring86
metrics_path: /api/prometheus
bearer_token: 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiI0OGFjZTJiNmQ3OTg0Mjc3YWFmMmNmZmU1Yzc4NTE0NCIsImlhdCI6MTYxMjE5Mjk0MCwiZXhwIjoxOTI3NTUyOTQwfQ.BbIAXm9Rq0jb6ouqgVHNd6Keez3NP3yh-7wyfuoB8Yk'
static_configs:
- targets:
- auto.chaos:80
- job_name: postgres
static_configs:
- targets:
- postgres.live-env.svc.cluster.local:9187
- job_name: node
static_configs:
- targets:
- adm01.wks:9100
- dumont-wks.wks:9100
- drucki.wks:9100
- ebin01.wks:9100
- ebin02.wks:9100
- osmc.wks:9100
- riot01.wks:9100
- truhe.chaos:9100
- auto02.chaos:9100
- dumont.chaos:9100
- tumor01.chaos:9100
- wohnz.chaos:9100
- yori.chaos:9100


@@ -1,14 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: prometheus-k8s-db-prometheus-k8s-0
namespace: monitoring
annotations:
volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
storageClassName: fast
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi


@@ -1,41 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: prometheus-db
annotations:
pv.kubernetes.io/provisioned-by: nfs-ssd
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/prometheus-db
server: ebin01
capacity:
storage: 40Gi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: prometheus-k8s-db-prometheus-k8s-0
namespace: monitoring
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-conf
spec:
storageClassName: "nfs-ssd"
nfs:
path: /data/raid1-ssd/k8s-data/grafana-conf
server: ebin01
capacity:
storage: 40Mi
accessModes:
- ReadWriteOnce
volumeMode: Filesystem
persistentVolumeReclaimPolicy: Retain
claimRef:
kind: PersistentVolumeClaim
name: grafana-conf
namespace: monitoring


@@ -1,215 +0,0 @@
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: coredns/coredns
tag: "1.6.9"
pullPolicy: IfNotPresent
replicaCount: 1
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
serviceType: "ClusterIP"
prometheus:
monitor:
enabled: false
additionalLabels: {}
namespace: ""
service:
# clusterIP: ""
# loadBalancerIP: ""
# externalTrafficPolicy: ""
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
serviceAccount:
create: false
# The name of the ServiceAccount to use
# If not set and create is true, a name is generated using the fullname template
name:
rbac:
# If true, create & use RBAC resources
create: true
# If true, create and use PodSecurityPolicy
pspEnable: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name:
# isClusterService specifies whether the chart should be deployed as a cluster-service or a normal k8s app.
isClusterService: false
# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set.
priorityClassName: ""
# Default zone is what Kubernetes recommends:
# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options
servers:
- zones:
- zone: .
port: 53
plugins:
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
configBlock: |-
lameduck 5s
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: cluster.local in-addr.arpa ip6.arpa
configBlock: |-
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . /etc/resolv.conf
- name: cache
parameters: 30
- name: loop
- name: reload
- name: loadbalance
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
podDisruptionBudget: {}
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of extra volumes to create
extraVolumes: []
# - name: some-volume-name
# emptyDir: {}
# optional array of mount points for extraVolumes
extraVolumeMounts: []
# - name: some-volume-name
# mountPath: /etc/wherever
# optional array of secrets to mount inside coredns container
# possible usecase: need for secure connection with etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# - name: some-fancy-secret
# mountPath: /etc/wherever
# Custom labels to apply to Deployment, Pod, Service, ServiceMonitor. Including autoscaler if enabled.
customLabels: {}
## Configure a cluster-proportional-autoscaler for coredns
# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler
autoscaler:
# Enabled the cluster-proportional-autoscaler
enabled: false
# Number of cores in the cluster per coredns replica
coresPerReplica: 256
# Number of nodes in the cluster per coredns replica
nodesPerReplica: 16
# Min size of replicaCount
min: 0
# Max size of replicaCount (default of 0 is no max)
max: 0
# Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler
includeUnschedulableNodes: false
# If true does not allow single points of failure to form
preventSinglePointFailure: true
image:
repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64
tag: "1.8.0"
pullPolicy: IfNotPresent
# Optional priority class to be used for the autoscaler pods. priorityClassName used if not set.
priorityClassName: ""
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
tolerations: []
# resources for autoscaler pod
resources:
requests:
cpu: "20m"
memory: "10Mi"
limits:
cpu: "20m"
memory: "10Mi"
# Options for autoscaler configmap
configmap:
## Annotations for the coredns-autoscaler configmap
# i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed
annotations: {}
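If this chart were used to serve the custom lan zone from the CoreDNS ConfigMap above, the commented `zoneFiles` option could carry the same records; a short sketch reusing a few of those entries:

```
zoneFiles:
  - filename: lan.db
    domain: lan
    contents: |
      $ORIGIN lan.
      @   600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2022032201 7200 600 1209600 600
          3600 IN NS 172.23.255.252
      ns      IN A 172.23.255.252
      git     IN A 172.23.255.2
      pihole  IN A 172.23.255.253
```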