ansible is its own repo now

2025-11-17 13:37:05 +01:00
parent 749a2a7d12
commit 2ac4fc31a5
32 changed files with 0 additions and 1578 deletions

View File

@@ -1,19 +0,0 @@
wks:
hosts:
adm01.wks:
drucki.wks:
ebin01.wks:
ebin02.wks:
pine01.wks:
#pine02.wks:
pine03.wks:
#pine04.wks:
pine05.wks:
ring86:
hosts:
auto02.chaos:
truhe.chaos:
#wohnz.chaos:
#yori.chaos:
#lenny.chaos:

View File

@@ -1,9 +0,0 @@
- name: Remove Apt-Daily timer
ansible.builtin.systemd_service:
name: apt-daily.timer
state: stopped
- name: Remove Apt-Daily-Upgrade timer
ansible.builtin.systemd_service:
name: apt-daily-upgrade.timer
state: stopped
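
state: stopped only stops the running timer; an enabled timer starts again on the next boot. A possible extension of this task file (a sketch, not part of the original) would also disable and mask both timers:

- name: Disable and mask Apt-Daily timers
  ansible.builtin.systemd_service:
    name: "{{ item }}"
    state: stopped
    enabled: false
    masked: true
  loop:
    - apt-daily.timer
    - apt-daily-upgrade.timer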

View File

@@ -1,25 +0,0 @@
- name: Add Armbian Repo
block:
- name: Armbian GPG Key
ansible.builtin.get_url:
url: https://apt.armbian.com/armbian.key
dest: /etc/apt/trusted.gpg.d/armbian-archive-keyring.asc
- name: Armbian Repo
ansible.builtin.apt_repository:
filename: armbian
repo: deb http://apt-cache.service.nr5/apt.armbian.com bookworm main bookworm-utils
- name: Armbian config Repo & Package
block:
- name: Armbian-config Repo
ansible.builtin.apt_repository:
filename: armbian-config
repo: deb https://github.armbian.com/configng stable main
- name: armbian-config install
apt:
state: present
name:
- armbian-config
- name: Apt Timers
import_tasks: "apt-timers.yaml"
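
Dropping the key into /etc/apt/trusted.gpg.d/ trusts it for every configured repository. A scoped variant (a sketch, assuming the stock /etc/apt/keyrings directory of Debian 12) would pin the key to this one repo via signed-by; the same idea applies to the HashiCorp repo task further down:

- name: Armbian GPG Key (scoped)
  ansible.builtin.get_url:
    url: https://apt.armbian.com/armbian.key
    dest: /etc/apt/keyrings/armbian.asc
- name: Armbian Repo (signed-by)
  ansible.builtin.apt_repository:
    filename: armbian
    repo: deb [signed-by=/etc/apt/keyrings/armbian.asc] http://apt-cache.service.nr5/apt.armbian.com bookworm main bookworm-utils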

View File

@@ -1,24 +0,0 @@
- name: Install autofs
apt:
state: present
name:
- autofs
- name: Create /net
file:
path: /net
state: directory
- name: Update auto.master
lineinfile:
path: /etc/auto.master
regexp: '^#/net'
line: /net -hosts
register: auto_master_update
- name: Restart autofs
service:
name: autofs
state: restarted
when: auto_master_update.changed
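
With the /net -hosts map active, autofs mounts every NFS export of a host on first access under /net/<hostname>. A quick smoke test could look like this (a sketch; ebin01.wks is taken from the inventory above and assumed to export something over NFS):

- name: Trigger automount for ebin01.wks
  command: ls /net/ebin01.wks
  changed_when: false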

View File

@@ -1,14 +0,0 @@
- name: Install common packages
become: true
become_user: admin
homebrew:
state: present
name:
- btop
- htop
- rsync
- rclone
- vim
- curl
- arping

View File

@@ -1,69 +0,0 @@
- name: Install common packages
apt:
state: present
name:
- btop
- htop
- iotop
- net-tools
- tcpdump
- curl
- vim
- rclone
- rsync
- borgmatic
- cifs-utils
- nfs-common
- etckeeper
- iputils-arping
- bwm-ng
- iftop
- name: Remove common packages
apt:
state: absent
name:
- salt-*
- name: Remove packages in VMs
apt:
state: absent
purge: true
name:
- smartmontools
- lm-sensors
    - ipmitool
- openipmi
when: ansible_facts['virtualization_role'] == "guest"
- name: Install packages on physical hosts
apt:
state: present
name:
- lm-sensors
- hdparm
when: ansible_facts['virtualization_role'] == "host"
- name: Update /etc/hosts
lineinfile:
path: /etc/hosts
regexp: '^127\.0\.1\.1'
line: 127.0.1.1 {{ ansible_nodename }}
- name: Kernel modules
include_role:
name: modules
- name: sudo for Linux
include_role:
name: sudo
#- name: autofs for Linux
# include_role:
# name: autofs
#
# Set vm.swappiness to 5 in /etc/sysctl.conf
- name: Set vm.swappiness
  sysctl:
    name: vm.swappiness
    value: '5'
    state: present

View File

@@ -1,6 +0,0 @@
- name: Common Tasks for all hosts
include_tasks: "{{ ansible_system | lower }}.yaml"
- name: Prometheus
include_role:
name: prometheus-node-exporter

View File

@@ -1,10 +0,0 @@
- name: REPO - Disable SID
ansible.builtin.apt_repository:
state: absent
filename: debian-sid
repo: deb http://apt-cache.service.nr5/deb.debian.org/debian sid main contrib
- name: REPO - Disable bullseye
ansible.builtin.file:
state: absent
path: /etc/apt/sources.list.d/debian-bullseye.list

View File

@@ -1,7 +0,0 @@
- name: Java
apt:
state: absent
name:
- java-common
- ca-certificates-java
- openjdk-*

View File

@@ -1,3 +0,0 @@
blacklist axp20x-battery
blacklist bluetooth
blacklist snd_soc_hdmi_codec

View File

@@ -1,26 +0,0 @@
- name: Kernel module nf_conntrack
community.general.modprobe:
name: nf_conntrack
state: present
persistent: present
- name: Kernel module xt_conntrack
community.general.modprobe:
    name: xt_conntrack
state: present
persistent: present
- name: blacklist modules
copy:
src: ansible-blacklist.conf
dest: /etc/modprobe.d/ansible-blacklist.conf
register: blacklist
- name: depmod and initramfs
block:
- name: depmod
command: depmod -ae
- name: initramfs
command: update-initramfs -u
when: blacklist.changed

View File

@@ -1,15 +0,0 @@
{
"service": {
"ID": "consul-ui-{{ ansible_nodename }}",
"Name": "consul-ui",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.consul-ui-http.rule=Host(`consul.service.nr5`)"
],
"Port": 8500,
"Check": {
"HTTP": "http://{{ ansible_facts['default_ipv4']['address'] }}:8500/ui/",
"Interval": "60s"
}
}
}
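
Whether the local agent actually loaded a service definition like this can be checked against the standard /v1/agent/services endpoint (a sketch, not part of the original role):

- name: Verify consul-ui is registered
  ansible.builtin.uri:
    url: http://127.0.0.1:8500/v1/agent/services
    return_content: true
  register: agent_services
  failed_when: "'consul-ui' not in agent_services.content"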

View File

@@ -1,105 +0,0 @@
######
## Ansible
##
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# Full configuration options can be found at https://www.consul.io/docs/agent/config
# datacenter
# This flag controls the datacenter in which the agent is running. If not provided,
# it defaults to "dc1". Consul has first-class support for multiple datacenters, but
# it relies on proper configuration. Nodes in the same datacenter should be on a
# single LAN.
datacenter = "nummer5"
domain = "nr5"
alt_domain = "consul"
recursors = ["172.16.23.1"]
# data_dir
# This flag provides a data directory for the agent to store state. This is required
# for all agents. The directory should be durable across reboots. This is especially
# critical for agents that are running in server mode as they must be able to persist
# cluster state. Additionally, the directory must support the use of filesystem
# locking, meaning some types of mounted folders (e.g. VirtualBox shared folders) may
# not be suitable.
data_dir = "/opt/consul"
# client_addr
# The address to which Consul will bind client interfaces, including the HTTP and DNS
# servers. By default, this is "127.0.0.1", allowing only loopback connections. In
# Consul 1.0 and later this can be set to a space-separated list of addresses to bind
# to, or a go-sockaddr template that can potentially resolve to multiple addresses.
client_addr = "0.0.0.0"
# ui
# Enables the built-in web UI server and the required HTTP routes. This eliminates
# the need to maintain the Consul web UI files separately from the binary.
# Version 1.10 deprecated ui=true in favor of ui_config.enabled=true
ui_config {
enabled = true
}
# server
# This flag is used to control if an agent is in server or client mode. When provided,
# an agent will act as a Consul server. Each Consul cluster must have at least one
# server and ideally no more than 5 per datacenter. All servers participate in the Raft
# consensus algorithm to ensure that transactions occur in a consistent, linearizable
# manner. Transactions modify cluster state, which is maintained on all server nodes to
# ensure availability in the case of node failure. Server nodes also participate in a
# WAN gossip pool with server nodes in other datacenters. Servers act as gateways to
# other datacenters and forward traffic as appropriate.
server = true
# Bind addr
# You may use IPv4 or IPv6 but if you have multiple interfaces you must be explicit.
#bind_addr = "[::]" # Listen on all IPv6
#bind_addr = "0.0.0.0" # Listen on all IPv4
bind_addr = "{{ ansible_facts['default_ipv4']['address'] }}"
# Advertise addr - if you want to point clients to a different address than bind or LB.
#advertise_addr = "127.0.0.1"
# Enterprise License
# As of 1.10, Enterprise requires a license_path and does not have a short trial.
#license_path = "/etc/consul.d/consul.hclic"
# bootstrap_expect
# This flag provides the number of expected servers in the datacenter. Either this value
# should not be provided or the value must agree with other servers in the cluster. When
# provided, Consul waits until the specified number of servers are available and then
# bootstraps the cluster. This allows an initial leader to be elected automatically.
# This cannot be used in conjunction with the legacy -bootstrap flag. This flag requires
# -server mode.
bootstrap_expect = 3
# encrypt
# Specifies the secret key to use for encryption of Consul network traffic. This key must
# be 32-bytes that are Base64-encoded. The easiest way to create an encryption key is to
# use consul keygen. All nodes within a cluster must share the same encryption key to
# communicate. The provided key is automatically persisted to the data directory and loaded
# automatically whenever the agent is restarted. This means that to encrypt Consul's gossip
# protocol, this option only needs to be provided once on each agent's initial startup
# sequence. If it is provided after Consul has been initialized with an encryption key,
# then the provided key is ignored and a warning will be displayed.
encrypt = "5/P+DSsvMz0ykNwzqLRts2pl5P1WJTslVHIE58usBQ0="
# retry_join
# Similar to -join but allows retrying a join until it is successful. Once it joins
# successfully to a member in a list of members it will never attempt to join again.
# Agents will then solely maintain their membership via gossip. This is useful for
# cases where you know the address will eventually be available. This option can be
# specified multiple times to specify multiple agents to join. The value can contain
# IPv4, IPv6, or DNS addresses. In Consul 1.1.0 and later this can be set to a go-sockaddr
# template. If Consul is running on the non-default Serf LAN port, this must be specified
# as well. IPv6 must use the "bracketed" syntax. If multiple values are given, they are
# tried and retried in the order listed until the first succeeds. Here are some examples:
#retry_join = ["consul.domain.internal"]
#retry_join = ["10.0.4.67"]
#retry_join = ["[::1]:8301"]
#retry_join = ["consul.domain.internal", "10.0.4.67"]
# Cloud Auto-join examples:
# More details - https://www.consul.io/docs/agent/cloud-auto-join
#retry_join = ["provider=aws tag_key=... tag_value=..."]
#retry_join = ["provider=azure tag_name=... tag_value=... tenant_id=... client_id=... subscription_id=... secret_access_key=..."]
#retry_join = ["provider=gce project_name=... tag_value=..."]
retry_join = ["172.16.23.21", "172.16.23.22", "172.16.23.23", "172.16.23.24", "172.16.23.25"]

View File

@@ -1,6 +0,0 @@
acl = {
enabled = true
default_policy = "allow"
enable_token_persistence = true
}

View File

@@ -1,15 +0,0 @@
{
"service": {
"ID": "nomad-ui-{{ ansible_nodename }}",
"Name": "nomad-ui",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.nomad-ui-http.rule=Host(`nomad.service.nr5`)"
],
"Port": 4646,
"Check": {
"HTTP": "http://{{ ansible_facts['default_ipv4']['address'] }}:4646/ui/",
"Interval": "60s"
}
}
}

View File

@@ -1,24 +0,0 @@
###
# Ansible
##
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
# Full configuration options can be found at https://www.nomadproject.io/docs/configuration
data_dir = "/opt/nomad/data"
bind_addr = "{{ ansible_facts['default_ipv4']['address'] }}"
datacenter = "nummer5"
server {
enabled = true
bootstrap_expect = 3
}
client {
enabled = true
servers = ["{{ ansible_facts['default_ipv4']['address'] }}"]
network_interface = "{{ ansible_facts['default_ipv4']['interface'] }}"
node_pool = "{{ nomad_pool }}"
}

View File

@@ -1,7 +0,0 @@
telemetry {
collection_interval = "10s"
disable_hostname = true
prometheus_metrics = true
publish_allocation_metrics = true
publish_node_metrics = true
}

View File

@@ -1,11 +0,0 @@
{
"service": {
"ID": "vault-ui-{{ ansible_nodename }}",
"Name": "vault-ui",
"Tags": [
"traefik.enable=true",
"traefik.http.routers.vault-ui-http.rule=HostSNI(`vault.service.nr5`)"
],
"Port": 8200
}
}

View File

@@ -1,41 +0,0 @@
- name: Install consul Package
apt:
state: present
name:
- consul
- name: Consul acl
  register: conf_acl
  copy:
    src: consul_acl.hcl
    dest: /etc/consul.d/acl.hcl
- name: Configure Consul
  register: conf_consul
  template:
    src: files/consul.hcl.j2
    dest: /etc/consul.d/consul.hcl
- name: Consul UI
  register: conf_consul_ui
  template:
    src: files/consul-ui.json.j2
    dest: /etc/consul.d/consul-ui.json
- name: Nomad UI
  register: conf_nomad_ui
  template:
    src: files/nomad-ui.json.j2
    dest: /etc/consul.d/nomad-ui.json
- name: Vault UI
  register: conf_vault_ui
  template:
    src: files/vault-ui.json.j2
    dest: /etc/consul.d/vault-ui.json
- name: Restart consul
  service:
    name: consul
    state: restarted
  when: conf_acl.changed or conf_consul.changed or conf_consul_ui.changed or conf_nomad_ui.changed or conf_vault_ui.changed
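
The register/when pattern above works once each task registers its own variable, but the idiomatic Ansible shape for "restart on any config change" is a handler: every task notifies it, and it runs at most once at the end of the play. A sketch (the handler would live in handlers/main.yaml; the same idiom fits nomad.yaml below):

- name: Configure Consul
  template:
    src: files/consul.hcl.j2
    dest: /etc/consul.d/consul.hcl
  notify: Restart consul

# handlers/main.yaml
- name: Restart consul
  service:
    name: consul
    state: restarted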

View File

@@ -1,16 +0,0 @@
- name: Hashicorp repo
include_tasks: repo.yaml
- name: Podman
include_role:
name: podman
- name: Java
include_role:
name: java
- name: Nomad
include_tasks: nomad.yaml
- name: Consul
include_tasks: consul.yaml

View File

@@ -1,26 +0,0 @@
- name: Install nomad Packages
apt:
state: present
name:
- nomad
- nomad-driver-podman
- name: Configure Nomad telemetry
  register: conf_telemetry
  copy:
    src: telemetry.hcl
    dest: /etc/nomad.d/telemetry.hcl
- name: Configure Nomad
  register: conf_nomad
  template:
    src: files/nomad.hcl.j2
    dest: /etc/nomad.d/nomad.hcl
- name: Restart nomad
  service:
    name: nomad
    state: restarted
  when: conf_telemetry.changed or conf_nomad.changed

View File

@@ -1,10 +0,0 @@
- name: Add Hashicorp Repo
block:
- name: Hashicorp GPG Key
ansible.builtin.get_url:
url: https://apt.releases.hashicorp.com/gpg
dest: /etc/apt/trusted.gpg.d/hashicorp-archive-keyring.asc
- name: Hashicorp Repo
ansible.builtin.apt_repository:
filename: hashicorp
repo: deb https://apt.releases.hashicorp.com bookworm main

View File

@@ -1,715 +0,0 @@
# The containers configuration file specifies all of the available configuration
# command-line options/flags for container engine tools like Podman & Buildah,
# but in a TOML format that can be easily modified and versioned.
# Please refer to containers.conf(5) for details of all configuration options.
# Not all container engines implement all of the options.
# All of the options have hard coded defaults and these options will override
# the built in defaults. Users can then override these options via the command
# line. Container engines will read containers.conf files in up to three
# locations in the following order:
# 1. /usr/share/containers/containers.conf
# 2. /etc/containers/containers.conf
# 3. $HOME/.config/containers/containers.conf (Rootless containers ONLY)
# Items specified in the latter containers.conf, if they exist, override the
# previous containers.conf settings, or the default settings.
[containers]
# List of annotation. Specified as
# "key = value"
# If it is empty or commented out, no annotations will be added
#
#annotations = []
# Used to change the name of the default AppArmor profile of container engine.
#
#apparmor_profile = "container-default"
# The hosts entries from the base hosts file are added to the containers hosts
# file. This must be either an absolute path or as special values "image" which
# uses the hosts file from the container image or "none" which means
# no base hosts file is used. The default is "" which will use /etc/hosts.
#
#base_hosts_file = ""
# Default way to create a cgroup namespace for the container
# Options are:
# `private` Create private Cgroup Namespace for the container.
# `host` Share host Cgroup Namespace with the container.
#
#cgroupns = "private"
# Control container cgroup configuration
# Determines whether the container will create CGroups.
# Options are:
# `enabled` Enable cgroup support within container
# `disabled` Disable cgroup support, will inherit cgroups from parent
# `no-conmon` Do not create a cgroup dedicated to conmon.
#
#cgroups = "enabled"
# List of default capabilities for containers. If it is empty or commented out,
# the default capabilities defined in the container engine will be added.
#
default_capabilities = [
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"NET_BIND_SERVICE",
"SETFCAP",
"SETGID",
"SETPCAP",
"SETUID",
"SYS_CHROOT"
]
# A list of sysctls to be set in containers by default,
# specified as "name=value",
# for example:"net.ipv4.ping_group_range=0 0".
#
default_sysctls = [
"net.ipv4.ping_group_range=0 0",
]
# A list of ulimits to be set in containers by default, specified as
# "<ulimit name>=<soft limit>:<hard limit>", for example:
# "nofile=1024:2048"
# See setrlimit(2) for a list of resource names.
# Any limit not specified here will be inherited from the process launching the
# container engine.
# Ulimits has limits for non privileged container engines.
#
#default_ulimits = [
# "nofile=1280:2560",
#]
# List of devices. Specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example:
# "/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the default devices will be used
#
#devices = []
# List of default DNS options to be added to /etc/resolv.conf inside of the container.
#
#dns_options = []
# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
#
#dns_searches = []
# Set default DNS servers.
# This option can be used to override the DNS configuration passed to the
# container. The special value "none" can be specified to disable creation of
# /etc/resolv.conf in the container.
# The /etc/resolv.conf file in the image will be used without changes.
#
#dns_servers = []
# Environment variable list for the conmon process; used for passing necessary
# environment variables to conmon or the runtime.
#
#env = [
# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
# "TERM=xterm",
#]
# Pass all host environment variables into the container.
#
#env_host = false
# Set the ip for the host.containers.internal entry in the containers /etc/hosts
# file. This can be set to "none" to disable adding this entry. By default it
# will automatically choose the host ip.
#
# NOTE: When using podman machine this entry will never be added to the containers
# hosts file; instead, the gvproxy dns resolver will resolve this hostname. Therefore
# it is not possible to disable the entry in this case.
#
#host_containers_internal_ip = ""
# Default proxy environment variables passed into the container.
# The environment variables passed in include:
# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
# these. This option is needed when host system uses a proxy but container
# should not use proxy. Proxy environment variables specified for the container
# in any other way will override the values passed from the host.
#
#http_proxy = true
# Run an init inside the container that forwards signals and reaps processes.
#
#init = false
# Container init binary, if init=true, this is the init binary to be used for containers.
#
#init_path = "/usr/libexec/podman/catatonit"
# Default way to create an IPC namespace (POSIX SysV IPC) for the container
# Options are:
# "host" Share host IPC Namespace with the container.
# "none" Create shareable IPC Namespace for the container without a private /dev/shm.
# "private" Create private IPC Namespace for the container, other containers are not allowed to share it.
# "shareable" Create shareable IPC Namespace for the container.
#
#ipcns = "shareable"
# keyring tells the container engine whether to create
# a kernel keyring for use within the container.
#
#keyring = true
# label tells the container engine whether to use container separation using
# MAC(SELinux) labeling or not.
# The label flag is ignored on label disabled systems.
#
#label = true
# Logging driver for the container. Available options: k8s-file and journald.
#
#log_driver = "k8s-file"
# Maximum size allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If positive, it must be >= 8192 to match or
# exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded.
#
#log_size_max = -1
# Specifies default format tag for container log messages.
# This is useful for creating a specific tag for container log messages.
# Containers logs default to truncated container ID as a tag.
#
#log_tag = ""
# Default way to create a Network namespace for the container
# Options are:
# `private` Create private Network Namespace for the container.
# `host` Share host Network Namespace with the container.
# `none` Containers do not use the network
#
#netns = "private"
# Create /etc/hosts for the container. By default, container engines manage
# /etc/hosts, automatically adding the container's own IP address.
#
#no_hosts = false
# Default way to create a PID namespace for the container
# Options are:
# `private` Create private PID Namespace for the container.
# `host` Share host PID Namespace with the container.
#
#pidns = "private"
# Maximum number of processes allowed in a container.
#
#pids_limit = 2048
# Copy the content from the underlying image into the newly created volume
# when the container is created instead of when it is started. If false,
# the container engine will not copy the content until the container is started.
# Setting it to true may have negative performance implications.
#
#prepare_volume_on_create = false
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
#
#seccomp_profile = "/usr/share/containers/seccomp.json"
# Size of /dev/shm. Specified as <number><unit>.
# Unit is optional, values:
# b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
# If the unit is omitted, the system uses bytes.
#
#shm_size = "65536k"
# Set timezone in container. Takes IANA timezones as well as "local",
# which sets the timezone in the container to match the host machine.
#
#tz = ""
# Set umask inside the container
#
#umask = "0022"
# Default way to create a User namespace for the container
# Options are:
# `auto` Create unique User Namespace for the container.
# `host` Share host User Namespace with the container.
#
#userns = "host"
# Number of UIDs to allocate for the automatic container creation.
# UIDs are allocated from the "container" UIDs listed in
# /etc/subuid & /etc/subgid
#
#userns_size = 65536
# Default way to create a UTS namespace for the container
# Options are:
# `private` Create private UTS Namespace for the container.
# `host` Share host UTS Namespace with the container.
#
#utsns = "private"
# List of volumes. Specified as
# "<directory-on-host>:<directory-in-container>:<options>", for example:
# "/db:/var/lib/db:ro".
# If it is empty or commented out, no volumes will be added
#
#volumes = []
[secrets]
#driver = "file"
[secrets.opts]
#root = "/example/directory"
[network]
# Network backend determines what network driver will be used to set up and tear down container networks.
# Valid values are "cni" and "netavark".
# The default value is empty which means that it will automatically choose CNI or netavark. If there are
# already containers/images or CNI networks preset it will choose CNI.
#
# Before changing this value all containers must be stopped otherwise it is likely that
# iptables rules and network interfaces might leak on the host. A reboot will fix this.
#
#network_backend = ""
# Path to directory where CNI plugin binaries are located.
#
#cni_plugin_dirs = [
# "/usr/local/libexec/cni",
# "/usr/libexec/cni",
# "/usr/local/lib/cni",
# "/usr/lib/cni",
# "/opt/cni/bin",
#]
# The network name of the default network to attach pods to.
#
#default_network = "podman"
# The default subnet for the default network given in default_network.
# If a network with that name does not exist, a new network using that name and
# this subnet will be created.
# Must be a valid IPv4 CIDR prefix.
#
#default_subnet = "10.88.0.0/16"
# DefaultSubnetPools is a list of subnets and size which are used to
# allocate subnets automatically for podman network create.
# It will iterate through the list and will pick the first free subnet
# with the given size. This is only used for ipv4 subnets, ipv6 subnets
# are always assigned randomly.
#
#default_subnet_pools = [
# {"base" = "10.89.0.0/16", "size" = 24},
# {"base" = "10.90.0.0/15", "size" = 24},
# {"base" = "10.92.0.0/14", "size" = 24},
# {"base" = "10.96.0.0/11", "size" = 24},
# {"base" = "10.128.0.0/9", "size" = 24},
#]
# Path to the directory where network configuration files are located.
# For the CNI backend the default is "/etc/cni/net.d" as root
# and "$HOME/.config/cni/net.d" as rootless.
# For the netavark backend "/etc/containers/networks" is used as root
# and "$graphroot/networks" as rootless.
#
#network_config_dir = "/etc/cni/net.d/"
# Port to use for dns forwarding daemon with netavark in rootful bridge
# mode and dns enabled.
# Using an alternate port might be useful if other dns services should
# run on the machine.
#
#dns_bind_port = 53
[engine]
# Index to the active service
#
#active_service = production
# The compression format to use when pushing an image.
# Valid options are: `gzip`, `zstd` and `zstd:chunked`.
#
#compression_format = "gzip"
# Cgroup management implementation used for the runtime.
# Valid options "systemd" or "cgroupfs"
#
#cgroup_manager = "systemd"
# Environment variables to pass into conmon
#
#conmon_env_vars = [
# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
#]
# Paths to look for the conmon container manager binary
#
#conmon_path = [
# "/usr/libexec/podman/conmon",
# "/usr/local/libexec/podman/conmon",
# "/usr/local/lib/podman/conmon",
# "/usr/bin/conmon",
# "/usr/sbin/conmon",
# "/usr/local/bin/conmon",
# "/usr/local/sbin/conmon"
#]
# Enforces using docker.io for completing short names in Podman's compatibility
# REST API. Note that this will ignore unqualified-search-registries and
# short-name aliases defined in containers-registries.conf(5).
#compat_api_enforce_docker_hub = true
# Specify the keys sequence used to detach a container.
# Format is a single character [a-Z] or a comma separated sequence of
# `ctrl-<value>`, where `<value>` is one of:
# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
#
#detach_keys = "ctrl-p,ctrl-q"
# Determines whether engine will reserve ports on the host when they are
# forwarded to containers. When enabled, when ports are forwarded to containers,
# ports are held open for as long as the container is running, ensuring that
# they cannot be reused by other programs on the host. However, this can cause
# significant memory usage if a container has many ports forwarded to it.
# Disabling this can save memory.
#
#enable_port_reservation = true
# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
# For example "http_proxy=internal.proxy.company.com".
# Note these environment variables will not be used within the container.
# Set the env section under [containers] table, if you want to set environment variables for the container.
#
#env = []
# Define where event logs will be stored, when events_logger is "file".
#events_logfile_path=""
# Sets the maximum size for events_logfile_path.
# The size can be b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
# The format for the size is `<number><unit>`, e.g., `1b` or `3g`.
# If no unit is included then the size will be read in bytes.
# When the limit is exceeded, the logfile will be rotated and the old one will be deleted.
# If the maximum size is set to 0, then no limit will be applied,
# and the logfile will not be rotated.
#events_logfile_max_size = "1m"
# Selects which logging mechanism to use for container engine events.
# Valid values are `journald`, `file` and `none`.
#
#events_logger = "journald"
# A list of directories which are used to search for helper binaries.
#
#helper_binaries_dir = [
# "/usr/local/libexec/podman",
# "/usr/local/lib/podman",
# "/usr/libexec/podman",
# "/usr/lib/podman",
#]
# Path to OCI hooks directories for automatically executed hooks.
#
#hooks_dir = [
# "/usr/share/containers/oci/hooks.d",
#]
# Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building
# container images. By default image pulled and pushed match the format of the
# source image. Building/committing defaults to OCI.
#
#image_default_format = ""
# Default transport method for pulling and pushing for images
#
#image_default_transport = "docker://"
# Maximum number of image layers to be copied (pulled/pushed) simultaneously.
# Not setting this field, or setting it to zero, will fall back to containers/image defaults.
#
#image_parallel_copies = 0
# Tells container engines how to handle the builtin image volumes.
# * bind: An anonymous named volume will be created and mounted
# into the container.
# * tmpfs: The volume is mounted onto the container as a tmpfs,
# which allows users to create content that disappears when
# the container is stopped.
# * ignore: All volumes are just ignored and no action is taken.
#
#image_volume_mode = ""
# Default command to run the infra container
#
#infra_command = "/pause"
# Infra (pause) container image name for pod infra containers. When running a
# pod, we start a `pause` process in a container to hold open the namespaces
# associated with the pod. This container does nothing other than sleep,
# reserving the pod's resources for the lifetime of the pod. By default container
# engines run a builtin container using the pause executable. If you want to
# override it, specify an image to pull.
#
#infra_image = ""
# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing, in general
# "file" is useful only on platforms where cgo is not available for using the
# faster "shm" lock type. You may need to run "podman system renumber" after
# you change the lock type.
#
#lock_type = "shm"
# MultiImageArchive - if true, the container engine allows for storing archives
# (e.g., of the docker-archive transport) with multiple images. By default,
# Podman creates single-image archives.
#
#multi_image_archive = "false"
# Default engine namespace
# If engine is joined to a namespace, it will see only containers and pods
# that were created in the same namespace, and will create new containers and
# pods in that namespace.
# The default namespace is "", which corresponds to no namespace. When no
# namespace is set, all containers and pods are visible.
#
#namespace = ""
# Path to the slirp4netns binary
#
#network_cmd_path = ""
# Default options to pass to the slirp4netns binary.
# Valid options values are:
#
# - allow_host_loopback=true|false: Allow the slirp4netns to reach the host loopback IP (`10.0.2.2`).
# Default is false.
# - mtu=MTU: Specify the MTU to use for this network. (Default is `65520`).
# - cidr=CIDR: Specify ip range to use for this network. (Default is `10.0.2.0/24`).
# - enable_ipv6=true|false: Enable IPv6. Default is true. (Required for `outbound_addr6`).
# - outbound_addr=INTERFACE: Specify the outbound interface slirp should bind to (ipv4 traffic only).
# - outbound_addr=IPv4: Specify the outbound ipv4 address slirp should bind to.
# - outbound_addr6=INTERFACE: Specify the outbound interface slirp should bind to (ipv6 traffic only).
# - outbound_addr6=IPv6: Specify the outbound ipv6 address slirp should bind to.
# - port_handler=rootlesskit: Use rootlesskit for port forwarding. Default.
# Note: Rootlesskit changes the source IP address of incoming packets to a IP address in the container
# network namespace, usually `10.0.2.100`. If your application requires the real source IP address,
# e.g. web server logs, use the slirp4netns port handler. The rootlesskit port handler is also used for
# rootless containers when connected to user-defined networks.
# - port_handler=slirp4netns: Use the slirp4netns port forwarding, it is slower than rootlesskit but
# preserves the correct source IP address. This port handler cannot be used for user-defined networks.
#
#network_cmd_options = []
# Whether to use chroot instead of pivot_root in the runtime
#
#no_pivot_root = false
# Number of locks available for containers and pods.
# If this is changed, a lock renumber must be performed (e.g. with the
# 'podman system renumber' command).
#
#num_locks = 2048
# Set the exit policy of the pod when the last container exits.
#pod_exit_policy = "continue"
# Whether to pull new image before running a container
#
#pull_policy = "missing"
# Indicates whether the application should be running in remote mode. This flag modifies the
# --remote option on container engines. Setting the flag to true will default
# `podman --remote=true` for access to the remote Podman service.
#
#remote = false
# Default OCI runtime
#
#runtime = "crun"
# List of the OCI runtimes that support --format=json. When json is supported
# engine will use it for reporting nicer errors.
#
#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"]
# List of the OCI runtimes that supports running containers with KVM Separation.
#
#runtime_supports_kvm = ["kata", "krun"]
# List of the OCI runtimes that supports running containers without cgroups.
#
#runtime_supports_nocgroups = ["crun", "krun"]
# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
# variable. If you specify "storage", then the location of the
# container/storage tmp directory will be used.
# image_copy_tmp_dir="/var/tmp"
# Number of seconds to wait without a connection
# before the `podman system service` times out and exits
#
#service_timeout = 5
# Directory for persistent engine files (database, etc)
# By default, this will be configured relative to where the containers/storage
# stores containers
# Uncomment to change location from this default
#
#static_dir = "/var/lib/containers/storage/libpod"
# Number of seconds to wait for container to exit before sending kill signal.
#
#stop_timeout = 10
# Number of seconds to wait before the exit command is given to the API process.
# This mimics Docker's exec cleanup behaviour, where the default is 5 minutes (value is in seconds).
#
#exit_command_delay = 300
# map of service destinations
#
#[service_destinations]
# [service_destinations.production]
# URI to access the Podman service
# Examples:
# rootless "unix://run/user/$UID/podman/podman.sock" (Default)
# rootful "unix://run/podman/podman.sock" (Default)
# remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
# remote rootful ssh://root@10.10.1.136:22/run/podman/podman.sock
#
# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
# Path to file containing ssh identity key
# identity = "~/.ssh/id_rsa"
# Directory for temporary files. Must be tmpfs (wiped after reboot)
#
#tmp_dir = "/run/libpod"
# Directory for libpod named volumes.
# By default, this will be configured relative to where containers/storage
# stores containers.
# Uncomment to change location from this default.
#
#volume_path = "/var/lib/containers/storage/volumes"
# Default timeout (in seconds) for volume plugin operations.
# Plugins are external programs accessed via a REST API; this sets a timeout
# for requests to that API.
# A value of 0 is treated as no timeout.
#volume_plugin_timeout = 5
# Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc)
[engine.runtimes]
#crun = [
# "/usr/bin/crun",
# "/usr/sbin/crun",
# "/usr/local/bin/crun",
# "/usr/local/sbin/crun",
# "/sbin/crun",
# "/bin/crun",
# "/run/current-system/sw/bin/crun",
#]
#kata = [
# "/usr/bin/kata-runtime",
# "/usr/sbin/kata-runtime",
# "/usr/local/bin/kata-runtime",
# "/usr/local/sbin/kata-runtime",
# "/sbin/kata-runtime",
# "/bin/kata-runtime",
# "/usr/bin/kata-qemu",
# "/usr/bin/kata-fc",
#]
#runc = [
# "/usr/bin/runc",
# "/usr/sbin/runc",
# "/usr/local/bin/runc",
# "/usr/local/sbin/runc",
# "/sbin/runc",
# "/bin/runc",
# "/usr/lib/cri-o-runc/sbin/runc",
#]
#runsc = [
# "/usr/bin/runsc",
# "/usr/sbin/runsc",
# "/usr/local/bin/runsc",
# "/usr/local/sbin/runsc",
# "/bin/runsc",
# "/sbin/runsc",
# "/run/current-system/sw/bin/runsc",
#]
#krun = [
# "/usr/bin/krun",
# "/usr/local/bin/krun",
#]
[engine.volume_plugins]
#testplugin = "/run/podman/plugins/test.sock"
[machine]
# Number of CPU's a machine is created with.
#
#cpus=1
# The size of the disk in GB created when init-ing a podman-machine VM.
#
#disk_size=10
# Default image URI when creating a new VM using `podman machine init`.
# Options: On Linux/Mac, `testing`, `stable`, `next`. On Windows, the major
# version of the OS (e.g `36`) for Fedora 36. For all platforms you can
# alternatively specify a custom download URL to an image. Container engines
# translate URIs $OS and $ARCH to the native OS and ARCH. URI
# "https://example.com/$OS/$ARCH/foobar.ami" becomes
# "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine.
# The default value is `testing`.
#
# image = "testing"
# Memory in MB a machine is created with.
#
#memory=2048
# The username to use and create on the podman machine OS for rootless
# container access.
#
#user = "core"
# Host directories to be mounted as volumes into the VM by default.
# Environment variables like $HOME as well as complete paths are supported for
# the source and destination. An optional third field `:ro` can be used to
# tell the container engines to mount the volume readonly.
#
# volumes = [
# "$HOME:$HOME",
#]
# The [machine] table MUST be the last entry in this file.
# (Unless another table is added)
# TOML does not provide a way to end a table other than a further table being
# defined, so every key hereafter will be part of [machine] and not the
# main config.
[farms]
#
# the default farm to use when farming out builds
# default = ""
#
# map of existing farms
#[farms.list]

View File

@@ -1,3 +0,0 @@
[[registry]]
location="cr.chaos"
insecure=true

View File

@@ -1,4 +0,0 @@
[[registry]]
prefix="docker.io"
location="dr-mirror.wks"
insecure=true

View File

@@ -1,195 +0,0 @@
# This file is the configuration file for all tools
# that use the containers/storage library.
# See man 5 containers-storage.conf for more information
# The "container storage" table contains all of the server options.
[storage]
# Default Storage Driver, Must be set for proper operation.
driver = "overlay"
# Temporary storage location
runroot = "/run/containers/storage"
# Primary Read/Write location of container storage
graphroot = "/var/lib/containers/storage"
# Storage path for rootless users
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
[storage.options]
# Storage options to be passed to underlying storage drivers
# AdditionalImageStores is used to pass paths to additional Read/Only image stores
# Must be comma separated list.
additionalimagestores = [
]
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
# remap-uids = 0:1668442479:65536
# remap-gids = 0:1668442479:65536
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
# with an in-container ID of 0 and then a host-level ID taken from the lowest
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
# until all of the entries have been used for maps.
#
# remap-user = "containers"
# remap-group = "containers"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to automatically create a user namespace. Containers
# configured to automatically create a user namespace can still overlap with containers
# having an explicit mapping set.
# This setting is ignored when running as rootless.
# root-auto-userns-user = "storage"
#
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
# auto-userns-min-size=1024
#
# Auto-userns-max-size is the maximum size for a user namespace created automatically.
# auto-userns-max-size=65536
[storage.options.overlay]
# ignore_chown_errors can be set to allow a non privileged user running with
# a single UID within a user namespace to run containers. The user can pull
# and use any image even those with multiple uids. Note multiple UIDs will be
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
#ignore_chown_errors = "false"
# Inodes is used to set a maximum inodes of the container image.
# inodes = ""
# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
mount_program = "/usr/bin/fuse-overlayfs"
# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"
# Size is used to set a maximum size of the container image.
# size = ""
# ForceMask specifies the permissions mask that is used for new files and
# directories.
#
# The values "shared" and "private" are accepted.
# Octal permission masks are also accepted.
#
# "": No value specified.
# All files/directories, get set with the permissions identified within the
# image.
# "private": it is equivalent to 0700.
# All files/directories get set with 0700 permissions. The owner has rwx
# access to the files. No other users on the system can access the files.
# This setting could be used with networked based homedirs.
# "shared": it is equivalent to 0755.
# The owner has rwx access to the files and everyone else can read, access
# and execute them. This setting is useful for sharing containers storage
# with other users. For instance have a storage owned by root but shared
# to rootless users as an additional store.
# NOTE: All files within the image are made readable and executable by any
# user on the system. Even /etc/shadow within your image is now readable by
# any user.
#
# OCTAL: Users can experiment with other OCTAL Permissions.
#
# Note: The force_mask Flag is an experimental feature, it could change in the
# future. When "force_mask" is set the original permission mask is stored in
# the "user.containers.override_stat" xattr and the "mount_program" option must
# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
# extended attribute permissions to processes within containers rather than the
# "force_mask" permissions.
#
# force_mask = "shared"
[storage.options.thinpool]
# Storage Options for thinpool
# autoextend_percent determines the amount by which pool needs to be
# grown. This is specified in terms of % of pool size. So a value of 20 means
# that when threshold is hit, pool will be grown by 20% of existing
# pool size.
# autoextend_percent = "20"
# autoextend_threshold determines the pool extension threshold in terms
# of percentage of pool size. For example, if threshold is 60, that means when
# pool is 60% full, threshold has been hit.
# autoextend_threshold = "80"
# basesize specifies the size to use when creating the base device, which
# limits the size of images and containers.
# basesize = "10G"
# blocksize specifies a custom blocksize to use for the thin pool.
# blocksize="64k"
# directlvm_device specifies a custom block storage device to use for the
# thin pool. Required if you setup devicemapper.
# directlvm_device = ""
# directlvm_device_force wipes device even if device already has a filesystem.
# directlvm_device_force = "True"
# fs specifies the filesystem type to use for the base device.
# fs="xfs"
# log_level sets the log level of devicemapper.
# 0: LogLevelSuppress 0 (Default)
# 2: LogLevelFatal
# 3: LogLevelErr
# 4: LogLevelWarn
# 5: LogLevelNotice
# 6: LogLevelInfo
# 7: LogLevelDebug
# log_level = "7"
# min_free_space specifies the min free space percent in a thin pool required for
# new device creation to succeed. Valid values are from 0% - 99%.
# Value 0% disables
# min_free_space = "10%"
# mkfsarg specifies extra mkfs arguments to be used when creating the base
# device.
# mkfsarg = ""
# metadata_size is used to set the `pvcreate --metadatasize` options when
# creating thin devices. Default is 128k
# metadata_size = ""
# Size is used to set a maximum size of the container image.
# size = ""
# use_deferred_removal marks devicemapper block device for deferred removal.
# If the thinpool is in use when the driver attempts to remove it, the driver
# tells the kernel to remove it as soon as possible. Note this does not free
# up the disk space, use deferred deletion to fully remove the thinpool.
use_deferred_removal = "True"
# use_deferred_deletion marks thinpool device for deferred deletion.
# If the device is busy when the driver attempts to delete it, the driver
# will attempt to delete device every 30 seconds until successful.
# If the program using the driver exits, the driver will continue attempting
# to cleanup the next time the driver is used. Deferred deletion permanently
# deletes the device and all data stored in device will be lost.
use_deferred_deletion = "True"
# xfs_nospace_max_retries specifies the maximum number of retries XFS should
# attempt to complete IO when ENOSPC (no space) error is returned by
# underlying storage device.
# xfs_nospace_max_retries = "0"

View File

@@ -1,55 +0,0 @@
- name: Install Podman packages
apt:
state: present
name:
- podman
- podman-docker
- podman-compose
- containernetworking-plugins
- crun
- fuse-overlayfs
- slirp4netns
- name: Disable Docker Emulation warning
file:
path: /etc/containers/nodocker
state: touch
    mode: '0644'
modification_time: preserve
access_time: preserve
- name: Configure containers.conf
copy:
dest: /etc/containers/containers.conf
src: containers.conf
- name: Configure storage.conf
copy:
dest: /etc/containers/storage.conf
src: storage.conf
- name: Configure cr.chaos registry
copy:
dest: /etc/containers/registries.conf.d/cr.chaos.conf
src: cr.chaos.conf
#- name: Configure docker-mirror registry
# copy:
# dest: /etc/containers/registries.conf.d/dr-mirror.wks.conf
# src: dr-mirror.wks.conf
#- name: Remote NFS volumes
# block:
# - name: Create mount Point
# file:
# path: /var/lib/containers/storage/volumes
# state: directory
# owner: root
# group: root
# - name: Mount containers from NFS
# ansible.posix.mount:
# src: ebin01.wks:/data/raid1-ssd/node-data/{{ ansible_nodename }}-podman-volumes
# path: /var/lib/containers/storage/volumes
# opts: auto,_netdev,defaults,x-systemd.wanted-by=podman.service,x-systemd.wanted-by=nomad.service
# state: mounted
# fstype: nfs
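
A cheap end-to-end check after these tasks is to ask the engine for its runtime info; podman info also exercises the containers.conf and storage.conf parsing, so a malformed file surfaces immediately (a sketch, not part of the original role):

- name: Verify Podman configuration parses
  command: podman info
  changed_when: false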

View File

@@ -1,25 +0,0 @@
- name: Install node-exporter
become: true
become_user: admin
homebrew:
state: present
name:
- node_exporter
# TODO: Fix this
#- name: Check service running
# become: true
# become_user: admin
# command: "/opt/homebrew/bin/brew services info --json node_exporter"
# register: service_running
# ignore_errors: True
#
#- name: Debug
# debug:
# msg: "{{ service_running }}"
#
#- name: Start service
# become: true
# become_user: admin
# command: "/opt/homebrew/bin/brew services start node_exporter"
# when: (service_running.stdout | from_json | first).status != "started"

View File

@@ -1,5 +0,0 @@
- name: Install node-exporter
apt:
state: present
name:
- prometheus-node-exporter

View File

@@ -1,2 +0,0 @@
- name: Prometheus Node Exporter
include_tasks: "{{ ansible_system | lower }}.yaml"

View File

@@ -1,13 +0,0 @@
- name: Install sudo
apt:
name: sudo
state: present
- name: Ensure admini user
  user:
    name: admini
    comment: Administrative User
password: $6$WmyMaztTbqI6Ga19$AADxgXtK.3q/ne0v.rimlKWRmZoX0bXApWRmoVTiQAdcGrfMwiInDHWElz5zfQiBGTZM6wOnSletT9JfdO6Zl0
groups:
- sudo
append: true
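
Membership in the sudo group still prompts for the user's password. If passwordless sudo were wanted for this account, the usual pattern is a validated drop-in, where visudo -cf rejects a broken file before it is installed (a sketch, not part of the original role):

- name: Passwordless sudo for admini
  copy:
    dest: /etc/sudoers.d/admini
    content: "admini ALL=(ALL) NOPASSWD: ALL\n"
    mode: '0440'
    validate: visudo -cf %s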

View File

@@ -1,73 +0,0 @@
- hosts: all
gather_facts: true
roles:
- role: common
tags: common
- hosts: auto02.chaos
roles:
- role: podman
- hosts: pine01.wks
vars:
nomad_pool: sys
roles:
- role: nummer5
tags: nummer5
- role: armbian
tags: armbian
- role: debian
tags: debian
- hosts: pine02.wks
vars:
nomad_pool: apps
roles:
- role: nummer5
tags: nummer5
- role: armbian
tags: armbian
- role: debian
tags: debian
- hosts: pine03.wks
vars:
nomad_pool: apps
roles:
- role: nummer5
tags: nummer5
- role: armbian
tags: armbian
- role: debian
tags: debian
- hosts: pine04.wks
vars:
nomad_pool: apps
roles:
- role: nummer5
tags: nummer5
- role: armbian
tags: armbian
- role: debian
tags: debian
- hosts: pine05.wks
vars:
nomad_pool: sys
roles:
- role: nummer5
tags: nummer5
- role: armbian
tags: armbian
- role: debian
tags: debian
#- hosts: ~pine.*\.wks
# roles:
# - role: nummer5
# tags: nummer5
# - role: armbian
# tags: armbian
# - role: debian
# tags: debian
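
The five pine blocks differ only in nomad_pool, which is presumably why the commented ~pine.*\.wks pattern above never replaced them. Moving the variable into the inventory would make the pattern viable (a sketch using the hosts and pool assignments from this commit; host_vars files would work the same way):

# inventory: set nomad_pool per host
wks:
  hosts:
    pine01.wks:
      nomad_pool: sys
    pine03.wks:
      nomad_pool: apps
    pine05.wks:
      nomad_pool: sys

# site.yaml
- hosts: ~pine.*\.wks
  roles:
    - role: nummer5
      tags: nummer5
    - role: armbian
      tags: armbian
    - role: debian
      tags: debian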