Void Linux

Administration

Create a service user and group
groupadd -g 10101 -r vaultwarden
useradd -d /var/lib/vaultwarden -g vaultwarden -N -u 10101 -r vaultwarden

Installation

Kernel

6.1
#xbps-install --force linux6.1 linux6.1-headers
#xbps-reconfigure -f linux6.1
xbps-install linux6.1 linux6.1-headers
xbps-reconfigure -f zfsbootmenu

dracut zfs module

Failed to find module 'zfs'
dracut-install: Failed to find module 'zfs'
dracut: FAILED:  /usr/lib/dracut/dracut-install -D /var/tmp/dracut.GCXLRn/initramfs --kerneldir /lib/modules/6.1.40_1/ -m zfs
dracut: installkernel failed in module zfs
fix
ls /usr/src/zfs-2.1.12
dkms add zfs/2.1.12
# Creating symlink /var/lib/dkms/zfs/2.1.12/source -> /usr/src/zfs-2.1.12
xbps-reconfigure -f linux6.1
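Optionally verify that the module is now registered with DKMS before rebuilding the initramfs; dkms status lists the registered modules and their state per kernel.
dkms status
# a zfs/2.1.12 entry should show up for the running kernel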

Configuration

ntp
xbps-install chrony
ln -s /etc/sv/chronyd /var/service/
lastlog
touch /var/log/lastlog
chgrp utmp /var/log/lastlog
chmod 664 /var/log/lastlog

socklog as a syslog alternative

Installation
xbps-install socklog{,-void}
ln -s /etc/sv/socklog-unix /var/service/
ln -s /etc/sv/nanoklogd /var/service/

metalog as a syslog alternative

Installation
xbps-install metalog
ln -s /etc/sv/metalog /var/service/

ZFS Kernel Snapshots

/etc/kernel.d/pre-install/10-kernel-clean
#!/bin/sh

# Find the name of the current boot environment
BOOTENV="$(awk '$2 == "/" && $3 == "zfs" {print $1}' /proc/mounts)"
[ -n "${BOOTENV}" ] || exit

# Create a snapshot of the current state, differentiated by time
zfs snapshot "${BOOTENV}@kernel_upgrade_$(date +%Y-%m-%d_%H:%M:%S)" || exit

# Prune all except 2 last kernel_upgrade snapshots
zfs list -t snapshot -s creation -o name -H "${BOOTENV}" | \
  grep @kernel_upgrade_ | head -n -2 | \
  while read -r snapname; do
    zfs destroy "${snapname}"
  done

# Prune the old kernels
vkpurge rm all
chmod a+x /etc/kernel.d/pre-install/10-kernel-clean

Firewall

nftables
xbps-install nftables
ln -sv /etc/sv/nftables /var/service/
/etc/nftables.conf
#!/usr/sbin/nft -f

# This is somewhat important, otherwise it will just append to your existing
# rules. This can be somewhat confusing unless you run `nft list table inet
# filter` or similar
flush ruleset

table inet filter {
  chain input {
    type filter hook input priority 0;

    # Allow all input on loopback
    iif lo accept

    # Accept stateful traffic
    ct state established,related accept

    # Accept SSH
    tcp dport { 22, 65535 } accept

    # Accept HTTP and HTTPs
    tcp dport { 80, 443 } accept

    # Allow some icmp traffic for ipv6
    ip6 nexthdr icmpv6 icmpv6 type {
      nd-neighbor-solicit, echo-request,
      nd-router-advert, nd-neighbor-advert
    } accept

    counter drop
  }
  chain forward {
    type filter hook forward priority 0;
  }
  chain output {
    type filter hook output priority 0;
  }
}
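To syntax-check the rules before (re)starting the service and to see what is actually loaded, nft can be used directly; -c only parses the file without applying it.
nft -c -f /etc/nftables.conf
nft -f /etc/nftables.conf
nft list ruleset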

runit

  • sv start == sv -v up
  • sv down first runs control/t and then control/d (a control/t sketch follows below this list)
  • vlogger can be used as a log service, read the manpage
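A minimal control/t sketch for a hypothetical service; the dienst-ctl helper is made up, the point is only the contract: if the script exits 0, runsv refrains from sending the TERM signal itself.
#!/bin/sh
# run by runsv before it would send TERM (e.g. on sv down or sv term)
# exit 0 to suppress the default TERM signal, non-zero to let runsv send it anyway
exec /usr/local/bin/dienst-ctl graceful-stop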

service

Test a service
touch /etc/sv/<service>/down
ln -s /etc/sv/<service> /var/service/
sv once <service>
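If the test run looks good, remove the down file so the service starts automatically and bring it up.
rm /etc/sv/<service>/down
sv up <service>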
Create a service
set SERVICE dienst
set SV_DIR /etc/sv/$SERVICE
mkdir -p $SV_DIR/log
ln -s /usr/bin/vlogger $SV_DIR/log/run
vim $SV_DIR/run
chmod 0755 $SV_DIR/run

# only needed if you want to set environment variables
mkdir -p $SV_DIR/envdir
echo '/var/lib/dienst' > $SV_DIR/envdir/HOME
echo '/var/lib' | tee $SV_DIR/envdir/{XDG_CONFIG_HOME,XDG_DATA_HOME} > /dev/null
/etc/sv/dienst/run
#!/bin/sh
exec 2>&1
USER='dienst'
GROUP='dienst'
BIN='/usr/local/bin/dienst'
# only needed if you want to set environment variables
ENVDIR='/etc/sv/dienst/envdir'
# e.g. test the configuration first
env -i chpst -u ${USER}:${GROUP} ${BIN} -c /etc/dienst/config.yaml -t || exit $?
# start a dependent service
sv start dienst_1 || exit 1
exec env -i chpst -u ${USER}:${GROUP} ${BIN} -c /etc/dienst/config.yaml
# or, to set environment variables
exec env -i chpst -u ${USER}:${GROUP} -e ${ENVDIR} ${BIN} -c /etc/dienst/config.yaml

CrowdSec

General information is documented on the CrowdSec page.

Build for musl in a Docker container

The vmgb image is self-built: just a minimal musl Void with the required packages. That way my actual system does not get flooded with packages it does not need.
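Roughly what such a build image can look like, as a sketch only: the base image reference and the package list are assumptions and need to be adapted, this is not the actual vmgb Dockerfile.
# assumption: any minimal Void musl base image works, adjust the reference as needed
FROM ghcr.io/void-linux/void-musl-full
RUN xbps-install -Syu && xbps-install -y make git go gcc
WORKDIR /build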

crowdsec ⇐ 1.4.3
git clone https://github.com/crowdsecurity/crowdsec.git
docker run --rm -it -v "$PWD/crowdsec":/build vmgb:latest sh
# whiptail -> newt, envsubst -> gettext (required tool -> package that provides it)
xbps-install -y gcc newt gettext
export BUILD_VERSION="v1.4.1"
make release
firewall bouncer ⇐ 0.0.24
git clone https://github.com/crowdsecurity/cs-firewall-bouncer.git
docker run --rm -it -v "$PWD/cs-firewall-bouncer":/build vmgb:latest make release

Installation

When using wizard.sh, the --docker-mode switch should be used. Among other things, it skips the systemd setup. The script uses envsubst to generate the configuration files; it is part of the XBPS package gettext.
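A possible invocation from the crowdsec source tree; the exact flags can differ between versions, ./wizard.sh --help shows what is available.
# envsubst comes from gettext
xbps-install gettext
./wizard.sh --docker-mode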

Two parameters have to be adjusted in config.yaml or config.yaml.local so that runit can be used: daemonize: false and pid_dir: ''.
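A minimal config.yaml.local fragment with just these two overrides; in the crowdsec configuration they normally sit under the common: section, everything else stays in config.yaml.
common:
  daemonize: false
  pid_dir: ''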

cs-firewall-bouncer
# envsubst
xbps-install gettext
tar xf crowdsec-firewall-bouncer.tgz
cd crowdsec-firewall-bouncer-v*/
./install.fish
# daemonize: false, set-only: true
# supported_decisions_types: ban, captcha, throttle
vim /etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml
install.fish
#!/usr/bin/env fish

set BIN_PATH_INSTALLED "/usr/local/bin/crowdsec-firewall-bouncer"
set BIN_PATH "./crowdsec-firewall-bouncer"
set CONFIG_DIR "/etc/crowdsec/bouncers"
set FW_BACKEND "nftables"

install -v -m 755 -D "$BIN_PATH" "$BIN_PATH_INSTALLED"
mkdir -p "$CONFIG_DIR"
install -v -m 0600 "./config/crowdsec-firewall-bouncer.yaml" "$CONFIG_DIR/crowdsec-firewall-bouncer.yaml"

set SUFFIX (tr -dc A-Za-z0-9 </dev/urandom | head -c 8)
set API_KEY (cscli bouncers add cs-firewall-bouncer-$SUFFIX -o raw)

API_KEY=$API_KEY BACKEND=$FW_BACKEND envsubst < ./config/crowdsec-firewall-bouncer.yaml | install -m 0600 /dev/stdin "$CONFIG_DIR/crowdsec-firewall-bouncer.yaml"

runit service

crowdsec-firewall-bouncer
set SV_DIR /etc/sv/crowdsec-firewall-bouncer
mkdir -p $SV_DIR/log
ln -s /usr/bin/vlogger $SV_DIR/log/run
vim $SV_DIR/run
chmod 0755 $SV_DIR/run
/etc/sv/crowdsec-firewall-bouncer/run
#!/bin/sh
exec 2>&1
BIN=/usr/local/bin/crowdsec-firewall-bouncer
$BIN -c /etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml -t || exit $?
sv start crowdsec || exit 1
sv start nftables || exit 1
exec $BIN -c /etc/crowdsec/bouncers/crowdsec-firewall-bouncer.yaml
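Enable the bouncer service once crowdsec and nftables are linked into /var/service.
ln -s /etc/sv/crowdsec-firewall-bouncer /var/service/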

caddy

General information is documented on the Caddy Server page.

runit service
set SV_DIR /etc/sv/caddy
mkdir -p $SV_DIR/{log,envdir}
ln -s /usr/bin/vlogger $SV_DIR/log/run
echo '/var/lib/caddy' > $SV_DIR/envdir/HOME
echo '/var/lib' | tee $SV_DIR/envdir/{XDG_CONFIG_HOME,XDG_DATA_HOME} > /dev/null
vim $SV_DIR/run
chmod 0755 $SV_DIR/run
ln -s /etc/sv/caddy /var/service
/etc/sv/caddy/run
#!/bin/sh
exec 2>&1
BIN=/usr/local/bin/caddy
CONF=/etc/caddy
ENVDIR=/etc/sv/caddy/envdir
cd $CONF
env -i chpst -u caddy:caddy -e $ENVDIR $BIN validate || exit $?
exec env -i chpst -u caddy:caddy -e $ENVDIR $BIN run
/etc/sv/caddy/control/h
#!/bin/sh
/usr/local/bin/caddy reload --config /etc/caddy/Caddyfile
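With control/h in place the reload can be triggered through runit; runsv runs the script instead of delivering HUP as long as it exits 0.
sv hup caddy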

Docker

Installation
xbps-install docker docker-compose
ln -s /etc/sv/docker /var/service/
/etc/sv/docker/conf
OPTS="--iptables=false"
/etc/nftables.conf
table ip nat {
  chain prerouting {
    type nat hook prerouting priority 0;
  }
  chain postrouting {
    type nat hook postrouting priority 100;
    # You may need to change 'eth0' to your primary interface
    oif eth0 masquerade persistent
  }
}
/etc/docker/daemon.json for runit logging
{
  "log-driver": "local"
}

runit service for Docker Compose

service
set SV_DIR /etc/sv/container
mkdir -p $SV_DIR/{log,control}
ln -s /usr/bin/vlogger $SV_DIR/log/run
nvim $SV_DIR/run
nvim $SV_DIR/control/d
chmod 0755 $SV_DIR/run $SV_DIR/control/d
ln -s $SV_DIR /var/service
/etc/sv/container/run
#!/bin/sh
exec 2>&1
cd /var/lib/container
export UID=$(id -u container)
export GID=$(id -g container)
sv -v up docker || exit 1
exec /bin/docker compose up --no-log-prefix
/etc/sv/container/control/d
#!/bin/sh
cd /var/lib/container
/bin/docker compose down
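The exported UID/GID pair can then be used in the compose file, for example to run the container process as the unprivileged container user. A hypothetical minimal /var/lib/container/docker-compose.yml; image and service name are placeholders.
services:
  app:
    image: example/app:latest
    user: "${UID}:${GID}"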

podman

Installation
xbps-install podman podman-compose
nvim /etc/containers/storage.conf
zfs create zroot/var/lib/containers
zfs create zroot/var/lib/containers/storage
chmod -R 0600 /var/lib/containers
/etc/containers/storage.conf
[storage]
driver = "zfs"
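A quick check that podman actually uses the zfs driver; the Go template field name is an assumption based on current podman releases, plain podman info shows the full output otherwise.
podman info --format '{{.Store.GraphDriverName}}'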

References