
helpers to prepare a k8s node on Hetzner

This commit is contained in:
Earl Warren 2024-10-19 16:13:24 +02:00
parent 4f90ea7af5
commit 8947b16ce6
No known key found for this signature in database
GPG key ID: 0579CB2928A78A00
11 changed files with 443 additions and 213 deletions

3
.gitignore vendored Normal file

@ -0,0 +1,3 @@
*~
k3s-host/secrets.sh
k3s-host/variables.sh

286
README.md

@ -790,33 +790,19 @@ stream {
https://hetzner05.forgejo.org & https://hetzner06.forgejo.org run on [EX44](https://www.hetzner.com/dedicated-rootserver/ex44) Hetzner hardware.
#### LXC
#### Imaging
```sh
lxc-helpers.sh lxc_install_lxc_inside 10.47.3 fc11
```
Using installimage from the rescue instance.
#### NFS
- `wipefs -fa /dev/nvme*n1`
- `installimage -r no -n hetzner0?`
- Debian bookworm
- `PART / ext4 100G`
- `PART /srv ext4 all`
- ESC 0 + yes
- reboot
[server](https://wiki.archlinux.org/title/NFS).
```sh
sudo apt install nfs-kernel-server nfs-common
cat <<EOF | sudo tee -a /etc/exports
/precious 10.53.101.0/24(rw,fsid=0,no_root_squash,no_subtree_check)
/precious/k8s 10.53.101.0/24(rw,nohide,insecure,no_subtree_check)
EOF
sudo exportfs -av
sudo exportfs -s
```
[client](https://wiki.archlinux.org/title/NFS).
```sh
sudo apt install nfs-common
```
#### Disk partitioning
Partitioning.
- First disk
- OS
@ -824,149 +810,83 @@ sudo apt install nfs-common
- Second disk
- a partition configured with DRBD
#### Root filesystem backups
Debian user.
- `hetzner05:/etc/cron.daily/backup-hetzner06`
`rsync -aHSv --delete-excluded --delete --numeric-ids --exclude /proc --exclude /dev --exclude /sys --exclude /srv --exclude /var/lib/lxc 10.53.100.6:/ /srv/backups/hetzner06/`
- `hetzner06:/etc/cron.daily/backup-hetzner05`
`rsync -aHSv --delete-excluded --delete --numeric-ids --exclude /proc --exclude /dev --exclude /sys --exclude /srv --exclude /var/lib/lxc 10.53.100.5:/ /srv/backups/hetzner05/`
- `ssh root@hetzner0?.forgejo.org`
- `useradd --shell /bin/bash --create-home --groups sudo debian`
- `mkdir -p /home/debian/.ssh ; cp -a .ssh/authorized_keys /home/debian/.ssh ; chown -R debian /home/debian/.ssh`
- in `/etc/sudoers` edit `%sudo ALL=(ALL:ALL) NOPASSWD:ALL`
#### Failover IP addresses
#### Install helpers
The failover IP addresses are configured on all hosts.
```
auto enp5s0
iface enp5s0 inet static
...
up ip addr add 188.40.16.47/32 dev enp5s0
iface enp5s0 inet6 static
...
up ip addr add 2a01:4f8:fff2:48::2/64 dev enp5s0
```
#### k8s node
The `10.88.1.5` and `fd10::5` IPs are assigned to the interface with VLAN 4002.
```
auto enp5s0.4002
iface enp5s0.4002 inet static
address 10.88.1.5
netmask 255.255.0.0
vlan-raw-device enp5s0
mtu 1400
up ip addr add fd10::5/48 dev enp5s0.4002
```
### k8s first server node
Each node is identified by the last digit of the hostname.
```sh
sudo apt-get install curl
master_node_ip=10.88.1.5,fd10::5
curl -fL https://get.k3s.io | sh -s - server --cluster-init --disable=servicelb --write-kubeconfig-mode=644 --node-ip=$master_node_ip --cluster-cidr=10.42.0.0/16,fd01::/48 --service-cidr=10.43.0.0/16,fd02::/112 --flannel-ipv6-masq
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -
sudo apt-get install git etckeeper
git clone https://code.forgejo.org/infrastructure/documentation
cd documentation/k3s-host
cp variables.sh.example variables.sh
cp secrets.sh.example secrets.sh
```
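This identifier is derived from the hostname by the helper scripts; a minimal sketch of the derivation used in `k3s-host/setup.sh` (assuming hostnames of the form `hetzner0<N>`):
```sh
# node identifier, e.g. 5 for hetzner05 (same derivation as in k3s-host/setup.sh)
self_node=$(hostname | sed -e 's/hetzner0//')
```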
### k8s second server node
The variables that must be set depend on the role of the node:
- first server node
- secrets.sh: node_drbd_shared_secret
- other server node
- secrets.sh: node_drbd_shared_secret
- secrets.sh: node_k8s_token: content of /var/lib/rancher/k3s/server/token on the first node
- variables.sh: node_k8s_existing: identifier of the first node (e.g. 5)
- etcd node
- secrets.sh: node_k8s_token: content of /var/lib/rancher/k3s/server/token on the first node
- variables.sh: node_k8s_existing: identifier of the first node (e.g. 5)
- variables.sh: node_k8s_etcd: identifier of the node whose role is just etcd (e.g. 3)
The other variables depend on the setup.
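For illustration, a hedged sketch of the role-specific entries for a server node joining an existing cluster (all values below are placeholders):
```sh
# secrets.sh (placeholders)
node_drbd_shared_secret=***
node_k8s_token=***   # content of /var/lib/rancher/k3s/server/token on the first node

# variables.sh, in addition to the defaults from variables.sh.example
node_k8s_existing=5   # identifier of the first node
```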
#### Firewall
`./setup.sh setup_ufw`
#### DRBD
DRBD is [configured](https://linbit.com/drbd-user-guide/drbd-guide-9_0-en/#p-work) with:
`./setup.sh setup_drbd`
Once two nodes have DRBD set up for the first time, it can be initialized by [pretending all is in sync](https://linbit.com/drbd-user-guide/drbd-guide-9_0-en/#s-skip-initial-resync) to skip the initial bitmap sync, since there is no data yet.
The token is found on the first node in the `/var/lib/rancher/k3s/server/token` file.
```sh
sudo apt-get install curl
token=???
master_ip=10.88.1.5
second_node_ip=10.88.1.6,fd10::6
curl -fL https://get.k3s.io | sh -s - server --token $token --server https://$master_ip:6443 --cluster-init --disable=servicelb --write-kubeconfig-mode=644 --node-ip=$second_node_ip --cluster-cidr=10.42.0.0/16,fd01::/48 --service-cidr=10.43.0.0/16,fd02::/112 --flannel-ipv6-masq
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -
kubectl taint nodes $(hostname) key1=value1:NoSchedule
sudo drbdadm primary r1
sudo drbdadm new-current-uuid --clear-bitmap r1/0
sudo mount /precious
```
### k8s dedicated etcd node
#### NFS
[dedicated etcd node](https://docs.k3s.io/installation/server-roles#dedicated-etcd-nodes)
`./setup.sh setup_nfs`
The token is found on one of the master nodes in the `/var/lib/rancher/k3s/server/token` file.
On the node that has the DRBD volume `/precious` mounted, set the IP of the NFS server to be used by k8s:
```sh
master_ip=10.88.1.5
etcd_node_ip=10.88.1.3,fd10::3
curl -sfL https://get.k3s.io | sh -s - server --token "$token" --server https://$master_ip:6443 --cluster-init --disable=servicelb --disable-apiserver --disable-controller-manager --disable-scheduler --write-kubeconfig-mode=644 --node-ip=$etcd_node_ip --cluster-cidr=10.42.0.0/16,fd01::/48 --service-cidr=10.43.0.0/16,fd02::/112 --flannel-ipv6-masq
kubectl taint nodes $(hostname) key1=value1:NoSchedule
sudo ip addr add 10.53.101.100/24 dev enp5s0.4001
```
### k8s networking
#### K8S
[cert-manager](https://cert-manager.io/).
For the first node run `./setup.sh setup_k8s`. For nodes joining the cluster run `./setup.sh setup_k8s 6`, where `6` is the identifier of an existing node (here `hetzner06`).
```
helm install mycertmanager --set installCRDs=true oci://registry-1.docker.io/bitnamicharts/cert-manager
# wait a few seconds
cat > clusterissuer.yml <<EOF
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-http
spec:
acme:
email: contact@forgejo.org
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-http
solvers:
- http01:
ingress:
class: traefik
EOF
kubectl apply --server-side=true -f clusterissuer.yml
```
- [metallb](https://metallb.universe.tf) instead of the default load balancer, because the default does not allow a public IP different from the `k8s` node IP.
`./setup.sh setup_k8s_metallb`
- [traefik](https://traefik.io/) requests specific IPs from `metallb` with [annotations](https://github.com/traefik/traefik-helm-chart/blob/7a13fc8a61a6ad30fcec32eec497dab9d8aea686/traefik/values.yaml#L736).
`./setup.sh setup_k8s_traefik`
- [cert-manager](https://cert-manager.io/).
`./setup.sh setup_k8s_certmanager`
- NFS storage class
`./setup.sh setup_k8s_nfs`
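Put together, a plausible sequence of the helpers on a fresh server node, assuming `variables.sh` and `secrets.sh` are filled in (the last four steps are cluster-wide and only need to run once per cluster):
```sh
./setup.sh setup_ufw
./setup.sh setup_drbd
./setup.sh setup_nfs
./setup.sh setup_k8s           # or ./setup.sh setup_k8s 6 when joining via hetzner06
./setup.sh setup_k8s_metallb
./setup.sh setup_k8s_traefik
./setup.sh setup_k8s_certmanager
./setup.sh setup_k8s_nfs
```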
`k3s` is configured to use [metallb](https://metallb.universe.tf) instead of the default load balancer, because the default does not allow a public IP different from the `k8s` node IP.
[metallb](https://metallb.universe.tf).
```
helm install metallb --set installCRDs=true metallb/metallb
cat > metallb.yaml <<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: first-pool
spec:
addresses:
- 188.40.16.47/32
- 2a01:4f8:fff2:48::0/64
EOF
sleep 120 ; kubectl apply --server-side=true -f metallb.yaml
```
[traefik](https://traefik.io/) requests specific IPs from `metallb` with [annotations](https://github.com/traefik/traefik-helm-chart/blob/7a13fc8a61a6ad30fcec32eec497dab9d8aea686/traefik/values.yaml#L736).
```
cat > traefik.yml <<EOF
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
ports:
web:
port: 80
redirectTo:
port: websecure
priority: 1
deployment:
replicas: 2
service:
annotations:
metallb.universe.tf/allow-shared-ip: "key-to-share-188-40-16-47"
metallb.universe.tf/loadBalancerIPs: 188.40.16.47,2a01:4f8:fff2:48::2
EOF
kubectl apply --server-side=true -f traefik.yml
```
#### Forgejo
[forgejo](https://code.forgejo.org/forgejo-helm/forgejo-helm) is configured in [ingress](https://code.forgejo.org/forgejo-helm/forgejo-helm#ingress) for the reverse proxy (`traefik`) to route the domain and for the ACME issuer (`cert-manager`) to obtain a certificate, and in [service](https://code.forgejo.org/forgejo-helm/forgejo-helm#service) for the `ssh` port to be bound to the desired IPs of the load balancer (`metallb`).
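A hedged sketch of what the ingress values might look like; the exact keys should be checked against the chart's ingress documentation, and `forgejo.example.org` is a placeholder:
```
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-http   # ClusterIssuer defined above
  hosts:
    - host: forgejo.example.org      # placeholder domain
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: forgejo-tls        # placeholder secret name
      hosts:
        - forgejo.example.org
```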
@ -1000,79 +920,17 @@ service:
type: LoadBalancer
annotations:
metallb.universe.tf/loadBalancerIPs: 188.40.16.47,2a01:4f8:fff2:48::2
metallb.universe.tf/allow-shared-ip: "key-to-share-188-40-16-47"
metallb.universe.tf/allow-shared-ip: "key-to-share-failover"
ipFamilyPolicy: PreferDualStack
port: 2222
```
Define the nfs storage class.
### K8S NFS storage creation
Define the 20Gi `forgejo-data` pvc owned by user id 1000.
```sh
$ cat nfs.yml
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: nfs
namespace: default
spec:
chart: nfs-subdir-external-provisioner
repo: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
targetNamespace: default
set:
nfs.server: 10.53.101.100
nfs.path: /k8s
storageClass.name: nfs
$ kubectl apply --server-side=true -f nfs.yml
$ sleep 120 ; kubectl get storageclass nfs
```
### k8s NFS storage creation
Create the directory to be used, with the expected permissions (assuming `/k8s` is the directory exported via NFS).
```sh
sudo mkdir /precious/k8s/forgejo-data
sudo chown 1000:1000 /precious/k8s/forgejo-data
```
Define the `forgejo-data` pvc.
```sh
$ cat pv.yml
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: forgejo-data
spec:
capacity:
storage: 20Gi
nfs:
server: 10.53.101.100
path: /k8s/forgejo-data
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
mountOptions:
- noatime
- nfsvers=4.2
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: forgejo-data
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 20Gi
volumeName: forgejo-data
storageClassName: nfs
volumeMode: Filesystem
$ kubectl apply --server-side=true -f pv.yml
./setup.sh setup_k8s_pvc forgejo-data 20Gi 1000
```
[Instruct the forgejo pod](https://code.forgejo.org/forgejo-helm/forgejo-helm#persistence) to use the `forgejo-data` pvc.
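A hedged sketch of the corresponding persistence values; whether the existing claim is referenced with `claimName` or `existingClaim` depends on the chart version, so verify against the linked documentation:
```
persistence:
  enabled: true
  create: false              # the pvc was created above with setup_k8s_pvc
  claimName: forgejo-data    # assumption: key name to verify against the chart docs
```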
@ -1089,6 +947,8 @@ persistence:
### When a machine or disk is scheduled for replacement.
* `kubectl drain hetzner05` # evacuate all the pods out of the node to be shutdown
* `kubectl taint nodes hetzner05 key1=value1:NoSchedule` # prevent any pod from being created there (metallb speaker won't be drained, for instance)
* `kubectl delete node hetzner05` # let the cluster know it no longer exists so a new one by the same name can replace it
### Routing the failover IP

14
k3s-host/certmanager.yml Normal file

@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-http
spec:
acme:
email: contact@forgejo.org
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-http
solvers:
- http01:
ingress:
class: traefik

8
k3s-host/metallb.yml Normal file

@ -0,0 +1,8 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: first-pool
spec:
addresses:
- $failover_ipv4/$failover_ipv4_range
- $failover_ipv6/$failover_ipv6_range

13
k3s-host/nfs.yml Normal file

@ -0,0 +1,13 @@
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
name: nfs
namespace: default
spec:
chart: nfs-subdir-external-provisioner
repo: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
targetNamespace: default
set:
nfs.server: $node_nfs_server
nfs.path: /k8s
storageClass.name: nfs

33
k3s-host/pvc.yml Normal file

@ -0,0 +1,33 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: $pvc_name
spec:
capacity:
storage: $pvc_capacity
nfs:
server: $node_nfs_server
path: /k8s/$pvc_name
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
mountOptions:
- noatime
- nfsvers=4.2
volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: $pvc_name
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: $pvc_capacity
volumeName: $pvc_name
storageClassName: nfs
volumeMode: Filesystem

4
k3s-host/secrets.sh.example

@ -0,0 +1,4 @@
node_drbd_shared_secret=***
node_k8s_token=none

224
k3s-host/setup.sh Executable file

@ -0,0 +1,224 @@
#!/bin/bash
SELF_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if ${VERBOSE:-false}; then
set -ex
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
else
set -e
fi
source $SELF_DIR/variables.sh
source $SELF_DIR/secrets.sh
set -o pipefail
self_node=$(hostname | sed -e 's/hetzner0//')
interface=${node_interface[$self_node]}
dependencies="retry etckeeper"
if ! which $dependencies >&/dev/null; then
sudo apt-get -q install -qq -y $dependencies
fi
function setup_ufw() {
sudo apt-get -q install -qq -y ufw
sudo ufw --force reset
sudo ufw default allow incoming
sudo ufw default allow outgoing
sudo ufw default allow routed
for from in $nodes; do
for to in $nodes; do
if test $from != $to; then
for v in ipv4 ipv6; do
eval from_ip=\${node_$v[$from]}
eval to_ip=\${node_$v[$to]}
sudo ufw allow in on $interface from $from_ip to $to_ip
done
fi
done
done
for host_ip in ${node_ipv4[$self_node]} ${node_ipv6[$self_node]}; do
sudo ufw allow in on $interface to $host_ip port 22 proto tcp
sudo ufw deny in on $interface log-all to $host_ip
done
for public_ip in $failover_ipv4 $failover_ipv6; do
sudo ufw allow in on $interface to $public_ip port 22,80,443,2000:3000 proto tcp
sudo ufw deny in on $interface log-all to $public_ip
done
sudo ufw enable
sudo systemctl start ufw
sudo systemctl enable ufw
sudo ufw status verbose
}
function setup_drbd() {
if ! test -f /etc/network/interfaces.d/drbd; then
cat <<EOF | sudo tee /etc/network/interfaces.d/drbd
auto $interface.$node_drbd_vlan
iface $interface.$node_drbd_vlan inet static
address $node_drbd_ipv4_prefix.$self_node
netmask 255.255.255.0
vlan-raw-device $interface
mtu 1400
EOF
sudo ifup $interface.$node_drbd_vlan
fi
sudo apt-get install -y drbd-utils
res_file=/etc/drbd.d/$node_drbd_resource.res
if ! sudo test -f $res_file; then
(
cat <<EOF
resource $node_drbd_resource {
net {
# A : write completion is determined when data is written to the local disk and the local TCP transmission buffer
# B : write completion is determined when data is written to the local disk and remote buffer cache
# C : write completion is determined when data is written to both the local disk and the remote disk
protocol C;
cram-hmac-alg sha1;
# any secret key for authentication among nodes
shared-secret "$node_drbd_shared_secret";
}
disk {
resync-rate 100M;
}
EOF
for node in $nodes; do
cat <<EOF
on hetzner0$node {
address $node_drbd_ipv4_prefix.$node:7788;
volume 0 {
device /dev/drbd0;
disk ${node_drbd_device[$node]};
meta-disk internal;
}
}
EOF
done
cat <<EOF
}
EOF
) | sudo tee $res_file
fi
if ! sudo drbdadm status $node_drbd_resource >&/dev/null; then
sudo drbdadm create-md $node_drbd_resource
sudo systemctl enable drbd
sudo systemctl start drbd
fi
if ! grep --quiet '^/dev/drbd0 /precious' /etc/fstab; then
echo /dev/drbd0 /precious ext4 noauto,noatime,defaults 0 0 | sudo tee -a /etc/fstab
sudo mkdir -p /precious
fi
}
function setup_nfs() {
sudo apt-get install -y nfs-kernel-server nfs-common
if ! test -f /etc/network/interfaces.d/nfs; then
cat <<EOF | sudo tee /etc/network/interfaces.d/nfs
auto $interface.$node_nfs_vlan
iface $interface.$node_nfs_vlan inet static
address $node_nfs_ipv4_prefix.$self_node
netmask 255.255.255.0
vlan-raw-device $interface
mtu 1400
EOF
sudo ifup $interface.$node_nfs_vlan
fi
if ! grep --quiet '^/precious' /etc/exports; then
cat <<EOF | sudo tee -a /etc/exports
/precious $node_nfs_ipv4_prefix.0/24(rw,fsid=0,no_root_squash,no_subtree_check)
/precious/k8s $node_nfs_ipv4_prefix.0/24(rw,nohide,insecure,no_subtree_check)
EOF
sudo exportfs -va || true # it does not matter if the exported dirs do not exist yet, they will be created later
sudo exportfs -s
fi
}
function setup_k8s() {
existing=$1
if ! test -f /etc/network/interfaces.d/k8s; then
cat <<EOF | sudo tee /etc/network/interfaces.d/k8s
auto $interface.$node_k8s_vlan
iface $interface.$node_k8s_vlan inet static
address $node_k8s_ipv4_prefix.$self_node
netmask 255.255.0.0
vlan-raw-device $interface
mtu 1400
up ip addr add $node_k8s_ipv6_prefix::$self_node/48 dev $interface.$node_k8s_vlan
up ip addr add $failover_ipv4/$failover_ipv4_range dev $interface
up ip addr add $failover_ipv6/$failover_ipv6_range dev $interface
EOF
sudo ifup $interface.$node_k8s_vlan
fi
if ! grep --quiet 'export KUBECONFIG' ~/.bashrc; then
echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >>~/.bashrc
fi
if ! sudo systemctl --quiet is-active k3s; then
args=""
if test "$existing"; then
if ! test "$node_k8s_token"; then
echo "obtain the token from node $existing with sudo cat /var/lib/rancher/k3s/server/token and set node_k8s_token= in secrets.sh"
exit 1
fi
args="$args --token $node_k8s_token --server https://$node_k8s_ipv4_prefix.$existing:6443"
fi
if test "$self_node" = $node_k8s_etcd; then
args="$args --disable-apiserver --disable-controller-manager --disable-scheduler"
fi
curl -fL https://get.k3s.io | sh -s - server $args --cluster-init --disable=servicelb --write-kubeconfig-mode=644 --node-ip=$node_k8s_ipv4_prefix.$self_node,$node_k8s_ipv6_prefix::$self_node $node_k8s_cidr --flannel-ipv6-masq
if test "$self_node" = $node_k8s_etcd; then
retry --times 20 -- kubectl taint nodes $(hostname) key1=value1:NoSchedule
fi
if test "$self_node" != $node_k8s_etcd; then
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -
fi
fi
}
function setup_k8s_apply() {
# substitute the variables into the manifest $1 and apply it, retrying while the cluster comes up
retry --delay 30 --times 10 -- bash -c "$SELF_DIR/subst.sh $1 | kubectl apply --server-side=true -f -"
}
function setup_k8s_traefik() {
setup_k8s_apply traefik.yml
}
function setup_k8s_nfs() {
setup_k8s_apply nfs.yml
}
function setup_k8s_metallb() {
helm repo add metallb https://metallb.github.io/metallb
helm upgrade --install metallb --set installCRDs=true metallb/metallb
setup_k8s_apply metallb.yml
}
function setup_k8s_certmanager() {
helm upgrade --install mycertmanager --set installCRDs=true oci://registry-1.docker.io/bitnamicharts/cert-manager
setup_k8s_apply certmanager.yml
}
function setup_k8s_pvc() {
export pvc_name=$1
export pvc_capacity=$2
export pvc_owner=$3
# temporarily mount the NFS export to create the directory backing the PersistentVolume with the expected owner
sudo mount -o nfsvers=4.2 $node_nfs_server:/k8s /opt
sudo mkdir -p /opt/$pvc_name
sudo chown $pvc_owner:$pvc_owner /opt/$pvc_name
sudo umount /opt
# create the PersistentVolume and PersistentVolumeClaim from pvc.yml
setup_k8s_apply pvc.yml
}
"$@"

10
k3s-host/subst.sh Executable file

@ -0,0 +1,10 @@
#!/bin/bash
# Expand the shell variables referenced in the template given as $1 (using
# variables.sh and the environment) and print the result on stdout, e.g.
# ./subst.sh metallb.yml | kubectl apply --server-side=true -f -
SELF_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source $SELF_DIR/variables.sh
eval "cat <<EOF
$(<$1)
EOF
"

19
k3s-host/traefik.yml Normal file

@ -0,0 +1,19 @@
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
name: traefik
namespace: kube-system
spec:
valuesContent: |-
ports:
web:
port: 80
redirectTo:
port: websecure
priority: 1
deployment:
replicas: 2
service:
annotations:
metallb.universe.tf/allow-shared-ip: "key-to-share-failover"
metallb.universe.tf/loadBalancerIPs: $failover_ipv4,$failover_ipv6

42
k3s-host/variables.sh.example Executable file

@ -0,0 +1,42 @@
#!/bin/bash
nodes="5 6"
node_interface=(
[3]=enp5s0
[5]=enp5s0
[6]=enp5s0
)
node_ipv4=(
[5]=88.198.58.177
[6]=65.108.204.171
)
node_ipv6=(
[5]=2a01:4f8:222:507::2
[6]=2a01:4f9:1a:a082::2
)
failover_ipv4=188.40.16.47
failover_ipv4_range=32
failover_ipv6=2a01:4f8:fff2:48::2
failover_ipv6_range=48
node_drbd_vlan=4000
node_drbd_resource=r1
node_drbd_ipv4_prefix=10.53.100
node_drbd_device=(
[5]=/dev/nvme0n1
[6]=/dev/nvme0n1
)
node_nfs_vlan=4001
node_nfs_ipv4_prefix=10.53.101
node_nfs_server=$node_nfs_ipv4_prefix.100
node_k8s_vlan=4002
node_k8s_ipv4_prefix=10.88.1
node_k8s_ipv6_prefix=fd10
node_k8s_cidr="--cluster-cidr=10.42.0.0/16,fd01::/48 --service-cidr=10.43.0.0/16,fd02::/112"
node_k8s_etcd=3