Merge pull request #208189 from numinit/update-nebula-module

commit d47709d1ef
Sandro · 2023-02-06 23:14:58 +01:00 · committed by GitHub
5 changed files with 227 additions and 77 deletions

----------------------------------------

@@ -421,6 +421,16 @@
  attribute name.
  </para>
  </listitem>
+ <listitem>
+ <para>
+ Nebula now runs as a system user and group created for each
+ nebula network, using the <literal>CAP_NET_ADMIN</literal>
+ ambient capability on launch rather than starting as root.
+ Ensure that any files each Nebula instance needs to access are
+ owned by the correct user and group, by default
+ <literal>nebula-${networkName}</literal>.
+ </para>
+ </listitem>
  <listitem>
  <para>
  In <literal>mastodon</literal> it is now necessary to specify
@@ -803,6 +813,18 @@
  <link xlink:href="options.html#opt-system.stateVersion">system.stateVersion</link>.
  </para>
  </listitem>
+ <listitem>
+ <para>
+ Nebula now supports the
+ <literal>services.nebula.networks.&lt;name&gt;.isRelay</literal>
+ and
+ <literal>services.nebula.networks.&lt;name&gt;.relays</literal>
+ configuration options for setting up or allowing traffic
+ relaying. See the
+ <link xlink:href="https://www.defined.net/blog/announcing-relay-support-in-nebula/">announcement</link>
+ for more details about relays.
+ </para>
+ </listitem>
  <listitem>
  <para>
  <literal>hip</literal> has been separated into

----------------------------------------

@@ -101,6 +101,8 @@ In addition to numerous new and upgraded packages, this release has the following
  - The [services.wordpress.sites.<name>.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.<name>.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow consumers to specify explicit install paths via attribute name.
+ - Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`. (A migration sketch follows this hunk.)
  - In `mastodon` it is now necessary to specify the location of the file containing the `PostgreSQL` database password. The default value of the `services.mastodon.database.passwordFile` parameter has been changed from `/var/lib/mastodon/secrets/db-password` to `null`.
  - The `--target-host` and `--build-host` options of `nixos-rebuild` no longer treat the `localhost` value specially. To build on or deploy to the local machine, omit the relevant flag.
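Since each Nebula instance now runs as `nebula-${networkName}`, existing deployments may need to hand over ownership of their key material. A minimal migration sketch, assuming a network named `mesh` with keys under `/etc/nebula` (both names are illustrative, not taken from this commit):

```nix
{
  # systemd-tmpfiles "z" rules adjust ownership/mode of already-existing files
  # at boot. Replace "mesh" and the paths with your network name and key locations.
  systemd.tmpfiles.rules = [
    "z /etc/nebula/ca.crt   0640 nebula-mesh nebula-mesh -"
    "z /etc/nebula/mesh.crt 0640 nebula-mesh nebula-mesh -"
    "z /etc/nebula/mesh.key 0640 nebula-mesh nebula-mesh -"
  ];
}
```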
@@ -199,6 +201,8 @@ In addition to numerous new and upgraded packages, this release has the following
  - [Garage](https://garagehq.deuxfleurs.fr/) version is based on [system.stateVersion](options.html#opt-system.stateVersion): existing installations will keep using version 0.7, while new installations will use version 0.8. In order to upgrade a Garage cluster, please follow [upstream instructions](https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/) and force [services.garage.package](options.html#opt-services.garage.package), or upgrade [system.stateVersion](options.html#opt-system.stateVersion) accordingly.
+ - Nebula now supports the `services.nebula.networks.<name>.isRelay` and `services.nebula.networks.<name>.relays` configuration options for setting up or allowing traffic relaying. See the [announcement](https://www.defined.net/blog/announcing-relay-support-in-nebula/) for more details about relays. (A configuration sketch follows this hunk.)
  - `hip` has been separated into `hip`, `hip-common` and `hipcc`.
  - `services.nginx.recommendedProxySettings` now removes the `Connection` header, preventing clients from closing backend connections.
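A hedged configuration sketch for the new options, with an illustrative network name (`mesh`) and addresses; the certificate and key options are omitted for brevity:

```nix
{
  # On the publicly reachable node (often the lighthouse): relay traffic.
  services.nebula.networks.mesh = {
    enable = true;
    isLighthouse = true;
    isRelay = true;
  };
}
```

On the other nodes, allow traffic to be relayed through that node, addressed by its Nebula IP:

```nix
{
  services.nebula.networks.mesh = {
    enable = true;
    lighthouses = [ "10.0.100.1" ];
    relays = [ "10.0.100.1" ]; # Nebula IP of the relay node above (illustrative)
  };
}
```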

----------------------------------------

@@ -68,6 +68,12 @@ in
  description = lib.mdDoc "Whether this node is a lighthouse.";
  };
+ isRelay = mkOption {
+ type = types.bool;
+ default = false;
+ description = lib.mdDoc "Whether this node is a relay.";
+ };
  lighthouses = mkOption {
  type = types.listOf types.str;
  default = [];
@@ -78,6 +84,15 @@ in
  example = [ "192.168.100.1" ];
  };
+ relays = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = lib.mdDoc ''
+ List of IPs of relays that this node should allow traffic from.
+ '';
+ example = [ "192.168.100.1" ];
+ };
  listen.host = mkOption {
  type = types.str;
  default = "0.0.0.0";
@@ -157,6 +172,11 @@ in
  am_lighthouse = netCfg.isLighthouse;
  hosts = netCfg.lighthouses;
  };
+ relay = {
+ am_relay = netCfg.isRelay;
+ relays = netCfg.relays;
+ use_relays = true;
+ };
  listen = {
  host = netCfg.listen.host;
  port = netCfg.listen.port;
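Note that `use_relays = true` is emitted unconditionally here. Assuming the module's freeform `settings` option is still merged over these generated defaults (an assumption about code not shown in this diff), a node could opt out of using relays with something like:

```nix
{
  # Hypothetical opt-out; relies on `settings` merging over the defaults above.
  services.nebula.networks.mesh.settings.relay.use_relays = false;
}
```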
@@ -173,25 +193,41 @@ in
  configFile = format.generate "nebula-config-${netName}.yml" settings;
  in
  {
- # Create systemd service for Nebula.
+ # Create the systemd service for Nebula.
  "nebula@${netName}" = {
  description = "Nebula VPN service for ${netName}";
  wants = [ "basic.target" ];
  after = [ "basic.target" "network.target" ];
  before = [ "sshd.service" ];
  wantedBy = [ "multi-user.target" ];
- serviceConfig = mkMerge [
- {
- Type = "simple";
- Restart = "always";
- ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
- }
- # The service needs to launch as root to access the tun device, if it's enabled.
- (mkIf netCfg.tun.disable {
- User = networkId;
- Group = networkId;
- })
- ];
+ serviceConfig = {
+ Type = "simple";
+ Restart = "always";
+ ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
+ UMask = "0027";
+ CapabilityBoundingSet = "CAP_NET_ADMIN";
+ AmbientCapabilities = "CAP_NET_ADMIN";
+ LockPersonality = true;
+ NoNewPrivileges = true;
+ PrivateDevices = false; # needs access to /dev/net/tun (below)
+ DeviceAllow = "/dev/net/tun rw";
+ DevicePolicy = "closed";
+ PrivateTmp = true;
+ PrivateUsers = false; # CapabilityBoundingSet needs to apply to the host namespace
+ ProtectClock = true;
+ ProtectControlGroups = true;
+ ProtectHome = true;
+ ProtectHostname = true;
+ ProtectKernelLogs = true;
+ ProtectKernelModules = true;
+ ProtectKernelTunables = true;
+ ProtectProc = "invisible";
+ ProtectSystem = "strict";
+ RestrictNamespaces = true;
+ RestrictSUIDSGID = true;
+ User = networkId;
+ Group = networkId;
+ };
  unitConfig.StartLimitIntervalSec = 0; # ensure Restart=always is always honoured (networks can go down for arbitrarily long)
  };
  }) enabledNetworks);
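The unit is now heavily sandboxed. If a particular network needs an exception, the per-instance unit created above (named `nebula@<network>`) can be overridden from elsewhere in the configuration; a hedged sketch with an illustrative network name:

```nix
{ lib, ... }:
{
  # Hypothetical override: relax one hardening knob for a single network's unit.
  systemd.services."nebula@mesh".serviceConfig.ProtectHome = lib.mkForce "read-only";
}
```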
@@ -202,7 +238,7 @@ in
  # Create the service users and groups.
  users.users = mkMerge (mapAttrsToList (netName: netCfg:
- mkIf netCfg.tun.disable {
+ {
  ${nameToId netName} = {
  group = nameToId netName;
  description = "Nebula service user for network ${netName}";
@@ -210,9 +246,8 @@ in
  };
  }) enabledNetworks);
- users.groups = mkMerge (mapAttrsToList (netName: netCfg:
- mkIf netCfg.tun.disable {
- ${nameToId netName} = {};
- }) enabledNetworks);
+ users.groups = mkMerge (mapAttrsToList (netName: netCfg: {
+ ${nameToId netName} = {};
+ }) enabledNetworks);
  };
  }
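Since the user and group are now created unconditionally per network (named `nebula-<network>` by `nameToId`), other services on the same host can be granted read access to shared files through group membership; a hypothetical sketch:

```nix
{
  # Illustrative only: a metrics exporter reading files owned by group nebula-mesh.
  users.users.my-exporter.extraGroups = [ "nebula-mesh" ];
}
```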

----------------------------------------

@@ -10,6 +10,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: let
  environment.systemPackages = [ pkgs.nebula ];
  users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
  services.openssh.enable = true;
+ networking.interfaces.eth1.useDHCP = false;
  services.nebula.networks.smoke = {
  # Note that these paths won't exist when the machine is first booted.
@@ -30,13 +31,14 @@ in
  lighthouse = { ... } @ args:
  makeNebulaNode args "lighthouse" {
- networking.interfaces.eth1.ipv4.addresses = [{
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
  address = "192.168.1.1";
  prefixLength = 24;
  }];
  services.nebula.networks.smoke = {
  isLighthouse = true;
+ isRelay = true;
  firewall = {
  outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
  inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -44,9 +46,9 @@ in
  };
  };
- node2 = { ... } @ args:
- makeNebulaNode args "node2" {
- networking.interfaces.eth1.ipv4.addresses = [{
+ allowAny = { ... } @ args:
+ makeNebulaNode args "allowAny" {
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
  address = "192.168.1.2";
  prefixLength = 24;
  }];
@@ -55,6 +57,7 @@ in
  staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
  isLighthouse = false;
  lighthouses = [ "10.0.100.1" ];
+ relays = [ "10.0.100.1" ];
  firewall = {
  outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
  inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -62,9 +65,9 @@ in
  };
  };
- node3 = { ... } @ args:
- makeNebulaNode args "node3" {
- networking.interfaces.eth1.ipv4.addresses = [{
+ allowFromLighthouse = { ... } @ args:
+ makeNebulaNode args "allowFromLighthouse" {
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
  address = "192.168.1.3";
  prefixLength = 24;
  }];
@@ -73,6 +76,7 @@ in
  staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
  isLighthouse = false;
  lighthouses = [ "10.0.100.1" ];
+ relays = [ "10.0.100.1" ];
  firewall = {
  outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
  inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
@@ -80,9 +84,9 @@ in
  };
  };
- node4 = { ... } @ args:
- makeNebulaNode args "node4" {
- networking.interfaces.eth1.ipv4.addresses = [{
+ allowToLighthouse = { ... } @ args:
+ makeNebulaNode args "allowToLighthouse" {
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
  address = "192.168.1.4";
  prefixLength = 24;
  }];
@@ -92,6 +96,7 @@ in
  staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
  isLighthouse = false;
  lighthouses = [ "10.0.100.1" ];
+ relays = [ "10.0.100.1" ];
  firewall = {
  outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
  inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -99,9 +104,9 @@ in
  };
  };
- node5 = { ... } @ args:
- makeNebulaNode args "node5" {
- networking.interfaces.eth1.ipv4.addresses = [{
+ disabled = { ... } @ args:
+ makeNebulaNode args "disabled" {
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
  address = "192.168.1.5";
  prefixLength = 24;
  }];
@@ -111,6 +116,7 @@ in
  staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
  isLighthouse = false;
  lighthouses = [ "10.0.100.1" ];
+ relays = [ "10.0.100.1" ];
  firewall = {
  outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
  inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
@@ -123,12 +129,14 @@ in
  testScript = let
  setUpPrivateKey = name: ''
- ${name}.succeed(
- "mkdir -p /root/.ssh",
- "chmod 700 /root/.ssh",
- "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
- "chmod 600 /root/.ssh/id_snakeoil",
- )
+ ${name}.start()
+ ${name}.succeed(
+ "mkdir -p /root/.ssh",
+ "chmod 700 /root/.ssh",
+ "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+ "chmod 600 /root/.ssh/id_snakeoil",
+ "mkdir -p /root"
+ )
  '';
  # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
@@ -146,26 +154,48 @@ in
  ${name}.succeed(
  "mkdir -p /etc/nebula",
  "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
- "scp ${sshOpts} /etc/nebula/${name}.pub 192.168.1.1:/tmp/${name}.pub",
+ "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub",
  )
  lighthouse.succeed(
- 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /tmp/${name}.pub -out-crt /tmp/${name}.crt',
+ 'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt'
  )
  ${name}.succeed(
- "scp ${sshOpts} 192.168.1.1:/tmp/${name}.crt /etc/nebula/${name}.crt",
- "scp ${sshOpts} 192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+ "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt",
+ "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+ '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true'
  )
  '';
- in ''
- start_all()
+ getPublicIp = node: ''
+ ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip()
+ '';
+ # Never do this for anything security critical! (Thankfully it's just a test.)
+ # Restart Nebula right after the mutual block and/or restore so the state is fresh.
+ blockTrafficBetween = nodeA: nodeB: ''
+ node_a = ${getPublicIp nodeA}
+ node_b = ${getPublicIp nodeB}
+ ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP")
+ ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP")
+ ${nodeA}.systemctl("restart nebula@smoke.service")
+ ${nodeB}.systemctl("restart nebula@smoke.service")
+ '';
+ allowTrafficBetween = nodeA: nodeB: ''
+ node_a = ${getPublicIp nodeA}
+ node_b = ${getPublicIp nodeB}
+ ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP")
+ ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP")
+ ${nodeA}.systemctl("restart nebula@smoke.service")
+ ${nodeB}.systemctl("restart nebula@smoke.service")
+ '';
+ in ''
  # Create the certificate and sign the lighthouse's keys.
  ${setUpPrivateKey "lighthouse"}
  lighthouse.succeed(
  "mkdir -p /etc/nebula",
  'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
  'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
+ 'chown -R nebula-smoke:nebula-smoke /etc/nebula'
  )
  # Reboot the lighthouse and verify that the nebula service comes up on boot.
@@ -175,49 +205,104 @@ in
  lighthouse.wait_for_unit("nebula@smoke.service")
  lighthouse.succeed("ping -c5 10.0.100.1")
- # Create keys for node2's nebula service and test that it comes up.
- ${setUpPrivateKey "node2"}
- ${signKeysFor "node2" "10.0.100.2/24"}
- ${restartAndCheckNebula "node2" "10.0.100.2"}
+ # Create keys for allowAny's nebula service and test that it comes up.
+ ${setUpPrivateKey "allowAny"}
+ ${signKeysFor "allowAny" "10.0.100.2/24"}
+ ${restartAndCheckNebula "allowAny" "10.0.100.2"}
- # Create keys for node3's nebula service and test that it comes up.
- ${setUpPrivateKey "node3"}
- ${signKeysFor "node3" "10.0.100.3/24"}
- ${restartAndCheckNebula "node3" "10.0.100.3"}
+ # Create keys for allowFromLighthouse's nebula service and test that it comes up.
+ ${setUpPrivateKey "allowFromLighthouse"}
+ ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"}
+ ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"}
- # Create keys for node4's nebula service and test that it comes up.
- ${setUpPrivateKey "node4"}
- ${signKeysFor "node4" "10.0.100.4/24"}
- ${restartAndCheckNebula "node4" "10.0.100.4"}
+ # Create keys for allowToLighthouse's nebula service and test that it comes up.
+ ${setUpPrivateKey "allowToLighthouse"}
+ ${signKeysFor "allowToLighthouse" "10.0.100.4/24"}
+ ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"}
- # Create keys for node4's nebula service and test that it does not come up.
- ${setUpPrivateKey "node5"}
- ${signKeysFor "node5" "10.0.100.5/24"}
- node5.fail("systemctl status nebula@smoke.service")
- node5.fail("ping -c5 10.0.100.5")
+ # Create keys for disabled's nebula service and test that it does not come up.
+ ${setUpPrivateKey "disabled"}
+ ${signKeysFor "disabled" "10.0.100.5/24"}
+ disabled.fail("systemctl status nebula@smoke.service")
+ disabled.fail("ping -c5 10.0.100.5")
- # The lighthouse can ping node2 and node3 but not node5
+ # The lighthouse can ping allowAny and allowFromLighthouse but not disabled
  lighthouse.succeed("ping -c3 10.0.100.2")
  lighthouse.succeed("ping -c3 10.0.100.3")
  lighthouse.fail("ping -c3 10.0.100.5")
- # node2 can ping the lighthouse, but not node3 because of its inbound firewall
- node2.succeed("ping -c3 10.0.100.1")
- node2.fail("ping -c3 10.0.100.3")
+ # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall
+ allowAny.succeed("ping -c3 10.0.100.1")
+ allowAny.fail("ping -c3 10.0.100.3")
- # node3 can ping the lighthouse and node2
- node3.succeed("ping -c3 10.0.100.1")
- node3.succeed("ping -c3 10.0.100.2")
+ # allowFromLighthouse can ping the lighthouse and allowAny
+ allowFromLighthouse.succeed("ping -c3 10.0.100.1")
+ allowFromLighthouse.succeed("ping -c3 10.0.100.2")
- # node4 can ping the lighthouse but not node2 or node3
- node4.succeed("ping -c3 10.0.100.1")
- node4.fail("ping -c3 10.0.100.2")
- node4.fail("ping -c3 10.0.100.3")
+ # block allowFromLighthouse <-> allowAny, and allowFromLighthouse -> allowAny should still work.
+ ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+ allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+ ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+ allowFromLighthouse.succeed("ping -c10 10.0.100.2")
- # node2 can ping node3 now that node3 pinged it first
- node2.succeed("ping -c3 10.0.100.3")
- # node4 can ping node2 if node2 pings it first
- node2.succeed("ping -c3 10.0.100.4")
- node4.succeed("ping -c3 10.0.100.2")
+ # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse
+ allowToLighthouse.succeed("ping -c3 10.0.100.1")
+ allowToLighthouse.fail("ping -c3 10.0.100.2")
+ allowToLighthouse.fail("ping -c3 10.0.100.3")
+ # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first
+ allowAny.succeed("ping -c3 10.0.100.3")
+ # block allowAny <-> allowFromLighthouse, and allowAny -> allowFromLighthouse should still work.
+ ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+ allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+ allowAny.succeed("ping -c10 10.0.100.3")
+ ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+ allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+ allowAny.succeed("ping -c10 10.0.100.3")
+ # allowToLighthouse can ping allowAny if allowAny pings it first
+ allowAny.succeed("ping -c3 10.0.100.4")
+ allowToLighthouse.succeed("ping -c3 10.0.100.2")
+ # block allowToLighthouse <-> allowAny, and allowAny <-> allowToLighthouse should still work.
+ ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+ allowAny.succeed("ping -c10 10.0.100.4")
+ allowToLighthouse.succeed("ping -c10 10.0.100.2")
+ ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+ allowAny.succeed("ping -c10 10.0.100.4")
+ allowToLighthouse.succeed("ping -c10 10.0.100.2")
+ # block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse; allowFromLighthouse won't get to allowAny
+ ${blockTrafficBetween "allowFromLighthouse" "lighthouse"}
+ ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+ allowFromLighthouse.fail("ping -c3 10.0.100.2")
+ ${allowTrafficBetween "allowFromLighthouse" "lighthouse"}
+ ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+ allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+ # block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse; it won't get to allowFromLighthouse or allowToLighthouse
+ ${blockTrafficBetween "allowAny" "lighthouse"}
+ ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+ ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+ allowFromLighthouse.fail("ping -c3 10.0.100.2")
+ allowAny.fail("ping -c3 10.0.100.3")
+ allowAny.fail("ping -c3 10.0.100.4")
+ ${allowTrafficBetween "allowAny" "lighthouse"}
+ ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+ ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+ allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+ allowAny.succeed("ping -c3 10.0.100.3")
+ allowAny.succeed("ping -c3 10.0.100.4")
+ # block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny; it won't get to allowAny
+ ${blockTrafficBetween "allowToLighthouse" "lighthouse"}
+ ${blockTrafficBetween "allowToLighthouse" "allowAny"}
+ allowAny.fail("ping -c3 10.0.100.4")
+ allowToLighthouse.fail("ping -c3 10.0.100.2")
+ ${allowTrafficBetween "allowToLighthouse" "lighthouse"}
+ ${allowTrafficBetween "allowToLighthouse" "allowAny"}
+ allowAny.succeed("ping -c3 10.0.100.4")
+ allowToLighthouse.succeed("ping -c3 10.0.100.2")
  '';
  })
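The whole VM test can be built standalone; a hedged runner sketch, assuming `<nixpkgs>` points at a checkout containing this commit (the `nixosTests` attribute set is the conventional entry point, and the passthru change in the last file below additionally exposes it as `nebula.tests.nebula`):

```nix
# run-nebula-test.nix (illustrative): builds the VM test driver and runs
# the test script above.
let
  pkgs = import <nixpkgs> { };
in
pkgs.nixosTests.nebula
```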

----------------------------------------

@@ -1,4 +1,4 @@
- { lib, buildGoModule, fetchFromGitHub }:
+ { lib, buildGoModule, fetchFromGitHub, nixosTests }:
  buildGoModule rec {
  pname = "nebula";
@@ -17,6 +17,10 @@ buildGoModule rec {
  ldflags = [ "-X main.Build=${version}" ];
+ passthru.tests = {
+ inherit (nixosTests) nebula;
+ };
  meta = with lib; {
  description = "A scalable overlay networking tool with a focus on performance, simplicity and security";
  longDescription = ''