nixos/podman, podman: switch to netavark network stack

zowoq 2022-08-18 13:58:03 +10:00
parent 17c7ccb1ab
commit 469aec905b
9 changed files with 65 additions and 105 deletions

View file

@@ -115,6 +115,15 @@
<link linkend="opt-services.borgbackup.jobs._name_.inhibitsSleep"><literal>services.borgbackup.jobs.&lt;name&gt;.inhibitsSleep</literal></link>.
</para>
</listitem>
<listitem>
<para>
<literal>podman</literal> now uses the
<literal>netavark</literal> network stack. Users will need to
delete all of their local containers, images, volumes, etc., by
running <literal>podman system reset --force</literal> once
before upgrading their systems.
</para>
</listitem>
<listitem>
<para>
The EC2 image module no longer fetches instance metadata in

View file

@@ -40,6 +40,8 @@ In addition to numerous new and upgraded packages, this release has the following highlights:
- `borgbackup` module now has an option for inhibiting system sleep while backups are running, defaulting to off (not inhibiting sleep), available as [`services.borgbackup.jobs.<name>.inhibitsSleep`](#opt-services.borgbackup.jobs._name_.inhibitsSleep).
- `podman` now uses the `netavark` network stack. Users will need to delete all of their local containers, images, volumes, etc., by running `podman system reset --force` once before upgrading their systems.
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`.
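
Declaratively, the new option surface looks roughly like the sketch below (a NixOS configuration fragment; the attribute names match the module diff further down, while the subnet values are illustrative rather than defaults):

{
  virtualisation.podman = {
    enable = true;
    # Merged over the generated default network definition; see the
    # environment.etc."containers/networks/podman.json" hunk below.
    defaultNetwork.settings = {
      subnets = [ { gateway = "10.89.0.1"; subnet = "10.89.0.0/16"; } ];
    };
  };
}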

View file

@@ -1,7 +1,6 @@
{ config, lib, pkgs, ... }:
let
cfg = config.virtualisation.podman;
toml = pkgs.formats.toml { };
json = pkgs.formats.json { };
inherit (lib) mkOption types;
@@ -27,24 +26,13 @@ let
done
'';
net-conflist = pkgs.runCommand "87-podman-bridge.conflist"
{
nativeBuildInputs = [ pkgs.jq ];
extraPlugins = builtins.toJSON cfg.defaultNetwork.extraPlugins;
jqScript = ''
. + { "plugins": (.plugins + $extraPlugins) }
'';
} ''
jq <${cfg.package}/etc/cni/net.d/87-podman-bridge.conflist \
--argjson extraPlugins "$extraPlugins" \
"$jqScript" \
>$out
'';
in
{
imports = [
./dnsname.nix
(lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "dnsname" ]
"Use virtualisation.podman.defaultNetwork.settings.dns_enabled instead.")
(lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "extraPlugins" ]
"Netavark isn't compatible with CNI plugins.")
./network-socket.nix
];
@@ -149,11 +137,11 @@ in
'';
};
defaultNetwork.extraPlugins = lib.mkOption {
type = types.listOf json.type;
default = [ ];
defaultNetwork.settings = lib.mkOption {
type = json.type;
default = { };
description = lib.mdDoc ''
Extra CNI plugin configurations to add to podman's default network.
Settings for podman's default network.
'';
};
@@ -164,11 +152,26 @@ in
environment.systemPackages = [ cfg.package ]
++ lib.optional cfg.dockerCompat dockerCompat;
environment.etc."cni/net.d/87-podman-bridge.conflist".source = net-conflist;
# https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network
environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) {
source = json.generate "podman.json" ({
dns_enabled = false;
driver = "bridge";
id = "0000000000000000000000000000000000000000000000000000000000000000";
internal = false;
ipam_options = { driver = "host-local"; };
ipv6_enabled = false;
name = "podman";
network_interface = "podman0";
subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
} // cfg.defaultNetwork.settings);
};
virtualisation.containers = {
enable = true; # Enable common /etc/containers configuration
containersConf.settings = lib.optionalAttrs cfg.enableNvidia {
containersConf.settings = {
network.network_backend = "netavark";
} // lib.optionalAttrs cfg.enableNvidia {
engine = {
conmon_env_vars = [ "PATH=${lib.makeBinPath [ pkgs.nvidia-podman ]}" ];
runtimes.nvidia = [ "${pkgs.nvidia-podman}/bin/nvidia-container-runtime" ];
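
Note the `//` in the hunk above: Nix's attribute-set update operator is right-biased and shallow, so each top-level key in `defaultNetwork.settings` replaces the generated default wholesale instead of being merged recursively. A small sketch of that semantics (plain Nix, independent of this commit; the names are illustrative):

let
  defaults  = { dns_enabled = false; subnets = [ { subnet = "10.88.0.0/16"; } ]; };
  overrides = { dns_enabled = true; };
in
  defaults // overrides
# result: { dns_enabled = true; subnets = [ { subnet = "10.88.0.0/16"; } ]; }
# dns_enabled comes from overrides; subnets keeps its default because it was
# not overridden, but overriding it would replace the whole list.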

View file

@@ -1,36 +0,0 @@
{ config, lib, pkgs, ... }:
let
inherit (lib)
mkOption
mkIf
types
;
cfg = config.virtualisation.podman;
in
{
options = {
virtualisation.podman = {
defaultNetwork.dnsname.enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Enable DNS resolution in the default podman network.
'';
};
};
};
config = {
virtualisation.containers.containersConf.cniPlugins = mkIf cfg.defaultNetwork.dnsname.enable [ pkgs.dnsname-cni ];
virtualisation.podman.defaultNetwork.extraPlugins =
lib.optional cfg.defaultNetwork.dnsname.enable {
type = "dnsname";
domainName = "dns.podman";
capabilities.aliases = true;
};
};
}
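
The module removed above is superseded by the new `defaultNetwork.settings` option; per the `mkRemovedOptionModule` message earlier in this commit, the migration is a one-line rename (before/after sketch, both NixOS configuration):

# before this commit:
virtualisation.podman.defaultNetwork.dnsname.enable = true;

# after this commit:
virtualisation.podman.defaultNetwork.settings.dns_enabled = true;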

View file

@@ -527,7 +527,6 @@ in {
plotinus = handleTest ./plotinus.nix {};
podgrab = handleTest ./podgrab.nix {};
podman = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/default.nix {};
podman-dnsname = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/dnsname.nix {};
podman-tls-ghostunnel = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/tls-ghostunnel.nix {};
polaris = handleTest ./polaris.nix {};
pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {};

View file

@@ -13,6 +13,13 @@ import ../make-test-python.nix (
isNormalUser = true;
};
};
dns = { pkgs, ... }: {
virtualisation.podman.enable = true;
virtualisation.podman.defaultNetwork.settings.dns_enabled = true;
networking.firewall.allowedUDPPorts = [ 53 ];
};
docker = { pkgs, ... }: {
virtualisation.podman.enable = true;
@@ -43,6 +50,7 @@ import ../make-test-python.nix (
podman.wait_for_unit("sockets.target")
dns.wait_for_unit("sockets.target")
docker.wait_for_unit("sockets.target")
start_all()
@@ -120,6 +128,23 @@ import ../make-test-python.nix (
pid = podman.succeed("podman run --rm --init busybox readlink /proc/self").strip()
assert pid == "2"
with subtest("aardvark-dns"):
dns.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
dns.succeed(
"podman run -d --name=webserver -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${pkgs.writeTextDir "index.html" "<h1>Hi</h1>"} scratchimg ${pkgs.python3}/bin/python -m http.server 8000"
)
dns.succeed("podman ps | grep webserver")
dns.succeed("""
for i in `seq 0 120`; do
podman run --rm --name=client -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg ${pkgs.curl}/bin/curl http://webserver:8000 >/dev/console \
&& exit 0
sleep 0.5
done
exit 1
""")
dns.succeed("podman stop webserver")
dns.succeed("podman rm webserver")
with subtest("A podman member can use the docker cli"):
docker.succeed(su_cmd("docker version"))

View file

@@ -1,42 +0,0 @@
import ../make-test-python.nix (
{ pkgs, lib, ... }:
let
inherit (pkgs) writeTextDir python3 curl;
webroot = writeTextDir "index.html" "<h1>Hi</h1>";
in
{
name = "podman-dnsname";
meta = {
maintainers = with lib.maintainers; [ roberth ] ++ lib.teams.podman.members;
};
nodes = {
podman = { pkgs, ... }: {
virtualisation.podman.enable = true;
virtualisation.podman.defaultNetwork.dnsname.enable = true;
};
};
testScript = ''
podman.wait_for_unit("sockets.target")
with subtest("DNS works"): # also tests inter-container tcp routing
podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
podman.succeed(
"podman run -d --name=webserver -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${webroot} scratchimg ${python3}/bin/python -m http.server 8000"
)
podman.succeed("podman ps | grep webserver")
podman.succeed("""
for i in `seq 0 120`; do
podman run --rm --name=client -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg ${curl}/bin/curl http://webserver:8000 >/dev/console \
&& exit 0
sleep 0.5
done
exit 1
""")
podman.succeed("podman stop webserver")
podman.succeed("podman rm webserver")
'';
}
)

View file

@@ -68,7 +68,6 @@ buildGoModule rec {
${if stdenv.isDarwin then ''
mv bin/{darwin/podman,podman}
'' else ''
install -Dm644 cni/87-podman-bridge.conflist -t $out/etc/cni/net.d
install -Dm644 contrib/tmpfile/podman.conf -t $out/lib/tmpfiles.d
for s in contrib/systemd/**/*.in; do
substituteInPlace "$s" --replace "@@PODMAN@@" "podman" # don't use unwrapped binary
@@ -92,7 +91,6 @@ buildGoModule rec {
# related modules
inherit (nixosTests)
podman-tls-ghostunnel
podman-dnsname
;
oci-containers-podman = nixosTests.oci-containers.podman;
};

View file

@@ -15,12 +15,12 @@
, iproute2
, catatonit
, gvproxy
, aardvark-dns
, netavark
}:
# do not add qemu to this wrapper, store paths get written to the podman vm config and break when GCed
# adding aardvark-dns/netavark to `helpersBin` requires changes to the modules and tests
let
binPath = lib.makeBinPath ([
] ++ lib.optionals stdenv.isLinux [
@@ -41,7 +41,9 @@ let
paths = [
gvproxy
] ++ lib.optionals stdenv.isLinux [
aardvark-dns
catatonit # added here for the pause image and also set in `containersConf` for `init_path`
netavark
podman-unwrapped.rootlessport
];
};
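
Only the `paths` fragment of the helper aggregation is visible in this hunk. A sketch of the presumed enclosing expression (assuming a `symlinkJoin`, which the `paths` attribute suggests; the derivation name is illustrative):

helpersBin = symlinkJoin {
  name = "podman-helper-binaries";
  paths = [
    gvproxy
  ] ++ lib.optionals stdenv.isLinux [
    aardvark-dns  # DNS for netavark-managed networks, new in this commit
    catatonit     # pause image init, also set as init_path in containersConf
    netavark      # the network backend itself, new in this commit
    podman-unwrapped.rootlessport
  ];
};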