nvidia-container-toolkit: 1.5.0 -> 1.9.0

This commit is contained in:
guangtao 2023-03-26 01:04:29 -07:00
parent e608c90a1c
commit 40ee8d66cd
3 changed files with 67 additions and 92 deletions

View file

@@ -1,71 +0,0 @@ (file deleted: nvidia-container-runtime/default.nix)
# nvidia-container-runtime: wraps NVIDIA's OCI runtime shim so it finds a
# pinned low-level runtime (runc) and a Nix-generated config.toml.
#
# Inputs supplied by the caller (see mkNvidiaContainerPkg in all-packages.nix):
#   containerRuntimePath — the low-level OCI runtime binary to expose as "runc"
#   configTemplate       — config.toml template containing an @glibcbin@ placeholder
{ lib
, glibc
, fetchFromGitHub
, makeWrapper
, buildGoPackage
, linkFarm
, writeShellScript
, containerRuntimePath
, configTemplate
}:
let
  # A directory containing ONLY a "runc" symlink to the chosen runtime, so
  # prepending it to PATH forces the shim to use exactly this runtime.
  isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
    {
      name = "runc";
      path = containerRuntimePath;
    }
  ];
  # Emitted via --run below: warns (in yellow, via tput) when the user has
  # XDG_CONFIG_HOME set, because that redirects where config.toml is looked up.
  warnIfXdgConfigHomeIsSet = writeShellScript "warn_if_xdg_config_home_is_set" ''
    set -eo pipefail
    if [ -n "$XDG_CONFIG_HOME" ]; then
      echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
    fi
  '';
in
buildGoPackage rec {
  pname = "nvidia-container-runtime";
  version = "3.5.0";
  src = fetchFromGitHub {
    owner = "NVIDIA";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-+LZjsN/tKqsPJamoI8xo9LFv14c3e9vVlSP4NJhElcs=";
  };
  goPackagePath = "github.com/nvidia/nvidia-container-runtime";
  # Strip symbol table and DWARF info from the Go binary.
  ldflags = [ "-s" "-w" ];
  nativeBuildInputs = [ makeWrapper ];
  postInstall = ''
    mkdir -p $out/etc/nvidia-container-runtime
    # nvidia-container-runtime invokes docker-runc or runc if that isn't
    # available on PATH.
    #
    # Also set XDG_CONFIG_HOME if it isn't already to allow overriding
    # configuration. This in turn allows users to have the nvidia container
    # runtime enabled for any number of higher level runtimes like docker and
    # podman, i.e., there's no need to have mutually exclusivity on what high
    # level runtime can enable the nvidia runtime because each high level
    # runtime has its own config.toml file.
    wrapProgram $out/bin/nvidia-container-runtime \
      --run "${warnIfXdgConfigHomeIsSet}" \
      --prefix PATH : ${isolatedContainerRuntimePath} \
      --set-default XDG_CONFIG_HOME $out/etc
    # Instantiate the config template: point @glibcbin@ at this glibc's bin output.
    cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
    substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
      --subst-var-by glibcbin ${lib.getBin glibc}
  '';
  meta = with lib; {
    homepage = "https://github.com/NVIDIA/nvidia-container-runtime";
    description = "NVIDIA container runtime";
    license = licenses.asl20;
    platforms = platforms.linux;
    maintainers = with maintainers; [ cpcloud ];
  };
}

View file

@@ -1,35 +1,83 @@ (modified: nvidia-container-toolkit/default.nix)
# nvidia-container-toolkit 1.9.0: absorbs the wrapper logic from the removed
# nvidia-container-runtime package. The source moved from GitHub to GitLab
# (nvidia/container-toolkit/container-toolkit subgroup).
#
# NOTE: the rendered diff this was recovered from had interleaved old (1.5.0)
# and new (1.9.0) lines; this is the reconstructed post-commit file, keeping
# only the added/context lines and dropping the removed buildGoModule variant.
#
# Inputs supplied by the caller (see mkNvidiaContainerPkg in all-packages.nix):
#   containerRuntimePath — low-level OCI runtime binary to expose as "runc"
#   configTemplate       — config.toml template with an @glibcbin@ placeholder
#   libnvidia-container  — provides nvidia-container-cli on the wrapper PATH
{ lib
, glibc
, fetchFromGitLab
, makeWrapper
, buildGoPackage
, linkFarm
, writeShellScript
, containerRuntimePath
, configTemplate
, libnvidia-container
}:
let
  # A directory containing ONLY a "runc" symlink to the chosen runtime, so
  # prepending it to PATH forces the runtime shim to use exactly this runtime.
  isolatedContainerRuntimePath = linkFarm "isolated_container_runtime_path" [
    {
      name = "runc";
      path = containerRuntimePath;
    }
  ];
  # Warn (yellow, via tput) when XDG_CONFIG_HOME is set, because that
  # redirects where the runtime looks up its config.toml.
  warnIfXdgConfigHomeIsSet = writeShellScript "warn_if_xdg_config_home_is_set" ''
    set -eo pipefail
    if [ -n "$XDG_CONFIG_HOME" ]; then
      echo >&2 "$(tput setaf 3)warning: \$XDG_CONFIG_HOME=$XDG_CONFIG_HOME$(tput sgr 0)"
    fi
  '';
in
buildGoPackage rec {
  pname = "container-toolkit/container-toolkit";
  version = "1.9.0";

  src = fetchFromGitLab {
    owner = "nvidia";
    repo = pname;
    rev = "v${version}";
    sha256 = "sha256-b4mybNB5FqizFTraByHk5SCsNO66JaISj18nLgLN7IA=";
  };

  # Upstream's Go module path differs from the GitLab repo location.
  goPackagePath = "github.com/NVIDIA/nvidia-container-toolkit";

  # Strip symbol table and DWARF info from the Go binaries.
  ldflags = [ "-s" "-w" ];

  nativeBuildInputs = [ makeWrapper ];

  preBuild = ''
    # replace the default hookDefaultFilePath to the $out path
    substituteInPlace go/src/github.com/NVIDIA/nvidia-container-toolkit/cmd/nvidia-container-runtime/main.go \
      --replace '/usr/bin/nvidia-container-runtime-hook' '${placeholder "out"}/bin/nvidia-container-runtime-hook'
  '';

  postInstall = ''
    # NOTE(review): ${"$"}{pname} contains a "/" ("container-toolkit/container-toolkit"),
    # so this mv target looks suspicious — confirm against the upstream commit.
    mv $out/bin/{pkg,${pname}}
    mkdir -p $out/etc/nvidia-container-runtime

    # nvidia-container-runtime invokes docker-runc or runc if that isn't
    # available on PATH.
    #
    # Also set XDG_CONFIG_HOME if it isn't already to allow overriding
    # configuration. This in turn allows users to have the nvidia container
    # runtime enabled for any number of higher level runtimes like docker and
    # podman, i.e., there's no need to have mutually exclusivity on what high
    # level runtime can enable the nvidia runtime because each high level
    # runtime has its own config.toml file.
    wrapProgram $out/bin/nvidia-container-runtime \
      --run "${warnIfXdgConfigHomeIsSet}" \
      --prefix PATH : ${isolatedContainerRuntimePath}:${libnvidia-container}/bin \
      --set-default XDG_CONFIG_HOME $out/etc

    # Instantiate the config template: point @glibcbin@ at this glibc's bin output.
    cp ${configTemplate} $out/etc/nvidia-container-runtime/config.toml
    substituteInPlace $out/etc/nvidia-container-runtime/config.toml \
      --subst-var-by glibcbin ${lib.getBin glibc}

    # The hook is the same binary as the toolkit, invoked under another name.
    ln -s $out/bin/nvidia-container-{toolkit,runtime-hook}

    # Config now ships with this package itself (the separate
    # nvidia-container-runtime package was removed in this commit).
    wrapProgram $out/bin/nvidia-container-toolkit \
      --add-flags "-config ${placeholder "out"}/etc/nvidia-container-runtime/config.toml"
  '';

  meta = with lib; {
    homepage = "https://gitlab.com/nvidia/container-toolkit/container-toolkit";
    description = "NVIDIA Container Toolkit";
    license = licenses.asl20;
    platforms = platforms.linux;
    maintainers = with maintainers; [ cpcloud ];
  };
}

View file

@@ -22756,17 +22756,15 @@ (modified: pkgs/top-level/all-packages.nix) with pkgs;
# Reconstructed post-commit form: the rendered diff interleaved the removed
# nvidia-container-runtime binding / old paths list with the new ones.
# Builds a merged package tree (libnvidia-container + toolkit) for a given
# low-level runtime and config template; `additionalPaths` lets callers
# (docker/podman wrappers) add extra store paths to the join.
mkNvidiaContainerPkg = { name, containerRuntimePath, configTemplate, additionalPaths ? [] }:
  let
    # Nix `let` is recursive, so the toolkit may reference
    # libnvidia-container defined below.
    nvidia-container-toolkit = callPackage ../applications/virtualization/nvidia-container-toolkit {
      inherit containerRuntimePath configTemplate libnvidia-container;
    };
    libnvidia-container = callPackage ../applications/virtualization/libnvidia-container { };
  in symlinkJoin {
    inherit name;
    paths = [
      libnvidia-container
      nvidia-container-toolkit
    ] ++ additionalPaths;
  };