tensorflow: drop 1.x

it no longer works with our default python version and needs
many patches to compile.
Jörg Thalheim 2021-01-07 13:55:04 +01:00
parent cc8db6e19b
commit 1976d79627
16 changed files with 10 additions and 752 deletions
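
For anyone still depending on the 1.x packages removed below, a minimal workaround sketch is to pin a nixpkgs revision from before this change, e.g. the parent commit listed above (sha256 intentionally left blank; fetch it with nix-prefetch-url --unpack). python37Packages is an assumption here, made because the 1.x expressions disable themselves on Python 3.8 and newer:

# Sketch only: pin the pre-removal revision so tensorflow_1 and friends stay available.
let
  oldPkgs = import (builtins.fetchTarball {
    url = "https://github.com/NixOS/nixpkgs/archive/cc8db6e19b.tar.gz";
    # sha256 = "...";  # fill in after running nix-prefetch-url --unpack on the url
  }) { };
in
  oldPkgs.python37Packages.tensorflow_1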


@@ -1,30 +0,0 @@
{ stdenv, fetchPypi, buildPythonPackage
, numpy
, absl-py
, mock
}:
buildPythonPackage rec {
pname = "tensorflow-estimator";
# This is effectively 1.15.0. Upstream tagged 1.15.0 by mistake before
# actually updating the version in setup.py, which is why this tag is called
# 1.15.1.
version = "1.15.1";
format = "wheel";
src = fetchPypi {
pname = "tensorflow_estimator";
inherit version format;
sha256 = "1fc61wmc0w22frs79j2x4g6wnv5g21xc6rix1g4bsvy9qfvvylw8";
};
propagatedBuildInputs = [ mock numpy absl-py ];
meta = with stdenv.lib; {
description = "TensorFlow Estimator is a high-level API that encapsulates model training, evaluation, prediction, and exporting.";
homepage = "http://tensorflow.org";
license = licenses.asl20;
maintainers = with maintainers; [ jyp ];
};
}


@@ -1,120 +0,0 @@
{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, pythonAtLeast
, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, backports_weakref
, tensorflow-estimator_1
, tensorflow-tensorboard
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, nvidia_x11 ? null
, zlib
, python
, symlinkJoin
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
}:
# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain
assert cudaSupport -> cudatoolkit != null
&& cudnn != null
&& nvidia_x11 != null;
# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);
let
packages = import ./binary-hashes.nix;
variant = if cudaSupport then "-gpu" else "";
pname = "tensorflow${variant}";
in buildPythonPackage {
inherit pname;
inherit (packages) version;
format = "wheel";
disabled = pythonAtLeast "3.8";
src = let
pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) python.pythonVersion;
pyver = if stdenv.isDarwin then builtins.substring 0 1 pyVerNoDot else pyVerNoDot;
platform = if stdenv.isDarwin then "mac" else "linux";
unit = if cudaSupport then "gpu" else "cpu";
key = "${platform}_py_${pyver}_${unit}";
in fetchurl packages.${key};
propagatedBuildInputs = [
protobuf
numpy
termcolor
grpcio
six
astor
absl-py
gast
google-pasta
wrapt
tensorflow-estimator_1
tensorflow-tensorboard
keras-applications
keras-preprocessing
] ++ lib.optional (!isPy3k) mock
++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];
nativeBuildInputs = lib.optional cudaSupport addOpenGLRunpath;
# Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
# and the propagated input tensorflow-tensorboard which causes environment collisions.
# another possibility would be to have tensorboard only in the buildInputs
# https://github.com/tensorflow/tensorflow/blob/v1.7.1/tensorflow/tools/pip_package/setup.py#L79
postInstall = ''
rm $out/bin/tensorboard
'';
# Note that we need to run *after* the fixup phase because the
# libraries are loaded at runtime. If we run in preFixup then
# patchelf --shrink-rpath will remove the cuda libraries.
postFixup = let
rpath = stdenv.lib.makeLibraryPath
([ stdenv.cc.cc.lib zlib ] ++ lib.optionals cudaSupport [ cudatoolkit.out cudatoolkit.lib cudnn nvidia_x11 ]);
in
lib.optionalString stdenv.isLinux ''
rrPath="$out/${python.sitePackages}/tensorflow/:$out/${python.sitePackages}/tensorflow/contrib/tensor_forest/:${rpath}"
internalLibPath="$out/${python.sitePackages}/tensorflow/python/_pywrap_tensorflow_internal.so"
find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
patchelf --set-rpath "$rrPath" "$lib"
${lib.optionalString cudaSupport ''
addOpenGLRunpath "$lib"
''}
done
'';
meta = with stdenv.lib; {
description = "Computation using data flow graphs for scalable machine learning";
homepage = "http://tensorflow.org";
license = licenses.asl20;
maintainers = with maintainers; [ jyp abbradar ];
platforms = [ "x86_64-linux" "x86_64-darwin" ];
# Python 2.7 build uses different string encoding.
# See https://github.com/NixOS/nixpkgs/pull/37044#issuecomment-373452253
broken = stdenv.isDarwin && !isPy3k;
};
}
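
The src above is picked by keying into binary-hashes.nix, the next file in this diff. As an illustration (assuming an x86_64-linux build with Python 3.7 and cudaSupport = false, which is not spelled out in the expression), the key resolves as follows:

# Illustration only, not part of the original expression:
#   pyVerNoDot = "37"; platform = "linux"; unit = "cpu"
#   key        = "linux_py_37_cpu"
# so the derivation ends up fetching
(import ./binary-hashes.nix).linux_py_37_cpu
# i.e. the tensorflow-1.14.0-cp37-cp37m-linux_x86_64.whl entry listed below.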


@@ -1,43 +0,0 @@
{
version = "1.14.0";
linux_py_27_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.14.0-cp27-none-linux_x86_64.whl";
sha256 = "0yywdrfk97dh1bxhibspg0raz70fx9lcczj6xlimqy4xb60clx7k";
};
linux_py_35_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.14.0-cp35-cp35m-linux_x86_64.whl";
sha256 = "1xvyb6xcrjhlwvrmrhn5vs9xy7g98smqmpv4i3hhpry4qyasphhj";
};
linux_py_36_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.14.0-cp36-cp36m-linux_x86_64.whl";
sha256 = "1psd9vyxz9f39dwj77nvrg373sxv3p5vdp9fnz81dpsm0b0mwl44";
};
linux_py_37_cpu = {
url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.14.0-cp37-cp37m-linux_x86_64.whl";
sha256 = "0bg2sb1n2ag27r7ww695kg5hb0mjrw4kc5893krmixx2j71860c5";
};
linux_py_27_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp27-none-linux_x86_64.whl";
sha256 = "0y1x91gayg6pjddgl8ndcm63wfzhyv4s5khgl7ffzsgni1ivaqw5";
};
linux_py_35_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp35-cp35m-linux_x86_64.whl";
sha256 = "03piggpbz1jx8m2b95spq3jrdff4w6xx63ji07am7hyw2nsgx3mx";
};
linux_py_36_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp36-cp36m-linux_x86_64.whl";
sha256 = "0ypkp8cfhharsyyikb1qgf44cfm6284km9xswzvzymjzz75vg3gd";
};
linux_py_37_gpu = {
url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.14.0-cp37-cp37m-linux_x86_64.whl";
sha256 = "0virp8nn2ysx4855hq29kas6fm6b3dsiybwzdxy9nnb9n2d8qlm2";
};
mac_py_2_cpu = {
url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.14.0-py2-none-any.whl";
sha256 = "14f86k3pgq7z6i4s4im55zpp38f0drnm7xlclavsgcc0nxnj3z26";
};
mac_py_3_cpu = {
url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.14.0-py3-none-any.whl";
sha256 = "0f3swpcjfgqhj6h5wnx8snc0xjkx4hnkqx83fmlrwpncs8c131d3";
};
}


@@ -1,456 +0,0 @@
{ stdenv, pkgs, bazel_0_26, buildBazelPackage, lib, fetchFromGitHub, fetchpatch, symlinkJoin
, addOpenGLRunpath
# Python deps
, buildPythonPackage, isPy3k, isPy27, pythonOlder, pythonAtLeast, python
# Python libraries
, numpy, tensorflow-tensorboard_1, backports_weakref, mock, enum34, absl-py
, future, setuptools, wheel, keras-preprocessing, keras-applications, google-pasta
, functools32
, opt-einsum
, termcolor, grpcio, six, wrapt, protobuf, tensorflow-estimator_1
# Common deps
, git, swig, which, binutils, glibcLocales, cython
# Common libraries
, jemalloc, openmpi, astor, gast, grpc, sqlite, openssl, jsoncpp, re2
, curl, snappy, flatbuffers, icu, double-conversion, libpng, libjpeg, giflib
# Upstream by default includes cuda support since tensorflow 1.15. We could do
# that in nix as well. It would make some things easier and less confusing, but
# it would also make the default tensorflow package unfree. See
# https://groups.google.com/a/tensorflow.org/forum/#!topic/developers/iRCt5m4qUz0
, cudaSupport ? false, nvidia_x11 ? null, cudatoolkit ? null, cudnn ? null, nccl ? null
, mklSupport ? false, mkl ? null
# XLA without CUDA is broken
, xlaSupport ? cudaSupport
# Default from ./configure script
, cudaCapabilities ? [ "3.5" "5.2" ]
, sse42Support ? stdenv.hostPlatform.sse4_2Support
, avx2Support ? stdenv.hostPlatform.avx2Support
, fmaSupport ? stdenv.hostPlatform.fmaSupport
# Darwin deps
, Foundation, Security
}:
assert cudaSupport -> nvidia_x11 != null
&& cudatoolkit != null
&& cudnn != null;
# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);
assert mklSupport -> mkl != null;
let
withTensorboard = pythonOlder "3.6";
cudatoolkit_joined = symlinkJoin {
name = "${cudatoolkit.name}-merged";
paths = [
cudatoolkit.lib
cudatoolkit.out
# for some reason some of the required libs are in the targets/x86_64-linux
# directory; not sure why but this works around it
"${cudatoolkit}/targets/${stdenv.system}"
];
};
cudatoolkit_cc_joined = symlinkJoin {
name = "${cudatoolkit.cc.name}-merged";
paths = [
cudatoolkit.cc
binutils.bintools # for ar, dwp, nm, objcopy, objdump, strip
];
};
# Needed for _some_ system libraries, grep INCLUDEDIR.
includes_joined = symlinkJoin {
name = "tensorflow-deps-merged";
paths = [
pkgs.protobuf
jsoncpp
];
};
tfFeature = x: if x then "1" else "0";
version = "1.15.4";
variant = if cudaSupport then "-gpu" else "";
pname = "tensorflow${variant}";
pythonEnv = python.withPackages (_:
[ # python deps needed during wheel build time (not runtime, see the buildPythonPackage part for that)
numpy
keras-preprocessing
protobuf
wrapt
gast
astor
absl-py
termcolor
keras-applications
setuptools
wheel
] ++ lib.optionals (!isPy3k)
[ future
functools32
mock
]);
bazel-build = buildBazelPackage {
name = "${pname}-${version}";
bazel = bazel_0_26;
src = fetchFromGitHub {
owner = "tensorflow";
repo = "tensorflow";
rev = "v${version}";
sha256 = "0lg8ahyr2k7dmp0yfypk8ivl9a0xcg3j0f0dakmn5ljk8nsji0bj";
};
patches = [
# Work around https://github.com/tensorflow/tensorflow/issues/24752
../no-saved-proto.patch
# Fixes for NixOS jsoncpp
../system-jsoncpp.patch
# https://github.com/tensorflow/tensorflow/pull/29673
(fetchpatch {
name = "fix-compile-with-cuda-and-mpi.patch";
url = "https://github.com/tensorflow/tensorflow/pull/29673/commits/498e35a3bfe38dd75cf1416a1a23c07c3b59e6af.patch";
sha256 = "1m2qmwv1ysqa61z6255xggwbq6mnxbig749bdvrhnch4zydxb4di";
})
(fetchpatch {
name = "backport-pr-18950.patch";
url = "https://github.com/tensorflow/tensorflow/commit/73640aaec2ab0234d9fff138e3c9833695570c0a.patch";
sha256 = "1n9ypbrx36fc1kc9cz5b3p9qhg15xxhq4nz6ap3hwqba535nakfz";
})
(fetchpatch {
# be compatible with gast >0.2 instead of only gast 0.2.2
name = "gast-update.patch";
url = "https://github.com/tensorflow/tensorflow/commit/85751ad6c7f5fd12c6c79545d96896cba92fa8b4.patch";
sha256 = "077cpj0kzyqxzdya1dwh8df17zfzhqn7c685hx6iskvw2979zg2n";
})
./lift-gast-restriction.patch
(fetchpatch {
# fix compilation with numpy >= 1.19
name = "add-const-overload.patch";
url = "https://github.com/tensorflow/tensorflow/commit/75ea0b31477d6ba9e990e296bbbd8ca4e7eebadf.patch";
sha256 = "1xp1icacig0xm0nmb05sbrf4nw4xbln9fhc308birrv8286zx7wv";
})
# cuda 10.2 does not have "-bin2c-path" option anymore
# https://github.com/tensorflow/tensorflow/issues/34429
../cuda-10.2-no-bin2c-path.patch
];
# On update, it can be useful to steal the changes from gentoo
# https://gitweb.gentoo.org/repo/gentoo.git/tree/sci-libs/tensorflow
nativeBuildInputs = [
swig which pythonEnv
] ++ lib.optional cudaSupport addOpenGLRunpath;
buildInputs = [
jemalloc
openmpi
glibcLocales
git
# libs taken from system through the TF_SYS_LIBS mechanism
# grpc
sqlite
openssl
jsoncpp
pkgs.protobuf
curl
snappy
flatbuffers
icu
double-conversion
libpng
libjpeg
giflib
re2
pkgs.lmdb
] ++ lib.optionals cudaSupport [
cudatoolkit
cudnn
nvidia_x11
] ++ lib.optionals mklSupport [
mkl
] ++ lib.optionals stdenv.isDarwin [
Foundation
Security
];
# arbitrarily set to the current latest bazel version, overly careful
TF_IGNORE_MAX_BAZEL_VERSION = true;
# Take as many libraries from the system as possible. Keep in sync with
# list of valid syslibs in
# https://github.com/tensorflow/tensorflow/blob/master/third_party/systemlibs/syslibs_configure.bzl
TF_SYSTEM_LIBS = lib.concatStringsSep "," [
"absl_py"
"astor_archive"
"boringssl"
# Not packaged in nixpkgs
# "com_github_googleapis_googleapis"
# "com_github_googlecloudplatform_google_cloud_cpp"
"com_google_protobuf"
"com_googlesource_code_re2"
"curl"
"cython"
"double_conversion"
"flatbuffers"
"gast_archive"
"gif_archive"
# Lots of errors, requires an older version
# "grpc"
"hwloc"
"icu"
"jpeg"
"jsoncpp_git"
"keras_applications_archive"
"lmdb"
"nasm"
# "nsync" # not packaged in nixpkgs
"opt_einsum_archive"
"org_sqlite"
"pasta"
"pcre"
"png_archive"
"six_archive"
"snappy"
"swig"
"termcolor_archive"
"wrapt"
"zlib_archive"
];
INCLUDEDIR = "${includes_joined}/include";
PYTHON_BIN_PATH = pythonEnv.interpreter;
TF_NEED_GCP = true;
TF_NEED_HDFS = true;
TF_ENABLE_XLA = tfFeature xlaSupport;
CC_OPT_FLAGS = " ";
# https://github.com/tensorflow/tensorflow/issues/14454
TF_NEED_MPI = tfFeature cudaSupport;
TF_NEED_CUDA = tfFeature cudaSupport;
TF_CUDA_PATHS = lib.optionalString cudaSupport "${cudatoolkit_joined},${cudnn},${nccl}";
GCC_HOST_COMPILER_PREFIX = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin";
GCC_HOST_COMPILER_PATH = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin/gcc";
TF_CUDA_COMPUTE_CAPABILITIES = lib.concatStringsSep "," cudaCapabilities;
postPatch = ''
# https://github.com/tensorflow/tensorflow/issues/20919
sed -i '/androidndk/d' tensorflow/lite/kernels/internal/BUILD
# Tensorboard pulls in a bunch of dependencies, some of which may
# include security vulnerabilities. So we make it optional.
# https://github.com/tensorflow/tensorflow/issues/20280#issuecomment-400230560
sed -i '/tensorboard >=/d' tensorflow/tools/pip_package/setup.py
substituteInPlace tensorflow/tools/pip_package/setup.py \
--replace "numpy >= 1.16.0, < 1.19.0" "numpy >= 1.16.0"
# glibc 2.31+ does not have sys/sysctl.h
# see https://github.com/tensorflow/tensorflow/issues/45861
substituteInPlace third_party/hwloc/BUILD.bazel\
--replace "#define HAVE_SYS_SYSCTL_H 1" "#undef HAVE_SYS_SYSCTL_H"
'';
preConfigure = let
opt_flags = []
++ lib.optionals sse42Support ["-msse4.2"]
++ lib.optionals avx2Support ["-mavx2"]
++ lib.optionals fmaSupport ["-mfma"];
in ''
patchShebangs configure
# dummy ldconfig
mkdir dummy-ldconfig
echo "#!${stdenv.shell}" > dummy-ldconfig/ldconfig
chmod +x dummy-ldconfig/ldconfig
export PATH="$PWD/dummy-ldconfig:$PATH"
export PYTHON_LIB_PATH="$NIX_BUILD_TOP/site-packages"
export CC_OPT_FLAGS="${lib.concatStringsSep " " opt_flags}"
mkdir -p "$PYTHON_LIB_PATH"
# To avoid mixing Python 2 and Python 3
unset PYTHONPATH
'';
configurePhase = ''
runHook preConfigure
./configure
runHook postConfigure
'';
# FIXME: Tensorflow uses dlopen() for CUDA libraries.
NIX_LDFLAGS = lib.optionalString cudaSupport "-lcudart -lcublas -lcufft -lcurand -lcusolver -lcusparse -lcudnn";
hardeningDisable = [ "format" ];
bazelFlags = [
# temporary fixes to make the build work with bazel 0.27
"--incompatible_no_support_tools_in_action_inputs=false"
];
bazelBuildFlags = [
"--config=opt" # optimize using the flags set in the configure phase
]
++ lib.optionals (mklSupport) [ "--config=mkl" ];
bazelTarget = "//tensorflow/tools/pip_package:build_pip_package //tensorflow/tools/lib_package:libtensorflow";
fetchAttrs = {
# So that checksums don't depend on these.
TF_SYSTEM_LIBS = null;
# cudaSupport causes fetch of ncclArchive, resulting in different hashes
sha256 = if cudaSupport then
"1bi6aydidgi943hiqj0d279jbz2g173hvafdqla1ifw2qdsm73pb"
else
"0l5510fr8n22c4hx9llr0vqqhx9wlgkyxl55fxbixhssd0ai05r4";
};
buildAttrs = {
outputs = [ "out" "python" ];
preBuild = ''
patchShebangs .
'';
installPhase = ''
mkdir -p "$out"
tar -xf bazel-bin/tensorflow/tools/lib_package/libtensorflow.tar.gz -C "$out"
# Write pkgconfig file.
mkdir "$out/lib/pkgconfig"
cat > "$out/lib/pkgconfig/tensorflow.pc" << EOF
Name: TensorFlow
Version: ${version}
Description: Library for computation using data flow graphs for scalable machine learning
Requires:
Libs: -L$out/lib -ltensorflow
Cflags: -I$out/include/tensorflow
EOF
# build the source code, then copy it to $python (build_pip_package
# actually builds a symlink farm so we must dereference them).
bazel-bin/tensorflow/tools/pip_package/build_pip_package --src "$PWD/dist"
cp -Lr "$PWD/dist" "$python"
'';
postFixup = lib.optionalString cudaSupport ''
find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
addOpenGLRunpath "$lib"
done
'';
};
meta = with stdenv.lib; {
description = "Computation using data flow graphs for scalable machine learning";
homepage = "http://tensorflow.org";
license = licenses.asl20;
maintainers = with maintainers; [ jyp abbradar ];
platforms = with platforms; linux ++ darwin;
# The py2 build fails due to some issue importing protobuf. Possibly related to the fix in
# https://github.com/akesandgren/easybuild-easyblocks/commit/1f2e517ddfd1b00a342c6abb55aef3fd93671a2b
broken = !(xlaSupport -> cudaSupport) || !isPy3k;
};
};
in buildPythonPackage {
inherit version pname;
disabled = isPy27 || (pythonAtLeast "3.8");
src = bazel-build.python;
# Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
# and the propagated input tensorflow-tensorboard, which causes environment collisions.
# Another possibility would be to have tensorboard only in the buildInputs
# https://github.com/tensorflow/tensorflow/blob/v1.7.1/tensorflow/tools/pip_package/setup.py#L79
postInstall = ''
rm $out/bin/tensorboard
'';
setupPyGlobalFlags = [ "--project_name ${pname}" ];
# tensorflow/tools/pip_package/setup.py
propagatedBuildInputs = [
absl-py
astor
gast
google-pasta
keras-applications
keras-preprocessing
numpy
six
protobuf
tensorflow-estimator_1
termcolor
wrapt
grpcio
opt-einsum
] ++ lib.optionals (!isPy3k) [
mock
future
functools32
] ++ lib.optionals (pythonOlder "3.4") [
backports_weakref enum34
] ++ lib.optionals withTensorboard [
tensorflow-tensorboard_1
];
nativeBuildInputs = lib.optional cudaSupport addOpenGLRunpath;
postFixup = lib.optionalString cudaSupport ''
find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
addOpenGLRunpath "$lib"
done
'';
# Actual tests are slow and impure.
# TODO try to run them anyway
# TODO better test (files in tensorflow/tools/ci_build/builds/*test)
checkPhase = ''
${python.interpreter} <<EOF
# A simple "Hello world"
import tensorflow as tf
hello = tf.constant("Hello, world!")
sess = tf.Session()
sess.run(hello)
# Fit a simple model to random data
import numpy as np
np.random.seed(0)
tf.random.set_random_seed(0)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(1, activation="linear")
])
model.compile(optimizer="sgd", loss="mse")
x = np.random.uniform(size=(1,1))
y = np.random.uniform(size=(1,))
model.fit(x, y, epochs=1)
# regression test for #77626
from tensorflow.contrib import tensor_forest
EOF
'';
passthru = {
deps = bazel-build.deps;
libtensorflow = bazel-build.out;
};
meta = bazel-build.meta // {
broken = gast.version != "0.3.2";
};
}
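
For context, a hedged sketch of how this source build was typically consumed before the removal. It leans on the same override mechanism as the tensorflowWithCuda alias in the python-packages.nix hunk at the end of this diff; python37Packages and allowUnfree are assumptions (the expression disables itself on Python 3.8+, and the CUDA toolchain is unfree):

# Usage sketch, not from this commit:
with import <nixpkgs> { config.allowUnfree = true; };
python37Packages.tensorflow_1.override { cudaSupport = true; }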


@@ -1,13 +0,0 @@
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index 992f2eae22..d9386f9b13 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -54,7 +54,7 @@ REQUIRED_PACKAGES = [
'enum34 >= 1.1.6;python_version<"3.4"',
# functools comes with python3, need to install the backport for python2
'functools32 >= 3.2.3;python_version<"3"',
- 'gast == 0.2.2',
+ 'gast >= 0.2.2',
'google_pasta >= 0.1.6',
'keras_applications >= 1.0.8',
'keras_preprocessing >= 1.0.5',


@@ -1,33 +0,0 @@
#!/usr/bin/env bash
version=1.14.0
hashfile=binary-hashes.nix
rm -f $hashfile
echo "{" >> $hashfile
echo "version = \"$version\";" >> $hashfile
for sys in "linux" "mac"; do
for tfpref in "cpu/tensorflow" "gpu/tensorflow_gpu"; do
for pykind in "py2-none-any" "py3-none-any" "cp27-none-linux_x86_64" "cp35-cp35m-linux_x86_64" "cp36-cp36m-linux_x86_64" "cp37-cp37m-linux_x86_64"; do
if [ $sys == "mac" ]; then
[[ $pykind =~ py.* ]] && [[ $tfpref =~ cpu.* ]]
result=$?
pyver=${pykind:2:1}
flavour=cpu
else
[[ $pykind =~ .*linux.* ]]
result=$?
pyver=${pykind:2:2}
flavour=${tfpref:0:3}
fi
if [ $result == 0 ]; then
url=https://storage.googleapis.com/tensorflow/$sys/$tfpref-$version-$pykind.whl
hash=$(nix-prefetch-url $url)
echo "${sys}_py_${pyver}_${flavour} = {" >> $hashfile
echo " url = \"$url\";" >> $hashfile
echo " sha256 = \"$hash\";" >> $hashfile
echo "};" >> $hashfile
fi
done
done
done
echo "}" >> $hashfile


@@ -1,10 +0,0 @@
--- a/third_party/nccl/build_defs.bzl.tpl
+++ b/third_party/nccl/build_defs.bzl.tpl
@@ -113,7 +113,6 @@ def _device_link_impl(ctx):
"--cmdline=--compile-only",
"--link",
"--compress-all",
- "--bin2c-path=%s" % bin2c.dirname,
"--create=%s" % tmp_fatbin.path,
"--embedded-fatbin=%s" % fatbin_h.path,
] + images,


@@ -108,7 +108,7 @@ let
patches = [
# Fixes for NixOS jsoncpp
../system-jsoncpp.patch
./system-jsoncpp.patch
./relax-dependencies.patch


@@ -1,14 +0,0 @@
diff --git a/tensorflow/cc/saved_model/BUILD b/tensorflow/cc/saved_model/BUILD
index 8626ed0087..27deb34387 100644
--- a/tensorflow/cc/saved_model/BUILD
+++ b/tensorflow/cc/saved_model/BUILD
@@ -49,9 +49,6 @@ cc_library(
# tf_lib depending on the build platform.
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
- ]) + if_mobile([
- # Mobile-friendly SavedModel proto. See go/portable-proto for more info.
- "//tensorflow/core:saved_model_portable_proto",
]) + if_android([
"//tensorflow/core:android_tensorflow_lib",
]) + if_ios([


@@ -7352,34 +7352,16 @@ in {
tensorboardx = callPackage ../development/python-modules/tensorboardx { };
tensorflow-bin_1 = callPackage ../development/python-modules/tensorflow/1/bin.nix {
tensorflow-bin_2 = callPackage ../development/python-modules/tensorflow/bin.nix {
cudaSupport = pkgs.config.cudaSupport or false;
inherit (pkgs.linuxPackages) nvidia_x11;
cudatoolkit = pkgs.cudatoolkit_10;
cudnn = pkgs.cudnn_cudatoolkit_10;
};
tensorflow-bin_2 = callPackage ../development/python-modules/tensorflow/2/bin.nix {
cudaSupport = pkgs.config.cudaSupport or false;
inherit (pkgs.linuxPackages) nvidia_x11;
cudatoolkit = pkgs.cudatoolkit_10;
cudnn = pkgs.cudnn_cudatoolkit_10;
};
tensorflow-bin = self.tensorflow-bin_2;
tensorflow-bin = self.tensorflow-bin_1;
tensorflow-build_1 = callPackage ../development/python-modules/tensorflow/1 {
cudaSupport = pkgs.config.cudaSupport or false;
inherit (pkgs.linuxPackages) nvidia_x11;
cudatoolkit = pkgs.cudatoolkit_10;
cudnn = pkgs.cudnn_cudatoolkit_10;
nccl = pkgs.nccl_cudatoolkit_10;
openssl = pkgs.openssl_1_1;
inherit (pkgs.darwin.apple_sdk.frameworks) Foundation Security;
inherit (pkgs) flatbuffers;
};
tensorflow-build_2 = callPackage ../development/python-modules/tensorflow/2 {
tensorflow-build_2 = callPackage ../development/python-modules/tensorflow {
cudaSupport = pkgs.config.cudaSupport or false;
cudatoolkit = pkgs.cudatoolkit_11;
cudnn = pkgs.cudnn_cudatoolkit_11;
@@ -7389,25 +7371,20 @@
inherit (pkgs) flatbuffers;
};
tensorflow-build = self.tensorflow-build_1;
tensorflow-build = self.tensorflow-build_2;
tensorflow-estimator_1 = callPackage ../development/python-modules/tensorflow-estimator/1 { };
tensorflow-estimator_2 = callPackage ../development/python-modules/tensorflow-estimator { };
tensorflow-estimator_2 = callPackage ../development/python-modules/tensorflow-estimator/2 { };
tensorflow-estimator = self.tensorflow-estimator_1;
tensorflow-estimator = self.tensorflow-estimator_2;
tensorflow-probability = callPackage ../development/python-modules/tensorflow-probability { };
tensorflow = self.tensorflow_1;
tensorflow_1 = self.tensorflow-build_1;
tensorflow = self.tensorflow_2;
tensorflow_2 = self.tensorflow-build_2;
tensorflow-tensorboard_1 = callPackage ../development/python-modules/tensorflow-tensorboard/1 { };
tensorflow-tensorboard_2 = callPackage ../development/python-modules/tensorflow-tensorboard { };
tensorflow-tensorboard_2 = callPackage ../development/python-modules/tensorflow-tensorboard/2 { };
tensorflow-tensorboard = self.tensorflow-tensorboard_1;
tensorflow-tensorboard = self.tensorflow-tensorboard_2;
tensorflowWithCuda = self.tensorflow.override { cudaSupport = true; };
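
Net effect of this hunk: the versioned _1 attributes are gone and every unversioned alias now resolves to the 2.x package. A hedged sketch of what downstream code sees after the merge:

# After this commit (summary sketch, not literal file content):
#   tensorflow             -> tensorflow_2             (source build)
#   tensorflow-bin         -> tensorflow-bin_2         (prebuilt wheel)
#   tensorflow-estimator   -> tensorflow-estimator_2
#   tensorflow-tensorboard -> tensorflow-tensorboard_2
# e.g. an environment that previously got the 1.15.x build now gets 2.x:
with import <nixpkgs> { };
python3.withPackages (ps: [ ps.tensorflow ])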