Merge branch 'master' into closure-size

TODO: there was more significant refactoring of qtbase and plasma 5.5
on master, and I'm deferring pointing to correct outputs to later.
Vladimír Čunát 2015-12-31 09:47:26 +01:00
commit f9f6f41bff
951 changed files with 117997 additions and 19014 deletions


@ -1,5 +1,4 @@
{
-// users in this list will never be mentioned by mention-bot
"userBlacklist": [
"civodul"
]


@ -1,5 +1,8 @@
# How to contribute
Note: contributing implies licensing those contributions
under the terms of [COPYING](./COPYING), which is an MIT-like license.
## Opening issues
* Make sure you have a [GitHub account](https://github.com/signup/free)


@ -1,4 +1,4 @@
-Copyright (c) 2003-2006 Eelco Dolstra
Copyright (c) 2003-2015 Eelco Dolstra and the Nixpkgs/NixOS contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the


@ -1,10 +1,12 @@
with import ./.. { };
with lib;
let
sources = sourceFilesBySuffices ./. [".xml"];
sources-langs = ./languages-frameworks;
in
stdenv.mkDerivation {
name = "nixpkgs-manual";
-sources = sourceFilesBySuffices ./. [".xml"];
buildInputs = [ pandoc libxml2 libxslt ];
@ -18,25 +20,39 @@ stdenv.mkDerivation {
--param callout.graphics.extension '.gif'
'';
-buildCommand = ''
-  {
-    echo "<chapter xmlns=\"http://docbook.org/ns/docbook\""
-    echo " xmlns:xlink=\"http://www.w3.org/1999/xlink\""
-    echo " xml:id=\"users-guide-to-the-haskell-infrastructure\">"
-    echo ""
-    echo "<title>User's Guide to the Haskell Infrastructure</title>"
-    echo ""
-    pandoc ${./haskell-users-guide.md} -w docbook | \
-      sed -e 's|<ulink url=|<link xlink:href=|' \
-          -e 's|</ulink>|</link>|' \
-          -e 's|<sect. id=|<section xml:id=|' \
-          -e 's|</sect[0-9]>|</section>|'
-    echo ""
-    echo "</chapter>"
-  } >haskell-users-guide.xml
-  ln -s "$sources/"*.xml .
buildCommand = let toDocbook = { useChapters ? false, inputFile, outputFile }:
let
extraHeader = ''xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" '';
in ''
{
pandoc '${inputFile}' -w docbook ${optionalString useChapters "--chapters"} \
| sed -e 's|<ulink url=|<link xlink:href=|' \
-e 's|</ulink>|</link>|' \
-e 's|<sect. id=|<section xml:id=|' \
-e 's|</sect[0-9]>|</section>|' \
-e '1s| id=| xml:id=|' \
-e '1s|\(<[^ ]* \)|\1${extraHeader}|'
} > '${outputFile}'
'';
in
''
ln -s '${sources}/'*.xml .
mkdir ./languages-frameworks
cp -s '${sources-langs}'/* ./languages-frameworks
''
+ toDocbook {
inputFile = ./haskell-users-guide.md;
outputFile = "haskell-users-guide.xml";
useChapters = true;
}
+ toDocbook {
inputFile = ./../pkgs/development/idris-modules/README.md;
outputFile = "languages-frameworks/idris.xml";
}
+ ''
cat languages-frameworks/idris.xml
echo ${nixpkgsVersion} > .version
xmllint --noout --nonet --xinclude --noxincludenode \


@ -277,7 +277,7 @@ c = lib.makeOverridable f { a = 1; b = 2; }</programlisting>
multiPkgs = pkgs: (with pkgs;
  [ udev
    alsaLib
- ]) ++ (with [];
  ]);
runScript = "bash";
}).env
]]></programlisting>


@ -3,8 +3,10 @@ title: User's Guide for Haskell in Nixpkgs
author: Peter Simons
date: 2015-06-01
---
# User's Guide to the Haskell Infrastructure
-# How to install Haskell packages
## How to install Haskell packages
Nixpkgs distributes build instructions for all Haskell packages registered on
[Hackage](http://hackage.haskell.org/), but strangely enough normal Nix package
@ -111,9 +113,9 @@ version of GHC listed above, there exists a package set based on that compiler.
Also, the attributes `haskell.compiler.ghcXYC` and
`haskell.packages.ghcXYC.ghc` are synonymous for the sake of convenience.
-# How to create a development environment
## How to create a development environment
-## How to install a compiler
### How to install a compiler
A simple development environment consists of a Haskell compiler and the tool
`cabal-install`, and we saw in section [How to install Haskell packages] how
@ -162,7 +164,7 @@ anymore once `nix-shell` has terminated. If you find that your Haskell builds
no longer work after garbage collection, then you'll have to re-run `cabal
configure` inside of a new `nix-shell` environment.
-## How to install a compiler with libraries
### How to install a compiler with libraries
GHC expects to find all installed libraries inside of its own `lib` directory.
This approach works fine on traditional Unix systems, but it doesn't work for
@ -257,7 +259,7 @@ environment in your profile:
export NIX_GHC_LIBDIR="$HOME/.nix-profile/lib/ghc-$($NIX_GHC --numeric-version)"
fi
-## How to install a compiler with libraries, hoogle and documentation indexes
### How to install a compiler with libraries, hoogle and documentation indexes
If you plan to use your environment for interactive programming, not just
compiling random Haskell code, you might want to replace `ghcWithPackages` in
@ -319,7 +321,7 @@ page](http://kb.mozillazine.org/Links_to_local_pages_do_not_work) for
workarounds.
-## How to create ad hoc environments for `nix-shell`
### How to create ad hoc environments for `nix-shell`
The easiest way to create an ad hoc development environment is to run
`nix-shell` with the appropriate GHC environment given on the command-line:
@ -369,14 +371,14 @@ development commands. Note that you need `cabal-install` installed in your
`$PATH` already to use it here --- the `nix-shell` environment does not provide
it.
-# How to create Nix builds for your own private Haskell packages
## How to create Nix builds for your own private Haskell packages
If your own Haskell packages have build instructions for Cabal, then you can
convert those automatically into build instructions for Nix using the
`cabal2nix` utility, which you can install into your profile by running
`nix-env -i cabal2nix`.
-## How to build a stand-alone project
### How to build a stand-alone project
For example, let's assume that you're working on a private project called
`foo`. To generate a Nix build expression for it, change into the project's
@ -433,7 +435,7 @@ You can even use that generated file to run `nix-build`, too:
$ nix-build shell.nix
-## How to build projects that depend on each other
### How to build projects that depend on each other
If you have multiple private Haskell packages that depend on each other, then
you'll have to register those packages in the Nixpkgs set to make them visible
@ -468,9 +470,9 @@ or enter an interactive shell environment suitable for building them:
$ nix-shell "<nixpkgs>" -A haskellPackages.bar.env
-# Miscellaneous Topics
## Miscellaneous Topics
-## How to build with profiling enabled
### How to build with profiling enabled
Every Haskell package set takes a function called `overrides` that you can use
to manipulate the package as much as you please. One useful application of this
@ -494,7 +496,7 @@ following snippet in your `~/.nixpkgs/config.nix` file:
Then, replace instances of `haskellPackages` in the `cabal2nix`-generated
`default.nix` or `shell.nix` files with `profiledHaskellPackages`.
-## How to override package versions in a compiler-specific package set
### How to override package versions in a compiler-specific package set
Nixpkgs provides the latest version of
[`ghc-events`](http://hackage.haskell.org/package/ghc-events), which is 0.4.4.0
@ -560,7 +562,7 @@ prefer one built with GHC 7.8.x in the first place. However, for users who
cannot use GHC 7.10.x at all for some reason, the approach of downgrading to an
older version might be useful.
-## How to recover from GHC's infamous non-deterministic library ID bug
### How to recover from GHC's infamous non-deterministic library ID bug
GHC and distributed build farms don't get along well:
@ -586,7 +588,7 @@ command, i.e. by running:
rm /nix/var/nix/manifests/*
rm /nix/var/nix/channel-cache/*
-## Builds on Darwin fail with `math.h` not found
### Builds on Darwin fail with `math.h` not found
Users of GHC on Darwin have occasionally reported that builds fail, because the
compiler complains about a missing include file:
@ -603,7 +605,7 @@ can configure the environment variables
in their `~/.bashrc` file to avoid the compiler error.
-## Using Stack together with Nix
### Using Stack together with Nix
-- While building package zlib-0.5.4.2 using:
runhaskell -package=Cabal-1.22.4.0 -clear-package-db [... lots of flags ...]
@ -666,7 +668,7 @@ to find out the store path of the system's zlib library. Now, you can
The same thing applies to `cabal configure`, of course, if you're
building with `cabal-install` instead of Stack.
-## Creating statically linked binaries
### Creating statically linked binaries
There are two levels of static linking. The first option is to configure the
build with the Cabal flag `--disable-executable-dynamic`. In Nix expressions,
@ -688,7 +690,7 @@ as shared libraries only, i.e. there is just no static library available that
Cabal could link!
-# Other resources
## Other resources
- The Youtube video [Nix Loves Haskell](https://www.youtube.com/watch?v=BsBhi_r-OeE)
provides an introduction into Haskell NG aimed at beginners. The slides are

File diff suppressed because it is too large


@ -0,0 +1,41 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-coq">
<title>Coq</title>
<para>
Coq libraries should be installed in
<literal>$(out)/lib/coq/${coq.coq-version}/user-contrib/</literal>.
Such directories are automatically added to the
<literal>$COQPATH</literal> environment variable by the hook defined
in the Coq derivation.
</para>
<para>
Some libraries require OCaml and sometimes also Camlp5. The exact
versions that were used to build Coq are saved in the
<literal>coq.ocaml</literal> and <literal>coq.camlp5</literal>
attributes.
</para>
<para>
Here is a simple package example. It is a pure Coq library, thus it
only depends on Coq. Its <literal>makefile</literal> has been
generated using <literal>coq_makefile</literal> so we only have to
set the <literal>$COQLIB</literal> variable at install time.
</para>
<programlisting>
{stdenv, fetchurl, coq}:
stdenv.mkDerivation {
src = fetchurl {
url = http://coq.inria.fr/pylons/contribs/files/Karatsuba/v8.4/Karatsuba.tar.gz;
sha256 = "0ymfpv4v49k4fm63nq6gcl1hbnnxrvjjp7yzc4973n49b853c5b1";
};
name = "coq-karatsuba";
buildInputs = [ coq ];
installFlags = "COQLIB=$(out)/lib/coq/${coq.coq-version}/";
}
</programlisting>
</section>


@ -0,0 +1,124 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-go">
<title>Go</title>
<para>The function <varname>buildGoPackage</varname> builds
standard Go packages.
</para>
<example xml:id='ex-buildGoPackage'><title>buildGoPackage</title>
<programlisting>
net = buildGoPackage rec {
name = "go.net-${rev}";
goPackagePath = "golang.org/x/net"; <co xml:id='ex-buildGoPackage-1' />
subPackages = [ "ipv4" "ipv6" ]; <co xml:id='ex-buildGoPackage-2' />
rev = "e0403b4e005";
src = fetchFromGitHub {
inherit rev;
owner = "golang";
repo = "net";
sha256 = "1g7cjzw4g4301a3yqpbk8n1d4s97sfby2aysl275x04g0zh8jxqp";
};
goPackageAliases = [ "code.google.com/p/go.net" ]; <co xml:id='ex-buildGoPackage-3' />
propagatedBuildInputs = [ goPackages.text ]; <co xml:id='ex-buildGoPackage-4' />
buildFlags = "--tags release"; <co xml:id='ex-buildGoPackage-5' />
disabled = isGo13;<co xml:id='ex-buildGoPackage-6' />
};
</programlisting>
</example>
<para><xref linkend='ex-buildGoPackage'/> is an example expression using buildGoPackage;
the following arguments are of special significance to the function:
<calloutlist>
<callout arearefs='ex-buildGoPackage-1'>
<para>
<varname>goPackagePath</varname> specifies the package's canonical Go import path.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-2'>
<para>
<varname>subPackages</varname> restricts the build to the listed child packages. If
<varname>subPackages</varname> is not specified, all child
packages will be built.
</para>
<para>
In this example only <literal>code.google.com/p/go.net/ipv4</literal> and
<literal>code.google.com/p/go.net/ipv6</literal> will be built.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-3'>
<para>
<varname>goPackageAliases</varname> is a list of alternative import paths
that are valid for this library.
Packages that depend on this library will automatically rename
import paths that match any of the aliases to <literal>goPackagePath</literal>.
</para>
<para>
In this example imports will be renamed from
<literal>code.google.com/p/go.net</literal> to
<literal>golang.org/x/net</literal> in every package that depends on the
<literal>go.net</literal> library.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-4'>
<para>
<varname>propagatedBuildInputs</varname> is where the dependencies of a Go library are
listed. Only libraries should list <varname>propagatedBuildInputs</varname>. If a standalone
program is being built instead, use <varname>buildInputs</varname>. If a library's tests require
additional dependencies that are not propagated, they should be listed in <varname>buildInputs</varname>.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-5'>
<para>
<varname>buildFlags</varname> is a list of flags passed to the go build command.
</para>
</callout>
<callout arearefs='ex-buildGoPackage-6'>
<para>
If <varname>disabled</varname> is <literal>true</literal>,
nix will refuse to build this package.
</para>
<para>
In this example the package will not be built for Go 1.3. <literal>isGo13</literal>
is a utility function that returns <literal>true</literal> if the Go version used to build
the package is 1.3.x.
</para>
</callout>
</calloutlist>
</para>
<para>
Reusable Go libraries may be found in the <varname>goPackages</varname> set. You can test
build a Go package as follows:
<screen>
$ nix-build -A goPackages.net
</screen>
</para>
<para>
You may use Go packages installed into the active Nix profiles by adding
the following to your ~/.bashrc:
<screen>
for p in $NIX_PROFILES; do
GOPATH="$p/share/go:$GOPATH"
done
</screen>
</para>
<para>To extract dependency information from a Go package in an automated way, use <link xlink:href="https://github.com/cstrahan/go2nix">go2nix</link>.</para>
</section>


@ -0,0 +1,43 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-language-support">
<title>Support for specific programming languages and frameworks</title>
<para>The <link linkend="chap-stdenv">standard build
environment</link> makes it easy to build typical Autotools-based
packages with very little code. Any other kind of package can be
accommodated by overriding the appropriate phases of
<literal>stdenv</literal>. However, there are specialised functions
in Nixpkgs to easily build packages for other programming languages,
such as Perl or Haskell. These are described in this chapter.</para>
<xi:include href="perl.xml" />
<xi:include href="python.xml" />
<xi:include href="ruby.xml" />
<xi:include href="go.xml" />
<xi:include href="java.xml" />
<xi:include href="lua.xml" />
<xi:include href="coq.xml" />
<xi:include href="idris.xml" /> <!-- generated from ../../pkgs/development/idris-modules/README.md -->
<xi:include href="qt.xml" />
<!--
<section><title>Haskell</title>
<para>TODO</para>
</section>
<section><title>TeX / LaTeX</title>
<para>* Special support for building TeX documents</para>
</section>
-->
</chapter>


@ -0,0 +1,84 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-java">
<title>Java</title>
<para>Ant-based Java packages are typically built from source as follows:
<programlisting>
stdenv.mkDerivation {
name = "...";
src = fetchurl { ... };
buildInputs = [ jdk ant ];
buildPhase = "ant";
}
</programlisting>
Note that <varname>jdk</varname> is an alias for the OpenJDK.</para>
<para>JAR files that are intended to be used by other packages should
be installed in <filename>$out/share/java</filename>. The OpenJDK has
a stdenv setup hook that adds any JARs in the
<filename>share/java</filename> directories of the build inputs to the
<envar>CLASSPATH</envar> environment variable. For instance, if the
package <literal>libfoo</literal> installs a JAR named
<filename>foo.jar</filename> in its <filename>share/java</filename>
directory, and another package declares the attribute
<programlisting>
buildInputs = [ jdk libfoo ];
</programlisting>
then <envar>CLASSPATH</envar> will be set to
<filename>/nix/store/...-libfoo/share/java/foo.jar</filename>.</para>
<para>Private JARs
should be installed in a location like
<filename>$out/share/<replaceable>package-name</replaceable></filename>.</para>
<para>If your Java package provides a program, you need to generate a
wrapper script to run it using the OpenJRE. You can use
<literal>makeWrapper</literal> for this:
<programlisting>
buildInputs = [ makeWrapper ];
installPhase =
''
mkdir -p $out/bin
makeWrapper ${jre}/bin/java $out/bin/foo \
--add-flags "-cp $out/share/java/foo.jar org.foo.Main"
'';
</programlisting>
Note the use of <literal>jre</literal>, which is the part of the
OpenJDK package that contains the Java Runtime Environment. By using
<literal>${jre}/bin/java</literal> instead of
<literal>${jdk}/bin/java</literal>, you prevent your package from
depending on the JDK at runtime.</para>
<para>It is possible to use a different Java compiler than
<command>javac</command> from the OpenJDK. For instance, to use the
Eclipse Java Compiler:
<programlisting>
buildInputs = [ jre ant ecj ];
</programlisting>
(Note that here you don't need the full JDK as an input, but just the
JRE.) The ECJ has a stdenv setup hook that sets some environment
variables to cause Ant to use ECJ, but this doesn't work with all Ant
files. Similarly, you can use the GNU Java Compiler:
<programlisting>
buildInputs = [ gcj ant ];
</programlisting>
Here, Ant will automatically use <command>gij</command> (the GNU Java
Runtime) instead of the OpenJRE.</para>
</section>


@ -0,0 +1,51 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-lua">
<title>Lua</title>
<para>
Lua packages are built by the <varname>buildLuaPackage</varname> function. This function is
implemented
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules/generic/default.nix">
<filename>pkgs/development/lua-modules/generic/default.nix</filename></link>
and works similarly to <varname>buildPerlPackage</varname>. (See
<xref linkend="sec-language-perl"/> for details.)
</para>
<para>
Lua packages are defined
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/lua-packages.nix"><filename>pkgs/top-level/lua-packages.nix</filename></link>.
Most of them are simple. For example:
<programlisting>
fileSystem = buildLuaPackage {
name = "filesystem-1.6.2";
src = fetchurl {
url = "https://github.com/keplerproject/luafilesystem/archive/v1_6_2.tar.gz";
sha256 = "1n8qdwa20ypbrny99vhkmx8q04zd2jjycdb5196xdhgvqzk10abz";
};
meta = {
homepage = "https://github.com/keplerproject/luafilesystem";
hydraPlatforms = stdenv.lib.platforms.linux;
maintainers = with maintainers; [ flosse ];
};
};
</programlisting>
</para>
<para>
More complicated packages, however, should be placed in a separate file in
<link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/lua-modules"><filename>pkgs/development/lua-modules</filename></link>.
</para>
<para>
Lua packages accept an additional parameter, <varname>disabled</varname>, which defines
the condition for excluding the package from <varname>luaPackages</varname>. For example, if a package has
<varname>disabled</varname> set to <literal>lua.luaversion != "5.1"</literal>,
it will not be included in any luaPackages except lua51Packages, i.e. it will
only be built for Lua 5.1.
</para>
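<!-- Illustrative sketch (hypothetical package; not part of the committed file):
     restricting a package to Lua 5.1 with the disabled parameter described above. -->
<programlisting>
foo = buildLuaPackage {
  name = "foo-1.0";                     # hypothetical package
  src = fetchurl { ... };               # source elided
  disabled = lua.luaversion != "5.1";   # only built for Lua 5.1
};
</programlisting>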
</section>


@ -0,0 +1,181 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-perl">
<title>Perl</title>
<para>Nixpkgs provides a function <varname>buildPerlPackage</varname>,
a generic package builder function for any Perl package that has a
standard <varname>Makefile.PL</varname>. It's implemented in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/perl-modules/generic"><filename>pkgs/development/perl-modules/generic</filename></link>.</para>
<para>Perl packages from CPAN are defined in <link
xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/perl-packages.nix"><filename>pkgs/top-level/perl-packages.nix</filename></link>,
rather than <filename>pkgs/all-packages.nix</filename>. Most Perl
packages are so straight-forward to build that they are defined here
directly, rather than having a separate function for each package
called from <filename>perl-packages.nix</filename>. However, more
complicated packages should be put in a separate file, typically in
<filename>pkgs/development/perl-modules</filename>. Here is an
example of the former:
<programlisting>
ClassC3 = buildPerlPackage rec {
name = "Class-C3-0.21";
src = fetchurl {
url = "mirror://cpan/authors/id/F/FL/FLORA/${name}.tar.gz";
sha256 = "1bl8z095y4js66pwxnm7s853pi9czala4sqc743fdlnk27kq94gz";
};
};
</programlisting>
Note the use of <literal>mirror://cpan/</literal>, and the
<literal>${name}</literal> in the URL definition to ensure that the
name attribute is consistent with the source that we're actually
downloading. Perl packages are made available in
<filename>all-packages.nix</filename> through the variable
<varname>perlPackages</varname>. For instance, if you have a package
that needs <varname>ClassC3</varname>, you would typically write
<programlisting>
foo = import ../path/to/foo.nix {
inherit stdenv fetchurl ...;
inherit (perlPackages) ClassC3;
};
</programlisting>
in <filename>all-packages.nix</filename>. You can test building a
Perl package as follows:
<screen>
$ nix-build -A perlPackages.ClassC3
</screen>
<varname>buildPerlPackage</varname> adds <literal>perl-</literal> to
the start of the name attribute, so the package above is actually
called <literal>perl-Class-C3-0.21</literal>. So to install it, you
can say:
<screen>
$ nix-env -i perl-Class-C3
</screen>
(Of course you can also install using the attribute name:
<literal>nix-env -i -A perlPackages.ClassC3</literal>.)</para>
<para>So what does <varname>buildPerlPackage</varname> do? It does
the following:
<orderedlist>
<listitem><para>In the configure phase, it calls <literal>perl
Makefile.PL</literal> to generate a Makefile. You can set the
variable <varname>makeMakerFlags</varname> to pass flags to
<filename>Makefile.PL</filename></para></listitem>
<listitem><para>It adds the contents of the <envar>PERL5LIB</envar>
environment variable to the <literal>#! .../bin/perl</literal> line of
Perl scripts as <literal>-I<replaceable>dir</replaceable></literal>
flags. This ensures that a script can find its
dependencies.</para></listitem>
<listitem><para>In the fixup phase, it writes the propagated build
inputs (<varname>propagatedBuildInputs</varname>) to the file
<filename>$out/nix-support/propagated-user-env-packages</filename>.
<command>nix-env</command> recursively installs all packages listed
in this file when you install a package that has it. This ensures
that a Perl package can find its dependencies.</para></listitem>
</orderedlist>
</para>
<para><varname>buildPerlPackage</varname> is built on top of
<varname>stdenv</varname>, so everything can be customised in the
usual way. For instance, the <literal>BerkeleyDB</literal> module has
a <varname>preConfigure</varname> hook to generate a configuration
file used by <filename>Makefile.PL</filename>:
<programlisting>
{ buildPerlPackage, fetchurl, db }:
buildPerlPackage rec {
name = "BerkeleyDB-0.36";
src = fetchurl {
url = "mirror://cpan/authors/id/P/PM/PMQS/${name}.tar.gz";
sha256 = "07xf50riarb60l1h6m2dqmql8q5dij619712fsgw7ach04d8g3z1";
};
preConfigure = ''
echo "LIB = ${db}/lib" > config.in
echo "INCLUDE = ${db}/include" >> config.in
'';
}
</programlisting>
</para>
<para>Dependencies on other Perl packages can be specified in the
<varname>buildInputs</varname> and
<varname>propagatedBuildInputs</varname> attributes. If something is
exclusively a build-time dependency, use
<varname>buildInputs</varname>; if it's (also) a runtime dependency,
use <varname>propagatedBuildInputs</varname>. For instance, this
builds a Perl module that has runtime dependencies on a bunch of other
modules:
<programlisting>
ClassC3Componentised = buildPerlPackage rec {
name = "Class-C3-Componentised-1.0004";
src = fetchurl {
url = "mirror://cpan/authors/id/A/AS/ASH/${name}.tar.gz";
sha256 = "0xql73jkcdbq4q9m0b0rnca6nrlvf5hyzy8is0crdk65bynvs8q1";
};
propagatedBuildInputs = [
ClassC3 ClassInspector TestException MROCompat
];
};
</programlisting>
</para>
<section xml:id="ssec-generation-from-CPAN"><title>Generation from CPAN</title>
<para>Nix expressions for Perl packages can be generated (almost)
automatically from CPAN. This is done by the program
<command>nix-generate-from-cpan</command>, which can be installed
as follows:</para>
<screen>
$ nix-env -i nix-generate-from-cpan
</screen>
<para>This program takes a Perl module name, looks it up on CPAN,
fetches and unpacks the corresponding package, and prints a Nix
expression on standard output. For example:
<screen>
$ nix-generate-from-cpan XML::Simple
XMLSimple = buildPerlPackage {
name = "XML-Simple-2.20";
src = fetchurl {
url = mirror://cpan/authors/id/G/GR/GRANTM/XML-Simple-2.20.tar.gz;
sha256 = "5cff13d0802792da1eb45895ce1be461903d98ec97c9c953bc8406af7294434a";
};
propagatedBuildInputs = [ XMLNamespaceSupport XMLSAX XMLSAXExpat ];
meta = {
description = "Easily read/write XML (esp config files)";
license = "perl";
};
};
</screen>
The output can be pasted into
<filename>pkgs/top-level/perl-packages.nix</filename> or wherever else
you need it.</para>
</section>
</section>


@ -0,0 +1,447 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-python">
<title>Python</title>
<para>
Currently supported interpreters are <varname>python26</varname>, <varname>python27</varname>,
<varname>python33</varname>, <varname>python34</varname>, <varname>python35</varname>
and <varname>pypy</varname>.
</para>
<para>
<varname>python</varname> is an alias to <varname>python27</varname> and <varname>python3</varname> is an alias to <varname>python34</varname>.
</para>
<para>
<varname>python26</varname> and <varname>python27</varname> do not include modules that require
external dependencies (to reduce dependency bloat). The following modules need to be added as
<varname>buildInputs</varname> explicitly (a usage sketch follows this list):
</para>
<itemizedlist>
<listitem><para><varname>python.modules.bsddb</varname></para></listitem>
<listitem><para><varname>python.modules.curses</varname></para></listitem>
<listitem><para><varname>python.modules.curses_panel</varname></para></listitem>
<listitem><para><varname>python.modules.crypt</varname></para></listitem>
<listitem><para><varname>python.modules.gdbm</varname></para></listitem>
<listitem><para><varname>python.modules.sqlite3</varname></para></listitem>
<listitem><para><varname>python.modules.tkinter</varname></para></listitem>
<listitem><para><varname>python.modules.readline</varname></para></listitem>
</itemizedlist>
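<!-- Illustrative sketch (hypothetical package; not part of the committed file):
     adding one of the modules listed above as an explicit build input. -->
<programlisting>
buildPythonPackage {
  name = "foo-1.0";                           # hypothetical package
  src = ./.;                                  # assumed local source
  buildInputs = [ python.modules.sqlite3 ];   # module listed above
}
</programlisting>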
<para>For convenience <varname>python27Full</varname> and <varname>python26Full</varname>
are provided with all modules included.</para>
<para>
Python packages that
use <link xlink:href="http://pypi.python.org/pypi/setuptools/"><literal>setuptools</literal></link> or <literal>distutils</literal>
can be built using the <varname>buildPythonPackage</varname> function as documented below.
</para>
<para>
All packages depending on any Python interpreter get <varname>$out/${python.sitePackages}</varname> appended
to <literal>$PYTHONPATH</literal> if such a directory exists.
</para>
<variablelist>
<title>
Useful attributes on interpreter packages:
</title>
<varlistentry>
<term><varname>libPrefix</varname></term>
<listitem><para>
Name of the folder in <literal>${python}/lib/</literal> for the corresponding interpreter.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>interpreter</varname></term>
<listitem><para>
Alias for <literal>${python}/bin/${executable}.</literal>
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>buildEnv</varname></term>
<listitem><para>
Function to build python interpreter environments with extra packages bundled together.
See <xref linkend="ssec-python-build-env" /> for usage and documentation.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>sitePackages</varname></term>
<listitem><para>
Alias for <literal>lib/${libPrefix}/site-packages</literal>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>executable</varname></term>
<listitem><para>
Name of the interpreter executable, e.g. <literal>python3.4</literal>.
</para></listitem>
</varlistentry>
</variablelist>
<section xml:id="ssec-build-python-package"><title><varname>buildPythonPackage</varname> function</title>
<para>
The function is implemented in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix">
<filename>pkgs/development/python-modules/generic/default.nix</filename></link>.
Example usage:
<programlisting language="nix">
twisted = buildPythonPackage {
name = "twisted-8.1.0";
src = pkgs.fetchurl {
url = http://tmrc.mit.edu/mirror/twisted/Twisted/8.1/Twisted-8.1.0.tar.bz2;
sha256 = "0q25zbr4xzknaghha72mq57kh53qw1bf8csgp63pm9sfi72qhirl";
};
propagatedBuildInputs = [ self.ZopeInterface ];
meta = {
homepage = http://twistedmatrix.com/;
description = "Twisted, an event-driven networking engine written in Python";
license = stdenv.lib.licenses.mit;
};
};
</programlisting>
Most Python packages that use <varname>buildPythonPackage</varname> are defined
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/python-packages.nix"><filename>pkgs/top-level/python-packages.nix</filename></link>
and generated for each python interpreter separately into attribute sets <varname>python26Packages</varname>,
<varname>python27Packages</varname>, <varname>python35Packages</varname>, <varname>python33Packages</varname>,
<varname>python34Packages</varname> and <varname>pypyPackages</varname>.
</para>
<para>
<function>buildPythonPackage</function> mainly does four things:
<orderedlist>
<listitem><para>
In the <varname>buildPhase</varname>, it calls
<literal>${python.interpreter} setup.py bdist_wheel</literal> to build a wheel binary zipfile.
</para></listitem>
<listitem><para>
In the <varname>installPhase</varname>, it installs the wheel file using
<literal>pip install *.whl</literal>.
</para></listitem>
<listitem><para>
In the <varname>postFixup</varname> phase, <literal>wrapPythonPrograms</literal>
bash function is called to wrap all programs in <filename>$out/bin/*</filename>
directory to include <literal>$PYTHONPATH</literal> and <literal>$PATH</literal>
environment variables.
</para></listitem>
<listitem><para>
In the <varname>installCheck</varname> phase, <literal>${python.interpreter} setup.py test</literal>
is run.
</para></listitem>
</orderedlist>
</para>
<para>By default <varname>doCheck = true</varname> is set.</para>
<para>
As in Perl, dependencies on other Python packages can be specified in the
<varname>buildInputs</varname> and
<varname>propagatedBuildInputs</varname> attributes. If something is
exclusively a build-time dependency, use
<varname>buildInputs</varname>; if it's (also) a runtime dependency,
use <varname>propagatedBuildInputs</varname>.
</para>
<para>
By default <varname>meta.platforms</varname> is set to the same value
as the interpreter unless overridden.
</para>
<variablelist>
<title>
<varname>buildPythonPackage</varname> parameters
(all parameters from <varname>mkDerivation</varname> function are still supported)
</title>
<varlistentry>
<term><varname>namePrefix</varname></term>
<listitem><para>
Text prepended to the <varname>${name}</varname> parameter.
Defaults to <literal>"python3.3-"</literal> for Python 3.3, etc. Set it to
<literal>""</literal>
if you're packaging an application or a command line tool.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>disabled</varname></term>
<listitem><para>
If <varname>true</varname>, the package is not built for the
particular Python interpreter version. Grep around
<filename>pkgs/top-level/python-packages.nix</filename>
for examples.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>setupPyBuildFlags</varname></term>
<listitem><para>
List of flags passed to <command>setup.py build_ext</command> command.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>pythonPath</varname></term>
<listitem><para>
List of packages to be added into <literal>$PYTHONPATH</literal>.
Packages in <varname>pythonPath</varname> are not propagated
(contrary to <varname>propagatedBuildInputs</varname>).
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>preShellHook</varname></term>
<listitem><para>
Hook to execute commands before <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postShellHook</varname></term>
<listitem><para>
Hook to execute commands after <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>makeWrapperArgs</varname></term>
<listitem><para>
A list of strings. Arguments to be passed to
<varname>makeWrapper</varname>, which wraps generated binaries. By
default, the arguments to <varname>makeWrapper</varname> set
<varname>PATH</varname> and <varname>PYTHONPATH</varname> environment
variables before calling the binary. Additional arguments here can
allow a developer to set environment variables which will be
available when the binary is run. For example,
<varname>makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]</varname>.
</para></listitem>
</varlistentry>
</variablelist>
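<!-- Illustrative sketch (hypothetical package; not part of the committed file):
     a few of the parameters described above in use. -->
<programlisting>
foo = buildPythonPackage rec {
  name = "foo-0.1";                   # hypothetical package
  namePrefix = "";                    # an application, so drop the "pythonX.Y-" prefix
  disabled = isPy3k;                  # assume it only runs on Python 2
  src = pkgs.fetchurl { ... };        # source elided
  pythonPath = [ self.requests ];     # on $PYTHONPATH, but not propagated
};
</programlisting>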
</section>
<section xml:id="ssec-python-build-env"><title><function>python.buildEnv</function> function</title>
<para>
Create Python environments using the low-level <function>pkgs.buildEnv</function> function. Example <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};
python.buildEnv.override {
extraLibs = [ pkgs.pythonPackages.pyramid ];
ignoreCollisions = true;
}]]>
</programlisting>
Running <command>nix-build</command> will create
<filename>/nix/store/cf1xhjwzmdki7fasgr4kz6di72ykicl5-python-2.7.8-env</filename>
with wrapped binaries in <filename>bin/</filename>.
</para>
<para>
You can also use the <varname>env</varname> attribute to create local
environments with needed packages installed (somewhat comparable to
<literal>virtualenv</literal>). For example, with the following
<filename>shell.nix</filename>:
<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};
(python3.buildEnv.override {
extraLibs = with python3Packages;
[ numpy
requests
];
}).env]]>
</programlisting>
Running <command>nix-shell</command> will drop you into a shell where
<command>python</command> will have specified packages in its path.
</para>
<variablelist>
<title>
<function>python.buildEnv</function> arguments
</title>
<varlistentry>
<term><varname>extraLibs</varname></term>
<listitem><para>
List of packages installed inside the environment.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postBuild</varname></term>
<listitem><para>
Shell command executed after the build of environment.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>ignoreCollisions</varname></term>
<listitem><para>
Ignore file collisions inside the environment (default is <varname>false</varname>).
</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="ssec-python-tools"><title>Tools</title>
<para>Packages inside nixpkgs are written by hand. However, many tools
exist in the community to help save time. No tool is preferred at the moment.
</para>
<itemizedlist>
<listitem><para>
<link xlink:href="https://github.com/proger/python2nix">python2nix</link>
by Vladimir Kirillov
</para></listitem>
<listitem><para>
<link xlink:href="https://github.com/garbas/pypi2nix">pypi2nix</link>
by Rok Garbas
</para></listitem>
<listitem><para>
<link xlink:href="https://github.com/offlinehacker/pypi2nix">pypi2nix</link>
by Jaka Hudoklin
</para></listitem>
</itemizedlist>
</section>
<section xml:id="ssec-python-development"><title>Development</title>
<para>
To develop Python packages, <function>buildPythonPackage</function> has
additional logic inside <varname>shellPhase</varname> to run
<command>pip install -e . --prefix $TMPDIR/</command> for the package.
</para>
<warning><para><varname>shellPhase</varname> is executed only if <filename>setup.py</filename>
exists.</para></warning>
<para>
Given a <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[with import <nixpkgs> {};
buildPythonPackage {
name = "myproject";
buildInputs = with pkgs.pythonPackages; [ pyramid ];
src = ./.;
}]]>
</programlisting>
Running <command>nix-shell</command> with no arguments should give you
the environment in which the package would be built with
<command>nix-build</command>.
</para>
<para>
Shortcut to set up environments with C headers/libraries and Python packages:
<programlisting language="bash">$ nix-shell -p pythonPackages.pyramid zlib libjpeg git</programlisting>
</para>
<note><para>
There is a boolean value <varname>lib.inNixShell</varname> set to
<varname>true</varname> when nix-shell is invoked (see the sketch below).
</para></note>
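<!-- Illustrative sketch (not part of the committed file): one common use of lib.inNixShell. -->
<programlisting>
src = if lib.inNixShell then null else ./.;   # skip copying the source when only entering a shell
</programlisting>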
</section>
<section xml:id="ssec-python-faq"><title>FAQ</title>
<variablelist>
<varlistentry>
<term>How to solve circular dependencies?</term>
<listitem><para>
If you have packages <varname>A</varname> and <varname>B</varname> that
depend on each other, then when packaging <varname>B</varname>, override package
<varname>A</varname> so that it does not depend on <varname>B</varname> as an input
(and vice versa).
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>install_data / data_files</varname> problems resulting in <literal>error: could not create '/nix/store/6l1bvljpy8gazlsw2aw9skwwp4pmvyxw-python-2.7.8/etc': Permission denied</literal></term>
<listitem><para>
<link xlink:href="https://bitbucket.org/pypa/setuptools/issue/130/install_data-doesnt-respect-prefix">
Known bug in setuptools: <varname>install_data</varname> does not respect <literal>--prefix</literal></link>. An example of
a package using this feature is <filename>pkgs/tools/X11/xpra/default.nix</filename>. As a workaround,
install it in an extra <varname>preInstall</varname> step:
<programlisting>${python.interpreter} setup.py install_data --install-dir=$out --root=$out
sed -i '/ = data_files/d' setup.py</programlisting>
</para></listitem>
</varlistentry>
<varlistentry>
<term>Rationale of non-existent global site-packages</term>
<listitem><para>
There is no need to have global site-packages in Nix. Each package has an isolated
dependency tree, and installing any Python package will only populate <varname>$PATH</varname>
inside the user environment. See <xref linkend="ssec-python-build-env" /> to create a self-contained
interpreter with a set of packages.
</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="ssec-python-contrib"><title>Contributing guidelines</title>
<para>
The following rules should be respected:
</para>
<itemizedlist>
<listitem><para>
Make sure the package builds for all Python interpreters. Use the <varname>disabled</varname> argument to
<function>buildPythonPackage</function> to mark unsupported interpreters.
</para></listitem>
<listitem><para>
If tests need to be disabled for a package, make sure you leave a comment explaining why.
</para></listitem>
<listitem><para>
Packages in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/python-packages.nix"><filename>pkgs/top-level/python-packages.nix</filename></link>
are sorted quasi-alphabetically to avoid merge conflicts.
</para></listitem>
</itemizedlist>
</section>
</section>


@ -0,0 +1,70 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-qt">
<title>Qt</title>
<para>The information in this section applies to Qt 5.5 and later.</para>
<para>Qt is an application development toolkit for C++. Although it is
not a distinct programming language, there are special considerations
for packaging Qt-based programs and libraries. A small set of tools
and conventions has grown out of these considerations.</para>
<section xml:id="ssec-qt-libraries"><title>Libraries</title>
<para>Packages that provide libraries should be listed in
<varname>qt5LibsFun</varname> so that the library is built with each
Qt version. A set of packages is provided for each version of Qt; for
example, <varname>qt5Libs</varname> always provides libraries built
with the latest version, <varname>qt55Libs</varname> provides
libraries built with Qt 5.5, and so on. To avoid version conflicts, no
top-level attributes are created for these packages.</para>
</section>
<section xml:id="ssec-qt-programs"><title>Programs</title>
<para>Application packages do not need to be built with every Qt
version. To ensure consistency between the package's dependencies,
call the package with <literal>qt5Libs.callPackage</literal> instead
of the usual <literal>callPackage</literal>. An older version may be
selected in case of incompatibility. For example, to build with Qt
5.5, call the package with
<literal>qt55Libs.callPackage</literal>.</para>
<para>Several environment variables must be set at runtime for Qt
applications to function correctly, including:</para>
<itemizedlist>
<listitem><para><envar>QT_PLUGIN_PATH</envar></para></listitem>
<listitem><para><envar>QML_IMPORT_PATH</envar></para></listitem>
<listitem><para><envar>QML2_IMPORT_PATH</envar></para></listitem>
<listitem><para><envar>XDG_DATA_DIRS</envar></para></listitem>
</itemizedlist>
<para>To ensure that these are set correctly, the program must be wrapped by
invoking <literal>wrapQtProgram <replaceable>program</replaceable></literal>
during installation (for example, during
<literal>fixupPhase</literal>). <literal>wrapQtProgram</literal>
accepts the same options as <literal>makeWrapper</literal>.
</para>
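<!-- Illustrative sketch (hypothetical program name; not part of the committed file):
     wrapping a Qt program during installation as described above. -->
<programlisting>
postFixup = ''
  wrapQtProgram "$out/bin/foo"
'';
</programlisting>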
</section>
<section xml:id="ssec-qt-kde"><title>KDE</title>
<para>Many of the considerations above also apply to KDE packages,
especially the need to set the correct environment variables at
runtime. To ensure that this is done, invoke <literal>wrapKDEProgram
<replaceable>program</replaceable></literal> during
installation. <literal>wrapKDEProgram</literal> also generates a
<literal>ksycoca</literal> database so that required data and services
can be found. Like its Qt counterpart,
<literal>wrapKDEProgram</literal> accepts the same options as
<literal>makeWrapper</literal>.</para>
</section>
</section>


@ -0,0 +1,46 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="sec-language-ruby">
<title>Ruby</title>
<para>There is currently support for bundling applications that are packaged as Ruby gems. The utility "bundix" allows you to write a <filename>Gemfile</filename>, let Bundler create a <filename>Gemfile.lock</filename>, and then convert
this into a Nix expression that contains all gem dependencies automatically.</para>
<para>For example, to package sensu, we did:</para>
<screen>
<![CDATA[$ cd pkgs/servers/monitoring
$ mkdir sensu
$ cat > Gemfile
source 'https://rubygems.org'
gem 'sensu'
$ bundler package --path /tmp/vendor/bundle
$ $(nix-build '<nixpkgs>' -A bundix)/bin/bundix
$ cat > default.nix
{ lib, bundlerEnv, ruby }:
bundlerEnv {
name = "sensu-0.17.1";
inherit ruby;
gemfile = ./Gemfile;
lockfile = ./Gemfile.lock;
gemset = ./gemset.nix;
meta = with lib; {
description = "A monitoring framework that aims to be simple, malleable,
and scalable.";
homepage = http://sensuapp.org/;
license = with licenses; mit;
maintainers = with maintainers; [ theuni ];
platforms = platforms.unix;
};
}]]>
</screen>
<para>Please check in the <filename>Gemfile</filename>, <filename>Gemfile.lock</filename> and the <filename>gemset.nix</filename> so future updates can be run easily.
</para>
</section>


@ -12,10 +12,10 @@
<xi:include href="introduction.xml" /> <xi:include href="introduction.xml" />
<xi:include href="quick-start.xml" /> <xi:include href="quick-start.xml" />
<xi:include href="stdenv.xml" /> <xi:include href="stdenv.xml" />
<xi:include href="packageconfig.xml" /> <xi:include href="configuration.xml" />
<xi:include href="functions.xml" /> <xi:include href="functions.xml" />
<xi:include href="meta.xml" /> <xi:include href="meta.xml" />
<xi:include href="language-support.xml" /> <xi:include href="languages-frameworks/index.xml" />
<xi:include href="package-notes.xml" /> <xi:include href="package-notes.xml" />
<xi:include href="coding-conventions.xml" /> <xi:include href="coding-conventions.xml" />
<xi:include href="submitting-changes.xml" /> <xi:include href="submitting-changes.xml" />


@ -261,23 +261,72 @@ Additional information.
<para>Hydra builds for master and staging should not be used as testing platform, it's a build farm for changes that have been already tested.</para>
</listitem>
-<listitem>
-<para>Master should only see non-breaking commits that do not cause mass rebuilds.</para>
-</listitem>
-<listitem>
-<para>Staging should only see non-breaking mass-rebuild commits. That means it's not to be used for testing, and changes must have been well tested already. <link xlink:href="http://comments.gmane.org/gmane.linux.distributions.nixos/13447">Read policy here</link>.</para>
-</listitem>
-<listitem>
-<para>If staging is already in a broken state, please refrain from adding extra new breakages. Stabilize it for a few days, merge into master, then resume development on staging. <link xlink:href="http://hydra.nixos.org/jobset/nixpkgs/staging#tabs-evaluations">Keep an eye on the staging evaluations here</link>. If any fixes for staging happen to be already in master, then master can be merged into staging.</para>
-</listitem>
<listitem>
<para>When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people's installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from @edolstra.</para>
</listitem>
</itemizedlist>
<section>
<title>Master branch</title>
<itemizedlist>
<listitem>
<para>
It should only see non-breaking commits that do not cause mass rebuilds.
</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Staging branch</title>
<itemizedlist>
<listitem>
<para>
It's only for non-breaking mass-rebuild commits. That means it's not to
be used for testing, and changes must have been well tested already.
<link xlink:href="http://comments.gmane.org/gmane.linux.distributions.nixos/13447">Read policy here</link>.
</para>
</listitem>
<listitem>
<para>
If the branch is already in a broken state, please refrain from adding
extra new breakages. Stabilize it for a few days, merge into master,
then resume development on staging.
<link xlink:href="http://hydra.nixos.org/jobset/nixpkgs/staging#tabs-evaluations">Keep an eye on the staging evaluations here</link>.
If any fixes for staging happen to be already in master, then master can
be merged into staging.
</para>
</listitem>
</itemizedlist>
</section>
<section>
<title>Stable release branches</title>
<itemizedlist>
<listitem>
<para>
If you're cherry-picking a commit to a stable release branch, always use
<command>git cherry-pick -xe</command> and ensure the message contains a
clear description about why this needs to be included in the stable
branch.
</para>
<para>An example of a cherry-picked commit would look like this:</para>
<screen>
nixos: Refactor the world.
The original commit message describing the reason why the world was torn apart.
(cherry picked from commit abcdef)
Reason: I just had a gut feeling that this would also be wanted by people from
the stone age.
</screen>
</listitem>
</itemizedlist>
</section>
</section>
</chapter>


@ -23,6 +23,17 @@ rec {
then attrByPath (tail attrPath) default e.${attr}
else default;
/* Return if an attribute from nested attribute set exists.
For instance ["x" "y"] applied to some set e returns true, if e.x.y exists. False
is returned otherwise. */
hasAttrByPath = attrPath: e:
let attr = head attrPath;
in
if attrPath == [] then true
else if e ? ${attr}
then hasAttrByPath (tail attrPath) e.${attr}
else false;
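# Illustrative usage (not part of the committed file):
#   hasAttrByPath ["x" "y"] { x.y = 1; }  =>  true
#   hasAttrByPath ["x" "z"] { x.y = 1; }  =>  false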
/* Return nested attribute set in which an attribute is set. For instance
["x" "y"] applied with some value v returns `x.y = v;' */


@ -85,6 +85,26 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Creative Commons Zero v1.0 Universal"; fullName = "Creative Commons Zero v1.0 Universal";
}; };
cc-by-nc-sa-20 = spdx {
spdxId = "CC-BY-NC-SA-2.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 2.0";
};
cc-by-nc-sa-25 = spdx {
spdxId = "CC-BY-NC-SA-2.5";
fullName = "Creative Commons Attribution Non Commercial Share Alike 2.5";
};
cc-by-nc-sa-30 = spdx {
spdxId = "CC-BY-NC-SA-3.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 3.0";
};
cc-by-nc-sa-40 = spdx {
spdxId = "CC-BY-NC-SA-4.0";
fullName = "Creative Commons Attribution Non Commercial Share Alike 4.0";
};
cc-by-sa-25 = spdx {
spdxId = "CC-BY-SA-2.5";
fullName = "Creative Commons Attribution Share Alike 2.5";


@ -81,6 +81,7 @@
devhell = "devhell <\"^\"@regexmail.net>";
dezgeg = "Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>";
dfoxfranke = "Daniel Fox Franke <dfoxfranke@gmail.com>";
dgonyeo = "Derek Gonyeo <derek@gonyeo.com>";
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
dochang = "Desmond O. Chang <dochang@gmail.com>";
doublec = "Chris Double <chris.double@double.co.nz>";
@ -93,12 +94,12 @@
elasticdog = "Aaron Bull Schaefer <aaron@elasticdog.com>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
ehmry = "Emery Hemingway <emery@vfemail.net>";
enolan = "Echo Nolan <echo@echonolan.net>";
epitrochoid = "Mabry Cervin <mpcervin@uncg.edu>";
ericbmerritt = "Eric Merritt <eric@afiniate.com>";
ericsagnes = "Eric Sagnes <eric.sagnes@gmail.com>";
erikryb = "Erik Rybakken <erik.rybakken@math.ntnu.no>";
ertes = "Ertugrul Söylemez <ertesx@gmx.de>";
exi = "Reno Reckling <nixos@reckling.org>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
@ -119,6 +120,7 @@
gebner = "Gabriel Ebner <gebner@gebner.org>";
gfxmonk = "Tim Cuthbertson <tim@gfxmonk.net>";
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
gleber = "Gleb Peregud <gleber.p@gmail.com>";
globin = "Robin Gloster <robin@glob.in>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
gridaphobe = "Eric Seidel <eric@seidel.io>";
@ -141,7 +143,8 @@
jcumming = "Jack Cummings <jack@mudshark.org>"; jcumming = "Jack Cummings <jack@mudshark.org>";
jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>"; jefdaj = "Jeffrey David Johnson <jefdaj@gmail.com>";
jfb = "James Felix Black <james@yamtime.com>"; jfb = "James Felix Black <james@yamtime.com>";
jgeerds = "Jascha Geerds <jg@ekby.de>"; jgeerds = "Jascha Geerds <jascha@jgeerds.name>";
jgillich = "Jakob Gillich <jakob@gillich.me>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>"; jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>"; joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
joamaki = "Jussi Maki <joamaki@gmail.com>"; joamaki = "Jussi Maki <joamaki@gmail.com>";
@ -164,6 +167,7 @@
lebastr = "Alexander Lebedev <lebastr@gmail.com>"; lebastr = "Alexander Lebedev <lebastr@gmail.com>";
leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>"; leonardoce = "Leonardo Cecchi <leonardo.cecchi@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>"; lethalman = "Luca Bruno <lucabru@src.gnome.org>";
lewo = "Antoine Eiche <lewo@abesis.fr>";
lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>"; lhvwb = "Nathaniel Baxter <nathaniel.baxter@gmail.com>";
lihop = "Leroy Hopson <nixos@leroy.geek.nz>"; lihop = "Leroy Hopson <nixos@leroy.geek.nz>";
linquize = "Linquize <linquize@yahoo.com.hk>"; linquize = "Linquize <linquize@yahoo.com.hk>";
@ -174,6 +178,7 @@
lsix = "Lancelot SIX <lsix@lancelotsix.com>"; lsix = "Lancelot SIX <lsix@lancelotsix.com>";
ludo = "Ludovic Courtès <ludo@gnu.org>"; ludo = "Ludovic Courtès <ludo@gnu.org>";
lukego = "Luke Gorrie <luke@snabb.co>"; lukego = "Luke Gorrie <luke@snabb.co>";
luispedro = "Luis Pedro Coelho <luis@luispedro.org>";
lw = "Sergey Sofeychuk <lw@fmap.me>"; lw = "Sergey Sofeychuk <lw@fmap.me>";
madjar = "Georges Dubus <georges.dubus@compiletoi.net>"; madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
magnetophon = "Bart Brouns <bart@magnetophon.nl>"; magnetophon = "Bart Brouns <bart@magnetophon.nl>";
@ -229,6 +234,7 @@
phausmann = "Philipp Hausmann <nix@314.ch>"; phausmann = "Philipp Hausmann <nix@314.ch>";
philandstuff = "Philip Potter <philip.g.potter@gmail.com>"; philandstuff = "Philip Potter <philip.g.potter@gmail.com>";
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>"; phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
phunehehe = "Hoang Xuan Phu <phunehehe@gmail.com>";
pierron = "Nicolas B. Pierron <nixos@nbp.name>"; pierron = "Nicolas B. Pierron <nixos@nbp.name>";
piotr = "Piotr Pietraszkiewicz <ppietrasa@gmail.com>"; piotr = "Piotr Pietraszkiewicz <ppietrasa@gmail.com>";
pjbarnoy = "Perry Barnoy <pjbarnoy@gmail.com>"; pjbarnoy = "Perry Barnoy <pjbarnoy@gmail.com>";
@ -239,6 +245,7 @@
pmahoney = "Patrick Mahoney <pat@polycrystal.org>"; pmahoney = "Patrick Mahoney <pat@polycrystal.org>";
pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>"; pmiddend = "Philipp Middendorf <pmidden@secure.mailbox.org>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>"; prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
profpatsch = "Profpatsch <mail@profpatsch.de>";
psibi = "Sibi <sibi@psibi.in>"; psibi = "Sibi <sibi@psibi.in>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>"; pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>"; puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
@ -262,6 +269,7 @@
rszibele = "Richard Szibele <richard_szibele@hotmail.com>"; rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
rushmorem = "Rushmore Mushambi <rushmore@webenchanter.com>"; rushmorem = "Rushmore Mushambi <rushmore@webenchanter.com>";
rvl = "Rodney Lorrimar <dev+nix@rodney.id.au>"; rvl = "Rodney Lorrimar <dev+nix@rodney.id.au>";
rvlander = "Gaëtan André <rvlander@gaetanandre.eu>";
rycee = "Robert Helgesson <robert@rycee.net>"; rycee = "Robert Helgesson <robert@rycee.net>";
samuelrivas = "Samuel Rivas <samuelrivas@gmail.com>"; samuelrivas = "Samuel Rivas <samuelrivas@gmail.com>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>"; sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
@ -315,6 +323,7 @@
vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>"; vmandela = "Venkateswara Rao Mandela <venkat.mandela@gmail.com>";
vozz = "Oliver Hunt <oliver.huntuk@gmail.com>"; vozz = "Oliver Hunt <oliver.huntuk@gmail.com>";
wedens = "wedens <kirill.wedens@gmail.com>"; wedens = "wedens <kirill.wedens@gmail.com>";
willtim = "Tim Philip Williams <tim.williams.public@gmail.com>";
winden = "Antonio Vargas Gonzalez <windenntw@gmail.com>"; winden = "Antonio Vargas Gonzalez <windenntw@gmail.com>";
wizeman = "Ricardo M. Correia <rcorreia@wizy.org>"; wizeman = "Ricardo M. Correia <rcorreia@wizy.org>";
wjlroe = "William Roe <willroe@gmail.com>"; wjlroe = "William Roe <willroe@gmail.com>";

View file

@ -237,4 +237,19 @@ rec {
then may_be_int then may_be_int
else throw "Could not convert ${str} to int."; else throw "Could not convert ${str} to int.";
# Read a list of paths from `file', relative to the `rootPath'. Lines
# beginning with `#' are treated as comments and ignored. Whitespace
# is significant.
readPathsFromFile = rootPath: file:
let
root = toString rootPath;
lines =
builtins.map (lib.removeSuffix "\n")
(lib.splitString "\n" (builtins.readFile file));
removeComments = lib.filter (line: !(lib.hasPrefix "#" line));
relativePaths = removeComments lines;
absolutePaths = builtins.map (path: builtins.toPath (root + "/" + path)) relativePaths;
in
absolutePaths;
} }
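A minimal usage sketch of the new readPathsFromFile helper; the file name and its contents below are hypothetical:

  # paths.txt (hypothetical) could contain, one path per line:
  #   # comment lines starting with `#' are skipped
  #   pkgs/top-level
  #   lib
  # Evaluating
  #   lib.readPathsFromFile ./. ./paths.txt
  # then yields, roughly, [ /path/to/repo/pkgs/top-level /path/to/repo/lib ].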

View file

@ -120,4 +120,14 @@ runTests {
expected = { success = false; value = false; }; expected = { success = false; value = false; };
}; };
testHasAttrByPathTrue = {
expr = hasAttrByPath ["a" "b"] { a = { b = "yey"; }; };
expected = true;
};
testHasAttrByPathFalse = {
expr = hasAttrByPath ["a" "b"] { a = { c = "yey"; }; };
expected = false;
};
} }

View file

@ -0,0 +1,18 @@
/* Helper expression for copy-tarballs. This returns (nearly) all
tarballs used by the free packages in Nixpkgs.
Typical usage:
$ copy-tarballs.pl --expr 'import <nixpkgs/maintainers/scripts/all-tarballs.nix>'
*/
removeAttrs (import ../../pkgs/top-level/release.nix
{ # Don't apply hydraJob to jobs, because then we can't get to the
# dependency graph.
scrubJobs = false;
# No need to evaluate on i686.
supportedSystems = [ "x86_64-linux" ];
})
[ # Remove jobs whose evaluation depends on a writable Nix store.
"tarball" "unstable"
]

View file

@ -1,5 +1,5 @@
#! /usr/bin/env nix-shell #! /usr/bin/env nix-shell
#! nix-shell -i perl -p perl perlPackages.NetAmazonS3 nixUnstable #! nix-shell -i perl -p perl perlPackages.NetAmazonS3 perlPackages.FileSlurp nixUnstable
# This command uploads tarballs to tarballs.nixos.org, the # This command uploads tarballs to tarballs.nixos.org, the
# content-addressed cache used by fetchurl as a fallback for when # content-addressed cache used by fetchurl as a fallback for when
@ -17,6 +17,7 @@ use strict;
use warnings; use warnings;
use File::Basename; use File::Basename;
use File::Path; use File::Path;
use File::Slurp;
use JSON; use JSON;
use Net::Amazon::S3; use Net::Amazon::S3;
use Nix::Store; use Nix::Store;
@ -33,9 +34,21 @@ my $s3 = Net::Amazon::S3->new(
my $bucket = $s3->bucket("nixpkgs-tarballs") or die; my $bucket = $s3->bucket("nixpkgs-tarballs") or die;
my $cacheFile = "/tmp/copy-tarballs-cache";
my %cache;
$cache{$_} = 1 foreach read_file($cacheFile, err_mode => 'quiet', chomp => 1);
END() {
write_file($cacheFile, map { "$_\n" } keys %cache);
}
sub alreadyMirrored { sub alreadyMirrored {
my ($algo, $hash) = @_; my ($algo, $hash) = @_;
return defined $bucket->get_key("$algo/$hash"); my $key = "$algo/$hash";
return 1 if defined $cache{$key};
my $res = defined $bucket->get_key($key);
$cache{$key} = 1 if $res;
return $res;
} }
sub uploadFile { sub uploadFile {
@ -50,41 +63,52 @@ sub uploadFile {
my $mainKey = "sha512/$sha512_16"; my $mainKey = "sha512/$sha512_16";
# Upload the file as sha512/<hash-in-base-16>.
print STDERR "uploading $fn to $mainKey...\n";
$bucket->add_key_filename($mainKey, $fn, { 'x-amz-meta-original-name' => $name })
or die "failed to upload $fn to $mainKey\n";
# Create redirects from the other hash types. # Create redirects from the other hash types.
sub redirect { sub redirect {
my ($name, $dest) = @_; my ($name, $dest) = @_;
#print STDERR "linking $name to $dest...\n"; #print STDERR "linking $name to $dest...\n";
$bucket->add_key($name, "", { 'x-amz-website-redirect-location' => "/" . $dest }) $bucket->add_key($name, "", { 'x-amz-website-redirect-location' => "/" . $dest })
or die "failed to create redirect from $name to $dest\n"; or die "failed to create redirect from $name to $dest\n";
$cache{$name} = 1;
} }
redirect "md5/$md5_16", $mainKey; redirect "md5/$md5_16", $mainKey;
redirect "sha1/$sha1_16", $mainKey; redirect "sha1/$sha1_16", $mainKey;
redirect "sha256/$sha256_32", $mainKey; redirect "sha256/$sha256_32", $mainKey;
redirect "sha256/$sha256_16", $mainKey; redirect "sha256/$sha256_16", $mainKey;
redirect "sha512/$sha512_32", $mainKey; redirect "sha512/$sha512_32", $mainKey;
# Upload the file as sha512/<hash-in-base-16>.
print STDERR "uploading $fn to $mainKey...\n";
$bucket->add_key_filename($mainKey, $fn, { 'x-amz-meta-original-name' => $name })
or die "failed to upload $fn to $mainKey\n";
$cache{$mainKey} = 1;
} }
my $op = $ARGV[0] // ""; my $op = shift @ARGV;
if ($op eq "--file") { if ($op eq "--file") {
my $fn = $ARGV[1] // die "$0: --file requires a file name\n"; my $res = 0;
if (alreadyMirrored("sha512", hashFile("sha512", 0, $fn))) { foreach my $fn (@ARGV) {
print STDERR "$fn is already mirrored\n"; eval {
} else { if (alreadyMirrored("sha512", hashFile("sha512", 0, $fn))) {
uploadFile($fn, basename $fn); print STDERR "$fn is already mirrored\n";
} else {
uploadFile($fn, basename $fn);
}
};
if ($@) {
warn "$@\n";
$res = 1;
}
} }
exit $res;
} }
elsif ($op eq "--expr") { elsif ($op eq "--expr") {
# Evaluate find-tarballs.nix. # Evaluate find-tarballs.nix.
my $expr = $ARGV[1] // die "$0: --expr requires a Nix expression\n"; my $expr = $ARGV[0] // die "$0: --expr requires a Nix expression\n";
my $pid = open(JSON, "-|", "nix-instantiate", "--eval-only", "--json", "--strict", my $pid = open(JSON, "-|", "nix-instantiate", "--eval", "--json", "--strict",
"<nixpkgs/maintainers/scripts/find-tarballs.nix>", "<nixpkgs/maintainers/scripts/find-tarballs.nix>",
"--arg", "expr", $expr); "--arg", "expr", $expr);
my $stdout = <JSON>; my $stdout = <JSON>;
@ -104,6 +128,11 @@ elsif ($op eq "--expr") {
my $algo = $fetch->{type}; my $algo = $fetch->{type};
my $hash = $fetch->{hash}; my $hash = $fetch->{hash};
if (defined $ENV{DEBUG}) {
print "$url $algo $hash\n";
next;
}
if ($url !~ /^http:/ && $url !~ /^https:/ && $url !~ /^ftp:/ && $url !~ /^mirror:/) { if ($url !~ /^http:/ && $url !~ /^https:/ && $url !~ /^ftp:/ && $url !~ /^mirror:/) {
print STDERR "skipping $url (unsupported scheme)\n"; print STDERR "skipping $url (unsupported scheme)\n";
next; next;
@ -138,5 +167,5 @@ elsif ($op eq "--expr") {
} }
else { else {
die "Syntax: $0 --file FILENAME | --expr EXPR\n"; die "Syntax: $0 --file FILENAMES... | --expr EXPR\n";
} }

View file

@ -1,10 +1,9 @@
# This expression returns a list of all fetchurl calls used by all # This expression returns a list of all fetchurl calls used by expr.
# packages reachable from release.nix.
with import ../.. { }; with import ../.. { };
with lib; with lib;
{ expr ? removeAttrs (import ../../pkgs/top-level/release.nix { }) [ "tarball" "unstable" ] }: { expr }:
let let

View file

@ -22,8 +22,10 @@ containers.database =
</programlisting> </programlisting>
If you run <literal>nixos-rebuild switch</literal>, the container will If you run <literal>nixos-rebuild switch</literal>, the container will
be built and started. If the container was already running, it will be be built. If the container was already running, it will be
updated in place, without rebooting.</para> updated in place, without rebooting. The container can be configured to
start automatically by setting <literal>containers.database.autoStart = true</literal>
in its configuration.</para>
<para>By default, declarative containers share the network namespace <para>By default, declarative containers share the network namespace
of the host, meaning that they can listen on (privileged) of the host, meaning that they can listen on (privileged)
@ -41,13 +43,15 @@ containers.database =
This gives the container a private virtual Ethernet interface with IP This gives the container a private virtual Ethernet interface with IP
address <literal>192.168.100.11</literal>, which is hooked up to a address <literal>192.168.100.11</literal>, which is hooked up to a
virtual Ethernet interface on the host with IP address virtual Ethernet interface on the host with IP address
<literal>192.168.100.10</literal>. (See the next section for details <literal>192.168.100.10</literal>. (See the next section for details
on container networking.)</para> on container networking.)</para>
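A hedged configuration.nix sketch combining the pieces discussed above; the option names privateNetwork, hostAddress and localAddress are taken from the NixOS container module as an assumption, and the addresses are only illustrative:

  containers.database = {
    autoStart = true;                  # start the container at boot, as described above
    privateNetwork = true;             # assumed option name; gives the container its own veth pair
    hostAddress = "192.168.100.10";    # illustrative, matches the addresses above
    localAddress = "192.168.100.11";   # illustrative
    config = { config, pkgs, ... }: {
      services.postgresql.enable = true;
    };
  };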
<para>To disable the container, just remove it from <para>To disable the container, just remove it from
<filename>configuration.nix</filename> and run <literal>nixos-rebuild <filename>configuration.nix</filename> and run <literal>nixos-rebuild
switch</literal>. Note that this will not delete the root directory of switch</literal>. Note that this will not delete the root directory of
the container in <literal>/var/lib/containers</literal>.</para> the container in <literal>/var/lib/containers</literal>. Containers can be
destroyed using the imperative method: <literal>nixos-container destroy
foo</literal>.</para>
<para>Declarative containers can be started and stopped using the <para>Declarative containers can be started and stopped using the
corresponding systemd service, e.g. <literal>systemctl start corresponding systemd service, e.g. <literal>systemctl start

View file

@ -26,6 +26,7 @@ effect after you run <command>nixos-rebuild</command>.</para>
<!-- FIXME: auto-include NixOS module docs --> <!-- FIXME: auto-include NixOS module docs -->
<xi:include href="postgresql.xml" /> <xi:include href="postgresql.xml" />
<xi:include href="acme.xml" />
<xi:include href="nixos.xml" /> <xi:include href="nixos.xml" />
<!-- Apache; libvirtd virtualisation --> <!-- Apache; libvirtd virtualisation -->

View file

@ -55,6 +55,7 @@ let
cp -prd $sources/* . # */ cp -prd $sources/* . # */
chmod -R u+w . chmod -R u+w .
cp ${../../modules/services/databases/postgresql.xml} configuration/postgresql.xml cp ${../../modules/services/databases/postgresql.xml} configuration/postgresql.xml
cp ${../../modules/security/acme.xml} configuration/acme.xml
cp ${../../modules/misc/nixos.xml} configuration/nixos.xml cp ${../../modules/misc/nixos.xml} configuration/nixos.xml
ln -s ${optionsDocBook} options-db.xml ln -s ${optionsDocBook} options-db.xml
echo "${version}" > version echo "${version}" > version

View file

@ -35,7 +35,7 @@
<command>systemctl stop network-manager</command>.</para></listitem> <command>systemctl stop network-manager</command>.</para></listitem>
<listitem><para>The NixOS installer doesnt do any partitioning or <listitem><para>The NixOS installer doesnt do any partitioning or
formatting yet, so you need to that yourself. Use the following formatting yet, so you need to do that yourself. Use the following
commands: commands:
<itemizedlist> <itemizedlist>

View file

@ -30,6 +30,7 @@ nixos.path = ./nixpkgs-unstable-2015-12-06/nixos;
<itemizedlist> <itemizedlist>
<listitem><para><literal>services/monitoring/longview.nix</literal></para></listitem> <listitem><para><literal>services/monitoring/longview.nix</literal></para></listitem>
<listitem><para><literal>services/web-apps/pump.io.nix</literal></para></listitem>
</itemizedlist> </itemizedlist>
</para> </para>
@ -113,6 +114,20 @@ nginx.override {
extra X11 options for nvidia and nouveau drivers, respectively. extra X11 options for nvidia and nouveau drivers, respectively.
</para> </para>
</listitem> </listitem>
<listitem>
<para>The <literal>Ctrl+Alt+Backspace</literal> key combination
no longer kills the X server by default.
There's a new option <option>services.xserver.enableCtrlAltBackspace</option>
allowing to enable the combination again.
</para>
</listitem>
<listitem>
<para><literal>emacsPackagesNg</literal> now contains all packages
from the ELPA, MELPA, and MELPA Stable repositories.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>

View file

@ -71,7 +71,6 @@ in
# FIXME: Implement powersave governor for sandy bridge or later Intel CPUs # FIXME: Implement powersave governor for sandy bridge or later Intel CPUs
powerManagement.cpuFreqGovernor = mkDefault "ondemand"; powerManagement.cpuFreqGovernor = mkDefault "ondemand";
powerManagement.scsiLinkPolicy = mkDefault "min_power";
systemd.targets.post-resume = { systemd.targets.post-resume = {
description = "Post-Resume Actions"; description = "Post-Resume Actions";

View file

@ -134,7 +134,7 @@ in
# !!! Hacky, should modularise. # !!! Hacky, should modularise.
postBuild = postBuild =
'' ''
if [ -x $out/bin/update-mime-database -a -w $out/share/mime/packages ]; then if [ -x $out/bin/update-mime-database -a -w $out/share/mime ]; then
XDG_DATA_DIRS=$out/share $out/bin/update-mime-database -V $out/share/mime > /dev/null XDG_DATA_DIRS=$out/share $out/bin/update-mime-database -V $out/share/mime > /dev/null
fi fi

View file

@ -43,6 +43,13 @@ let
LINUX /boot/bzImage LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
INITRD /boot/initrd INITRD /boot/initrd
# A variant to boot with 'nomodeset'
LABEL boot-nomodeset
MENU LABEL NixOS ${config.system.nixosVersion}${config.isoImage.appendToMenuLabel} (with nomodeset)
LINUX /boot/bzImage
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
INITRD /boot/initrd
''; '';
isolinuxMemtest86Entry = '' isolinuxMemtest86Entry = ''
@ -59,10 +66,18 @@ let
mkdir -p $out/EFI/boot mkdir -p $out/EFI/boot
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
mkdir -p $out/loader/entries mkdir -p $out/loader/entries
echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf echo "title NixOS Live CD" > $out/loader/entries/nixos-livecd.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
# A variant to boot with 'nomodeset'
echo "title NixOS Live CD (with nomodeset)" > $out/loader/entries/nixos-livecd-nomodeset.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset" >> $out/loader/entries/nixos-livecd-nomodeset.conf
echo "default nixos-livecd" > $out/loader/loader.conf echo "default nixos-livecd" > $out/loader/loader.conf
echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf
''; '';
@ -230,7 +245,6 @@ in
boot.kernelParams = boot.kernelParams =
[ "root=LABEL=${config.isoImage.volumeID}" [ "root=LABEL=${config.isoImage.volumeID}"
"boot.shell_on_fail" "boot.shell_on_fail"
"nomodeset"
]; ];
fileSystems."/" = fileSystems."/" =

View file

@ -42,6 +42,17 @@ let cfg = config.system.autoUpgrade; in
''; '';
}; };
dates = mkOption {
default = "04:40";
type = types.str;
description = ''
Specification (in the format described by
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>) of the time at
which the update will occur.
'';
};
}; };
}; };
@ -73,7 +84,7 @@ let cfg = config.system.autoUpgrade; in
${config.system.build.nixos-rebuild}/bin/nixos-rebuild switch ${toString cfg.flags} ${config.system.build.nixos-rebuild}/bin/nixos-rebuild switch ${toString cfg.flags}
''; '';
startAt = mkIf cfg.enable "04:40"; startAt = optionalString cfg.enable cfg.dates;
}; };
}; };
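A hedged sketch of the new dates option in use (the time given is only an example):

  system.autoUpgrade = {
    enable = true;
    dates = "04:40";   # any systemd.time(5) calendar expression, e.g. "weekly" or "daily"
  };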

View file

@ -238,6 +238,7 @@
heapster = 214; heapster = 214;
bepasty = 215; bepasty = 215;
pumpio = 216; pumpio = 216;
nm-openvpn = 217;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399! # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -453,6 +454,7 @@
calibre-server = 213; calibre-server = 213;
bepasty = 215; bepasty = 215;
pumpio = 216; pumpio = 216;
nm-openvpn = 217;
# When adding a gid, make sure it doesn't match an existing # When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal # uid. Users and groups with the same name should have equal

View file

@ -35,7 +35,7 @@ nixos.path = ./nixpkgs-16-03/nixos;
<para>Another option is to fetch a specific version of NixOS, with either <para>Another option is to fetch a specific version of NixOS, with either
the <literal>fetchTarball</literal> builtin, or the the <literal>fetchTarball</literal> builtin, or the
<literal>pkgs.fetchFromGithub</literal> function and use the result as an <literal>pkgs.fetchFromGitHub</literal> function and use the result as an
input. input.
<programlisting> <programlisting>

View file

@ -80,6 +80,7 @@
./programs/xfs_quota.nix ./programs/xfs_quota.nix
./programs/zsh/zsh.nix ./programs/zsh/zsh.nix
./rename.nix ./rename.nix
./security/acme.nix
./security/apparmor.nix ./security/apparmor.nix
./security/apparmor-suid.nix ./security/apparmor-suid.nix
./security/ca.nix ./security/ca.nix
@ -343,6 +344,7 @@
./services/networking/searx.nix ./services/networking/searx.nix
./services/networking/seeks.nix ./services/networking/seeks.nix
./services/networking/skydns.nix ./services/networking/skydns.nix
./services/networking/shairport-sync.nix
./services/networking/shout.nix ./services/networking/shout.nix
./services/networking/softether.nix ./services/networking/softether.nix
./services/networking/spiped.nix ./services/networking/spiped.nix

View file

@ -75,6 +75,8 @@ with lib;
# DNSCrypt-proxy # DNSCrypt-proxy
(mkRenamedOptionModule [ "services" "dnscrypt-proxy" "port" ] [ "services" "dnscrypt-proxy" "localPort" ]) (mkRenamedOptionModule [ "services" "dnscrypt-proxy" "port" ] [ "services" "dnscrypt-proxy" "localPort" ])
(mkRenamedOptionModule [ "services" "hostapd" "extraCfg" ] [ "services" "hostapd" "extraConfig" ])
# Options that are obsolete and have no replacement. # Options that are obsolete and have no replacement.
(mkRemovedOptionModule [ "boot" "initrd" "luks" "enable" ]) (mkRemovedOptionModule [ "boot" "initrd" "luks" "enable" ])
(mkRemovedOptionModule [ "programs" "bash" "enable" ]) (mkRemovedOptionModule [ "programs" "bash" "enable" ])

View file

@ -0,0 +1,202 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.security.acme;
certOpts = { ... }: {
options = {
webroot = mkOption {
type = types.str;
description = ''
Where the webroot of the HTTP vhost is located.
The <filename>.well-known/acme-challenge/</filename> directory
will be created automatically if it doesn't exist.
<literal>http://example.org/.well-known/acme-challenge/</literal> must also
be available (notice unencrypted HTTP).
'';
};
email = mkOption {
type = types.nullOr types.str;
default = null;
description = "Contact email address for the CA to be able to reach you.";
};
user = mkOption {
type = types.str;
default = "root";
description = "User running the ACME client.";
};
group = mkOption {
type = types.str;
default = "root";
description = "Group running the ACME client.";
};
postRun = mkOption {
type = types.lines;
default = "";
example = "systemctl reload nginx.service";
description = ''
Commands to run after certificates are re-issued. Typically
the web server and other servers using certificates need to
be reloaded.
'';
};
plugins = mkOption {
type = types.listOf (types.enum [
"cert.der" "cert.pem" "chain.der" "chain.pem" "external_pem.sh"
"fullchain.der" "fullchain.pem" "key.der" "key.pem" "account_key.json"
]);
default = [ "fullchain.pem" "key.pem" "account_key.json" ];
description = ''
Plugins to enable. With default settings simp_le will
store the public certificate bundle in <filename>fullchain.pem</filename>
and the private key in <filename>key.pem</filename> in its state directory.
'';
};
extraDomains = mkOption {
type = types.attrsOf (types.nullOr types.str);
default = {};
example = {
"example.org" = "/srv/http/nginx";
"mydomain.org" = null;
};
description = ''
Extra domain names for which certificates are to be issued, with their
own server roots if needed.
'';
};
};
};
in
{
###### interface
options = {
security.acme = {
directory = mkOption {
default = "/var/lib/acme";
type = types.str;
description = ''
Directory where certs and other state will be stored by default.
'';
};
validMin = mkOption {
type = types.int;
default = 30 * 24 * 3600;
description = "Minimum remaining validity before renewal in seconds.";
};
renewInterval = mkOption {
type = types.str;
default = "weekly";
description = ''
Systemd calendar expression when to check for renewal. See
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>.
'';
};
certs = mkOption {
default = { };
type = types.loaOf types.optionSet;
description = ''
Attribute set of certificates to get signed and renewed.
'';
options = [ certOpts ];
example = {
"example.com" = {
webroot = "/var/www/challenges/";
email = "foo@example.com";
extraDomains = { "www.example.com" = null; "foo.example.com" = "/var/www/foo/"; };
};
"bar.example.com" = {
webroot = "/var/www/challenges/";
email = "bar@example.com";
};
};
};
};
};
###### implementation
config = mkMerge [
(mkIf (cfg.certs != { }) {
systemd.services = flip mapAttrs' cfg.certs (cert: data:
let
cpath = "${cfg.directory}/${cert}";
cmdline = [ "-v" "-d" cert "--default_root" data.webroot "--valid_min" cfg.validMin ]
++ optionals (data.email != null) [ "--email" data.email ]
++ concatMap (p: [ "-f" p ]) data.plugins
++ concatLists (mapAttrsToList (name: root: [ "-d" (if root == null then name else "${name}:${root}")]) data.extraDomains);
in nameValuePair
("acme-${cert}")
({
description = "ACME cert renewal for ${cert} using simp_le";
after = [ "network.target" ];
serviceConfig = {
Type = "oneshot";
SuccessExitStatus = [ "0" "1" ];
PermissionsStartOnly = true;
User = data.user;
Group = data.group;
PrivateTmp = true;
};
path = [ pkgs.simp_le ];
preStart = ''
mkdir -p '${cfg.directory}'
if [ ! -d '${cpath}' ]; then
mkdir -m 700 '${cpath}'
chown '${data.user}:${data.group}' '${cpath}'
fi
'';
script = ''
cd '${cpath}'
set +e
simp_le ${concatMapStringsSep " " (arg: escapeShellArg (toString arg)) cmdline}
EXITCODE=$?
set -e
echo "$EXITCODE" > /tmp/lastExitCode
exit "$EXITCODE"
'';
postStop = ''
if [ -e /tmp/lastExitCode ] && [ "$(cat /tmp/lastExitCode)" = "0" ]; then
echo "Executing postRun hook..."
${data.postRun}
fi
'';
})
);
systemd.timers = flip mapAttrs' cfg.certs (cert: data: nameValuePair
("acme-${cert}")
({
description = "timer for ACME cert renewal of ${cert}";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.renewInterval;
Unit = "acme-${cert}.service";
};
})
);
})
{ meta.maintainers = with lib.maintainers; [ abbradar fpletz globin ];
meta.doc = ./acme.xml;
}
];
}
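For orientation, a hand-assembled illustration of the argument list the service builds for one certificate; the domain, webroot and email are hypothetical, and the real unit shell-escapes every element before running simp_le:

  # Given
  #   security.acme.certs."example.com" = {
  #     webroot = "/var/www/challenges";
  #     email = "foo@example.com";
  #     extraDomains."www.example.com" = null;
  #   };
  # cmdline evaluates to roughly
  #   [ "-v" "-d" "example.com" "--default_root" "/var/www/challenges"
  #     "--valid_min" 2592000
  #     "--email" "foo@example.com"
  #     "-f" "fullchain.pem" "-f" "key.pem" "-f" "account_key.json"
  #     "-d" "www.example.com" ]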

View file

@ -0,0 +1,69 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-security-acme">
<title>SSL/TLS Certificates with ACME</title>
<para>NixOS supports automatic domain validation &amp; certificate
retrieval and renewal using the ACME protocol. This is currently only
implemented by and for Let's Encrypt. The alternative ACME client
<literal>simp_le</literal> is used under the hood.</para>
<section><title>Prerequisites</title>
<para>You need to have a running HTTP server for verification. The server must
have a webroot defined that can serve
<filename>.well-known/acme-challenge</filename>. This directory must be
writeable by the user that will run the ACME client.</para>
<para>For instance, this generic snippet could be used for Nginx:
<programlisting>
http {
server {
server_name _;
listen 80;
listen [::]:80;
location /.well-known/acme-challenge {
root /var/www/challenges;
}
location / {
return 301 https://$host$request_uri;
}
}
}
</programlisting>
</para>
</section>
<section><title>Configuring</title>
<para>To enable ACME certificate retrieval &amp; renewal for a certificate for
<literal>foo.example.com</literal>, add the following in your
<filename>configuration.nix</filename>:
<programlisting>
security.acme.certs."foo.example.com" = {
webroot = "/var/www/challenges";
email = "foo@example.com";
};
</programlisting>
</para>
<para>The private key <filename>key.pem</filename> and certificate
<filename>fullchain.pem</filename> will be put into
<filename>/var/lib/acme/foo.example.com</filename>. The target directory can
be configured with the option <literal>security.acme.directory</literal>.
</para>
<para>Refer to <xref linkend="ch-options" /> for all available configuration
options for the <literal>security.acme</literal> module.</para>
</section>
</chapter>

View file

@ -11,17 +11,8 @@ let
mopidyConf = writeText "mopidy.conf" cfg.configuration; mopidyConf = writeText "mopidy.conf" cfg.configuration;
mopidyLauncher = stdenv.mkDerivation { mopidyEnv = python.buildEnv.override {
name = "mopidy-launcher"; extraLibs = [ mopidy ] ++ cfg.extensionPackages;
phases = [ "installPhase" ];
buildInputs = [ makeWrapper python ];
installPhase = ''
mkdir -p $out/bin
ln -s ${mopidy}/bin/mopidy $out/bin/mopidy
wrapProgram $out/bin/mopidy \
--prefix PYTHONPATH : \
"${concatStringsSep ":" (map (p: "$(toPythonPath ${p})") cfg.extensionPackages)}"
'';
}; };
in { in {
@ -86,7 +77,7 @@ in {
description = "mopidy music player daemon"; description = "mopidy music player daemon";
preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy ${cfg.dataDir}"; preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy ${cfg.dataDir}";
serviceConfig = { serviceConfig = {
ExecStart = "${mopidyLauncher}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}"; ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
User = "mopidy"; User = "mopidy";
PermissionsStartOnly = true; PermissionsStartOnly = true;
}; };
@ -96,7 +87,7 @@ in {
description = "mopidy local files scanner"; description = "mopidy local files scanner";
preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy ${cfg.dataDir}"; preStart = "mkdir -p ${cfg.dataDir} && chown -R mopidy:mopidy ${cfg.dataDir}";
serviceConfig = { serviceConfig = {
ExecStart = "${mopidyLauncher}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)} local scan"; ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)} local scan";
User = "mopidy"; User = "mopidy";
PermissionsStartOnly = true; PermissionsStartOnly = true;
Type = "oneshot"; Type = "oneshot";

View file

@ -90,7 +90,7 @@ in {
extraConfig = mkOption { extraConfig = mkOption {
type = types.attrsOf types.str; type = types.attrsOf types.str;
apply = mapAttrs' (n: v: nameValuePair ("ETCD_" + n) v); apply = mapAttrs' (n: v: nameValuePair ("FLEET_" + n) v);
default = {}; default = {};
example = literalExample '' example = literalExample ''
{ {
@ -120,7 +120,7 @@ in {
FLEET_PUBLIC_IP = cfg.publicIp; FLEET_PUBLIC_IP = cfg.publicIp;
FLEET_ETCD_CAFILE = cfg.etcdCafile; FLEET_ETCD_CAFILE = cfg.etcdCafile;
FLEET_ETCD_KEYFILE = cfg.etcdKeyfile; FLEET_ETCD_KEYFILE = cfg.etcdKeyfile;
FEELT_ETCD_CERTFILE = cfg.etcdCertfile; FLEET_ETCD_CERTFILE = cfg.etcdCertfile;
FLEET_METADATA = cfg.metadata; FLEET_METADATA = cfg.metadata;
} // cfg.extraConfig; } // cfg.extraConfig;
serviceConfig = { serviceConfig = {

View file

@ -34,6 +34,15 @@ in
}; };
package = mkOption {
type = types.package;
default = pkgs.slurm-llnl;
example = literalExample "pkgs.slurm-llnl-full";
description = ''
The package to use for slurm binaries.
'';
};
controlMachine = mkOption { controlMachine = mkOption {
type = types.nullOr types.str; type = types.nullOr types.str;
default = null; default = null;
@ -91,38 +100,69 @@ in
###### implementation ###### implementation
config = mkIf (cfg.client.enable || cfg.server.enable) { config =
let
wrappedSlurm = pkgs.stdenv.mkDerivation {
name = "wrappedSlurm";
environment.systemPackages = [ pkgs.slurm-llnl ]; propagatedBuildInputs = [ cfg.package configFile ];
builder = pkgs.writeText "builder.sh" ''
source $stdenv/setup
mkdir -p $out/bin
find ${cfg.package}/bin -type f -executable | while read EXE
do
exename="$(basename $EXE)"
wrappername="$out/bin/$exename"
cat > "$wrappername" <<EOT
#!/bin/sh
if [ -z "$SLURM_CONF" ]
then
SLURM_CONF="${configFile}" "$EXE" "\$@"
else
"$EXE" "\$0"
fi
EOT
chmod +x "$wrappername"
done
'';
};
in mkIf (cfg.client.enable || cfg.server.enable) {
environment.systemPackages = [ wrappedSlurm ];
systemd.services.slurmd = mkIf (cfg.client.enable) { systemd.services.slurmd = mkIf (cfg.client.enable) {
path = with pkgs; [ slurm-llnl coreutils ]; path = with pkgs; [ wrappedSlurm coreutils ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "systemd-tmpfiles-clean.service" ]; after = [ "systemd-tmpfiles-clean.service" ];
serviceConfig = { serviceConfig = {
Type = "forking"; Type = "forking";
ExecStart = "${pkgs.slurm-llnl}/bin/slurmd -f ${configFile}"; ExecStart = "${wrappedSlurm}/bin/slurmd";
PIDFile = "/run/slurmd.pid"; PIDFile = "/run/slurmd.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
}; };
preStart = ''
mkdir -p /var/spool
'';
}; };
systemd.services.slurmctld = mkIf (cfg.server.enable) { systemd.services.slurmctld = mkIf (cfg.server.enable) {
path = with pkgs; [ slurm-llnl munge coreutils ]; path = with pkgs; [ wrappedSlurm munge coreutils ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" "auditd.service" "munged.service" "slurmdbd.service" ]; after = [ "network.target" "munged.service" ];
requires = [ "munged.service" ]; requires = [ "munged.service" ];
serviceConfig = { serviceConfig = {
Type = "forking"; Type = "forking";
ExecStart = "${pkgs.slurm-llnl}/bin/slurmctld"; ExecStart = "${wrappedSlurm}/bin/slurmctld";
PIDFile = "/run/slurmctld.pid"; PIDFile = "/run/slurmctld.pid";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
}; };
environment = { SLURM_CONF = "${configFile}"; };
}; };
}; };
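A hedged sketch of a small setup using the new package option and the wrapped binaries (the control machine name is a placeholder):

  services.slurm = {
    package = pkgs.slurm-llnl;      # or pkgs.slurm-llnl-full, as in the example above
    controlMachine = "head-node";   # placeholder
    server.enable = true;           # runs slurmctld via the wrapper
    client.enable = true;           # runs slurmd via the wrapper
  };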

View file

@ -48,11 +48,33 @@ in {
''; '';
}; };
listenAddress = mkOption {
default = "0.0.0.0";
example = "localhost";
type = types.str;
description = ''
Specifies the bind address on which the jenkins HTTP interface listens.
The default is the wildcard address.
'';
};
port = mkOption { port = mkOption {
default = 8080; default = 8080;
type = types.int; type = types.int;
description = '' description = ''
Specifies port number on which the jenkins HTTP interface listens. The default is 8080. Specifies port number on which the jenkins HTTP interface listens.
The default is 8080.
'';
};
prefix = mkOption {
default = "";
example = "/jenkins";
type = types.str;
description = ''
Specifies a URL prefix under which Jenkins is served.
If the example /jenkins is given, the Jenkins server will be
accessible at localhost:8080/jenkins.
''; '';
}; };
@ -80,7 +102,7 @@ in {
extraOptions = mkOption { extraOptions = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
default = [ ]; default = [ ];
example = [ "--debug=9" "--httpListenAddress=localhost" ]; example = [ "--debug=9" ];
description = '' description = ''
Additional command line arguments to pass to Jenkins. Additional command line arguments to pass to Jenkins.
''; '';
@ -134,15 +156,18 @@ in {
''; '';
script = '' script = ''
${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpPort=${toString cfg.port} ${concatStringsSep " " cfg.extraOptions} ${pkgs.jdk}/bin/java -jar ${pkgs.jenkins} --httpListenAddress=${cfg.listenAddress} \
--httpPort=${toString cfg.port} \
--prefix=${cfg.prefix} \
${concatStringsSep " " cfg.extraOptions}
''; '';
postStart = '' postStart = ''
until ${pkgs.curl.bin}/bin/curl -s -L localhost:${toString cfg.port} ; do until ${pkgs.curl.bin}/bin/curl -s -L ${cfg.listenAddress}:${toString cfg.port}${cfg.prefix} ; do
sleep 10 sleep 10
done done
while true ; do while true ; do
index=`${pkgs.curl.bin}/bin/curl -s -L localhost:${toString cfg.port}` index=`${pkgs.curl.bin}/bin/curl -s -L ${cfg.listenAddress}:${toString cfg.port}${cfg.prefix}`
if [[ !("$index" =~ 'Please wait while Jenkins is restarting' || if [[ !("$index" =~ 'Please wait while Jenkins is restarting' ||
"$index" =~ 'Please wait while Jenkins is getting ready to work') ]]; then "$index" =~ 'Please wait while Jenkins is getting ready to work') ]]; then
exit 0 exit 0

View file

@ -144,7 +144,7 @@ in {
done done
echo "Asking Jenkins to reload config" echo "Asking Jenkins to reload config"
curl --silent -X POST http://localhost:${toString jenkinsCfg.port}/reload curl --silent -X POST http://${jenkinsCfg.listenAddress}:${toString jenkinsCfg.port}${jenkinsCfg.prefix}/reload
''; '';
serviceConfig = { serviceConfig = {
User = jenkinsCfg.user; User = jenkinsCfg.user;

View file

@ -39,7 +39,7 @@ with lib;
mkdir -m 0755 -p /var/lib/udisks2 mkdir -m 0755 -p /var/lib/udisks2
''; '';
#services.udev.packages = [ pkgs.udisks2 ]; services.udev.packages = [ pkgs.udisks2 ];
systemd.services.udisks2 = { systemd.services.udisks2 = {
description = "Udisks2 service"; description = "Udisks2 service";

View file

@ -3,137 +3,178 @@
with lib; with lib;
let let
cfg = config.services.dovecot2; cfg = config.services.dovecot2;
dovecotPkg = cfg.package;
dovecotConf = baseDir = "/run/dovecot2";
'' stateDir = "/var/lib/dovecot";
base_dir = /var/run/dovecot2/
protocols = ${optionalString cfg.enableImap "imap"} ${optionalString cfg.enablePop3 "pop3"} ${optionalString cfg.enableLmtp "lmtp"} protocols = concatStrings [
(optionalString cfg.enableImap "imap")
(optionalString cfg.enablePop3 "pop3")
(optionalString cfg.enableLmtp "lmtp")
];
dovecotConf = concatStrings [
'' ''
+ (if cfg.sslServerCert!="" then base_dir = ${baseDir}
protocols = ${protocols}
'' ''
ssl_cert = <${cfg.sslServerCert}
ssl_key = <${cfg.sslServerKey} (if isNull cfg.sslServerCert then ''
ssl_ca = <${cfg.sslCACert}
disable_plaintext_auth = yes
'' else ''
ssl = no ssl = no
disable_plaintext_auth = no disable_plaintext_auth = no
'' else ''
ssl_cert = <${cfg.sslServerCert}
ssl_key = <${cfg.sslServerKey}
${optionalString (!(isNull cfg.sslCACert)) ("ssl_ca = <" + cfg.sslCACert)}
disable_plaintext_auth = yes
'') '')
+ '' ''
default_internal_user = ${cfg.user} default_internal_user = ${cfg.user}
mail_location = ${cfg.mailLocation} mail_location = ${cfg.mailLocation}
maildir_copy_with_hardlinks = yes maildir_copy_with_hardlinks = yes
pop3_uidl_format = %08Xv%08Xu
auth_mechanisms = plain login auth_mechanisms = plain login
service auth { service auth {
user = root user = root
} }
''
(optionalString cfg.enablePAM ''
userdb { userdb {
driver = passwd driver = passwd
} }
passdb { passdb {
driver = pam driver = pam
args = ${optionalString cfg.showPAMFailure "failure_show_msg=yes"} dovecot2 args = ${optionalString cfg.showPAMFailure "failure_show_msg=yes"} dovecot2
} }
'')
pop3_uidl_format = %08Xv%08Xu cfg.extraConfig
'' + cfg.extraConfig; ];
modulesDir = pkgs.symlinkJoin "dovecot-modules"
(map (module: "${module}/lib/dovecot") cfg.modules);
in in
{ {
###### interface options.services.dovecot2 = {
enable = mkEnableOption "Dovecot 2.x POP3/IMAP server";
options = { enablePop3 = mkOption {
type = types.bool;
services.dovecot2 = { default = true;
description = "Start the POP3 listener (when Dovecot is enabled).";
enable = mkOption {
default = false;
description = "Whether to enable the Dovecot 2.x POP3/IMAP server.";
};
enablePop3 = mkOption {
default = true;
description = "Start the POP3 listener (when Dovecot is enabled).";
};
enableImap = mkOption {
default = true;
description = "Start the IMAP listener (when Dovecot is enabled).";
};
enableLmtp = mkOption {
default = false;
description = "Start the LMTP listener (when Dovecot is enabled).";
};
user = mkOption {
default = "dovecot2";
description = "Dovecot user name.";
};
group = mkOption {
default = "dovecot2";
description = "Dovecot group name.";
};
extraConfig = mkOption {
default = "";
example = "mail_debug = yes";
description = "Additional entries to put verbatim into Dovecot's config file.";
};
configFile = mkOption {
default = null;
description = "Config file used for the whole dovecot configuration.";
apply = v: if v != null then v else pkgs.writeText "dovecot.conf" dovecotConf;
};
mailLocation = mkOption {
default = "maildir:/var/spool/mail/%u"; /* Same as inbox, as postfix */
example = "maildir:~/mail:INBOX=/var/spool/mail/%u";
description = ''
Location that dovecot will use for mail folders. Dovecot mail_location option.
'';
};
sslServerCert = mkOption {
default = "";
description = "Server certificate";
};
sslCACert = mkOption {
default = "";
description = "CA certificate used by the server certificate.";
};
sslServerKey = mkOption {
default = "";
description = "Server key.";
};
showPAMFailure = mkOption {
default = false;
description = "Show the PAM failure message on authentication error (useful for OTPW).";
};
}; };
enableImap = mkOption {
type = types.bool;
default = true;
description = "Start the IMAP listener (when Dovecot is enabled).";
};
enableLmtp = mkOption {
type = types.bool;
default = false;
description = "Start the LMTP listener (when Dovecot is enabled).";
};
package = mkOption {
type = types.package;
default = pkgs.dovecot22;
description = "Dovecot package to use.";
};
user = mkOption {
type = types.str;
default = "dovecot2";
description = "Dovecot user name.";
};
group = mkOption {
type = types.str;
default = "dovecot2";
description = "Dovecot group name.";
};
extraConfig = mkOption {
type = types.str;
default = "";
example = "mail_debug = yes";
description = "Additional entries to put verbatim into Dovecot's config file.";
};
configFile = mkOption {
type = types.nullOr types.str;
default = null;
description = "Config file used for the whole dovecot configuration.";
apply = v: if v != null then v else pkgs.writeText "dovecot.conf" dovecotConf;
};
mailLocation = mkOption {
type = types.str;
default = "maildir:/var/spool/mail/%u"; /* Same as inbox, as postfix */
example = "maildir:~/mail:INBOX=/var/spool/mail/%u";
description = ''
Location that dovecot will use for mail folders. Dovecot mail_location option.
'';
};
modules = mkOption {
type = types.listOf types.package;
default = [];
example = [ pkgs.dovecot_pigeonhole ];
description = ''
Symlinks the contents of lib/dovecot of every given package into
/var/lib/dovecot/modules. This will make the given modules available
if a dovecot package with the module_dir patch applied (like
pkgs.dovecot22, the default) is being used.
'';
};
sslCACert = mkOption {
type = types.nullOr types.str;
default = null;
description = "Path to the server's CA certificate key.";
};
sslServerCert = mkOption {
type = types.nullOr types.str;
default = null;
description = "Path to the server's public key.";
};
sslServerKey = mkOption {
type = types.nullOr types.str;
default = null;
description = "Path to the server's private key.";
};
enablePAM = mkOption {
type = types.bool;
default = true;
description = "Wether to create a own Dovecot PAM service and configure PAM user logins.";
};
showPAMFailure = mkOption {
type = types.bool;
default = false;
description = "Show the PAM failure message on authentication error (useful for OTPW).";
};
}; };
###### implementation config = mkIf cfg.enable {
config = mkIf config.services.dovecot2.enable { security.pam.services.dovecot2 = mkIf cfg.enablePAM {};
security.pam.services.dovecot2 = {};
users.extraUsers = [ users.extraUsers = [
{ name = cfg.user; { name = cfg.user;
@ -148,36 +189,47 @@ in
} }
]; ];
users.extraGroups = singleton users.extraGroups = singleton {
{ name = cfg.group; name = cfg.group;
gid = config.ids.gids.dovecot2; gid = config.ids.gids.dovecot2;
};
systemd.services.dovecot2 = {
description = "Dovecot IMAP/POP3 server";
after = [ "keys.target" "network.target" ];
wants = [ "keys.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
mkdir -p "${baseDir}/login"
chown -R ${cfg.user}:${cfg.group} "${baseDir}"
rm -f "${stateDir}/modules"
ln -s "${modulesDir}" "${stateDir}/modules"
'';
serviceConfig = {
ExecStart = "${dovecotPkg}/sbin/dovecot -F -c ${cfg.configFile}";
Restart = "on-failure";
RestartSec = "1s";
StartLimitInterval = "1min";
}; };
};
systemd.services.dovecot2 = environment.systemPackages = [ dovecotPkg ];
{ description = "Dovecot IMAP/POP3 server";
after = [ "network.target" ]; assertions = [
wantedBy = [ "multi-user.target" ]; { assertion = cfg.enablePop3 || cfg.enableImap;
message = "dovecot needs at least one of the IMAP or POP3 listeners enabled";
preStart = }
'' { assertion = isNull cfg.sslServerCert == isNull cfg.sslServerKey
${pkgs.coreutils}/bin/mkdir -p /var/run/dovecot2 /var/run/dovecot2/login && (!(isNull cfg.sslCACert) -> !(isNull cfg.sslServerCert || isNull cfg.sslServerKey));
${pkgs.coreutils}/bin/chown -R ${cfg.user}:${cfg.group} /var/run/dovecot2 message = "dovecot needs both sslServerCert and sslServerKey defined for working crypto";
''; }
{ assertion = cfg.showPAMFailure -> cfg.enablePAM;
serviceConfig = { message = "dovecot is configured with showPAMFailure while enablePAM is disabled";
ExecStart = "${pkgs.dovecot}/sbin/dovecot -F -c ${cfg.configFile}"; }
Restart = "on-failure"; ];
RestartSec = "1s";
StartLimitInterval = "1min";
};
};
environment.systemPackages = [ pkgs.dovecot ];
assertions = [{ assertion = cfg.enablePop3 || cfg.enableImap;
message = "dovecot needs at least one of the IMAP or POP3 listeners enabled";}];
}; };
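A hedged sketch of the rewritten module in use; the certificate paths are placeholders and must satisfy the cert/key assertion above:

  services.dovecot2 = {
    enable = true;
    enableImap = true;
    enablePop3 = false;                      # at least one listener must stay enabled
    sslServerCert = "/var/lib/acme/mail.example.org/fullchain.pem";  # placeholder
    sslServerKey  = "/var/lib/acme/mail.example.org/key.pem";        # placeholder
    modules = [ pkgs.dovecot_pigeonhole ];   # symlinked into /var/lib/dovecot/modules
  };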

View file

@ -1,5 +1,5 @@
# Avahi daemon. # Avahi daemon.
{ config, lib, pkgs, ... }: { config, lib, utils, pkgs, ... }:
with lib; with lib;
@ -7,7 +7,9 @@ let
cfg = config.services.avahi; cfg = config.services.avahi;
inherit (pkgs) avahi; # We must escape interfaces due to the systemd interpretation
subsystemDevice = interface:
"sys-subsystem-net-devices-${utils.escapeSystemdPath interface}.device";
avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" '' avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" ''
[server] [server]
@ -21,12 +23,18 @@ let
browse-domains=${concatStringsSep ", " browseDomains} browse-domains=${concatStringsSep ", " browseDomains}
use-ipv4=${if ipv4 then "yes" else "no"} use-ipv4=${if ipv4 then "yes" else "no"}
use-ipv6=${if ipv6 then "yes" else "no"} use-ipv6=${if ipv6 then "yes" else "no"}
${optionalString (interfaces!=null) "allow-interfaces=${concatStringsSep "," interfaces}"}
[wide-area] [wide-area]
enable-wide-area=${if wideArea then "yes" else "no"} enable-wide-area=${if wideArea then "yes" else "no"}
[publish] [publish]
disable-publishing=${if publishing then "no" else "yes"} disable-publishing=${if publish.enable then "no" else "yes"}
disable-user-service-publishing=${if publish.userServices then "no" else "yes"}
publish-addresses=${if publish.userServices || publish.addresses then "yes" else "no"}
publish-hinfo=${if publish.hinfo then "yes" else "no"}
publish-workstation=${if publish.workstation then "yes" else "no"}
publish-domain=${if publish.domain then "yes" else "no"}
''; '';
in in
@ -74,14 +82,55 @@ in
description = ''Whether to use IPv6''; description = ''Whether to use IPv6'';
}; };
interfaces = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = ''
List of network interfaces that should be used by the <command>avahi-daemon</command>.
Other interfaces will be ignored. If <literal>null</literal>, all local interfaces
except loopback and point-to-point will be used.
'';
};
wideArea = mkOption { wideArea = mkOption {
default = true; default = true;
description = ''Whether to enable wide-area service discovery.''; description = ''Whether to enable wide-area service discovery.'';
}; };
publishing = mkOption { publish = {
default = true; enable = mkOption {
description = ''Whether to allow publishing.''; default = false;
description = ''Whether to allow publishing in general.'';
};
userServices = mkOption {
default = false;
description = ''Whether to publish user services. Will set <literal>addresses=true</literal>.'';
};
addresses = mkOption {
default = false;
description = ''Whether to register mDNS address records for all local IP addresses.'';
};
hinfo = mkOption {
default = false;
description = ''
Whether to register an mDNS HINFO record which contains information about the
local operating system and CPU.
'';
};
workstation = mkOption {
default = false;
description = ''Whether to register a service of type "_workstation._tcp" on the local LAN.'';
};
domain = mkOption {
default = false;
description = ''Whether to announce the locally used domain name for browsing by other hosts.'';
};
}; };
nssmdns = mkOption { nssmdns = mkOption {
@ -118,29 +167,36 @@ in
system.nssModules = optional cfg.nssmdns pkgs.nssmdns; system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
environment.systemPackages = [ avahi ]; environment.systemPackages = [ pkgs.avahi ];
jobs.avahi_daemon = systemd.services.avahi-daemon =
{ name = "avahi-daemon"; let
deps = optionals (cfg.interfaces!=null) (map subsystemDevice cfg.interfaces);
in
{ description = "Avahi daemon";
wantedBy = [ "ip-up.target" ];
bindsTo = deps;
after = deps;
before = [ "ip-up.target" ];
# Receive restart event after resume
partOf = [ "post-resume.target" ];
startOn = "ip-up"; path = [ pkgs.coreutils pkgs.avahi ];
preStart = "mkdir -p /var/run/avahi-daemon";
script = script =
'' ''
export PATH="${avahi}/bin:${avahi}/sbin:$PATH"
# Make NSS modules visible so that `avahi_nss_support ()' can # Make NSS modules visible so that `avahi_nss_support ()' can
# return a sensible value. # return a sensible value.
export LD_LIBRARY_PATH="${config.system.nssModules.path}" export LD_LIBRARY_PATH="${config.system.nssModules.path}"
mkdir -p /var/run/avahi-daemon exec ${pkgs.avahi}/sbin/avahi-daemon --syslog -f "${avahiDaemonConf}"
exec ${avahi}/sbin/avahi-daemon --syslog -f "${avahiDaemonConf}"
''; '';
}; };
services.dbus.enable = true; services.dbus.enable = true;
services.dbus.packages = [avahi]; services.dbus.packages = [ pkgs.avahi ];
# Enabling Avahi without exposing it in the firewall doesn't make # Enabling Avahi without exposing it in the firewall doesn't make
# sense. # sense.

View file

@ -2,21 +2,17 @@
# TODO: # TODO:
# #
# asserts # asserts
# ensure that the nl80211 module is loaded/compiled in the kernel # ensure that the nl80211 module is loaded/compiled in the kernel
# hwMode must be a/b/g
# channel must be between 1 and 13 (maybe)
# wpa_supplicant and hostapd on the same wireless interface doesn't make any sense # wpa_supplicant and hostapd on the same wireless interface doesn't make any sense
# perhaps an assertion that there is a dhcp server and a dns server on the IP address serviced by the hostapd?
with lib; with lib;
let let
cfg = config.services.hostapd; cfg = config.services.hostapd;
configFile = pkgs.writeText "hostapd.conf" configFile = pkgs.writeText "hostapd.conf" ''
''
interface=${cfg.interface} interface=${cfg.interface}
driver=${cfg.driver} driver=${cfg.driver}
ssid=${cfg.ssid} ssid=${cfg.ssid}
@ -37,8 +33,8 @@ let
wpa_passphrase=${cfg.wpaPassphrase} wpa_passphrase=${cfg.wpaPassphrase}
'' else ""} '' else ""}
${cfg.extraCfg} ${cfg.extraConfig}
'' ; '' ;
in in
@ -65,9 +61,9 @@ in
interface = mkOption { interface = mkOption {
default = ""; default = "";
example = "wlan0"; example = "wlp2s0";
description = '' description = ''
The interfaces <command>hostapd</command> will use. The interfaces <command>hostapd</command> will use.
''; '';
}; };
@ -89,8 +85,7 @@ in
}; };
hwMode = mkOption { hwMode = mkOption {
default = "b"; default = "g";
example = "g";
type = types.string; type = types.string;
description = '' description = ''
Operation mode. Operation mode.
@ -98,17 +93,16 @@ in
''; '';
}; };
channel = mkOption { channel = mkOption {
default = 7; default = 7;
example = 11; example = 11;
type = types.int; type = types.int;
description = description = ''
''
Channel number (IEEE 802.11) Channel number (IEEE 802.11)
Please note that some drivers do not use this value from Please note that some drivers do not use this value from
<command>hostapd</command> and the channel will need to be configured <command>hostapd</command> and the channel will need to be configured
separately with <command>iwconfig</command>. separately with <command>iwconfig</command>.
''; '';
}; };
group = mkOption { group = mkOption {
@ -131,16 +125,15 @@ in
default = "my_sekret"; default = "my_sekret";
example = "any_64_char_string"; example = "any_64_char_string";
type = types.string; type = types.string;
description = description = ''
''
WPA-PSK (pre-shared-key) passphrase. Clients will need this WPA-PSK (pre-shared-key) passphrase. Clients will need this
passphrase to associate with this access point. passphrase to associate with this access point.
Warning: This passphrase will get put into a world-readable file in Warning: This passphrase will get put into a world-readable file in
the Nix store! the Nix store!
''; '';
}; };
extraCfg = mkOption { extraConfig = mkOption {
default = ""; default = "";
example = '' example = ''
auth_algo=0 auth_algo=0
@ -158,17 +151,25 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
assertions = [
{ assertion = (cfg.hwMode == "a" || cfg.hwMode == "b" || cfg.hwMode == "g");
message = "hwMode must be a/b/g";
}
{ assertion = (cfg.channel >= 1 && cfg.channel <= 13);
message = "channel must be between 1 and 13";
}];
environment.systemPackages = [ pkgs.hostapd ]; environment.systemPackages = [ pkgs.hostapd ];
systemd.services.hostapd = systemd.services.hostapd =
{ description = "hostapd wireless AP"; { description = "hostapd wireless AP";
path = [ pkgs.hostapd ]; path = [ pkgs.hostapd ];
wantedBy = [ "network.target" ]; wantedBy = [ "network.target" ];
after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service"]; after = [ "${cfg.interface}-cfg.service" "nat.service" "bind.service" "dhcpd.service"];
serviceConfig = serviceConfig =
{ ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}"; { ExecStart = "${pkgs.hostapd}/bin/hostapd ${configFile}";
Restart = "always"; Restart = "always";
}; };
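A hedged sketch that satisfies the new hwMode and channel assertions; the SSID, passphrase and extraConfig line are only placeholders:

  services.hostapd = {
    enable = true;
    interface = "wlp2s0";
    ssid = "my-ap";              # placeholder
    hwMode = "g";                # assertion: must be a, b or g
    channel = 6;                 # assertion: must be between 1 and 13
    wpaPassphrase = "changeme";  # placeholder; ends up world-readable in the store
    extraConfig = ''
      ieee80211n=1
    '';
  };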

View file

@ -30,7 +30,7 @@ in
internalIPs = mkOption { internalIPs = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
example = [ "192.168.1.0/24" ]; example = [ "192.168.1.1/24" "enp1s0" ];
description = '' description = ''
The IP address ranges to listen on. The IP address ranges to listen on.
''; '';
@ -57,13 +57,42 @@ in
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
# from miniupnpd/netfilter/iptables_init.sh
networking.firewall.extraCommands = ''
iptables -t nat -N MINIUPNPD
iptables -t nat -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
iptables -t mangle -N MINIUPNPD
iptables -t mangle -A PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
iptables -t filter -N MINIUPNPD
iptables -t filter -A FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
iptables -t nat -N MINIUPNPD-PCP-PEER
iptables -t nat -A POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
'';
# from miniupnpd/netfilter/iptables_removeall.sh
networking.firewall.extraStopCommands = ''
iptables -t nat -F MINIUPNPD
iptables -t nat -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
iptables -t nat -X MINIUPNPD
iptables -t mangle -F MINIUPNPD
iptables -t mangle -D PREROUTING -i ${cfg.externalInterface} -j MINIUPNPD
iptables -t mangle -X MINIUPNPD
iptables -t filter -F MINIUPNPD
iptables -t filter -D FORWARD -i ${cfg.externalInterface} ! -o ${cfg.externalInterface} -j MINIUPNPD
iptables -t filter -X MINIUPNPD
iptables -t nat -F MINIUPNPD-PCP-PEER
iptables -t nat -D POSTROUTING -o ${cfg.externalInterface} -j MINIUPNPD-PCP-PEER
iptables -t nat -X MINIUPNPD-PCP-PEER
'';
systemd.services.miniupnpd = { systemd.services.miniupnpd = {
description = "MiniUPnP daemon"; description = "MiniUPnP daemon";
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ pkgs.miniupnpd ];
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -d -f ${configFile}"; ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
PIDFile = "/var/run/miniupnpd.pid";
Type = "forking";
}; };
}; };
}; };
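
A hedged usage sketch of the options this module consumes (the services.miniupnpd path and the enable option are assumed; only cfg.* references appear in the hunk):

{ config, ... }:
{
  # Hypothetical example; services.miniupnpd and enable are assumed from context.
  services.miniupnpd = {
    enable = true;
    externalInterface = "eth0";                  # interface the new MINIUPNPD iptables chains attach to
    internalIPs = [ "192.168.1.1/24" "enp1s0" ]; # LAN ranges or interfaces, per the updated example
  };
}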


@ -39,6 +39,9 @@ let
certrequired=${if cfg.clientCertRequired then "true" else "false"} certrequired=${if cfg.clientCertRequired then "true" else "false"}
${if cfg.sslCert == "" then "" else "sslCert="+cfg.sslCert} ${if cfg.sslCert == "" then "" else "sslCert="+cfg.sslCert}
${if cfg.sslKey == "" then "" else "sslKey="+cfg.sslKey} ${if cfg.sslKey == "" then "" else "sslKey="+cfg.sslKey}
${if cfg.sslCa == "" then "" else "sslCA="+cfg.sslCa}
${cfg.extraConfig}
''; '';
in in
{ {
@ -219,6 +222,18 @@ in
default = ""; default = "";
description = "Path to your SSL key."; description = "Path to your SSL key.";
}; };
sslCa = mkOption {
type = types.str;
default = "";
description = "Path to your SSL CA certificate.";
};
extraConfig = mkOption {
type = types.str;
default = "";
description = "Extra configuration to put into mumur.ini.";
};
}; };
}; };
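
For illustration, a sketch of the two new murmur options (the services.murmur path and the enable option are assumed; this hunk only shows the option declarations and the config template):

{ config, ... }:
{
  # Hypothetical example; services.murmur and enable are assumed from context.
  services.murmur = {
    enable = true;
    sslCa = "/var/lib/murmur/ca.pem"; # emitted as sslCA= in the generated murmur.ini
    extraConfig = ''
      # appended verbatim to the end of the generated murmur.ini
      autobanTime=600
    '';
  };
}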


@ -223,9 +223,11 @@ in {
} }
{ {
name = "nm-openvpn"; name = "nm-openvpn";
gid = config.ids.gids.nm-openvpn;
}]; }];
users.extraUsers = [{ users.extraUsers = [{
name = "nm-openvpn"; name = "nm-openvpn";
uid = config.ids.uids.nm-openvpn;
}]; }];
systemd.packages = cfg.packages; systemd.packages = cfg.packages;


@ -0,0 +1,80 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.shairport-sync;
in
{
###### interface
options = {
services.shairport-sync = {
enable = mkOption {
default = false;
description = ''
Enable the shairport-sync daemon.
Running with a local system-wide or remote pulseaudio server
is recommended.
'';
};
arguments = mkOption {
default = "-v -o pulse";
description = ''
Arguments to pass to the daemon. Defaults to a local pulseaudio
server.
'';
};
user = mkOption {
default = "shairport";
description = ''
User account name under which to run shairport-sync. The account
will be created.
'';
};
};
};
###### implementation
config = mkIf config.services.shairport-sync.enable {
services.avahi.enable = true;
users.extraUsers = singleton
{ name = cfg.user;
description = "Shairport user";
isSystemUser = true;
createHome = true;
home = "/var/lib/shairport-sync";
extraGroups = [ "audio" ] ++ optional config.hardware.pulseaudio.enable "pulse";
};
systemd.services.shairport-sync =
{
description = "shairport-sync";
after = [ "network.target" "avahi-daemon.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
User = cfg.user;
ExecStart = "${pkgs.shairport-sync}/bin/shairport-sync ${cfg.arguments}";
};
};
environment.systemPackages = [ pkgs.shairport-sync ];
};
}
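
A minimal sketch of enabling the new module; the shairport-sync option paths are taken from the declarations above, while the pulseaudio wiring is an assumption based on the recommendation in the enable description:

{ config, ... }:
{
  services.shairport-sync = {
    enable = true;
    arguments = "-v -o pulse"; # the default: verbose output to a pulseaudio server
    user = "shairport";        # account created by the module
  };

  # Assumption: a system-wide pulseaudio server, as the description recommends.
  hardware.pulseaudio.enable = true;
  hardware.pulseaudio.systemWide = true;
}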


@ -247,6 +247,8 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "cups.service" "avahi-daemon.service" ]; wants = [ "cups.service" "avahi-daemon.service" ];
bindsTo = [ "cups.service" "avahi-daemon.service" ];
partOf = [ "cups.service" "avahi-daemon.service" ];
after = [ "cups.service" "avahi-daemon.service" ]; after = [ "cups.service" "avahi-daemon.service" ];
path = [ cups ]; path = [ cups ];


@ -3,78 +3,115 @@ with lib;
let let
clamavUser = "clamav"; clamavUser = "clamav";
stateDir = "/var/lib/clamav"; stateDir = "/var/lib/clamav";
runDir = "/var/run/clamav";
logDir = "/var/log/clamav";
clamavGroup = clamavUser; clamavGroup = clamavUser;
cfg = config.services.clamav; cfg = config.services.clamav;
clamdConfigFile = pkgs.writeText "clamd.conf" ''
DatabaseDirectory ${stateDir}
LocalSocket ${runDir}/clamd.ctl
LogFile ${logDir}/clamav.log
PidFile ${runDir}/clamd.pid
User clamav
${cfg.daemon.extraConfig}
'';
in in
{ {
###### interface
options = { options = {
services.clamav = { services.clamav = {
daemon = {
enable = mkEnableOption "clamd daemon";
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Extra configuration for clamd. Contents will be added verbatim to the
configuration file.
'';
};
};
updater = { updater = {
enable = mkOption { enable = mkEnableOption "freshclam updater";
default = false;
description = ''
Whether to enable automatic ClamAV virus definitions database updates.
'';
};
frequency = mkOption { frequency = mkOption {
default = 12; default = 12;
description = '' description = ''
Number of database checks per day. Number of database checks per day.
''; '';
}; };
config = mkOption { config = mkOption {
default = ""; default = "";
description = '' description = ''
Extra configuration for freshclam. Contents will be added verbatim to the Extra configuration for freshclam. Contents will be added verbatim to the
configuration file. configuration file.
''; '';
}; };
}; };
}; };
}; };
###### implementation config = mkIf (cfg.updater.enable || cfg.daemon.enable) {
config = mkIf cfg.updater.enable {
environment.systemPackages = [ pkgs.clamav ]; environment.systemPackages = [ pkgs.clamav ];
users.extraUsers = singleton users.extraUsers = singleton {
{ name = clamavUser; name = clamavUser;
uid = config.ids.uids.clamav; uid = config.ids.uids.clamav;
description = "ClamAV daemon user"; description = "ClamAV daemon user";
home = stateDir; home = stateDir;
}; };
users.extraGroups = singleton users.extraGroups = singleton {
{ name = clamavGroup; name = clamavGroup;
gid = config.ids.gids.clamav; gid = config.ids.gids.clamav;
}; };
services.clamav.updater.config = '' services.clamav.updater.config = mkIf cfg.updater.enable ''
DatabaseDirectory ${stateDir} DatabaseDirectory ${stateDir}
Foreground yes Foreground yes
Checks ${toString cfg.updater.frequency} Checks ${toString cfg.updater.frequency}
DatabaseMirror database.clamav.net DatabaseMirror database.clamav.net
''; '';
jobs = { systemd.services.clamd = mkIf cfg.daemon.enable {
clamav_updater = { description = "ClamAV daemon (clamd)";
name = "clamav-updater"; path = [ pkgs.clamav ];
startOn = "started network-interfaces"; after = [ "network.target" "freshclam.service" ];
stopOn = "stopping network-interfaces"; requires = [ "freshclam.service" ];
wantedBy = [ "multi-user.target" ];
preStart = '' preStart = ''
mkdir -m 0755 -p ${stateDir} mkdir -m 0755 -p ${logDir}
chown ${clamavUser}:${clamavGroup} ${stateDir} mkdir -m 0755 -p ${runDir}
''; chown ${clamavUser}:${clamavGroup} ${logDir}
exec = "${pkgs.clamav}/bin/freshclam --daemon --config-file=${pkgs.writeText "freshclam.conf" cfg.updater.config}"; chown ${clamavUser}:${clamavGroup} ${runDir}
}; '';
serviceConfig = {
ExecStart = "${pkgs.clamav}/bin/clamd --config-file=${clamdConfigFile}";
Type = "forking";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "on-failure";
RestartSec = "10s";
StartLimitInterval = "1min";
};
}; };
systemd.services.freshclam = mkIf cfg.updater.enable {
description = "ClamAV updater (freshclam)";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.clamav ];
preStart = ''
mkdir -m 0755 -p ${stateDir}
chown ${clamavUser}:${clamavGroup} ${stateDir}
'';
serviceConfig = {
ExecStart = "${pkgs.clamav}/bin/freshclam --daemon --config-file=${pkgs.writeText "freshclam.conf" cfg.updater.config}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "on-failure";
RestartSec = "10s";
StartLimitInterval = "1min";
};
};
}; };
} }
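
A short sketch of the reworked ClamAV interface, using the option paths declared above (services.clamav.daemon.* and services.clamav.updater.*); the extra clamd setting is only an illustrative value:

{ config, ... }:
{
  services.clamav = {
    # Runs clamd with the generated clamd.conf shown above.
    daemon = {
      enable = true;
      extraConfig = ''
        MaxThreads 10
      '';
    };
    # Runs freshclam as a systemd service; 12 checks per day is the default.
    updater = {
      enable = true;
      frequency = 12;
    };
  };
}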


@ -140,9 +140,6 @@ in {
# Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/ # Update user dirs as described in http://freedesktop.org/wiki/Software/xdg-user-dirs/
${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
# Find the mouse
export XCURSOR_PATH=~/.icons:${config.system.path}/share/icons
${gnome3.gnome_session}/bin/gnome-session& ${gnome3.gnome_session}/bin/gnome-session&
waitPID=$! waitPID=$!
''; '';


@ -8,9 +8,7 @@ let
cfg = xcfg.desktopManager.kde5; cfg = xcfg.desktopManager.kde5;
xorg = pkgs.xorg; xorg = pkgs.xorg;
kf5 = pkgs.kf5_stable; kde5 = pkgs.kde5;
plasma5 = pkgs.plasma5_stable;
kdeApps = pkgs.kdeApps_stable;
in in
@ -57,12 +55,12 @@ in
services.xserver.desktopManager.session = singleton { services.xserver.desktopManager.session = singleton {
name = "kde5"; name = "kde5";
bgSupport = true; bgSupport = true;
start = ''exec ${plasma5.plasma-workspace}/bin/startkde;''; start = ''exec ${kde5.plasma-workspace}/bin/startkde;'';
}; };
security.setuidOwners = singleton { security.setuidOwners = singleton {
program = "kcheckpass"; program = "kcheckpass";
source = "${plasma5.plasma-workspace}/lib/libexec/kcheckpass"; source = "${kde5.plasma-workspace}/lib/libexec/kcheckpass";
owner = "root"; owner = "root";
group = "root"; group = "root";
setuid = true; setuid = true;
@ -72,61 +70,61 @@ in
[ [
pkgs.qt4 # qtconfig is the only way to set Qt 4 theme pkgs.qt4 # qtconfig is the only way to set Qt 4 theme
kf5.frameworkintegration kde5.frameworkintegration
kf5.kinit kde5.kinit
plasma5.breeze kde5.breeze
plasma5.kde-cli-tools kde5.kde-cli-tools
plasma5.kdeplasma-addons kde5.kdeplasma-addons
plasma5.kgamma5 kde5.kgamma5
plasma5.khelpcenter kde5.khelpcenter
plasma5.khotkeys kde5.khotkeys
plasma5.kinfocenter kde5.kinfocenter
plasma5.kmenuedit kde5.kmenuedit
plasma5.kscreen kde5.kscreen
plasma5.ksysguard kde5.ksysguard
plasma5.kwayland kde5.kwayland
plasma5.kwin kde5.kwin
plasma5.kwrited kde5.kwrited
plasma5.milou kde5.milou
plasma5.oxygen kde5.oxygen
plasma5.polkit-kde-agent kde5.polkit-kde-agent
plasma5.systemsettings kde5.systemsettings
plasma5.plasma-desktop kde5.plasma-desktop
plasma5.plasma-workspace kde5.plasma-workspace
plasma5.plasma-workspace-wallpapers kde5.plasma-workspace-wallpapers
kdeApps.ark kde5.ark
kdeApps.dolphin kde5.dolphin
kdeApps.dolphin-plugins kde5.dolphin-plugins
kdeApps.ffmpegthumbs kde5.ffmpegthumbs
kdeApps.gwenview kde5.gwenview
kdeApps.kate kde5.kate
kdeApps.kdegraphics-thumbnailers kde5.kdegraphics-thumbnailers
kdeApps.konsole kde5.konsole
kdeApps.okular kde5.okular
kdeApps.print-manager kde5.print-manager
# Oxygen icons moved to KDE Frameworks 5.16 and later. # Oxygen icons moved to KDE Frameworks 5.16 and later.
(kdeApps.oxygen-icons or kf5.oxygen-icons5) (kde5.oxygen-icons or kde5.oxygen-icons5)
pkgs.hicolor_icon_theme pkgs.hicolor_icon_theme
plasma5.kde-gtk-config kde5.kde-gtk-config
] ]
# Plasma 5.5 and later has a Breeze GTK theme. # Plasma 5.5 and later has a Breeze GTK theme.
# If it is not available, Orion is very similar to Breeze. # If it is not available, Orion is very similar to Breeze.
++ lib.optional (!(lib.hasAttr "breeze-gtk" plasma5)) pkgs.orion ++ lib.optional (!(lib.hasAttr "breeze-gtk" kde5)) pkgs.orion
# Install Breeze icons if available # Install Breeze icons if available
++ lib.optional (lib.hasAttr "breeze-icons" kf5) kf5.breeze-icons ++ lib.optional (lib.hasAttr "breeze-icons" kde5) kde5.breeze-icons
# Optional hardware support features # Optional hardware support features
++ lib.optional config.hardware.bluetooth.enable plasma5.bluedevil ++ lib.optional config.hardware.bluetooth.enable kde5.bluedevil
++ lib.optional config.networking.networkmanager.enable plasma5.plasma-nm ++ lib.optional config.networking.networkmanager.enable kde5.plasma-nm
++ lib.optional config.hardware.pulseaudio.enable plasma5.plasma-pa ++ lib.optional config.hardware.pulseaudio.enable kde5.plasma-pa
++ lib.optional config.powerManagement.enable plasma5.powerdevil ++ lib.optional config.powerManagement.enable kde5.powerdevil
++ lib.optionals cfg.phonon.gstreamer.enable ++ lib.optionals cfg.phonon.gstreamer.enable
[ [
@ -137,7 +135,7 @@ in
pkgs.gst_all.gstPluginsUgly pkgs.gst_all.gstPluginsUgly
pkgs.gst_all.gstPluginsBad pkgs.gst_all.gstPluginsBad
pkgs.gst_all.gstFfmpeg # for mp3 playback pkgs.gst_all.gstFfmpeg # for mp3 playback
pkgs.phonon_qt5_backend_gstreamer pkgs.qt55.phonon-backend-gstreamer
pkgs.gst_all_1.gstreamer pkgs.gst_all_1.gstreamer
pkgs.gst_all_1.gst-plugins-base pkgs.gst_all_1.gst-plugins-base
pkgs.gst_all_1.gst-plugins-good pkgs.gst_all_1.gst-plugins-good
@ -149,7 +147,7 @@ in
++ lib.optionals cfg.phonon.vlc.enable ++ lib.optionals cfg.phonon.vlc.enable
[ [
pkgs.phonon_qt5_backend_vlc pkgs.phonon_qt5_backend_vlc
pkgs.phonon_backend_vlc pkgs.qt55.phonon-backend-vlc
]; ];
environment.pathsToLink = [ "/share" ]; environment.pathsToLink = [ "/share" ];
@ -166,9 +164,14 @@ in
GST_PLUGIN_SYSTEM_PATH_1_0 = [ "/lib/gstreamer-1.0" ]; GST_PLUGIN_SYSTEM_PATH_1_0 = [ "/lib/gstreamer-1.0" ];
}; };
fonts.fonts = [ (plasma5.oxygen-fonts or pkgs.noto-fonts) ]; # Enable GTK applications to load SVG icons
environment.variables = mkIf (lib.hasAttr "breeze-icons" kde5) {
GDK_PIXBUF_MODULE_FILE = "${pkgs.librsvg}/lib/gdk-pixbuf-2.0/2.10.0/loaders.cache";
};
programs.ssh.askPassword = "${plasma5.ksshaskpass}/bin/ksshaskpass"; fonts.fonts = [ (kde5.oxygen-fonts or pkgs.noto-fonts) ];
programs.ssh.askPassword = "${kde5.ksshaskpass}/bin/ksshaskpass";
# Enable helpful DBus services. # Enable helpful DBus services.
services.udisks2.enable = true; services.udisks2.enable = true;
@ -180,8 +183,8 @@ in
services.xserver.displayManager.sddm = { services.xserver.displayManager.sddm = {
theme = "breeze"; theme = "breeze";
themes = [ themes = [
plasma5.plasma-workspace kde5.plasma-workspace
(kdeApps.oxygen-icons or kf5.oxygen-icons5) (kde5.oxygen-icons or kde5.oxygen-icons5)
]; ];
}; };


@ -37,7 +37,7 @@ let
# file provided by services.xserver.displayManager.session.script # file provided by services.xserver.displayManager.session.script
xsession = wm: dm: pkgs.writeScript "xsession" xsession = wm: dm: pkgs.writeScript "xsession"
'' ''
#! /bin/sh #! ${pkgs.bash}/bin/bash
. /etc/profile . /etc/profile
cd "$HOME" cd "$HOME"


@ -13,9 +13,16 @@ let
# lightdm runs with clearenv(), but we need a few things in the environment for X to start up # lightdm runs with clearenv(), but we need a few things in the environment for X to start up
xserverWrapper = writeScript "xserver-wrapper" xserverWrapper = writeScript "xserver-wrapper"
'' ''
#! /bin/sh #! ${pkgs.bash}/bin/bash
${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)} ${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)}
exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs}
display=$(echo "$@" | xargs -n 1 | grep -P ^:\\d\$ | head -n 1 | sed s/^://)
if [ -z "$display" ]
then additionalArgs=":0 -logfile /var/log/X.0.log"
else additionalArgs="-logfile /var/log/X.$display.log"
fi
exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs} $additionalArgs "$@"
''; '';
usersConf = writeText "users.conf" usersConf = writeText "users.conf"
@ -39,7 +46,6 @@ let
greeter-session = ${cfg.greeter.name} greeter-session = ${cfg.greeter.name}
${cfg.extraSeatDefaults} ${cfg.extraSeatDefaults}
''; '';
in in
{ {
# Note: the order in which lightdm greeter modules are imported # Note: the order in which lightdm greeter modules are imported
@ -98,7 +104,6 @@ in
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.xserver.displayManager.slim.enable = false; services.xserver.displayManager.slim.enable = false;
services.xserver.displayManager.job = { services.xserver.displayManager.job = {
@ -149,5 +154,7 @@ in
services.xserver.displayManager.lightdm.background = mkDefault "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png"; services.xserver.displayManager.lightdm.background = mkDefault "${pkgs.nixos-artwork}/share/artwork/gnome/Gnome_Dark.png";
services.xserver.tty = null; # We might start multiple X servers so let the tty increment themselves..
services.xserver.display = null; # We specify our own display (and logfile) in xserver-wrapper up there
}; };
} }


@ -17,6 +17,16 @@ let
exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs} "$@" exec ${dmcfg.xserverBin} ${dmcfg.xserverArgs} "$@"
''; '';
Xsetup = pkgs.writeScript "Xsetup" ''
#!/bin/sh
${cfg.setupScript}
'';
Xstop = pkgs.writeScript "Xstop" ''
#!/bin/sh
${cfg.stopScript}
'';
cfgFile = pkgs.writeText "sddm.conf" '' cfgFile = pkgs.writeText "sddm.conf" ''
[General] [General]
HaltCommand=${pkgs.systemd}/bin/systemctl poweroff HaltCommand=${pkgs.systemd}/bin/systemctl poweroff
@ -39,6 +49,8 @@ let
SessionCommand=${dmcfg.session.script} SessionCommand=${dmcfg.session.script}
SessionDir=${dmcfg.session.desktops} SessionDir=${dmcfg.session.desktops}
XauthPath=${pkgs.xorg.xauth}/bin/xauth XauthPath=${pkgs.xorg.xauth}/bin/xauth
DisplayCommand=${Xsetup}
DisplayStopCommand=${Xstop}
${optionalString cfg.autoLogin.enable '' ${optionalString cfg.autoLogin.enable ''
[Autologin] [Autologin]
@ -98,6 +110,27 @@ in
''; '';
}; };
setupScript = mkOption {
type = types.str;
default = "";
example = ''
# workaround for using NVIDIA Optimus without Bumblebee
xrandr --setprovideroutputsource modesetting NVIDIA-0
xrandr --auto
'';
description = ''
A script to execute when starting the display server.
'';
};
stopScript = mkOption {
type = types.str;
default = "";
description = ''
A script to execute when stopping the display server.
'';
};
autoLogin = mkOption { autoLogin = mkOption {
default = {}; default = {};
description = '' description = ''
@ -105,7 +138,7 @@ in
''; '';
type = types.submodule { type = types.submodule {
options = { options = {
enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -130,7 +163,7 @@ in
will work only the first time. will work only the first time.
''; '';
}; };
}; };
}; };
}; };
@ -142,14 +175,16 @@ in
assertions = [ assertions = [
{ assertion = cfg.autoLogin.enable -> cfg.autoLogin.user != null; { assertion = cfg.autoLogin.enable -> cfg.autoLogin.user != null;
message = "SDDM auto-login requires services.xserver.displayManager.sddm.autoLogin.user to be set"; message = ''
SDDM auto-login requires services.xserver.displayManager.sddm.autoLogin.user to be set
'';
} }
{ assertion = cfg.autoLogin.enable -> elem defaultSessionName dmcfg.session.names; { assertion = cfg.autoLogin.enable -> elem defaultSessionName dmcfg.session.names;
message = '' message = ''
SDDM auto-login requires that services.xserver.desktopManager.default and SDDM auto-login requires that services.xserver.desktopManager.default and
services.xserver.windowManager.default are set to valid values. The current services.xserver.windowManager.default are set to valid values. The current
default session: ${defaultSessionName} is not valid. default session: ${defaultSessionName} is not valid.
''; '';
} }
]; ];
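
A usage sketch of the new setupScript/stopScript options and the autoLogin submodule; the enable option and autoLogin.user are assumed from the surrounding module and the assertion above:

{ config, ... }:
{
  services.xserver.displayManager.sddm = {
    enable = true; # assumption: declared outside this hunk
    # Wrapped into the generated Xsetup script and run when the display server starts.
    setupScript = ''
      # workaround for using NVIDIA Optimus without Bumblebee
      xrandr --setprovideroutputsource modesetting NVIDIA-0
      xrandr --auto
    '';
    # Wrapped into the generated Xstop script and run when the display server stops.
    stopScript = ''
      echo "X stopped" >> /tmp/sddm-stop.log
    '';
    autoLogin = {
      enable = true;
      user = "alice"; # must be non-null, per the first assertion above
    };
  };
}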


@ -98,13 +98,16 @@ in {
requires = [ "display-manager.service" ]; requires = [ "display-manager.service" ];
after = [ "display-manager.service" ]; after = [ "display-manager.service" ];
wantedBy = [ "graphical.target" ]; wantedBy = [ "graphical.target" ];
serviceConfig.ExecStart = '' serviceConfig = {
${cfg.package}/bin/redshift \ ExecStart = ''
-l ${cfg.latitude}:${cfg.longitude} \ ${cfg.package}/bin/redshift \
-t ${toString cfg.temperature.day}:${toString cfg.temperature.night} \ -l ${cfg.latitude}:${cfg.longitude} \
-b ${toString cfg.brightness.day}:${toString cfg.brightness.night} \ -t ${toString cfg.temperature.day}:${toString cfg.temperature.night} \
${lib.strings.concatStringsSep " " cfg.extraOptions} -b ${toString cfg.brightness.day}:${toString cfg.brightness.night} \
''; ${lib.strings.concatStringsSep " " cfg.extraOptions}
'';
RestartSec = 3;
};
environment = { DISPLAY = ":0"; }; environment = { DISPLAY = ":0"; };
serviceConfig.Restart = "always"; serviceConfig.Restart = "always";
}; };
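
A hedged sketch of the options this redshift unit consumes (the services.redshift path and enable are assumed; the option names come from the cfg.* references in the ExecStart above):

{ config, ... }:
{
  # Hypothetical example; services.redshift and enable are assumed from context.
  services.redshift = {
    enable = true;
    latitude = "52.3";                             # passed as -l lat:long
    longitude = "4.9";
    temperature = { day = 5500; night = 3700; };   # passed as -t day:night (hence the toString)
    brightness = { day = "1.0"; night = "0.8"; };  # passed as -b day:night
    extraOptions = [ "-m randr" ];                 # appended verbatim to the command line
  };
}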


@ -280,6 +280,13 @@ in
''; '';
}; };
xkbDir = mkOption {
type = types.path;
description = ''
Path used for the -xkbdir X server parameter.
'';
};
config = mkOption { config = mkOption {
type = types.lines; type = types.lines;
description = '' description = ''
@ -381,13 +388,13 @@ in
}; };
tty = mkOption { tty = mkOption {
type = types.int; type = types.nullOr types.int;
default = 7; default = 7;
description = "Virtual console for the X server."; description = "Virtual console for the X server.";
}; };
display = mkOption { display = mkOption {
type = types.int; type = types.nullOr types.int;
default = 0; default = 0;
description = "Display number for the X server."; description = "Display number for the X server.";
}; };
@ -409,6 +416,16 @@ in
if possible. if possible.
''; '';
}; };
enableCtrlAltBackspace = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the Ctrl+Alt+Backspace key combination for
forcefully killing X (this turns the server's DontZap option off).
It can lead to data loss and is therefore disabled by default.
'';
};
}; };
}; };
@ -452,7 +469,7 @@ in
target = "X11/xorg.conf"; target = "X11/xorg.conf";
} }
# -xkbdir command line option does not seem to be passed to xkbcomp. # -xkbdir command line option does not seem to be passed to xkbcomp.
{ source = "${pkgs.xkeyboard_config}/etc/X11/xkb"; { source = "${cfg.xkbDir}";
target = "X11/xkb"; target = "X11/xkb";
} }
]); ]);
@ -517,11 +534,12 @@ in
services.xserver.displayManager.xserverArgs = services.xserver.displayManager.xserverArgs =
[ "-ac" [ "-ac"
"-terminate" "-terminate"
"-logfile" "/var/log/X.${toString cfg.display}.log"
"-config ${configFile}" "-config ${configFile}"
":${toString cfg.display}" "vt${toString cfg.tty}" "-xkbdir" "${cfg.xkbDir}"
"-xkbdir" "${pkgs.xkeyboard_config}/etc/X11/xkb" ] ++ optional (cfg.display != null) ":${toString cfg.display}"
] ++ optional (!cfg.enableTCP) "-nolisten tcp"; ++ optional (cfg.tty != null) "vt${toString cfg.tty}"
++ optionals (cfg.display != null) [ "-logfile" "/var/log/X.${toString cfg.display}.log" ]
++ optional (!cfg.enableTCP) "-nolisten tcp";
services.xserver.modules = services.xserver.modules =
concatLists (catAttrs "modules" cfg.drivers) ++ concatLists (catAttrs "modules" cfg.drivers) ++
@ -529,10 +547,13 @@ in
xorg.xf86inputevdev xorg.xf86inputevdev
]; ];
services.xserver.xkbDir = mkDefault "${pkgs.xkeyboard_config}/etc/X11/xkb";
services.xserver.config = services.xserver.config =
'' ''
Section "ServerFlags" Section "ServerFlags"
Option "AllowMouseOpenFail" "on" Option "AllowMouseOpenFail" "on"
Option "DontZap" "${if cfg.enableCtrlAltBackspace then "off" else "on"}"
${cfg.serverFlagsSection} ${cfg.serverFlagsSection}
EndSection EndSection
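
To illustrate the new knobs in this hunk (all paths appear above: services.xserver.enableCtrlAltBackspace, xkbDir, and the now-nullable tty and display):

{ config, pkgs, ... }:
{
  services.xserver = {
    enable = true;
    # New boolean: turns the DontZap server flag off so Ctrl+Alt+Backspace kills X.
    enableCtrlAltBackspace = true;
    # Defaults (via mkDefault) to the xkeyboard_config data; can point elsewhere.
    xkbDir = "${pkgs.xkeyboard_config}/etc/X11/xkb";
    # tty and display may now be null so a display manager (e.g. lightdm above)
    # can choose them itself; 7 and 0 remain the defaults.
    tty = 7;
    display = 0;
  };
}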


@ -10,8 +10,11 @@ let
realGrub = if cfg.version == 1 then pkgs.grub realGrub = if cfg.version == 1 then pkgs.grub
else if cfg.zfsSupport then pkgs.grub2.override { zfsSupport = true; } else if cfg.zfsSupport then pkgs.grub2.override { zfsSupport = true; }
else if cfg.enableTrustedBoot then pkgs.trustedGrub else if cfg.trustedBoot.enable
else pkgs.grub2; then if cfg.trustedBoot.isHPLaptop
then pkgs.trustedGrub-for-HP
else pkgs.trustedGrub
else pkgs.grub2;
grub = grub =
# Don't include GRUB if we're only generating a GRUB menu (e.g., # Don't include GRUB if we're only generating a GRUB menu (e.g.,
@ -369,24 +372,37 @@ in
''; '';
}; };
enableTrustedBoot = mkOption { trustedBoot = {
default = false;
type = types.bool; enable = mkOption {
description = '' default = false;
Enable trusted boot. GRUB will measure all critical components during type = types.bool;
the boot process to offer TCG (TPM) support. description = ''
''; Enable trusted boot. GRUB will measure all critical components during
}; the boot process to offer TCG (TPM) support.
'';
};
systemHasTPM = mkOption {
default = "";
example = "YES_TPM_is_activated";
type = types.string;
description = ''
Assertion that the target system has an activated TPM. It is a safety
check before allowing the activation of 'trustedBoot.enable'. TrustedBoot
WILL FAIL TO BOOT YOUR SYSTEM if no TPM is available.
'';
};
isHPLaptop = mkOption {
default = false;
type = types.bool;
description = ''
Use a special version of TrustedGRUB that is needed by some HP laptops
and works only on those HP laptops.
'';
};
systemHasTPM = mkOption {
default = "";
example = "YES_TPM_is_activated";
type = types.string;
description = ''
Assertion that the target system has an activated TPM. It is a safety
check before allowing the activation of 'enableTrustedBoot'. TrustedBoot
WILL FAIL TO BOOT YOUR SYSTEM if no TPM is available.
'';
}; };
}; };
@ -452,19 +468,19 @@ in
message = "You cannot have duplicated devices in mirroredBoots"; message = "You cannot have duplicated devices in mirroredBoots";
} }
{ {
assertion = !cfg.enableTrustedBoot || cfg.version == 2; assertion = !cfg.trustedBoot.enable || cfg.version == 2;
message = "Trusted GRUB is only available for GRUB 2"; message = "Trusted GRUB is only available for GRUB 2";
} }
{ {
assertion = !cfg.efiSupport || !cfg.enableTrustedBoot; assertion = !cfg.efiSupport || !cfg.trustedBoot.enable;
message = "Trusted GRUB does not have EFI support"; message = "Trusted GRUB does not have EFI support";
} }
{ {
assertion = !cfg.zfsSupport || !cfg.enableTrustedBoot; assertion = !cfg.zfsSupport || !cfg.trustedBoot.enable;
message = "Trusted GRUB does not have ZFS support"; message = "Trusted GRUB does not have ZFS support";
} }
{ {
assertion = !cfg.enableTrustedBoot || cfg.systemHasTPM == "YES_TPM_is_activated"; assertion = !cfg.trustedBoot.enable || cfg.trustedBoot.systemHasTPM == "YES_TPM_is_activated";
message = "Trusted GRUB can break the system! Confirm that the system has an activated TPM by setting 'systemHasTPM'."; message = "Trusted GRUB can break the system! Confirm that the system has an activated TPM by setting 'systemHasTPM'.";
} }
] ++ flip concatMap cfg.mirroredBoots (args: [ ] ++ flip concatMap cfg.mirroredBoots (args: [
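
A sketch of the reorganised trustedBoot submodule; the boot.loader.grub prefix and the enable/device options are assumptions, while the trustedBoot option names and the required systemHasTPM string are taken from the declarations and assertions above:

{ config, ... }:
{
  boot.loader.grub = {
    enable = true;       # assumption: declared outside this hunk
    version = 2;         # Trusted GRUB is only available for GRUB 2 (see assertion)
    device = "/dev/sda"; # assumption: BIOS/MBR install; EFI and ZFS are rejected by the assertions
    trustedBoot = {
      enable = true;
      # Safety switch: the build is refused unless this exact string is set.
      systemHasTPM = "YES_TPM_is_activated";
      # Selects trustedGrub-for-HP instead of trustedGrub.
      isHPLaptop = false;
    };
  };
}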


@ -214,7 +214,7 @@ in
done done
''] ++ (map (pool: '' ''] ++ (map (pool: ''
echo "importing root ZFS pool \"${pool}\"..." echo "importing root ZFS pool \"${pool}\"..."
zpool import -N $ZFS_FORCE "${pool}" zpool import -d /dev/disk/by-id -N $ZFS_FORCE "${pool}"
'') rootPools)); '') rootPools));
}; };
@ -255,7 +255,7 @@ in
}; };
script = '' script = ''
zpool_cmd="${zfsUserPkg}/sbin/zpool" zpool_cmd="${zfsUserPkg}/sbin/zpool"
("$zpool_cmd" list "${pool}" >/dev/null) || "$zpool_cmd" import -N ${optionalString cfgZfs.forceImportAll "-f"} "${pool}" ("$zpool_cmd" list "${pool}" >/dev/null) || "$zpool_cmd" import -d /dev/disk/by-id -N ${optionalString cfgZfs.forceImportAll "-f"} "${pool}"
''; '';
}; };
in listToAttrs (map createImportService dataPools) // { in listToAttrs (map createImportService dataPools) // {


@ -83,13 +83,13 @@ in
# FIXME: get rid of "|| true" (necessary to make it idempotent). # FIXME: get rid of "|| true" (necessary to make it idempotent).
ip route add default via "${cfg.defaultGateway}" ${ ip route add default via "${cfg.defaultGateway}" ${
optionalString (cfg.defaultGatewayWindowSize != null) optionalString (cfg.defaultGatewayWindowSize != null)
"window ${cfg.defaultGatewayWindowSize}"} || true "window ${toString cfg.defaultGatewayWindowSize}"} || true
''} ''}
${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6 != "") '' ${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6 != "") ''
# FIXME: get rid of "|| true" (necessary to make it idempotent). # FIXME: get rid of "|| true" (necessary to make it idempotent).
ip -6 route add ::/0 via "${cfg.defaultGateway6}" ${ ip -6 route add ::/0 via "${cfg.defaultGateway6}" ${
optionalString (cfg.defaultGatewayWindowSize != null) optionalString (cfg.defaultGatewayWindowSize != null)
"window ${cfg.defaultGatewayWindowSize}"} || true "window ${toString cfg.defaultGatewayWindowSize}"} || true
''} ''}
''; '';
}; };
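
The added toString calls suggest defaultGatewayWindowSize is an integer rather than a string; a hedged example of the options this script consumes (the networking prefix is assumed from the cfg references):

{ config, ... }:
{
  # Hypothetical example; the networking.* prefix is assumed.
  networking = {
    defaultGateway = "192.168.1.1";
    defaultGateway6 = "fe80::1";
    # Appended as "window N" to both ip route commands above.
    defaultGatewayWindowSize = 524288;
  };
}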


@ -11,7 +11,7 @@ with lib;
let cfg = config.ec2; in let cfg = config.ec2; in
{ {
imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-grow-partition.nix ]; imports = [ ../profiles/headless.nix ./ec2-data.nix ./amazon-grow-partition.nix ./amazon-init.nix ];
config = { config = {


@ -44,7 +44,6 @@ let
nixos-rebuild switch nixos-rebuild switch
''; '';
in { in {
imports = [ "${modulesPath}/virtualisation/amazon-image.nix" ];
boot.postBootCommands = '' boot.postBootCommands = ''
${bootScript} & ${bootScript} &
''; '';


@ -156,6 +156,12 @@ in
after = [ "ip-up.target" ]; after = [ "ip-up.target" ];
wants = [ "ip-up.target" ]; wants = [ "ip-up.target" ];
environment = {
GIT_SSL_CAINFO = "/etc/ssl/certs/ca-certificates.crt";
OPENSSL_X509_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
SSL_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt";
};
path = [ pkgs.e2fsprogs ]; path = [ pkgs.e2fsprogs ];
description = "Windows Azure Agent Service"; description = "Windows Azure Agent Service";
unitConfig.ConditionPathExists = "/etc/waagent.conf"; unitConfig.ConditionPathExists = "/etc/waagent.conf";


@ -69,7 +69,8 @@ in
description = '' description = ''
The postStart phase of the systemd service. You may need to The postStart phase of the systemd service. You may need to
override this if you are passing in flags to docker which override this if you are passing in flags to docker which
don't cause the socket file to be created. don't cause the socket file to be created. This option is ignored
if socket activation is used.
''; '';
}; };
@ -81,22 +82,29 @@ in
config = mkIf cfg.enable (mkMerge [ config = mkIf cfg.enable (mkMerge [
{ environment.systemPackages = [ pkgs.docker ]; { environment.systemPackages = [ pkgs.docker ];
users.extraGroups.docker.gid = config.ids.gids.docker; users.extraGroups.docker.gid = config.ids.gids.docker;
}
(mkIf cfg.socketActivation {
systemd.services.docker = { systemd.services.docker = {
description = "Docker Application Container Engine"; description = "Docker Application Container Engine";
after = [ "network.target" "docker.socket" ]; wantedBy = optional (!cfg.socketActivation) "multi-user.target";
requires = [ "docker.socket" ]; after = [ "network.target" ] ++ (optional cfg.socketActivation "docker.socket") ;
requires = optional cfg.socketActivation "docker.socket";
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.docker}/bin/docker daemon --host=fd:// --group=docker --storage-driver=${cfg.storageDriver} ${cfg.extraOptions}"; ExecStart = "${pkgs.docker}/bin/docker daemon --group=docker --storage-driver=${cfg.storageDriver} ${optionalString cfg.socketActivation "--host=fd://"} ${cfg.extraOptions}";
# I'm not sure if that limits aren't too high, but it's what # I'm not sure if that limits aren't too high, but it's what
# goes in config bundled with docker itself # goes in config bundled with docker itself
LimitNOFILE = 1048576; LimitNOFILE = 1048576;
LimitNPROC = 1048576; LimitNPROC = 1048576;
} // proxy_env; } // proxy_env;
};
path = [ pkgs.kmod ] ++ (optional (cfg.storageDriver == "zfs") pkgs.zfs);
environment.MODULE_DIR = "/run/current-system/kernel-modules/lib/modules";
postStart = if cfg.socketActivation then "" else cfg.postStart;
# Presumably some containers are running we don't want to interrupt
restartIfChanged = false;
};
}
(mkIf cfg.socketActivation {
systemd.sockets.docker = { systemd.sockets.docker = {
description = "Docker Socket for the API"; description = "Docker Socket for the API";
wantedBy = [ "sockets.target" ]; wantedBy = [ "sockets.target" ];
@ -108,29 +116,6 @@ in
}; };
}; };
}) })
(mkIf (!cfg.socketActivation) {
systemd.services.docker = {
description = "Docker Application Container Engine";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.docker}/bin/docker daemon --group=docker --storage-driver=${cfg.storageDriver} ${cfg.extraOptions}";
# I'm not sure if that limits aren't too high, but it's what
# goes in config bundled with docker itself
LimitNOFILE = 1048576;
LimitNPROC = 1048576;
} // proxy_env;
path = [ pkgs.kmod ] ++ (optional (cfg.storageDriver == "zfs") pkgs.zfs);
environment.MODULE_DIR = "/run/current-system/kernel-modules/lib/modules";
postStart = cfg.postStart;
# Presumably some containers are running we don't want to interrupt
restartIfChanged = false;
};
})
]); ]);
} }
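
A sketch of the two modes the rewritten docker service supports (the virtualisation.docker prefix and the enable option are assumptions; only cfg.* references appear in the hunk):

{ config, ... }:
{
  # Hypothetical example; virtualisation.docker and enable are assumed from context.
  virtualisation.docker = {
    enable = true;
    # false: docker.service is wanted by multi-user.target and postStart waits as before.
    # true:  docker.socket is used instead and postStart is ignored.
    socketActivation = false;
    storageDriver = "devicemapper"; # "zfs" would also pull pkgs.zfs into the unit's path
    extraOptions = "--dns 8.8.8.8"; # appended verbatim to the daemon command line
  };
}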


@ -40,16 +40,17 @@ let
if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir) TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
fi fi
# Create a directory for exchanging data with the VM. # Create a directory for exchanging data with the VM.
mkdir -p $TMPDIR/xchg mkdir -p $TMPDIR/xchg
${if cfg.useBootLoader then '' ${if cfg.useBootLoader then ''
# Create a writable copy/snapshot of the boot disk # Create a writable copy/snapshot of the boot disk.
# A writable boot disk can be booted from automatically # A writable boot disk can be booted from automatically.
${pkgs.qemu_kvm}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1 ${pkgs.qemu_kvm}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
${if cfg.useEFIBoot then '' ${if cfg.useEFIBoot then ''
# VM needs a writable flash BIOS # VM needs a writable flash BIOS.
cp ${bootDisk}/bios.bin $TMPDIR || exit 1 cp ${bootDisk}/bios.bin $TMPDIR || exit 1
chmod 0644 $TMPDIR/bios.bin || exit 1 chmod 0644 $TMPDIR/bios.bin || exit 1
'' else '' '' else ''
@ -76,14 +77,14 @@ let
-virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \ -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
-virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \ -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
${if cfg.useBootLoader then '' ${if cfg.useBootLoader then ''
-drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=writeback,werror=report \ -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=none,werror=report \
-drive index=1,id=drive2,file=$TMPDIR/disk.img,media=disk \ -drive index=1,id=drive2,file=$TMPDIR/disk.img,media=disk \
${if cfg.useEFIBoot then '' ${if cfg.useEFIBoot then ''
-pflash $TMPDIR/bios.bin \ -pflash $TMPDIR/bios.bin \
'' else '' '' else ''
''} ''}
'' else '' '' else ''
-drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=writeback,werror=report \ -drive index=0,id=drive1,file=$NIX_DISK_IMAGE,if=${cfg.qemu.diskInterface},cache=none,werror=report \
-kernel ${config.system.build.toplevel}/kernel \ -kernel ${config.system.build.toplevel}/kernel \
-initrd ${config.system.build.toplevel}/initrd \ -initrd ${config.system.build.toplevel}/initrd \
-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo} ${kernelConsole} $QEMU_KERNEL_PARAMS" \ -append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo} ${kernelConsole} $QEMU_KERNEL_PARAMS" \
@ -297,6 +298,7 @@ in
virtualisation.qemu = { virtualisation.qemu = {
options = options =
mkOption { mkOption {
type = types.listOf types.unspecified;
default = []; default = [];
example = [ "-vga std" ]; example = [ "-vga std" ];
description = "Options passed to QEMU."; description = "Options passed to QEMU.";
@ -425,19 +427,19 @@ in
${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} = ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
{ device = "store"; { device = "store";
fsType = "9p"; fsType = "9p";
options = "trans=virtio,version=9p2000.L,msize=1048576,cache=loose"; options = "trans=virtio,version=9p2000.L,cache=loose";
neededForBoot = true; neededForBoot = true;
}; };
"/tmp/xchg" = "/tmp/xchg" =
{ device = "xchg"; { device = "xchg";
fsType = "9p"; fsType = "9p";
options = "trans=virtio,version=9p2000.L,msize=1048576,cache=loose"; options = "trans=virtio,version=9p2000.L,cache=loose";
neededForBoot = true; neededForBoot = true;
}; };
"/tmp/shared" = "/tmp/shared" =
{ device = "shared"; { device = "shared";
fsType = "9p"; fsType = "9p";
options = "trans=virtio,version=9p2000.L,msize=1048576"; options = "trans=virtio,version=9p2000.L";
neededForBoot = true; neededForBoot = true;
}; };
} // optionalAttrs cfg.writableStore } // optionalAttrs cfg.writableStore


@ -71,6 +71,7 @@ in rec {
(all nixos.tests.misc) (all nixos.tests.misc)
(all nixos.tests.nat.firewall) (all nixos.tests.nat.firewall)
(all nixos.tests.nat.standalone) (all nixos.tests.nat.standalone)
(all nixos.tests.networking.scripted.loopback)
(all nixos.tests.networking.scripted.static) (all nixos.tests.networking.scripted.static)
(all nixos.tests.networking.scripted.dhcpSimple) (all nixos.tests.networking.scripted.dhcpSimple)
(all nixos.tests.networking.scripted.dhcpOneIf) (all nixos.tests.networking.scripted.dhcpOneIf)


@ -47,6 +47,7 @@ let
}; };
inherit iso; inherit iso;
passthru = { inherit config; }; passthru = { inherit config; };
preferLocalBuild = true;
} }
'' ''
mkdir -p $out/nix-support mkdir -p $out/nix-support
@ -149,6 +150,7 @@ in rec {
maintainers = maintainers.eelco; maintainers = maintainers.eelco;
}; };
ova = config.system.build.virtualBoxOVA; ova = config.system.build.virtualBoxOVA;
preferLocalBuild = true;
} }
'' ''
mkdir -p $out/nix-support mkdir -p $out/nix-support
@ -168,6 +170,7 @@ in rec {
boot.loader.grub.device = mkDefault "/dev/sda"; boot.loader.grub.device = mkDefault "/dev/sda";
}); });
}).config.system.build.toplevel; }).config.system.build.toplevel;
preferLocalBuild = true;
} }
"mkdir $out; ln -s $toplevel $out/dummy"); "mkdir $out; ln -s $toplevel $out/dummy");
@ -220,7 +223,7 @@ in rec {
tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; }); tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; });
tests.etcd = hydraJob (import tests/etcd.nix { system = "x86_64-linux"; }); tests.etcd = hydraJob (import tests/etcd.nix { system = "x86_64-linux"; });
tests.ec2-nixops = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-nixops; tests.ec2-nixops = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-nixops;
#tests.ec2-config = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-config; tests.ec2-config = hydraJob (import tests/ec2.nix { system = "x86_64-linux"; }).boot-ec2-config;
tests.firefox = callTest tests/firefox.nix {}; tests.firefox = callTest tests/firefox.nix {};
tests.firewall = callTest tests/firewall.nix {}; tests.firewall = callTest tests/firewall.nix {};
tests.fleet = hydraJob (import tests/fleet.nix { system = "x86_64-linux"; }); tests.fleet = hydraJob (import tests/fleet.nix { system = "x86_64-linux"; });
@ -256,6 +259,7 @@ in rec {
tests.mysqlReplication = callTest tests/mysql-replication.nix {}; tests.mysqlReplication = callTest tests/mysql-replication.nix {};
tests.nat.firewall = callTest tests/nat.nix { withFirewall = true; }; tests.nat.firewall = callTest tests/nat.nix { withFirewall = true; };
tests.nat.standalone = callTest tests/nat.nix { withFirewall = false; }; tests.nat.standalone = callTest tests/nat.nix { withFirewall = false; };
tests.networking.networkd.loopback = callTest tests/networking.nix { networkd = true; test = "loopback"; };
tests.networking.networkd.static = callTest tests/networking.nix { networkd = true; test = "static"; }; tests.networking.networkd.static = callTest tests/networking.nix { networkd = true; test = "static"; };
tests.networking.networkd.dhcpSimple = callTest tests/networking.nix { networkd = true; test = "dhcpSimple"; }; tests.networking.networkd.dhcpSimple = callTest tests/networking.nix { networkd = true; test = "dhcpSimple"; };
tests.networking.networkd.dhcpOneIf = callTest tests/networking.nix { networkd = true; test = "dhcpOneIf"; }; tests.networking.networkd.dhcpOneIf = callTest tests/networking.nix { networkd = true; test = "dhcpOneIf"; };
@ -264,6 +268,7 @@ in rec {
tests.networking.networkd.macvlan = callTest tests/networking.nix { networkd = true; test = "macvlan"; }; tests.networking.networkd.macvlan = callTest tests/networking.nix { networkd = true; test = "macvlan"; };
tests.networking.networkd.sit = callTest tests/networking.nix { networkd = true; test = "sit"; }; tests.networking.networkd.sit = callTest tests/networking.nix { networkd = true; test = "sit"; };
tests.networking.networkd.vlan = callTest tests/networking.nix { networkd = true; test = "vlan"; }; tests.networking.networkd.vlan = callTest tests/networking.nix { networkd = true; test = "vlan"; };
tests.networking.scripted.loopback = callTest tests/networking.nix { networkd = false; test = "loopback"; };
tests.networking.scripted.static = callTest tests/networking.nix { networkd = false; test = "static"; }; tests.networking.scripted.static = callTest tests/networking.nix { networkd = false; test = "static"; };
tests.networking.scripted.dhcpSimple = callTest tests/networking.nix { networkd = false; test = "dhcpSimple"; }; tests.networking.scripted.dhcpSimple = callTest tests/networking.nix { networkd = false; test = "dhcpSimple"; };
tests.networking.scripted.dhcpOneIf = callTest tests/networking.nix { networkd = false; test = "dhcpOneIf"; }; tests.networking.scripted.dhcpOneIf = callTest tests/networking.nix { networkd = false; test = "dhcpOneIf"; };

View file

@ -31,6 +31,17 @@ import ./make-test.nix ({ pkgs, networkd, test, ... }:
}; };
}; };
testCases = { testCases = {
loopback = {
name = "Loopback";
machine.networking.useNetworkd = networkd;
testScript = ''
startAll;
$machine->waitForUnit("network-interfaces.target");
$machine->waitForUnit("network.target");
$machine->succeed("ip addr show lo | grep -q 'inet 127.0.0.1/8 '");
$machine->succeed("ip addr show lo | grep -q 'inet6 ::1/128 '");
'';
};
static = { static = {
name = "Static"; name = "Static";
nodes.router = router; nodes.router = router;

nixos/tests/slurm.nix (new file, 80 lines)

@ -0,0 +1,80 @@
import ./make-test.nix ({ pkgs, ... }:
let mungekey = "mungeverryweakkeybuteasytointegratoinatest";
slurmconfig = {
client.enable = true;
controlMachine = "control";
nodeName = ''
control
NodeName=node[1-3] CPUs=1 State=UNKNOWN
'';
partitionName = "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP";
};
in {
name = "slurm";
nodes =
let
computeNode =
{ config, pkgs, ...}:
{
# TODO: slurmd port and slurmctld port should be configurable and
# automatically allowed by the firewall.
networking.firewall.enable = false;
services.munge.enable = true;
services.slurm = slurmconfig;
};
in {
control =
{ config, pkgs, ...}:
{
networking.firewall.enable = false;
services.munge.enable = true;
services.slurm = {
server.enable = true;
} // slurmconfig;
};
node1 = computeNode;
node2 = computeNode;
node3 = computeNode;
};
testScript =
''
startAll;
# Set up authentication across the cluster
foreach my $node (($control,$node1,$node2,$node3))
{
$node->waitForUnit("default.target");
$node->succeed("mkdir /etc/munge");
$node->succeed("echo '${mungekey}' > /etc/munge/munge.key");
$node->succeed("chmod 0400 /etc/munge/munge.key");
$node->succeed("systemctl restart munged");
}
# Restart the services since they have probably failed due to the munge init
# failure
subtest "can_start_slurmctld", sub {
$control->succeed("systemctl restart slurmctld");
$control->waitForUnit("slurmctld.service");
};
subtest "can_start_slurmd", sub {
foreach my $node (($control,$node1,$node2,$node3))
{
$node->succeed("systemctl restart slurmd.service");
$node->waitForUnit("slurmd");
}
};
# Test that the cluster works and can distribute jobs;
subtest "run_distributed_command", sub {
# Run `hostname` on 3 nodes of the partition (i.e. on all 3 nodes).
# The output must contain the 3 different names
$control->succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq");
};
'';
})


@ -2,6 +2,7 @@
, qtscriptgenerator, gettext, curl , libxml2, mysql, taglib , qtscriptgenerator, gettext, curl , libxml2, mysql, taglib
, taglib_extras, loudmouth , kdelibs , qca2, libmtp, liblastfm, libgpod , taglib_extras, loudmouth , kdelibs , qca2, libmtp, liblastfm, libgpod
, phonon , strigi, soprano, qjson, ffmpeg, libofa, nepomuk_core ? null , phonon , strigi, soprano, qjson, ffmpeg, libofa, nepomuk_core ? null
, lz4, lzo, snappy, libaio
}: }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
@ -23,8 +24,14 @@ stdenv.mkDerivation rec {
qtscriptgenerator stdenv.cc.libc gettext curl libxml2 mysql.lib qtscriptgenerator stdenv.cc.libc gettext curl libxml2 mysql.lib
taglib taglib_extras loudmouth kdelibs phonon strigi soprano qca2 taglib taglib_extras loudmouth kdelibs phonon strigi soprano qca2
libmtp liblastfm libgpod qjson ffmpeg libofa nepomuk_core libmtp liblastfm libgpod qjson ffmpeg libofa nepomuk_core
lz4 lzo snappy libaio
]; ];
# This is already fixed upstream, will be release in 2.9
preConfigure = ''
sed -i -e 's/STRLESS/VERSION_LESS/g' cmake/modules/FindTaglib.cmake
'';
cmakeFlags = "-DKDE4_BUILD_TESTS=OFF"; cmakeFlags = "-DKDE4_BUILD_TESTS=OFF";
propagatedUserEnvPkgs = [ qtscriptgenerator ]; propagatedUserEnvPkgs = [ qtscriptgenerator ];


@ -93,6 +93,11 @@ stdenv.mkDerivation rec {
"-DENABLE_UDISKS2=ON" "-DENABLE_UDISKS2=ON"
]; ];
# This is already fixed upstream but not released yet. Maybe in version 2.
preConfigure = ''
sed -i -e 's/STRLESS/VERSION_LESS/g' cmake/FindTaglib.cmake
'';
postInstall = stdenv.lib.optionalString withQt5 '' postInstall = stdenv.lib.optionalString withQt5 ''
wrapQtProgram "$out/bin/cantata" wrapQtProgram "$out/bin/cantata"
''; '';


@ -6,23 +6,24 @@ let
archUrl = name: arch: "http://dl.google.com/linux/musicmanager/deb/pool/main/g/google-musicmanager-beta/${name}_${arch}.deb"; archUrl = name: arch: "http://dl.google.com/linux/musicmanager/deb/pool/main/g/google-musicmanager-beta/${name}_${arch}.deb";
in in
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
version = "beta_1.0.221.5230-r0"; # friendly to nix-env version sorting algo version = "beta_1.0.243.1116-r0"; # friendly to nix-env version sorting algo
product = "google-musicmanager"; product = "google-musicmanager";
name = "${product}-${version}"; name = "${product}-${version}";
# When looking for newer versions, since google doesn't let you list their repo dirs, # When looking for newer versions, since google doesn't let you list their repo dirs,
# curl http://dl.google.com/linux/musicmanager/deb/dists/stable/Release # curl http://dl.google.com/linux/musicmanager/deb/dists/stable/Release
# fetch an appropriate packages file eg main/binary-amd64/Packages # fetch an appropriate packages file such as main/binary-amd64/Packages:
# curl http://dl.google.com/linux/musicmanager/deb/dists/stable/main/binary-amd64/Packages
# which will contain the links to all available *.debs for the arch. # which will contain the links to all available *.debs for the arch.
src = if stdenv.system == "x86_64-linux" src = if stdenv.system == "x86_64-linux"
then fetchurl { then fetchurl {
url = archUrl name "amd64"; url = archUrl name "amd64";
sha256 = "1h0ssbz6y9xi2szalgb5wcxi8m1ylg4qf2za6zgvi908hpan7q37"; sha256 = "54f97f449136e173492d36084f2c01244b84f02d6e223fb8a40661093e0bec7c";
} }
else fetchurl { else fetchurl {
url = archUrl name "i386"; url = archUrl name "i386";
sha256 = "0q8cnzx7s25bpqlbp40d43mwd6m8kvhvdifkqlgc9phpydnqpd1i"; sha256 = "121a7939015e2270afa3f1c73554102e2b4f2e6a31482ff7be5e7c28dd101d3c";
}; };
unpackPhase = '' unpackPhase = ''


@ -0,0 +1,23 @@
{ stdenv, fetchurl, pythonPackages, mopidy }:
pythonPackages.buildPythonPackage rec {
name = "mopidy-gmusic-${version}";
version = "1.0.0";
src = fetchurl {
url = "https://github.com/mopidy/mopidy-gmusic/archive/v${version}.tar.gz";
sha256 = "0yfilzfamy1bxnmgb1xk56jrk4sz0i7vcnc0a8klrm9sc7agnm9i";
};
propagatedBuildInputs = [ mopidy pythonPackages.requests2 pythonPackages.gmusicapi ];
doCheck = false;
meta = with stdenv.lib; {
homepage = http://www.mopidy.com/;
description = "Mopidy extension for playing music from Google Play Music";
license = licenses.asl20;
maintainers = [ maintainers.jgillich ];
hydraPlatforms = [];
};
}


@ -3,11 +3,11 @@
pythonPackages.buildPythonPackage rec { pythonPackages.buildPythonPackage rec {
name = "mopidy-mopify-${version}"; name = "mopidy-mopify-${version}";
version = "1.5.1"; version = "1.5.8";
src = fetchurl { src = fetchurl {
url = "https://github.com/dirkgroenen/mopidy-mopify/archive/${version}.tar.gz"; url = "https://github.com/dirkgroenen/mopidy-mopify/archive/${version}.tar.gz";
sha256 = "0hhdss4i5436dj37pndxk81a4g3g8f6zqjyv04lhpqcww01290as"; sha256 = "1gq88i5hbyskwhqf51myndqgmrndkyy6gs022sc387fy3dwxmvn0";
}; };
propagatedBuildInputs = with pythonPackages; [ mopidy configobj ]; propagatedBuildInputs = with pythonPackages; [ mopidy configobj ];


@ -0,0 +1,24 @@
{ stdenv, fetchFromGitHub, pythonPackages, mopidy }:
pythonPackages.buildPythonPackage rec {
name = "mopidy-musicbox-webclient-${version}";
version = "2.0.0";
src = fetchFromGitHub {
owner = "pimusicbox";
repo = "mopidy-musicbox-webclient";
rev = "v${version}";
sha256 = "0gnw6jn55jr6q7bdp70mk3cm5f6jy8lm3s7ayfmisihhjbl3rnaq";
};
propagatedBuildInputs = [ mopidy ];
doCheck = false;
meta = with stdenv.lib; {
description = "Mopidy extension for playing music from SoundCloud";
license = licenses.mit;
maintainers = [ maintainers.spwhitt ];
};
}


@ -0,0 +1,24 @@
{ stdenv, fetchFromGitHub, pythonPackages, mopidy }:
pythonPackages.buildPythonPackage rec {
name = "mopidy-soundcloud-${version}";
version = "2.0.1";
src = fetchFromGitHub {
owner = "mopidy";
repo = "mopidy-soundcloud";
rev = "v${version}";
sha256 = "05yvjnivj26wjish7x1xrd9l5z8i14b610a8pbifnq3cq7y2m22r";
};
propagatedBuildInputs = [ mopidy ];
doCheck = false;
meta = with stdenv.lib; {
description = "Mopidy extension for playing music from SoundCloud";
license = licenses.mit;
maintainers = [ maintainers.spwhitt ];
};
}


@ -0,0 +1,24 @@
{ stdenv, fetchFromGitHub, pythonPackages, mopidy, mopidy-spotify }:
pythonPackages.buildPythonPackage rec {
name = "mopidy-spotify-tunigo-${version}";
version = "0.2.1";
src = fetchFromGitHub {
owner = "trygveaa";
repo = "mopidy-spotify-tunigo";
rev = "v${version}";
sha256 = "0827wghbgrscncnshz30l97hgg0g5bsnm0ad8596zh7cai0ibss0";
};
propagatedBuildInputs = [ mopidy mopidy-spotify pythonPackages.tunigo ];
doCheck = false;
meta = with stdenv.lib; {
description = "Mopidy extension for providing the browse feature of Spotify";
license = licenses.asl20;
maintainers = [ maintainers.spwhitt ];
};
}


@ -0,0 +1,24 @@
{ stdenv, fetchFromGitHub, pythonPackages, mopidy }:
pythonPackages.buildPythonPackage rec {
name = "mopidy-youtube-${version}";
version = "2.0.1";
src = fetchFromGitHub {
owner = "mopidy";
repo = "mopidy-youtube";
rev = "v${version}";
sha256 = "1si7j7m5kg0cxlhkw8s2mbnpmc9mb3l69n5sgklb1yv1s55iia6z";
};
propagatedBuildInputs = with pythonPackages; [ mopidy pafy ];
doCheck = false;
meta = with stdenv.lib; {
description = "Mopidy extension for playing music from YouTube";
license = licenses.asl20;
maintainers = [ maintainers.spwhitt ];
};
}


@ -1,19 +1,19 @@
{ stdenv, fetchurl, pythonPackages, pygobject, gst_python { stdenv, fetchurl, pythonPackages, pygobject, gst_python
, gst_plugins_good, gst_plugins_base , gst_plugins_good, gst_plugins_base, gst_plugins_ugly
}: }:
pythonPackages.buildPythonPackage rec { pythonPackages.buildPythonPackage rec {
name = "mopidy-${version}"; name = "mopidy-${version}";
version = "1.0.5"; version = "1.1.1";
src = fetchurl { src = fetchurl {
url = "https://github.com/mopidy/mopidy/archive/v${version}.tar.gz"; url = "https://github.com/mopidy/mopidy/archive/v${version}.tar.gz";
sha256 = "0lhmm2w2djf6mb3acw1yq1k4j74v1lf4kgx24dsdnpkgsycrv5q6"; sha256 = "1xfyg8xqgnrb98wx7a4fzr4vlzkffjhkc1s36ka63rwmx86vqhyw";
}; };
propagatedBuildInputs = with pythonPackages; [ propagatedBuildInputs = with pythonPackages; [
gst_python pygobject pykka tornado gst_plugins_base gst_plugins_good gst_python pygobject pykka tornado requests2 gst_plugins_base gst_plugins_good gst_plugins_ugly
]; ];
# There are no tests # There are no tests


@ -1,5 +1,5 @@
{ stdenv, fetchurl, boost, mpd_clientlib, ncurses, pkgconfig, readline { stdenv, fetchurl, boost, mpd_clientlib, ncurses, pkgconfig, readline
, libiconv , libiconv, icu
, outputsSupport ? false # outputs screen , outputsSupport ? false # outputs screen
, visualizerSupport ? false, fftw ? null # visualizer screen , visualizerSupport ? false, fftw ? null # visualizer screen
, clockSupport ? false # clock screen , clockSupport ? false # clock screen
@ -15,11 +15,11 @@ assert taglibSupport -> (taglib != null);
with stdenv.lib; with stdenv.lib;
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "ncmpcpp-${version}"; name = "ncmpcpp-${version}";
version = "0.6.7"; version = "0.7";
src = fetchurl { src = fetchurl {
url = "http://ncmpcpp.rybczak.net/stable/${name}.tar.bz2"; url = "http://ncmpcpp.rybczak.net/stable/${name}.tar.bz2";
sha256 = "0yr1ib14qkgbsv839anpzkfbwkm6gg8wv4bf98ar7q5l2p2pv008"; sha256 = "0xzz0g9whqjcjaaqmsw5ph1zvpi2j5v3i5k73g7916rca3q4z4jh";
}; };
configureFlags = [ "BOOST_LIB_SUFFIX=" ] configureFlags = [ "BOOST_LIB_SUFFIX=" ]
@ -32,7 +32,7 @@ stdenv.mkDerivation rec {
nativeBuildInputs = [ pkgconfig ]; nativeBuildInputs = [ pkgconfig ];
buildInputs = [ boost mpd_clientlib ncurses readline libiconv ] buildInputs = [ boost mpd_clientlib ncurses readline libiconv icu ]
++ optional curlSupport curl ++ optional curlSupport curl
++ optional visualizerSupport fftw ++ optional visualizerSupport fftw
++ optional taglibSupport taglib; ++ optional taglibSupport taglib;


@ -1,12 +1,12 @@
{ stdenv, fetchurl, alsaLib, gtk, pkgconfig }: { stdenv, fetchurl, alsaLib, gtk, pkgconfig }:
let version = "5417"; in stdenv.mkDerivation rec {
stdenv.mkDerivation {
name = "praat-${version}"; name = "praat-${version}";
version = "5.4.17";
src = fetchurl { src = fetchurl {
url = "http://www.fon.hum.uva.nl/praat/praat${version}_sources.tar.gz"; url = "https://github.com/praat/praat/archive/v${version}.tar.gz";
sha256 = "1bspl963pb1s6k3cd9p3g5j518pxg6hkrann945lqsrvbzaa20kl"; sha256 = "0s2hrksghg686059vc90h3ywhd2702pqcvy99icw27q5mdk6dqsx";
}; };
configurePhase = '' configurePhase = ''


@ -1,7 +1,7 @@
{ fetchurl, stdenv, dpkg, xorg, qt4, alsaLib, makeWrapper, openssl, freetype { fetchurl, stdenv, dpkg, xorg, qt4, alsaLib, makeWrapper, openssl_1_0_1, freetype
, glib, pango, cairo, atk, gdk_pixbuf, gtk, cups, nspr, nss, libpng, GConf , glib, pango, cairo, atk, gdk_pixbuf, gtk, cups, nspr, nss, libpng, GConf
, libgcrypt, chromium, udev, fontconfig , libgcrypt, chromium, udev, fontconfig
, dbus, expat }: , dbus, expat, ffmpeg_0_10 }:
assert stdenv.system == "x86_64-linux"; assert stdenv.system == "x86_64-linux";
@ -15,6 +15,7 @@ let
cups cups
dbus dbus
expat expat
ffmpeg_0_10
fontconfig fontconfig
freetype freetype
GConf GConf
@ -66,8 +67,8 @@ stdenv.mkDerivation {
# Work around Spotify referring to a specific minor version of # Work around Spotify referring to a specific minor version of
# OpenSSL. # OpenSSL.
ln -s ${openssl.out}/lib/libssl.so $libdir/libssl.so.1.0.0 ln -s ${openssl_1_0_1.out}/lib/libssl.so $libdir/libssl.so.1.0.0
ln -s ${openssl.out}/lib/libcrypto.so $libdir/libcrypto.so.1.0.0 ln -s ${openssl_1_0_1.out}/lib/libcrypto.so $libdir/libcrypto.so.1.0.0
ln -s ${nspr.out}/lib/libnspr4.so $libdir/libnspr4.so ln -s ${nspr.out}/lib/libnspr4.so $libdir/libnspr4.so
ln -s ${nspr.out}/lib/libplc4.so $libdir/libplc4.so ln -s ${nspr.out}/lib/libplc4.so $libdir/libplc4.so


@ -11,9 +11,7 @@ assert enableXMPP -> libjreen != null;
assert enableKDE -> kdelibs != null; assert enableKDE -> kdelibs != null;
assert enableTelepathy -> telepathy_qt != null; assert enableTelepathy -> telepathy_qt != null;
let stdenv.mkDerivation rec {
quazipQt4 = quazip.override { qt = qt4; };
in stdenv.mkDerivation rec {
name = "tomahawk-${version}"; name = "tomahawk-${version}";
version = "0.8.4"; version = "0.8.4";
@ -29,7 +27,7 @@ in stdenv.mkDerivation rec {
buildInputs = [ buildInputs = [
cmake pkgconfig attica boost gnutls libechonest liblastfm lucenepp phonon cmake pkgconfig attica boost gnutls libechonest liblastfm lucenepp phonon
qca2 qjson qt4 qtkeychain quazipQt4 sparsehash taglib websocketpp qca2 qjson qt4 qtkeychain quazip sparsehash taglib websocketpp
makeWrapper makeWrapper
] ++ stdenv.lib.optional enableXMPP libjreen ] ++ stdenv.lib.optional enableXMPP libjreen
++ stdenv.lib.optional enableKDE kdelibs ++ stdenv.lib.optional enableKDE kdelibs


@ -8,7 +8,7 @@ stdenv.mkDerivation rec {
url = "https://github.com/zamaudio/zam-plugins.git"; url = "https://github.com/zamaudio/zam-plugins.git";
deepClone = true; deepClone = true;
rev = "91fe56931a3e57b80f18c740d2dde6b44f962aee"; rev = "91fe56931a3e57b80f18c740d2dde6b44f962aee";
sha256 = "17slpywjs04xbcylyqjg6kqbpqwqbigf843y437yfvj1ar6ir1jp"; sha256 = "0n29zxg4l2m3jsnfw6q2alyzaw7ibbv9nvk57k07sv3lh2yy3f30";
}; };
buildInputs = [ boost libX11 mesa liblo libjack2 ladspaH lv2 pkgconfig rubberband libsndfile ]; buildInputs = [ boost libX11 mesa liblo libjack2 ladspaH lv2 pkgconfig rubberband libsndfile ];

View file

@ -45,6 +45,11 @@ let
enableParallelBuilding = true; enableParallelBuilding = true;
postInstall = ''
# remove empty scripts
rm "$out/share/sddm/scripts/Xsetup" "$out/share/sddm/scripts/Xstop"
'';
meta = with stdenv.lib; { meta = with stdenv.lib; {
description = "QML based X11 display manager"; description = "QML based X11 display manager";
homepage = https://github.com/sddm/sddm; homepage = https://github.com/sddm/sddm;

View file

@ -16,11 +16,11 @@ let
}; };
in stdenv.mkDerivation rec { in stdenv.mkDerivation rec {
name = "atom-${version}"; name = "atom-${version}";
version = "1.2.0"; version = "1.3.1";
src = fetchurl { src = fetchurl {
url = "https://github.com/atom/atom/releases/download/v${version}/atom-amd64.deb"; url = "https://github.com/atom/atom/releases/download/v${version}/atom-amd64.deb";
sha256 = "05s3kvsz6pzh4gm22aaps1nccp76skfshdzlqwg0qn0ljz58sdqh"; sha256 = "17q5vrvjsyxcd8favp0sldfvhcwr0ba6ws32df6iv2iyla5h94y1";
name = "${name}.deb"; name = "${name}.deb";
}; };

View file

@ -0,0 +1,13 @@
diff --git a/lib/careadlinkat.h b/lib/careadlinkat.h
index 5cdb813..7a272e8 100644
--- a/lib/careadlinkat.h
+++ b/lib/careadlinkat.h
@@ -23,6 +23,8 @@
#include <fcntl.h>
#include <unistd.h>
+#define AT_FDCWD -2
+
struct allocator;
/* Assuming the current directory is FD, get the symbolic link value

View file

@ -0,0 +1,38 @@
source $stdenv/setup
# This hook is supposed to be run on Linux. It patches the proper locations of
# the crt{1,i,n}.o files into the build to ensure that Emacs is linked with
# *our* versions, not the ones found in the system, as it would do by default.
# On other platforms, this appears to be unnecessary.
preConfigure() {
for i in Makefile.in ./src/Makefile.in ./lib-src/Makefile.in ./leim/Makefile.in; do
substituteInPlace $i --replace /bin/pwd pwd
done
case "${system}" in
x86_64-linux) glibclibdir=lib64 ;;
i686-linux) glibclibdir=lib ;;
*) return;
esac
libc=$(cat ${NIX_CC}/nix-support/orig-libc)
echo "libc: $libc"
for i in src/s/*.h src/m/*.h; do
substituteInPlace $i \
--replace /usr/${glibclibdir}/crt1.o $libc/${glibclibdir}/crt1.o \
--replace /usr/${glibclibdir}/crti.o $libc/${glibclibdir}/crti.o \
--replace /usr/${glibclibdir}/crtn.o $libc/${glibclibdir}/crtn.o \
--replace /usr/lib/crt1.o $libc/${glibclibdir}/crt1.o \
--replace /usr/lib/crti.o $libc/${glibclibdir}/crti.o \
--replace /usr/lib/crtn.o $libc/${glibclibdir}/crtn.o
done
}
preInstall () {
for i in Makefile.in ./src/Makefile.in ./lib-src/Makefile.in ./leim/Makefile.in; do
substituteInPlace $i --replace /bin/pwd pwd
done
}
genericBuild

View file

@ -0,0 +1,113 @@
{ stdenv, fetchgit, ncurses, xlibsWrapper, libXaw, libXpm, Xaw3d
, pkgconfig, gettext, libXft, dbus, libpng, libjpeg, libungif
, libtiff, librsvg, texinfo, gconf, libxml2, imagemagick, gnutls
, alsaLib, cairo, acl, gpm, AppKit, Foundation, libobjc
, autoconf, automake
, withX ? !stdenv.isDarwin
, withGTK3 ? false, gtk3 ? null
, withGTK2 ? true, gtk2
}:
assert (libXft != null) -> libpng != null; # probably a bug
assert stdenv.isDarwin -> libXaw != null; # fails to link otherwise
assert withGTK2 -> withX || stdenv.isDarwin;
assert withGTK3 -> withX || stdenv.isDarwin;
assert withGTK2 -> !withGTK3 && gtk2 != null;
assert withGTK3 -> !withGTK2 && gtk3 != null;
let
toolkit =
if withGTK3 then "gtk3"
else if withGTK2 then "gtk2"
else "lucid";
in
stdenv.mkDerivation rec {
name = "emacs-25.0.50-1b5630e";
builder = ./builder.sh;
src = fetchgit {
url = "git://git.savannah.gnu.org/emacs.git";
rev = "1b5630eb47d3f4bade09708c958ab006b83b3fc0";
sha256 = "0n3qbri84akmy7ad1pbv89j4jn4x9pnkz0p4nbhh6m1c37cbz58l";
};
patches = stdenv.lib.optionals stdenv.isDarwin [
./at-fdcwd.patch
];
postPatch = ''
sed -i 's|/usr/share/locale|${gettext}/share/locale|g' lisp/international/mule-cmds.el
'';
buildInputs =
[ ncurses gconf libxml2 gnutls alsaLib pkgconfig texinfo acl gpm gettext
autoconf automake ]
++ stdenv.lib.optional stdenv.isLinux dbus
++ stdenv.lib.optionals withX
[ xlibsWrapper libXaw Xaw3d libXpm libpng libjpeg libungif libtiff librsvg libXft
imagemagick gconf ]
++ stdenv.lib.optional (withX && withGTK2) gtk2
++ stdenv.lib.optional (withX && withGTK3) gtk3
++ stdenv.lib.optional (stdenv.isDarwin && withX) cairo;
propagatedBuildInputs = stdenv.lib.optionals stdenv.isDarwin [ AppKit Foundation libobjc
];
NIX_LDFLAGS = stdenv.lib.optional stdenv.isDarwin
"/System/Library/Frameworks/CoreFoundation.framework/Versions/A/CoreFoundation";
configureFlags =
if stdenv.isDarwin
then [ "--with-ns" "--disable-ns-self-contained" ]
else if withX
then [ "--with-x-toolkit=${toolkit}" "--with-xft" ]
else [ "--with-x=no" "--with-xpm=no" "--with-jpeg=no" "--with-png=no"
"--with-gif=no" "--with-tiff=no" ];
NIX_CFLAGS_COMPILE = stdenv.lib.optionalString (stdenv.isDarwin && withX)
"-I${cairo}/include/cairo";
preBuild = ''
find . -name '*.elc' -delete
'';
postInstall = ''
mkdir -p $out/share/emacs/site-lisp/
cp ${./site-start.el} $out/share/emacs/site-lisp/site-start.el
'' + stdenv.lib.optionalString stdenv.isDarwin ''
mkdir -p $out/Applications
mv nextstep/Emacs.app $out/Applications
'';
doCheck = !stdenv.isDarwin;
meta = with stdenv.lib; {
description = "GNU Emacs 25 (pre), the extensible, customizable text editor";
homepage = http://www.gnu.org/software/emacs/;
license = licenses.gpl3Plus;
maintainers = with maintainers; [ chaoflow lovek323 simons the-kenny ];
platforms = platforms.all;
# So that Exuberant ctags is preferred
priority = 1;
longDescription = ''
GNU Emacs is an extensible, customizable text editor - and more. At its
core is an interpreter for Emacs Lisp, a dialect of the Lisp
programming language with extensions to support text editing.
The features of GNU Emacs include: content-sensitive editing modes,
including syntax coloring, for a wide variety of file types including
plain text, source code, and HTML; complete built-in documentation,
including a tutorial for new users; full Unicode support for nearly all
human languages and their scripts; highly customizable, using Emacs
Lisp code or a graphical interface; a large number of extensions that
add other functionality, including a project planner, mail and news
reader, debugger interface, calendar, and more. Many of these
extensions are distributed with GNU Emacs; others are available
separately.
'';
};
}

View file

@ -0,0 +1,17 @@
;; NixOS specific load-path
(setq load-path
(append (reverse (mapcar (lambda (x) (concat x "/share/emacs/site-lisp/"))
(split-string (or (getenv "NIX_PROFILES") ""))))
load-path))
;;; Make `woman' find the man pages
(eval-after-load 'woman
'(setq woman-manpath
(append (reverse (mapcar (lambda (x) (concat x "/share/man/"))
(split-string (or (getenv "NIX_PROFILES") ""))))
woman-manpath)))
;; Make tramp work for remote NixOS machines
;;; NOTE: You might want to add
(eval-after-load 'tramp
'(add-to-list 'tramp-remote-path "/run/current-system/sw/bin"))

View file

@ -201,10 +201,10 @@
"tiny": { "tiny": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/tiny-0.1.tar", "url": "http://elpa.gnu.org/packages/tiny-0.1.1.tar",
"sha256": "04iyidzjgnm4ka575wxqdak19h8j4dlni2ahf0bkq1q9by79xq1q" "sha256": "1nhg8375qdn457wj0xmfaj72s87xbabk2w1nl6q7rjvwxv08yyn7"
}, },
"version": "0.1", "version": "0.1.1",
"deps": [] "deps": []
}, },
"coffee-mode": { "coffee-mode": {
@ -246,10 +246,10 @@
"org": { "org": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/org-20151123.tar", "url": "http://elpa.gnu.org/packages/org-20151221.tar",
"sha256": "13ybzjg6k61paldfln6isc6149hvilwsgsnhyirig42bz1z0vjbb" "sha256": "01p8c70bd2mp3w08vpha0dvpljhj4r5797b0m9q16z4zhxqaqbqx"
}, },
"version": "20151123", "version": "20151221",
"deps": [] "deps": []
}, },
"bug-hunter": { "bug-hunter": {
@ -594,10 +594,10 @@
"hydra": { "hydra": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/hydra-0.13.3.tar", "url": "http://elpa.gnu.org/packages/hydra-0.13.4.tar",
"sha256": "1il0maxkxm2nxwz6y6v85zhf6a8f52gfq51h1filcnlzg10b5arm" "sha256": "11msy6n075iv00c2r9f85bzx3srnj403rhlga1rgsl6vsryf21fj"
}, },
"version": "0.13.3", "version": "0.13.4",
"deps": [ "deps": [
"cl-lib" "cl-lib"
] ]
@ -734,10 +734,10 @@
"ggtags": { "ggtags": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/ggtags-0.8.10.el", "url": "http://elpa.gnu.org/packages/ggtags-0.8.11.el",
"sha256": "0bigf87idd2rh40akyjiy1qvym6y3hvvx6khyb233b231s400aj9" "sha256": "1q2bp2b7lylf7n6c1psfn5swyjg0y78ykm0ak2kd84pbyhqak2mq"
}, },
"version": "0.8.10", "version": "0.8.11",
"deps": [ "deps": [
"cl-lib", "cl-lib",
"emacs" "emacs"
@ -953,10 +953,10 @@
"swiper": { "swiper": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/swiper-0.5.1.tar", "url": "http://elpa.gnu.org/packages/swiper-0.7.0.tar",
"sha256": "06kd6r90fnjz3lapm52pgsx4dhnd95mkzq9y4khkzqny59h0vmm6" "sha256": "1bzzx41zcf3yk6r6csqzlffwwrw9gyk8ab026r55l6416b6rcynx"
}, },
"version": "0.5.1", "version": "0.7.0",
"deps": [ "deps": [
"emacs" "emacs"
] ]
@ -1018,10 +1018,10 @@
"transcribe": { "transcribe": {
"fetch": { "fetch": {
"tag": "fetchurl", "tag": "fetchurl",
"url": "http://elpa.gnu.org/packages/transcribe-0.5.0.el", "url": "http://elpa.gnu.org/packages/transcribe-1.0.2.el",
"sha256": "1wxfv96sjcxins8cyqijsb16fc3n0m13kvaw0hjam8x91wamcbxq" "sha256": "0b0qaq0b3l37h6wfs4j80csmfcbidcd8a8wk6mwn6p4cdi7msr15"
}, },
"version": "0.5.0", "version": "1.0.2",
"deps": [] "deps": []
}, },
"websocket": { "websocket": {

View file

@ -1,8 +1,21 @@
pkgs: with pkgs; /*
# Updating
To update the list of packages from ELPA,
1. Clone https://github.com/ttuegel/emacs2nix
2. Run `./elpa-packages.sh` from emacs2nix
3. Copy the new elpa-packages.json file into Nixpkgs
4. `git commit -m "elpa-packages $(date -Idate)"`
*/
{ fetchurl, lib, stdenv, texinfo }:
let let
inherit (stdenv.lib) makeScope mapAttrs; inherit (lib) makeScope mapAttrs;
json = builtins.readFile ./elpa-packages.json; json = builtins.readFile ./elpa-packages.json;
manifest = builtins.fromJSON json; manifest = builtins.fromJSON json;
@ -10,17 +23,20 @@ let
mkPackage = self: name: recipe: mkPackage = self: name: recipe:
let drv = let drv =
{ elpaBuild, stdenv, fetchurl }: { elpaBuild, stdenv, fetchurl }:
let fetch = { inherit fetchurl; }."${recipe.fetch.tag}" let
or (abort "emacs-${name}: unknown fetcher '${recipe.fetch.tag}'"); unknownFetcher =
args = builtins.removeAttrs recipe.fetch [ "tag" ]; abort "emacs-${name}: unknown fetcher '${recipe.fetch.tag}'";
src = fetch args; fetch =
{ inherit fetchurl; }."${recipe.fetch.tag}"
or unknownFetcher;
args = builtins.removeAttrs recipe.fetch [ "tag" ];
src = fetch args;
in elpaBuild { in elpaBuild {
pname = name; pname = name;
inherit (recipe) version; inherit (recipe) version;
inherit src; inherit src;
deps = packageRequires =
let lookupDep = d: let lookupDep = d: self."${d}" or null;
self."${d}" or (abort "emacs-${name}: missing dependency ${d}");
in map lookupDep recipe.deps; in map lookupDep recipe.deps;
meta = { meta = {
homepage = "http://elpa.gnu.org/packages/${name}.html"; homepage = "http://elpa.gnu.org/packages/${name}.html";
@ -29,14 +45,42 @@ let
}; };
in self.callPackage drv {}; in self.callPackage drv {};
packages = self: in
let
elpaPackages = mapAttrs (mkPackage self) manifest;
elpaBuild = import ../../../build-support/emacs/melpa.nix { self:
inherit (pkgs) lib stdenv fetchurl texinfo;
inherit (self) emacs;
};
in elpaPackages // { inherit elpaBuild elpaPackages; };
in makeScope pkgs.newScope packages let
super = mapAttrs (mkPackage self) manifest;
elpaBuild = import ../../../build-support/emacs/melpa.nix {
inherit fetchurl lib stdenv texinfo;
inherit (self) emacs;
};
markBroken = pkg: pkg.override {
elpaBuild = args: self.elpaBuild (args // {
meta = (args.meta or {}) // { broken = true; };
});
};
elpaPackages = super // {
ace-window = markBroken super.ace-window;
ada-mode = markBroken super.ada-mode;
beacon = markBroken super.beacon;
bug-hunter = markBroken super.bug-hunter;
company-math = markBroken super.company-math;
company-statistics = markBroken super.company-statistics;
context-coloring = markBroken super.context-coloring;
dict-tree = markBroken super.dict-tree;
el-search = markBroken super.el-search;
ergoemacs-mode = markBroken super.ergoemacs-mode;
exwm = markBroken super.exwm;
gnugo = markBroken super.gnugo;
iterators = markBroken super.iterators;
midi-kbd = markBroken super.midi-kbd;
stream = markBroken super.stream;
tNFA = markBroken super.tNFA;
trie = markBroken super.trie;
xelb = markBroken super.xelb;
};
in elpaPackages // { inherit elpaBuild elpaPackages; }
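The "Updating" comment at the top of elpa-packages.nix above amounts to a short shell session. A minimal sketch, assuming the emacs2nix and nixpkgs checkouts sit side by side, that elpa-packages.sh writes elpa-packages.json into its working directory, and that the manifest lives under pkgs/applications/editors/emacs-modes/ (all of these paths are assumptions, not taken from the diff):

# regenerate the ELPA manifest with emacs2nix (checkout layout assumed)
git clone https://github.com/ttuegel/emacs2nix
(cd emacs2nix && ./elpa-packages.sh)
# copy the fresh manifest into the nixpkgs tree and commit it
cp emacs2nix/elpa-packages.json nixpkgs/pkgs/applications/editors/emacs-modes/
(cd nixpkgs && git commit -m "elpa-packages $(date -Idate)" \
    pkgs/applications/editors/emacs-modes/elpa-packages.json)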

File diff suppressed because it is too large

View file

@ -0,0 +1,75 @@
/*
# Updating
To update the list of packages from MELPA,
1. Clone https://github.com/ttuegel/emacs2nix
2. Clone https://github.com/milkypostman/melpa
3. Run `./melpa-packages.sh PATH_TO_MELPA_CLONE` from emacs2nix
4. Copy the new melpa-packages.json file into Nixpkgs
5. `git commit -m "melpa-packages $(date -Idate)"`
*/
{ lib }:
let
inherit (lib) makeScope mapAttrs;
json = builtins.readFile ./melpa-packages.json;
manifest = builtins.fromJSON json;
mkPackage = self: name: recipe:
let drv =
{ melpaBuild, stdenv, fetchbzr, fetchcvs, fetchFromGitHub, fetchFromGitLab
, fetchgit, fetchhg, fetchsvn, fetchurl }:
let
unknownFetcher =
abort "emacs-${name}: unknown fetcher '${recipe.fetch.tag}'";
fetch =
{
inherit fetchbzr fetchcvs fetchFromGitHub fetchFromGitLab fetchgit fetchhg
fetchsvn fetchurl;
}."${recipe.fetch.tag}"
or unknownFetcher;
args = builtins.removeAttrs recipe.fetch [ "tag" ];
src = fetch args;
recipeFile = fetchurl {
url = "https://raw.githubusercontent.com/milkypostman/melpa/${recipe.recipe.commit}/recipes/${name}";
inherit (recipe.recipe) sha256;
};
in melpaBuild {
pname = name;
inherit (recipe) version;
inherit recipeFile src;
packageRequires =
let lookupDep = d: self."${d}" or null;
in map lookupDep recipe.deps;
meta = {
homepage = "http://melpa.org/#/${name}";
license = stdenv.lib.licenses.free;
};
};
in self.callPackage drv {};
in
self:
let
super = mapAttrs (mkPackage self) manifest;
markBroken = pkg: pkg.override {
melpaBuild = args: self.melpaBuild (args // {
meta = (args.meta or {}) // { broken = true; };
});
};
melpaPackages = super // {
# broken upstream
ack-menu = markBroken super.ack-menu;
};
in
melpaPackages // { inherit melpaPackages; }
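The MELPA manifest is refreshed the same way as the ELPA one, with the extra step of cloning the melpa recipe repository and passing its path to the script, as the comment at the top of melpa-packages.nix describes. A minimal sketch under the same assumptions about checkout layout and output location:

# regenerate the MELPA manifest (checkout locations are assumed)
git clone https://github.com/ttuegel/emacs2nix
git clone https://github.com/milkypostman/melpa
(cd emacs2nix && ./melpa-packages.sh ../melpa)
cp emacs2nix/melpa-packages.json nixpkgs/pkgs/applications/editors/emacs-modes/
(cd nixpkgs && git commit -m "melpa-packages $(date -Idate)" \
    pkgs/applications/editors/emacs-modes/melpa-packages.json)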

Some files were not shown because too many files have changed in this diff