Merge branch 'master' of github.com:nixos/nixpkgs into pleasant-ruby

Conflicts:
	pkgs/applications/version-management/redmine/default.nix
	pkgs/development/interpreters/ruby/gem.nix
	pkgs/development/interpreters/ruby/generated.nix
	pkgs/development/interpreters/ruby/patches.nix
	pkgs/development/tools/vagrant/default.nix
	pkgs/servers/consul/default.nix
This commit is contained in:
Charles Strahan 2014-12-28 14:29:52 -05:00
commit 145733c479
3039 changed files with 76780 additions and 42041 deletions

View file

@ -1 +1 @@
14.11
15.05

View file

@ -1,10 +1,10 @@
Nixpkgs is a collection of packages for [Nix](http://nixos.org/nix/) package
manager. Nixpkgs also includes [NixOS](http://nixos.org/nixos/) linux distribution source code.
Nixpkgs is a collection of packages for [Nix](https://nixos.org/nix/) package
manager. Nixpkgs also includes [NixOS](https://nixos.org/nixos/) linux distribution source code.
* [NixOS installation instructions](http://nixos.org/nixos/manual/#ch-installation)
* [Manual (How to write packages for Nix)](http://nixos.org/nixpkgs/manual/)
* [Manual (NixOS)](http://nixos.org/nixos/manual/)
* [Continuous build](http://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Tests](http://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Mailing list](http://lists.science.uu.nl/mailman/listinfo/nix-dev)
* [NixOS installation instructions](https://nixos.org/nixos/manual/#ch-installation)
* [Manual (How to write packages for Nix)](https://nixos.org/nixpkgs/manual/)
* [Manual (NixOS)](https://nixos.org/nixos/manual/)
* [Continuous build](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Tests](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Mailing list](https://lists.science.uu.nl/mailman/listinfo/nix-dev)
* [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)

View file

@ -10,9 +10,7 @@
<listitem><para><command>$ git clone git://github.com/NixOS/nixpkgs.git</command></para></listitem>
<listitem><para><command>$ cd nixpkgs/pkgs/top-level</command></para></listitem>
<listitem><para><command>$ nix-build -A tarball release.nix</command></para></listitem>
<listitem><para><command>$ nix-build -A manual nixpkgs/pkgs/top-level/release.nix</command></para></listitem>
<listitem><para>Inside the built derivation you will find the <literal>manual/index.html</literal> file.</para></listitem>

View file

@ -108,7 +108,7 @@ a <varname>preConfigure</varname> hook to generate a configuration
file used by <filename>Makefile.PL</filename>:
<programlisting>
{buildPerlPackage, fetchurl, db}:
{ buildPerlPackage, fetchurl, db }:
buildPerlPackage rec {
name = "BerkeleyDB-0.36";
@ -191,45 +191,424 @@ you need it.</para>
</section>
<section><title>Python</title>
<section xml:id="python"><title>Python</title>
<para>
Currently supported interpreters are <varname>python26</varname>, <varname>python27</varname>,
<varname>python32</varname>, <varname>python33</varname>, <varname>python34</varname>
and <varname>pypy</varname>.
</para>
<para>
<varname>python</varname> is an alias of <varname>python27</varname> and <varname>python3</varname> is an alias of <varname>python34</varname>.
</para>
<para>
<varname>python26</varname> and <varname>python27</varname> do not include modules that require
external dependencies (to reduce dependency bloat). The following modules need to be added
explicitly as <varname>buildInputs</varname>:
</para>
<itemizedlist>
<listitem><para><varname>python.modules.bsddb</varname></para></listitem>
<listitem><para><varname>python.modules.curses</varname></para></listitem>
<listitem><para><varname>python.modules.curses_panel</varname></para></listitem>
<listitem><para><varname>python.modules.crypt</varname></para></listitem>
<listitem><para><varname>python.modules.gdbm</varname></para></listitem>
<listitem><para><varname>python.modules.sqlite3</varname></para></listitem>
<listitem><para><varname>python.modules.tkinter</varname></para></listitem>
<listitem><para><varname>python.modules.readline</varname></para></listitem>
</itemizedlist>
<para>For convenience <varname>python27Full</varname> and <varname>python26Full</varname>
are provided with all modules included.</para>
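<para>
For instance, a hypothetical package that needs the sqlite bindings could list the
module explicitly (a minimal sketch; the package name and source are illustrative):
</para>
<programlisting language="nix">
buildPythonPackage {
  name = "my-sqlite-tool-1.0";                  # hypothetical package
  src = ./.;                                    # placeholder source
  buildInputs = [ python.modules.sqlite3 ];     # module with external dependencies
}
</programlisting>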
<para>
Python packages that
use <link xlink:href="http://pypi.python.org/pypi/setuptools/"><literal>setuptools</literal></link>,
which many Python packages do nowadays, can be built very simply using
the <varname>buildPythonPackage</varname> function. This function is
implemented
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix"><filename>pkgs/development/python-modules/generic/default.nix</filename></link>
and works similarly to <varname>buildPerlPackage</varname>. (See
<xref linkend="ssec-language-perl"/> for details.)
use <link xlink:href="http://pypi.python.org/pypi/setuptools/"><literal>setuptools</literal></link> or <literal>distutils</literal>,
can be built using the <varname>buildPythonPackage</varname> function as documented below.
</para>
<para>
Python packages that use <varname>buildPythonPackage</varname> are
defined
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/python-packages.nix"><filename>pkgs/top-level/python-packages.nix</filename></link>.
Most of them are simple. For example:
<programlisting>
twisted = buildPythonPackage {
name = "twisted-8.1.0";
src = fetchurl {
url = http://tmrc.mit.edu/mirror/twisted/Twisted/8.1/Twisted-8.1.0.tar.bz2;
sha256 = "0q25zbr4xzknaghha72mq57kh53qw1bf8csgp63pm9sfi72qhirl";
};
propagatedBuildInputs = [ pkgs.ZopeInterface ];
meta = {
homepage = http://twistedmatrix.com/;
description = "Twisted, an event-driven networking engine written in Python";
license = "MIT";
};
};
</programlisting>
All packages depending on any Python interpreter get <varname>$out/${python.libPrefix}/site-packages</varname>
appended to <literal>$PYTHONPATH</literal> if such a directory exists.
</para>
<variablelist>
<title>
Useful attributes on interpreter packages:
</title>
<varlistentry>
<term><varname>libPrefix</varname></term>
<listitem><para>
Name of the folder in <literal>${python}/lib/</literal> for the corresponding interpreter.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>interpreter</varname></term>
<listitem><para>
Alias for <literal>${python}/bin/${executable}</literal>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>buildEnv</varname></term>
<listitem><para>
Function to build python interpreter environments with extra packages bundled together.
See <xref linkend="python-build-env" /> for usage and documentation.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>sitePackages</varname></term>
<listitem><para>
Alias for <literal>lib/${libPrefix}/site-packages</literal>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>executable</varname></term>
<listitem><para>
Name of the interpreter executable, e.g. <literal>python3.4</literal>.
</para></listitem>
</varlistentry>
</variablelist>
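<para>
These attributes are typically interpolated inside package expressions. A minimal
sketch (the derivation name and commands are illustrative):
</para>
<programlisting language="nix">
stdenv.mkDerivation {
  name = "python-attrs-demo";
  buildCommand = ''
    # sitePackages expands to lib/pythonX.Y/site-packages for this interpreter
    mkdir -p $out/${python.sitePackages}
    # interpreter expands to the full path of the python binary
    echo "using ${python.interpreter}" > $out/info
  '';
}
</programlisting>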
<section xml:id="build-python-package"><title><varname>buildPythonPackage</varname> function</title>
<para>
The function is implemented in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/generic/default.nix">
<filename>pkgs/development/python-modules/generic/default.nix</filename></link>.
Example usage:
<programlisting language="nix">
twisted = buildPythonPackage {
name = "twisted-8.1.0";
src = pkgs.fetchurl {
url = http://tmrc.mit.edu/mirror/twisted/Twisted/8.1/Twisted-8.1.0.tar.bz2;
sha256 = "0q25zbr4xzknaghha72mq57kh53qw1bf8csgp63pm9sfi72qhirl";
};
propagatedBuildInputs = [ self.ZopeInterface ];
meta = {
homepage = http://twistedmatrix.com/;
description = "Twisted, an event-driven networking engine written in Python";
license = stdenv.lib.licenses.mit;
};
};
</programlisting>
Most Python packages that use <varname>buildPythonPackage</varname> are defined
in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/python-packages.nix"><filename>pkgs/top-level/python-packages.nix</filename></link>
and generated for each Python interpreter separately into the attribute sets <varname>python26Packages</varname>,
<varname>python27Packages</varname>, <varname>python32Packages</varname>, <varname>python33Packages</varname>,
<varname>python34Packages</varname> and <varname>pypyPackages</varname>.
</para>
<para>
<function>buildPythonPackage</function> mainly does four things:
<orderedlist>
<listitem><para>
In the <varname>configurePhase</varname>, it patches
<literal>setup.py</literal> to always include setuptools before
distutils, so that the setuptools monkeypatching machinery takes effect.
</para></listitem>
<listitem><para>
In the <varname>buildPhase</varname>, it calls
<literal>${python.interpreter} setup.py build ...</literal>
</para></listitem>
<listitem><para>
In the <varname>installPhase</varname>, it calls
<literal>${python.interpreter} setup.py install ...</literal>
</para></listitem>
<listitem><para>
In the <varname>postFixup</varname> phase, the <literal>wrapPythonPrograms</literal>
bash function is called to wrap all programs in the <filename>$out/bin/*</filename>
directory so that they include the <literal>$PYTHONPATH</literal> and <literal>$PATH</literal>
environment variables.
</para></listitem>
</orderedlist>
</para>
<para>By default <varname>doCheck = true</varname> is set and tests are run with the
<literal>${python.interpreter} setup.py test</literal> command in the <varname>checkPhase</varname>.</para>
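<para>
When the test suite cannot run inside the build sandbox (for example because it
needs network access), tests can be switched off, ideally with a comment explaining
why (a sketch; the package name and reason are illustrative):
</para>
<programlisting language="nix">
example = buildPythonPackage {
  name = "example-0.1";      # hypothetical package
  src = ./.;                 # placeholder source
  doCheck = false;           # assumed reason: tests require network access
};
</programlisting>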
<para><varname>propagatedBuildInputs</varname> packages are propagated to the user environment.</para>
<para>
By default <varname>meta.platforms</varname> is set to the same value
as the interpreter unless overridden otherwise.
</para>
<variablelist>
<title>
<varname>buildPythonPackage</varname> parameters
(all parameters of the <varname>mkDerivation</varname> function are still supported)
</title>
<varlistentry>
<term><varname>namePrefix</varname></term>
<listitem><para>
Text prepended to the <varname>${name}</varname> parameter.
Defaults to <literal>"python3.3-"</literal> for Python 3.3, etc. Set it to
<literal>""</literal>
if you're packaging an application or a command line tool.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>disabled</varname></term>
<listitem><para>
If <varname>true</varname>, the package is not built for the
particular Python interpreter version. Grep around
<filename>pkgs/top-level/python-packages.nix</filename>
for examples.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>setupPyInstallFlags</varname></term>
<listitem><para>
List of flags passed to the <command>setup.py install</command> command.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>setupPyBuildFlags</varname></term>
<listitem><para>
List of flags passed to the <command>setup.py build</command> command.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>pythonPath</varname></term>
<listitem><para>
List of packages to be added into <literal>$PYTHONPATH</literal>.
Packages in <varname>pythonPath</varname> are not propagated into user environment
(contrary to <varname>propagatedBuildInputs</varname>).
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>preShellHook</varname></term>
<listitem><para>
Hook to execute commands before <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postShellHook</varname></term>
<listitem><para>
Hook to execute commands after <varname>shellHook</varname>.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>distutilsExtraCfg</varname></term>
<listitem><para>
Extra lines passed to <varname>[easy_install]</varname> section of
<filename>distutils.cfg</filename> (acts as global setup.cfg
configuration).
</para></listitem>
</varlistentry>
</variablelist>
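<para>
A sketch combining several of these parameters (the package, flags and inputs are
purely illustrative, and the helper <varname>isPyPy</varname> is assumed to be in
scope as in <filename>python-packages.nix</filename>):
</para>
<programlisting language="nix">
frobnicate = buildPythonPackage {
  name = "frobnicate-1.2";                      # hypothetical package
  disabled = isPyPy;                            # not built for PyPy
  src = ./.;                                    # placeholder source
  setupPyBuildFlags = [ "--with-speedups" ];    # illustrative flag
  pythonPath = [ self.six ];                    # on $PYTHONPATH, but not propagated
};
</programlisting>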
</section>
<section xml:id="python-build-env"><title><function>python.buildEnv</function> function</title>
<para>
Create Python environments using the low-level <function>pkgs.buildEnv</function> function. Example <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[
with import <nixpkgs> {};
python.buildEnv.override {
extraLibs = [ pkgs.pythonPackages.pyramid ];
ignoreCollisions = true;
}
]]>
</programlisting>
Running <command>nix-build</command> will create
<filename>/nix/store/cf1xhjwzmdki7fasgr4kz6di72ykicl5-python-2.7.8-env</filename>
with wrapped binaries in <filename>bin/</filename>.
</para>
<variablelist>
<title>
<function>python.buildEnv</function> arguments
</title>
<varlistentry>
<term><varname>extraLibs</varname></term>
<listitem><para>
List of packages installed inside the environment.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>postBuild</varname></term>
<listitem><para>
Shell command executed after the environment is built.
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>ignoreCollisions</varname></term>
<listitem><para>
Ignore file collisions inside the environment (default is <varname>false</varname>).
</para></listitem>
</varlistentry>
</variablelist>
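<para>
A small sketch combining these arguments (the chosen package and the
<varname>postBuild</varname> command are illustrative assumptions):
</para>
<programlisting language="nix">
<![CDATA[
with import <nixpkgs> {};

python.buildEnv.override {
  extraLibs = [ pkgs.pythonPackages.requests ];
  ignoreCollisions = true;
  # assumed: add a convenience symlink once the environment is built
  postBuild = "ln -s $out/bin/python $out/bin/python-env";
}
]]>
</programlisting>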
</section>
<section xml:id="python-tools"><title>Tools</title>
<para>Packages inside nixpkgs are written by hand. However, many tools
exist in the community to help save time. No tool is preferred at the moment.
</para>
<itemizedlist>
<listitem><para>
<link xlink:href="https://github.com/proger/python2nix">python2nix</link>
by Vladimir Kirillov
</para></listitem>
<listitem><para>
<link xlink:href="https://github.com/garbas/pypi2nix">pypi2nix</link>
by Rok Garbas
</para></listitem>
<listitem><para>
<link xlink:href="https://github.com/offlinehacker/pypi2nix">pypi2nix</link>
by Jaka Hudoklin
</para></listitem>
</itemizedlist>
</section>
<section xml:id="python-development"><title>Development</title>
<para>
To develop Python packages, <function>buildPythonPackage</function> has
additional logic inside <varname>shellPhase</varname> to run
<command>${python.interpreter} setup.py develop</command> for the package.
</para>
<para>
Given a <filename>default.nix</filename>:
<programlisting language="nix">
<![CDATA[
with import <nixpkgs> {};
buildPythonPackage {
name = "myproject";
buildInputs = with pkgs.pythonPackages; [ pyramid ];
src = ./.;
}
]]>
</programlisting>
Running <command>nix-shell</command> with no arguments should give you
the environment in which the package would be built with
<command>nix-build</command>.
</para>
<para>
Shortcut to set up environments with C headers/libraries and Python packages:
<programlisting language="bash">$ nix-shell -p pythonPackages.pyramid zlib libjpeg git</programlisting>
</para>
<note><para>
There is a boolean value <varname>lib.inNixShell</varname> which is set to
<varname>true</varname> when nix-shell is invoked.
</para></note>
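<para>
A possible use is to pull in extra tools only when entering a shell (a sketch based
on the <filename>default.nix</filename> above; the extra <varname>ipython</varname>
dependency is an illustrative assumption):
</para>
<programlisting language="nix">
<![CDATA[
with import <nixpkgs> {};

buildPythonPackage {
  name = "myproject";
  src = ./.;
  # ipython is added only when nix-shell is invoked
  buildInputs = [ pkgs.pythonPackages.pyramid ]
    ++ (if lib.inNixShell then [ pkgs.pythonPackages.ipython ] else []);
}
]]>
</programlisting>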
</section>
<section xml:id="python-faq"><title>FAQ</title>
<variablelist>
<varlistentry>
<term>How to solve circular dependencies?</term>
<listitem><para>
If you have packages <varname>A</varname> and <varname>B</varname> that
depend on each other, then when packaging <varname>B</varname>, override package
<varname>A</varname> so that it does not depend on <varname>B</varname> as an input
(and likewise the other way around).
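A minimal sketch of one direction (package names and sources are illustrative):
<programlisting language="nix">
# assume both are defined in pkgs/top-level/python-packages.nix
A = buildPythonPackage {
  name = "A-1.0";
  src = ./A;                       # placeholder source
  # B is deliberately left out of the inputs here to break the cycle
};

B = buildPythonPackage {
  name = "B-1.0";
  src = ./B;                       # placeholder source
  propagatedBuildInputs = [ A ];   # B can still depend on the reduced A
};
</programlisting>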
</para></listitem>
</varlistentry>
<varlistentry>
<term><varname>install_data / data_files</varname> problems resulting in <literal>error: could not create '/nix/store/6l1bvljpy8gazlsw2aw9skwwp4pmvyxw-python-2.7.8/etc': Permission denied</literal></term>
<listitem><para>
<link xlink:href="https://bitbucket.org/pypa/setuptools/issue/130/install_data-doesnt-respect-prefix">
Known bug in setuptools: <varname>install_data</varname> does not respect --prefix</link>. An example of
such a package using the feature is <filename>pkgs/tools/X11/xpra/default.nix</filename>. As a workaround,
install it as an extra <varname>preInstall</varname> step:
<programlisting>${python.interpreter} setup.py install_data --install-dir=$out --root=$out
sed -i '/ = data_files/d' setup.py</programlisting>
</para></listitem>
</varlistentry>
<varlistentry>
<term>Rationale of non-existent global site-packages</term>
<listitem><para>
There is no need to have a global site-packages in Nix. Each package has an isolated
dependency tree, and installing any Python package will only populate <varname>$PATH</varname>
inside the user environment. See <xref linkend="python-build-env" /> to create a self-contained
interpreter with a set of packages.
</para></listitem>
</varlistentry>
</variablelist>
</section>
<section xml:id="python-contrib"><title>Contributing guidelines</title>
<para>
The following rules should be respected:
</para>
<itemizedlist>
<listitem><para>
Make sure the package builds for all Python interpreters. Use the <varname>disabled</varname> argument to
<function>buildPythonPackage</function> to mark unsupported interpreters.
</para></listitem>
<listitem><para>
If tests need to be disabled for a package, make sure you leave a comment explaining the reasoning.
</para></listitem>
<listitem><para>
Packages in <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/python-packages.nix"><filename>pkgs/top-level/python-packages.nix</filename></link>
are sorted quasi-alphabetically to avoid merge conflicts.
</para></listitem>
</itemizedlist>
</section>
</section>

View file

@ -184,10 +184,10 @@ if test "$noSysDirs" = "1"; then
if test "$noSysDirs" = "1"; then
# Figure out what extra flags to pass to the gcc compilers
# being generated to make sure that they use our glibc.
if test -e $NIX_GCC/nix-support/orig-glibc; then
glibc=$(cat $NIX_GCC/nix-support/orig-glibc)
if test -e $NIX_CC/nix-support/orig-glibc; then
glibc=$(cat $NIX_CC/nix-support/orig-glibc)
# Ugh. Copied from gcc-wrapper/builder.sh. We can't just
# source in $NIX_GCC/nix-support/add-flags, since that
# source in $NIX_CC/nix-support/add-flags, since that
# would cause *this* GCC to be linked against the
# *previous* GCC. Need some more modularity there.
extraCFlags="-B$glibc/lib -isystem $glibc/include"

View file

@ -1,44 +0,0 @@
<?xml version="1.0"?>
<xsl:stylesheet
version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:str="http://exslt.org/strings"
extension-element-prefixes="str">
<xsl:output method="xml"/>
<xsl:template match="function|command|literal|varname|filename|option|quote">`<xsl:apply-templates/>'</xsl:template>
<xsl:template match="token"><xsl:text> </xsl:text><xsl:apply-templates /><xsl:text>
</xsl:text></xsl:template>
<xsl:template match="screen|programlisting">
<screen><xsl:apply-templates select="str:split(., '&#xA;')" /></screen>
</xsl:template>
<xsl:template match="section[following::section]">
<section>
<xsl:apply-templates />
<screen><xsl:text>
</xsl:text></screen>
</section>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{name(.)}" namespace="{namespace-uri(.)}">
<xsl:copy-of select="namespace::*" />
<xsl:for-each select="@*">
<xsl:attribute name="{name(.)}" namespace="{namespace-uri(.)}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:for-each>
<xsl:apply-templates/>
</xsl:element>
</xsl:template>
<xsl:template match="text()">
<xsl:value-of select="translate(., '‘’“”—', concat(&quot;`'&quot;, '&quot;&quot;-'))" />
</xsl:template>
</xsl:stylesheet>

View file

@ -1120,12 +1120,9 @@ echo @foo@
<varlistentry>
<term>Python</term>
<listitem><para>Adds the
<filename>lib/python2.5/site-packages</filename> subdirectory of
<filename>lib/${python.libPrefix}/site-packages</filename> subdirectory of
each build input to the <envar>PYTHONPATH</envar> environment
variable.</para>
<note><para>This should be generalised: the Python version
shouldnt be hard-coded.</para></note></listitem>
variable.</para></listitem>
</varlistentry>
<varlistentry>

View file

@ -1,314 +1,410 @@
let
lib = import ./default.nix;
spdx = lic: lic // {
url = "http://spdx.org/licenses/${lic.shortName}";
};
url = "http://spdx.org/licenses/${lic.spdxId}";
};
in
rec {
lib.mapAttrs (n: v: v // { shortName = n; }) rec {
/* License identifiers from spdx.org where possible.
* If you cannot find your license here, then look for a similar license or
* add it to this list. The URL mentioned above is a good source for inspiration.
*/
afl21 = spdx {
spdxId = "AFL-2.1";
fullName = "Academic Free License";
};
agpl3 = spdx {
shortName = "AGPL-3.0";
spdxId = "AGPL-3.0";
fullName = "GNU Affero General Public License v3.0";
};
agpl3Plus = {
shortName = "AGPL-3.0+";
fullName = "GNU Affero General Public License v3.0 or later";
inherit (agpl3) url;
};
amazonsl = {
fullName = "Amazon Software License";
url = http://aws.amazon.com/asl/;
free = false;
};
amd = {
shortName = "amd";
fullName = "AMD License Agreement";
url = http://developer.amd.com/amd-license-agreement/;
};
apsl20 = spdx {
shortName = "APSL-2.0";
spdxId = "APSL-2.0";
fullName = "Apple Public Source License 2.0";
};
artistic1 = spdx {
spdxId = "Artistic-1.0";
fullName = "Artistic License 1.0";
};
artistic2 = spdx {
shortName = "Artistic-2.0";
spdxId = "Artistic-2.0";
fullName = "Artistic License 2.0";
};
asl20 = spdx {
shortName = "Apache-2.0";
spdxId = "Apache-2.0";
fullName = "Apache License 2.0";
};
boost = spdx {
shortName = "BSL-1.0";
spdxId = "BSL-1.0";
fullName = "Boost Software License 1.0";
};
bsd2 = spdx {
shortName = "BSD-2-Clause";
spdxId = "BSD-2-Clause";
fullName = ''BSD 2-clause "Simplified" License'';
};
bsd3 = spdx {
shortName = "BSD-3-Clause";
spdxId = "BSD-3-Clause";
fullName = ''BSD 3-clause "New" or "Revised" License'';
};
bsdOriginal = spdx {
shortName = "BSD-4-Clause";
spdxId = "BSD-4-Clause";
fullName = ''BSD 4-clause "Original" or "Old" License'';
};
cc0 = spdx {
shortName = "CC0-1.0";
fullName = ''Creative Commons Zero v1.0 Universal'';
spdxId = "CC0-1.0";
fullName = "Creative Commons Zero v1.0 Universal";
};
cc-by-30 = spdx {
shortName = "CC-BY-3.0";
spdxId = "CC-BY-3.0";
fullName = "Creative Commons Attribution 3.0";
};
cc-by-sa-30 = spdx {
shortName = "CC-BY-SA-3.0";
spdxId = "CC-BY-SA-3.0";
fullName = "Creative Commons Attribution Share Alike 3.0";
};
cc-by-40 = spdx {
shortName = "CC-BY-4.0";
spdxId = "CC-BY-4.0";
fullName = "Creative Commons Attribution 4.0";
};
cddl = spdx {
shortName = "CDDL-1.0";
spdxId = "CDDL-1.0";
fullName = "Common Development and Distribution License 1.0";
};
cecill20 = spdx {
spdxId = "CECILL-2.0";
fullName = "CeCILL Free Software License Agreement v2.0";
};
cecill-b = spdx {
shortName = "CECILL-B";
spdxId = "CECILL-B";
fullName = "CeCILL-B Free Software License Agreement";
};
cecill-c = spdx {
shortName = "CECILL-C";
spdxId = "CECILL-C";
fullName = "CeCILL-C Free Software License Agreement";
};
cpl10 = spdx {
shortName = "CPL-1.0";
spdxId = "CPL-1.0";
fullName = "Common Public License 1.0";
};
epl10 = spdx {
shortName = "EPL-1.0";
spdxId = "EPL-1.0";
fullName = "Eclipse Public License 1.0";
};
free = "free";
free = {
fullName = "Unspecified free software license";
};
gpl1 = spdx {
spdxId = "GPL-1.0";
fullName = "GNU General Public License v1.0 only";
};
gpl1Plus = spdx {
spdxId = "GPL-1.0+";
fullName = "GNU General Public License v1.0 or later";
};
gpl2 = spdx {
shortName = "GPL-2.0";
spdxId = "GPL-2.0";
fullName = "GNU General Public License v2.0 only";
};
gpl2ClasspathPlus = {
fullName = "GNU General Public License v2.0 or later (with Classpath exception)";
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
};
gpl2Oss = {
shortName = "GPL-2.0-with-OSS";
fullName = "GNU General Public License version 2 only (with OSI approved licenses linking exception)";
url = http://www.mysql.com/about/legal/licensing/foss-exception;
};
gpl2Plus = spdx {
shortName = "GPL-2.0+";
spdxId = "GPL-2.0+";
fullName = "GNU General Public License v2.0 or later";
};
gpl3 = spdx {
shortName = "GPL-3.0";
spdxId = "GPL-3.0";
fullName = "GNU General Public License v3.0 only";
};
gpl3Plus = spdx {
shortName = "GPL-3.0+";
spdxId = "GPL-3.0+";
fullName = "GNU General Public License v3.0 or later";
};
gpl3ClasspathPlus = {
shortName = "GPL-3.0+-with-classpath-exception";
fullName = "GNU General Public License v3.0 or later (with Classpath exception)";
url = https://fedoraproject.org/wiki/Licensing/GPL_Classpath_Exception;
};
# Intel's license, seems free
iasl = {
fullName = "iASL";
url = http://www.calculate-linux.org/packages/licenses/iASL;
};
inria = {
shortName = "INRIA-NCLA";
fullName = "INRIA Non-Commercial License Agreement";
url = "http://compcert.inria.fr/doc/LICENSE";
};
ipa = spdx {
shortName = "IPA";
spdxId = "IPA";
fullName = "IPA Font License";
};
ipl10 = spdx {
shortName = "IPL-1.0";
spdxId = "IPL-1.0";
fullName = "IBM Public License v1.0";
};
isc = spdx {
shortName = "ISC";
spdxId = "ISC";
fullName = "ISC License";
};
lgpl2 = spdx {
shortName = "LGPL-2.0";
spdxId = "LGPL-2.0";
fullName = "GNU Library General Public License v2 only";
};
lgpl2Plus = spdx {
shortName = "LGPL-2.0+";
spdxId = "LGPL-2.0+";
fullName = "GNU Library General Public License v2 or later";
};
lgpl21 = spdx {
shortName = "LGPL-2.1";
spdxId = "LGPL-2.1";
fullName = "GNU Library General Public License v2.1 only";
};
lgpl21Plus = spdx {
shortName = "LGPL-2.1+";
spdxId = "LGPL-2.1+";
fullName = "GNU Library General Public License v2.1 or later";
};
lgpl3 = spdx {
shortName = "LGPL-3.0";
spdxId = "LGPL-3.0";
fullName = "GNU Lesser General Public License v3.0 only";
};
lgpl3Plus = spdx {
shortName = "LGPL-3.0+";
spdxId = "LGPL-3.0+";
fullName = "GNU Lesser General Public License v3.0 or later";
};
libpng = spdx {
shortName = "Libpng";
spdxId = "Libpng";
fullName = "libpng License";
};
libtiff = {
shortName = "libtiff";
fullName = "libtiff license";
url = https://fedoraproject.org/wiki/Licensing/libtiff;
libtiff = spdx {
spdxId = "libtiff";
fullName = "libtiff License";
};
llgpl21 = {
shortName = "LLGPL-2.1";
fullName = "Lisp LGPL; GNU Lesser General Public License version 2.1 with Franz Inc. preamble for clarification of LGPL terms in context of Lisp";
url = http://opensource.franz.com/preamble.html;
};
lppl12 = spdx {
spdxId = "LPPL-1.2";
fullName = "LaTeX Project Public License v1.2";
};
lppl13c = spdx {
spdxId = "LPPL-1.3c";
fullName = "LaTeX Project Public License v1.3c";
};
lpl-102 = spdx {
shortName = "LPL-1.02";
spdxId = "LPL-1.02";
fullName = "Lucent Public License v1.02";
};
# spdx.org does not (yet) differentiate between the X11 and Expat versions
# for details see http://en.wikipedia.org/wiki/MIT_License#Various_versions
mit = spdx {
shortName = "MIT";
spdxId = "MIT";
fullName = "MIT License";
};
mpl11 = spdx {
shortName = "MPL-1.1";
spdxId = "MPL-1.1";
fullName = "Mozilla Public License 1.1";
};
mpl20 = spdx {
shortName = "MPL-2.0";
spdxId = "MPL-2.0";
fullName = "Mozilla Public License 2.0";
};
msrla = {
shortName = "MSR-LA";
fullName = "Microsoft Research License Agreement";
url = "http://research.microsoft.com/en-us/projects/pex/msr-la.txt";
};
ncsa = spdx {
shortName = "NCSA";
spdxId = "NCSA";
fullName = "University of Illinois/NCSA Open Source License";
};
ofl = spdx {
shortName = "OFL-1.1";
spdxId = "OFL-1.1";
fullName = "SIL Open Font License 1.1";
};
openssl = spdx {
shortName = "OpenSSL";
spdxId = "OpenSSL";
fullName = "OpenSSL License";
};
php301 = spdx {
spdxId = "PHP-3.01";
fullName = "PHP License v3.01";
};
postgresql = spdx {
shortName = "PostgreSQL";
spdxId = "PostgreSQL";
fullName = "PostgreSQL License";
};
psfl = spdx {
shortName = "Python-2.0";
spdxId = "Python-2.0";
fullName = "Python Software Foundation License version 2";
#url = http://docs.python.org/license.html;
};
publicDomain = {
shortName = "Public Domain";
fullname = "Public Domain";
fullName = "Public Domain";
};
qpl = spdx {
spdxId = "QPL-1.0";
fullName = "Q Public License 1.0";
};
qwt = {
fullName = "Qwt License, Version 1.0";
url = http://qwt.sourceforge.net/qwtlicense.html;
};
ruby = spdx {
spdxId = "Ruby";
fullName = "Ruby License";
};
sgi-b-20 = spdx {
spdxId = "SGI-B-2.0";
fullName = "SGI Free Software License B v2.0";
};
sleepycat = spdx {
shortName = "Sleepycat";
fullName = "Sleepycat License";
spdxId = "Sleepycat";
fullName = "Sleepycat License";
};
tcltk = {
shortName = "Tcl/Tk";
fullName = "Tcl/Tk license";
url = http://www.tcl.tk/software/tcltk/license.html;
tcltk = spdx {
spdxId = "TCL";
fullName = "TCL/TK License";
};
unfree = "unfree";
unfree = {
fullName = "Unfree";
free = false;
};
unfreeRedistributable = "unfree-redistributable";
unfreeRedistributable = {
fullName = "Unfree redistributable";
free = false;
};
unfreeRedistributableFirmware = "unfree-redistributable-firmware";
unfreeRedistributableFirmware = {
fullName = "Unfree redistributable firmware";
# Note: we currently consider these "free" for inclusion in the
# channel and NixOS images.
};
unlicense = {
shortName = "Unlicense";
fullName = "Unlicense";
url = http://unlicense.org/;
unlicense = spdx {
spdxId = "Unlicense";
fullName = "The Unlicense";
};
vsl10 = spdx {
spdxId = "VSL-1.0";
fullName = "Vovida Software License v1.0";
};
w3c = spdx {
spdxId = "W3C";
fullName = "W3C Software Notice and License";
};
wadalab = {
shortName = "wadalab";
fullName = "Wadalab Font License";
url = https://fedoraproject.org/wiki/Licensing:Wadalab?rd=Licensing/Wadalab;
};
wtfpl = spdx {
spdxId = "WTFPL";
fullName = "Do What The F*ck You Want To Public License";
};
zlib = spdx {
shortName = "Zlib";
spdxId = "Zlib";
fullName = "zlib License";
};
zpt20 = spdx { # FIXME: why zpt* instead of zpl*
shortName = "ZPL-2.0";
spdxId = "ZPL-2.0";
fullName = "Zope Public License 2.0";
};
zpt21 = spdx {
shortName = "ZPL-2.1";
spdxId = "ZPL-2.1";
fullName = "Zope Public License 2.1";
};

View file

@ -223,4 +223,14 @@ rec {
crossLists = f: foldl (fs: args: concatMap (f: map f args) fs) [f];
# Remove duplicate elements from the list
unique = list:
if list == [] then
[]
else
let
x = head list;
xs = unique (drop 1 list);
in [x] ++ remove x xs;
}

View file

@ -5,12 +5,13 @@
alphabetically sorted. */
_1126 = "Christian Lask <mail@elfsechsundzwanzig.de>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
ak = "Alexander Kjeldaas <ak@formalprivacy.com>";
akc = "Anders Claesson <akc@akc.is>";
algorith = "Dries Van Daele <dries_van_daele@telenet.be>";
all = "Nix Committers <nix-commits@lists.science.uu.nl>";
abbradar = "Nikolay Amiantov <ab@fmap.me>";
amiddelk = "Arie Middelkoop <amiddelk@gmail.com>";
amorsillo = "Andrew Morsillo <andrew.morsillo@gmail.com>";
AndersonTorres = "Anderson Torres <torres.anderson.85@gmail.com>";
@ -23,7 +24,9 @@
aszlig = "aszlig <aszlig@redmoonstudios.org>";
auntie = "Jonathan Glines <auntieNeo@gmail.com>";
aycanirican = "Aycan iRiCAN <iricanaycan@gmail.com>";
balajisivaraman = "Balaji Sivaraman <sivaraman.balaji@gmail.com>";
bbenoist = "Baptist BENOIST <return_0@live.com>";
bdimcheff = "Brandon Dimcheff <brandon@dimcheff.com>";
bennofs = "Benno Fünfstück <benno.fuenfstueck@gmail.com>";
berdario = "Dario Bertini <berdario@gmail.com>";
bergey = "Daniel Bergey <bergey@teallabs.org>";
@ -45,26 +48,34 @@
coroa = "Jonas Hörsch <jonas@chaoflow.net>";
cstrahan = "Charles Strahan <charles.c.strahan@gmail.com>";
DamienCassou = "Damien Cassou <damien.cassou@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
davidrusu = "David Rusu <davidrusu.me@gmail.com>";
dbohdan = "Danyil Bohdan <danyil.bohdan@gmail.com>";
DerGuteMoritz = "Moritz Heidkamp <moritz@twoticketsplease.de>";
devhell = "devhell <\"^\"@regexmail.net>";
dmalikov = "Dmitry Malikov <malikov.d.y@gmail.com>";
doublec = "Chris Double <chris.double@double.co.nz>";
ederoyd46 = "Matthew Brown <matt@ederoyd.co.uk>";
eduarrrd = "Eduard Bachmakov <e.bachmakov@gmail.com>";
edwtjo = "Edward Tjörnhammar <ed@cflags.cc>";
eelco = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
eikek = "Eike Kettner <eike.kettner@posteo.de>";
ellis = "Ellis Whitehead <nixos@ellisw.net>";
emery = "Emery Hemingway <emery@vfemail.net>";
ertes = "Ertugrul Söylemez <ertesx@gmx.de>";
exlevan = "Alexey Levan <exlevan@gmail.com>";
falsifian = "James Cook <james.cook@utoronto.ca>";
flosse = "Markus Kohlhase <mail@markus-kohlhase.de>";
fluffynukeit = "Daniel Austin <dan@fluffynukeit.com>";
fpletz = "Franz Pletz <fpletz@fnordicwalking.de>";
ftrvxmtrx = "Siarhei Zirukin <ftrvxmtrx@gmail.com>";
funfunctor = "Edward O'Callaghan <eocallaghan@alterapraxis.com>";
fuuzetsu = "Mateusz Kowalczyk <fuuzetsu@fuuzetsu.co.uk>";
gal_bolle = "Florent Becker <florent.becker@ens-lyon.org>";
garbas = "Rok Garbas <rok@garbas.si>";
gavin = "Gavin Rogers <gavin@praxeology.co.uk>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
guibert = "David Guibert <david.guibert@gmail.com>";
henrytill = "Henry Till <henrytill@gmail.com>";
hinton = "Tom Hinton <t@larkery.com>";
hrdinka = "Christoph Hrdinka <c.nix@hrdinka.at>";
ianwookim = "Ian-Woo Kim <ianwookim@gmail.com>";
@ -73,11 +84,16 @@
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
jcumming = "Jack Cummings <jack@mudshark.org>";
jgeerds = "Jascha Geerds <jg@ekby.de>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
joamaki = "Jussi Maki <joamaki@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jwiegley = "John Wiegley <johnw@newartisans.com>";
jzellner = "Jeff Zellner <jeffz@eml.cc>";
kkallio = "Karn Kallio <tierpluspluslists@gmail.com>";
koral = "Koral <koral@mailoo.org>";
kovirobi = "Kovacsics Robert <kovirobi@gmail.com>";
kragniz = "Louis Taylor <kragniz@gmail.com>";
ktosiek = "Tomasz Kontusz <tomasz.kontusz@gmail.com>";
lethalman = "Luca Bruno <lucabru@src.gnome.org>";
@ -86,28 +102,37 @@
lovek323 = "Jason O'Conal <jason@oconal.id.au>";
ludo = "Ludovic Courtès <ludo@gnu.org>";
madjar = "Georges Dubus <georges.dubus@compiletoi.net>";
magnetophon = "Bart Brouns <bart@magnetophon.nl>";
manveru = "Michael Fellinger <m.fellinger@gmail.com>";
marcweber = "Marc Weber <marco-oweber@gmx.de>";
matejc = "Matej Cotman <cotman.matej@gmail.com>";
meditans = "Carlo Nucera <meditans@gmail.com>";
meisternu = "Matt Miemiec <meister@krutt.org>";
michelk = "Michel Kuhlmann <michel@kuhlmanns.info>";
modulistic = "Pablo Costa <modulistic@gmail.com>";
mornfall = "Petr Ročkai <me@mornfall.net>";
MP2E = "Cray Elliott <MP2E@archlinux.us>";
msackman = "Matthew Sackman <matthew@wellquite.org>";
mtreskin = "Max Treskin <zerthurd@gmail.com>";
muflax = "Stefan Dorn <mail@muflax.com>";
nathan-gs = "Nathan Bijnens <nathan@nathan.gs>";
nckx = "Tobias Geerinckx-Rice <tobias.geerinckx.rice@gmail.com>";
notthemessiah = "Brian Cohen <brian.cohen.88@gmail.com>";
nslqqq = "Nikita Mikhailov <nslqqq@gmail.com>";
ocharles = "Oliver Charles <ollie@ocharles.org.uk>";
offline = "Jaka Hudoklin <jakahudoklin@gmail.com>";
olcai = "Erik Timan <dev@timan.info>";
orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
page = "Carles Pagès <page@cubata.homelinux.net>";
pashev = "Igor Pashev <pashev.igor@gmail.com>";
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
pierron = "Nicolas B. Pierron <nixos@nbp.name>";
piotr = "Piotr Pietraszkiewicz <ppietrasa@gmail.com>";
pkmx = "Chih-Mao Chen <pkmx.tw@gmail.com>";
plcplc = "Philip Lykke Carlsen <plcplc@gmail.com>";
prikhi = "Pavan Rikhi <pavan.rikhi@gmail.com>";
pSub = "Pascal Wittmann <mail@pascal-wittmann.de>";
puffnfresh = "Brian McKenna <brian@brianmckenna.org>";
qknight = "Joachim Schiele <js@lastlog.de>";
raskin = "Michael Raskin <7c6f434c@mail.ru>";
redbaron = "Maxim Ivanov <ivanov.maxim@gmail.com>";
@ -122,21 +147,27 @@
rszibele = "Richard Szibele <richard_szibele@hotmail.com>";
rycee = "Robert Helgesson <robert@rycee.net>";
sander = "Sander van der Burg <s.vanderburg@tudelft.nl>";
schristo = "Scott Christopher <schristopher@konputa.com>";
sepi = "Raffael Mancini <raffael@mancini.lu>";
shell = "Shell Turner <cam.turn@gmail.com>";
shlevy = "Shea Levy <shea@shealevy.com>";
simons = "Peter Simons <simons@cryp.to>";
sjmackenzie = "Stewart Mackenzie <setori88@gmail.com>";
skeidel = "Sven Keidel <svenkeidel@gmail.com>";
smironov = "Sergey Mironov <ierton@gmail.com>";
sprock = "Roger Mason <rmason@mun.ca>";
spwhitt = "Spencer Whitt <sw@swhitt.me>";
sztupi = "Attila Sztupak <attila.sztupak@gmail.com>";
tailhook = "Paul Colomiets <paul@colomiets.name>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
tomberek = "Thomas Bereknyei <tomberek@gmail.com>";
tstrobel = "Thomas Strobel <ts468@cam.ac.uk>";
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
tv = "Tomislav Viljetić <tv@shackspace.de>";
twey = "James Twey Kay <twey@twey.co.uk>";
urkud = "Yury G. Kudryashov <urkud+nix@ya.ru>";
vandenoever = "Jos van den Oever <jos@vandenoever.info>";
vbgl = "Vincent Laporte <Vincent.Laporte@gmail.com>";
@ -151,10 +182,12 @@
wjlroe = "William Roe <willroe@gmail.com>";
wkennington = "William A. Kennington III <william@wkennington.com>";
wmertens = "Wout Mertens <Wout.Mertens@gmail.com>";
wscott = "Wayne Scott <wsc9tt@gmail.com>";
wyvie = "Elijah Rum <elijahrum@gmail.com>";
yarr = "Dmitry V. <savraz@gmail.com>";
z77z = "Marco Maggesi <maggesi@math.unifi.it>";
zef = "Zef Hemel <zef@zef.me>";
zimbatm = "zimbatm <zimbatm@zimbatm.com>";
zoomulator = "Kim Simmons <zoomulator@gmail.com>";
Gonzih = "Max Gonzih <gonzih@gmail.com>";
}

View file

@ -58,7 +58,7 @@ rec {
if m ? config || m ? options then
let badAttrs = removeAttrs m ["imports" "options" "config" "key" "_file"]; in
if badAttrs != {} then
throw "Module `${key}' has an unsupported attribute `${head (attrNames badAttrs)}'."
throw "Module `${key}' has an unsupported attribute `${head (attrNames badAttrs)}'. This is caused by assignments to the top-level attributes `config' or `options'."
else
{ file = m._file or file;
key = toString m.key or key;

View file

@ -31,6 +31,23 @@ rec {
type = lib.types.bool;
};
# This option accept anything, but it does not produce any result. This
# is useful for sharing a module across different module sets without
# having to implement similar features as long as the value of the options
# are not expected.
mkSinkUndeclaredOptions = attrs: mkOption ({
internal = true;
visible = false;
default = false;
description = "Sink for option definitions.";
type = mkOptionType {
name = "sink";
check = x: true;
merge = loc: defs: false;
};
apply = x: throw "Option value is not readable because the option is not declared.";
} // attrs);
mergeDefaultOption = loc: defs:
let list = getValues defs; in
if length list == 1 then head list

View file

@ -0,0 +1,14 @@
*~
,*
.*.swp
.*.swo
result
result-*
/doc/NEWS.html
/doc/NEWS.txt
/doc/manual.html
/doc/manual.pdf
.version-suffix
.DS_Store
.git

View file

@ -0,0 +1,12 @@
FROM busybox
RUN dir=`mktemp -d` && trap 'rm -rf "$dir"' EXIT && \
wget -O- https://nixos.org/releases/nix/nix-1.7/nix-1.7-x86_64-linux.tar.bz2 | bzcat | tar x -C $dir && \
mkdir -m 0755 /nix && USER=root sh $dir/*/install && \
echo ". /root/.nix-profile/etc/profile.d/nix.sh" >> /etc/profile
ADD . /root/nix/nixpkgs
ONBUILD ENV NIX_PATH nixpkgs=/root/nix/nixpkgs:nixos=/root/nix/nixpkgs/nixos
ONBUILD ENV PATH /root/.nix-profile/bin:/root/.nix-profile/sbin:/bin:/sbin:/usr/bin:/usr/sbin
ONBUILD ENV ENV /etc/profile
ENV ENV /etc/profile

View file

@ -3,7 +3,7 @@
stdenv.mkDerivation {
name = "nix-generate-from-cpan-1";
buildInputs = [ makeWrapper perl perlPackages.YAMLLibYAML perlPackages.JSON ];
buildInputs = [ makeWrapper perl perlPackages.YAMLLibYAML perlPackages.JSON perlPackages.CPANPLUS ];
unpackPhase = "true";
buildPhase = "true";
@ -19,4 +19,4 @@ stdenv.mkDerivation {
maintainers = [ stdenv.lib.maintainers.eelco ];
description = "Utility to generate a Nix expression for a Perl package from CPAN";
};
}
}

View file

@ -100,7 +100,7 @@ sub get_deps {
foreach my $n (keys %{$deps}) {
next if $n eq "perl";
# Hacky way to figure out if this module is part of Perl.
if ($n !~ /^JSON/ && $n !~ /^YAML/) {
if ($n !~ /^JSON/ && $n !~ /^YAML/ && $n !~ /^Module::Pluggable/) {
eval "use $n;";
if (!$@) {
print STDERR "skipping Perl-builtin module $n\n";

View file

@ -62,7 +62,7 @@ for bin in $(find $binaryDist -executable -type f) :; do
)
if test "$names" = "glibc"; then names="stdenv.glibc"; fi
if echo $names | grep -c "gcc" &> /dev/null; then names="stdenv.gcc.gcc"; fi
if echo $names | grep -c "gcc" &> /dev/null; then names="stdenv.cc.gcc"; fi
if test $lib != $libPath; then
interpreter="--interpreter \${$names}/lib/$lib"

View file

@ -32,9 +32,7 @@ elif [[ $1 == build ]]; then
nix-build pkgs/top-level/release.nix -A tarball
else
echo "=== Checking PR"
# The current HEAD is the PR merged into origin/master, so we compare
# against origin/master
nox-review wip --against origin/master
nox-review pr ${TRAVIS_PULL_REQUEST}
fi
else
echo "$0: Unknown option $1" >&2

View file

@ -0,0 +1,111 @@
#!/bin/sh
: ${NIXOS_CHANNELS:=https://nixos.org/channels/}
: ${CHANNELS_NAMESPACE:=refs/heads/channels/}
# List all channels which are currently in the repository which we would
# have to remove if they are not found again.
deadChannels=$(git for-each-ref --format="%(refname)" $CHANNELS_NAMESPACE)
function updateRef() {
local channelName=$1
local newRev=$2
# if the inputs are not valid, then we do not update any branch.
test -z "$newRev" -o -z "$channelName" && return;
# Update the local refs/heads/channels/* branches to be in-sync with the
# channel references.
local branch=$CHANNELS_NAMESPACE$channelName
oldRev=$(git rev-parse --short $branch 2>/dev/null || true)
if test "$oldRev" != "$newRev"; then
if git update-ref $branch $newRev 2>/dev/null; then
if test -z "$oldRev"; then
echo " * [new branch] $newRev -> ${branch#refs/heads/}"
else
echo " $oldRev..$newRev -> ${branch#refs/heads/}"
fi
else
if test -z "$oldRev"; then
echo " * [missing rev] $newRev -> ${branch#refs/heads/}"
else
echo " [missing rev] $oldRev..$newRev -> ${branch#refs/heads/}"
fi
fi
fi
# Filter out the current channel from the list of dead channels.
deadChannels=$(grep -v $CHANNELS_NAMESPACE$channelName <<EOF
$deadChannels
EOF
)
}
# Find the name of all channels which are listed in the directory.
echo "Fetching channels from $NIXOS_CHANNELS:"
for channelName in : $(curl -s $NIXOS_CHANNELS | sed -n '/folder/ { s,.*href=",,; s,/".*,,; p }'); do
test "$channelName" = : && continue;
# Do not follow redirections, such that we can extract the
# short-changeset from the name of the directory where we are
# redirected to.
sha1=$(curl -sI $NIXOS_CHANNELS$channelName | sed -n '/Location/ { s,.*\.\([a-f0-9]*\)[ \r]*$,\1,; p; }')
updateRef "remotes/$channelName" "$sha1"
done
echo "Fetching channels from nixos-version:"
if currentSystem=$(nixos-version 2>/dev/null); then
# If the system is entirely build from a custom nixpkgs version,
# then the version is not annotated in git version. This sed
# expression is basically matching that the expressions end with
# ".<sha1> (Name)" to extract the sha1.
sha1=$(echo $currentSystem | sed -n 's,^.*\.\([a-f0-9]*\) *(.*)$,\1,; T skip; p; :skip;')
updateRef current-system "$sha1"
fi
echo "Fetching channels from ~/.nix-defexpr:"
for revFile in : $(find -L ~/.nix-defexpr/ -maxdepth 4 -name svn-revision); do
test "$revFile" = : && continue;
# Deconstruct a path such as, into:
#
# /home/luke/.nix-defexpr/channels_root/nixos/nixpkgs/svn-revision
# channelName = root/nixos
#
# /home/luke/.nix-defexpr/channels/nixpkgs/svn-revision
# channelName = nixpkgs
#
user=${revFile#*.nix-defexpr/channels}
repo=${user#*/}
repo=${repo%%/*}
user=${user%%/*}
user=${user#_}
test -z "$user" && user=$USER
channelName="$user${user:+/}$repo"
sha1=$(cat $revFile | sed -n 's,^.*\.\([a-f0-9]*\)$,\1,; T skip; p; :skip;')
updateRef "$channelName" "$sha1"
done
# Suggest to remove channel branches which are no longer found by this
# script. This is to handle the cases where a local/remote channel
# disappear. We should not attempt to remove manually any branches, as they
# might be user branches.
if test -n "$deadChannels"; then
echo "
Some old channel branches are still in your repository, if you
want to remove them, run the following command(s):
"
while read branch; do
echo " git update-ref -d $branch"
done <<EOF
$deadChannels
EOF
echo
fi

View file

@ -4,26 +4,26 @@ export LANG=C LC_ALL=C LC_COLLATE=C
# Load git log
raw_git_log="$(git log)"
git_data="$(echo "$raw_git_log" | grep 'Author:' |
sed -e 's/^ *Author://; s/\\//g; s/^ *//; s/ *$//;
git_data="$(echo "$raw_git_log" | grep 'Author:' |
sed -e 's/^ *Author://; s/\\//g; s/^ *//; s/ *$//;
s/ @ .*//; s/ *[<]/\t/; s/[>]//')"
# Name - nick - email correspondence from log and from maintainer list
# Also there are a few manual entries
maintainers="$(cat "$(dirname "$0")/../../lib/maintainers.nix" |
maintainers="$(cat "$(dirname "$0")/../../lib/maintainers.nix" |
grep '=' | sed -re 's/\\"/''/g;
s/ *([^ =]*) *= *" *(.*[^ ]) *[<](.*)[>] *".*/\1\t\2\t\3/')"
git_lines="$( ( echo "$git_data";
git_lines="$( ( echo "$git_data";
cat "$(dirname "$0")/vanity-manual-equalities.txt") | sort |uniq)"
emails="$(
( echo "$maintainers" | cut -f 3; echo "$git_data" | cut -f 2 ) |
sort | uniq | grep -E ".+@.+[.].+"
emails="$(
( echo "$maintainers" | cut -f 3; echo "$git_data" | cut -f 2 ) |
sort | uniq | grep -E ".+@.+[.].+"
)"
fetchGithubName () {
commitid="$(
echo "$raw_git_log" | grep -B3 "Author: .*[<]$1[>]" | head -n 3 |
echo "$raw_git_log" | grep -B3 "Author: .*[<]$1[>]" | head -n 3 |
grep '^commit ' | tail -n 1 | sed -e 's/^commit //'
)"
userid="$(
@ -36,7 +36,7 @@ fetchGithubName () {
}
[ -n "$NIXPKGS_GITHUB_NAME_CACHE" ] && {
echo "$emails" | while read email; do
echo "$emails" | while read email; do
line="$(grep "$email " "$NIXPKGS_GITHUB_NAME_CACHE")"
[ -z "$line" ] && {
echo "$email $(fetchGithubName "$email")" >> \
@ -47,11 +47,11 @@ fetchGithubName () {
# For RDF
normalize_name () {
sed -e 's/%/%25/g; s/ /%20/g; s/'\''/%27/g; s/"/%22/g;'
sed -e 's/%/%25/g; s/ /%20/g; s/'\''/%27/g; s/"/%22/g; s/`/%60/g'
}
denormalize_name () {
sed -e 's/%20/ /g; s/%27/'\''/g; s/%22/"/g; s/%25/%/g;';
sed -e 's/%20/ /g; s/%27/'\''/g; s/%22/"/g; s/%60/`/g; s/%25/%/g;';
}
n3="$(mktemp --suffix .n3)"
@ -80,8 +80,8 @@ name_list="$(
?x <my://can-be>+ ?y.
?x <my://is-name> ?g.
}
" | tail -n +2 |
sed -re 's@<my://name/@@g; s@<my://@@g; s@>@@g;' |
" | tail -n +2 |
sed -re 's@<my://name/@@g; s@<my://@@g; s@>@@g;' |
sort -k 2,3 -t ' '
)"
github_name_list="$(
@ -89,14 +89,14 @@ github_name_list="$(
select ?x ?y where {
?x (<my://can-be>+ / <my://at-github>) ?y.
}
" | tail -n +2 |
" | tail -n +2 |
sed -re 's@<my://(name|github)/@@g; s@<my://@@g; s@>@@g;'
)"
# Take first spelling option for every person
name_list_canonical="$(echo "$name_list" | cut -f 1,2 | uniq -f1)"
cleaner_script="$(echo "$name_list_canonical" | denormalize_name |
cleaner_script="$(echo "$name_list_canonical" | denormalize_name |
sed -re 's/(.*)\t(.*)/s#^\2$#\1#g/g')"
# Add github usernames
@ -104,9 +104,9 @@ if [ -n "$NIXPKGS_GITHUB_NAME_CACHE" ]; then
github_adder_script="$(echo "$github_name_list" |
grep -E "$(echo "$name_list_canonical" | cut -f 2 |
tr '\n' '|' )" |
sort | uniq |
sort | uniq |
sed -re 's/(.*)\t(.*)/s| \1$| \1\t\2|g;/' |
denormalize_name
denormalize_name
)"
else
github_adder_script=''
@ -117,6 +117,6 @@ echo "$name_list" | denormalize_name
echo
echo "$git_data" | cut -f 1 |
sed -re "$cleaner_script" |
sed -e "$cleaner_script" |
sort | uniq -c | sort -k1n | sed -re "$github_adder_script" |
sed -re 's/^ *([0-9]+) /\1\t/'

View file

@ -12,9 +12,9 @@ pre-built binary. That is, whenever a command like
<command>nixos-rebuild</command> needs a path in the Nix store, Nix
will try to download that path from the Internet rather than build it
from source. The default binary cache is
<uri>http://cache.nixos.org/</uri>. If this cache is unreachable, Nix
operations may take a long time due to HTTP connection timeouts. You
can disable the use of the binary cache by adding <option>--option
<uri>https://cache.nixos.org/</uri>. If this cache is unreachable,
Nix operations may take a long time due to HTTP connection timeouts.
You can disable the use of the binary cache by adding <option>--option
use-binary-caches false</option>, e.g.
<screen>
@ -30,4 +30,4 @@ $ nixos-rebuild switch --option binary-caches http://my-cache.example.org/
</para>
</section>
</section>

View file

@ -56,7 +56,7 @@ root file system), you can use
boot.initrd.extraKernelModules = [ "cifs" ];
</programlisting>
This causes the specified modules and their dependencies to be added
to the initial ramdark.</para>
to the initial ramdisk.</para>
<para>Kernel runtime parameters can be set through
<option>boot.kernel.sysctl</option>, e.g.

View file

@ -13,7 +13,7 @@ use NetworkManager. You can enable NetworkManager by setting:
services.networkmanager.enable = true;
</programlisting>
Some desktop managers (e.g., GNOME) enable NetworkManager
some desktop managers (e.g., GNOME) enable NetworkManager
automatically for you.</para>
<para>All users that should have permission to change network settings

View file

@ -30,34 +30,13 @@ let
else
fn;
# Convert the list of options into an XML file and a JSON file. The builtin
# unsafeDiscardStringContext is used to prevent the realisation of the store
# paths which are used in options definitions.
# Convert the list of options into an XML file. The builtin
# unsafeDiscardStringContext is used to prevent the realisation of
# the store paths which are used in options definitions.
optionsXML = builtins.toFile "options.xml" (builtins.unsafeDiscardStringContext (builtins.toXML optionsList'));
optionsJSON = builtins.toFile "options.json" (builtins.unsafeDiscardStringContext (builtins.toJSON optionsList'));
# Tools-friendly version of the list of NixOS options.
options' = stdenv.mkDerivation {
name = "options";
buildCommand = ''
# Export list of options in different format.
dst=$out/share/doc/nixos
mkdir -p $dst
cp ${optionsJSON} $dst/options.json
cp ${optionsXML} $dst/options.xml
mkdir -p $out/nix-support
echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
echo "file xml $dst/options.xml" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "List of NixOS options in various formats.";
};
optionsDocBook = runCommand "options-db.xml" {} ''
optionsXML=${options'}/share/doc/nixos/options.xml
optionsXML=${optionsXML}
if grep /nixpkgs/nixos/modules $optionsXML; then
echo "The manual appears to depend on the location of Nixpkgs, which is bad"
echo "since this prevents sharing via the NixOS channel. This is typically"
@ -83,8 +62,25 @@ let
in rec {
# Tools-friendly version of the list of NixOS options.
options = options';
# The NixOS options in JSON format.
optionsJSON = stdenv.mkDerivation {
name = "options-json";
buildCommand = ''
# Export list of options in different format.
dst=$out/share/doc/nixos
mkdir -p $dst
cp ${builtins.toFile "options.json" (builtins.unsafeDiscardStringContext (builtins.toJSON
(listToAttrs (map (o: { name = o.name; value = removeAttrs o ["name" "visible" "internal"]; }) optionsList'))))
} $dst/options.json
mkdir -p $out/nix-support
echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
''; # */
meta.description = "List of NixOS options in JSON format";
};
# Generate the NixOS manual.
manual = stdenv.mkDerivation {

View file

@ -39,24 +39,13 @@ $ firefox result/log.html
</para>
<para>It is also possible to run the test environment interactively,
allowing you to experiment with the VMs. For example:
<title>Running Tests interactively</title>
<screen>
$ nix-build login.nix -A driver
$ ./result/bin/nixos-run-vms
</screen>
The script <command>nixos-run-vms</command> starts the virtual
machines defined by the test. The root file system of the VMs is created
on the fly and kept across VM restarts in
<filename>./</filename><varname>hostname</varname><filename>.qcow2</filename>.</para>
<para>Finally, the test itself can be run interactively. This is
<para>The test itself can be run interactively. This is
particularly useful when developing or debugging a test:
<screen>
$ nix-build tests/ -A nfs.driver
$ nix-build nixos/tests/login.nix -A driver
$ ./result/bin/nixos-test-driver
starting VDE switch for network 1
&gt;
@ -66,6 +55,7 @@ You can then take any Perl statement, e.g.
<screen>
&gt; startAll
&gt; testScript
&gt; $machine->succeed("touch /tmp/foo")
</screen>
@ -74,4 +64,16 @@ script and drops you back into the test driver command line upon its
completion. This allows you to inspect the state of the VMs after the
test (e.g. to debug the test script).</para>
</section>
<para>To just start and experiment with the VMs, run:
<screen>
$ nix-build nixos/tests/login.nix -A driver
$ ./result/bin/nixos-run-vms
</screen>
The script <command>nixos-run-vms</command> starts the virtual
machines defined by the test. The root file system of the VMs is created
on the fly and kept across VM restarts in
<filename>./</filename><varname>hostname</varname><filename>.qcow2</filename>.</para>
</section>

View file

@ -40,20 +40,22 @@ rebuild everything from source. So you may want to create a local
branch based on your current NixOS version:
<screen>
$ nixos-version
14.04.273.ea1952b (Baboon)
$ git checkout -b local ea1952b
$ <replaceable>/my/sources</replaceable>/nixpkgs/maintainers/scripts/update-channel-branches.sh
Fetching channels from https://nixos.org/channels:
* [new branch] cbe467e -> channels/remotes/nixos-unstable
Fetching channels from nixos-version:
* [new branch] 9ff4738 -> channels/current-system
Fetching channels from ~/.nix-defexpr:
* [new branch] 0d4acad -> channels/root/nixos
$ git checkout -b local channels/current-system
</screen>
Or, to base your local branch on the latest version available in the
NixOS channel:
<screen>
$ curl -sI http://nixos.org/channels/nixos-unstable/ | grep Location
Location: http://releases.nixos.org/nixos/unstable/nixos-14.10pre43986.acaf4a6/
$ git checkout -b local acaf4a6
$ <replaceable>/my/sources</replaceable>/nixpkgs/maintainers/scripts/update-channel-branches.sh
$ git checkout -b local channels/remotes/nixos-unstable
</screen>
You can then use <command>git rebase</command> to sync your local
@ -92,4 +94,4 @@ to <command>nix-env</command>, as it will break after interpreting expressions
in <filename>nixos/</filename> as packages.</para>
-->
</chapter>
</chapter>

View file

@ -11,14 +11,9 @@
<listitem><para>Boot from the CD.</para></listitem>
<listitem><para>The CD contains a basic NixOS installation. (It
also contains Memtest86+, useful if you want to test new hardware.)
also contains Memtest86+, useful if you want to test new hardware).
When its finished booting, it should have detected most of your
hardware and brought up networking (check
<command>ifconfig</command>). Networking is necessary for the
installer, since it will download lots of stuff (such as source
tarballs or Nixpkgs channel binaries). Its best if you have a DHCP
server on your network. Otherwise configure networking manually
using <command>ifconfig</command>.</para></listitem>
hardware.</para></listitem>
<listitem><para>The NixOS manual is available on virtual console 8
(press Alt+F8 to access).</para></listitem>
@ -29,6 +24,16 @@
<listitem><para>If you downloaded the graphical ISO image, you can
run <command>start display-manager</command> to start KDE.</para></listitem>
<listitem><para>The boot process should have brought up networking (check
<command>ip a</command>). Networking is necessary for the
installer, since it will download lots of stuff (such as source
tarballs or Nixpkgs channel binaries). Its best if you have a DHCP
server on your network. Otherwise configure networking manually
using <command>ifconfig</command>.</para>
<para>To manually configure the network on the graphical installer,
first disable network-manager with
<command>systemctl stop network-manager</command>.</para></listitem>
<listitem><para>The NixOS installer doesnt do any partitioning or
formatting yet, so you need to do that yourself. Use the following
commands:

View file

@ -8,9 +8,14 @@
<para>NixOS ISO images can be downloaded from the <link
xlink:href="http://nixos.org/nixos/download.html">NixOS
homepage</link>. These can be burned onto a CD. It is also possible
to copy them onto a USB stick and install NixOS from there. For
details, see the <link
homepage</link>. There are a number of installation options. If
you happen to have an optical drive and a spare CD, burning the
image to CD and booting from that is probably the easiest option.
Most people will need to prepare a USB stick to boot from.
Unetbootin is recommended and the process is described in brief below.
Note that systems which use UEFI require some additional manual steps.
If you run into difficulty, a number of alternative methods are presented
in the <link
xlink:href="https://nixos.org/wiki/Installing_NixOS_from_a_USB_stick">NixOS
Wiki</link>.</para>

View file

@ -15,7 +15,7 @@ been built. These channels are:
<itemizedlist>
<listitem>
<para>Stable channels, such as <literal
xlink:href="http://nixos.org/channels/nixos-14.04">nixos-14.04</literal>.
xlink:href="https://nixos.org/channels/nixos-14.04">nixos-14.04</literal>.
These only get conservative bug fixes and package upgrades. For
instance, a channel update may cause the Linux kernel on your
system to be upgraded from 3.4.66 to 3.4.67 (a minor bug fix), but
@ -26,7 +26,7 @@ been built. These channels are:
</listitem>
<listitem>
<para>The unstable channel, <literal
xlink:href="http://nixos.org/channels/nixos-unstable">nixos-unstable</literal>.
xlink:href="https://nixos.org/channels/nixos-unstable">nixos-unstable</literal>.
This corresponds to NixOS's main development branch, and may thus
see radical changes between channel updates. It's not recommended
for production systems.</para>
@ -34,7 +34,7 @@ been built. These channels are:
</itemizedlist>
To see what channels are available, go to <link
xlink:href="http://nixos.org/channels"/>. (Note that the URIs of the
xlink:href="https://nixos.org/channels"/>. (Note that the URIs of the
various channels redirect to a directory that contains the channels
latest version and includes ISO images and VirtualBox
appliances.)</para>
@ -53,20 +53,20 @@ nixos https://nixos.org/channels/nixos-unstable
To switch to a different NixOS channel, do
<screen>
$ nix-channel --add http://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
$ nix-channel --add https://nixos.org/channels/<replaceable>channel-name</replaceable> nixos
</screen>
(Be sure to include the <literal>nixos</literal> parameter at the
end.) For instance, to use the NixOS 14.04 stable channel:
<screen>
$ nix-channel --add http://nixos.org/channels/nixos-14.04 nixos
$ nix-channel --add https://nixos.org/channels/nixos-14.04 nixos
</screen>
But it you want to live on the bleeding edge:
But if you want to live on the bleeding edge:
<screen>
$ nix-channel --add http://nixos.org/channels/nixos-unstable nixos
$ nix-channel --add https://nixos.org/channels/nixos-unstable nixos
</screen>
</para>

View file

@ -10,7 +10,7 @@
<para>This section lists the release notes for each stable version of NixOS.</para>
</partintro>
<xi:include href="rl-1411.xml" />
<xi:include href="rl-1412.xml" />
<xi:include href="rl-1404.xml" />
<xi:include href="rl-1310.xml" />

View file

@ -1,22 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-14.11">
<title>Release 14.11 (“Caterpillar”, 2014/11/??)</title>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:
<itemizedlist>
<listitem><para>The host side of a container virtual Ethernet pair
is now called <literal>ve-<replaceable>container-name</replaceable></literal>
rather than <literal>c-<replaceable>container-name</replaceable></literal>.</para></listitem>
</itemizedlist>
</para>
</chapter>

View file

@ -0,0 +1,167 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-release-14.12">
<title>Release 14.12 (“Caterpillar”, 2014/12/??)</title>
<para>In addition to numerous new and upgraded packages, this release has the following highlights:
<itemizedlist>
<listitem><para>Systemd has been updated to version 217, which has numerous
<link xlink:href="http://lists.freedesktop.org/archives/systemd-devel/2014-October/024662.html">improvements
.</link></para></listitem>
<listitem><para><link xlink:href="http://thread.gmane.org/gmane.linux.distributions.nixos/15165">
Nix has been updated to 1.8.</link></para></listitem>
<listitem><para>NixOS is now based on Glibc 2.20.</para></listitem>
<listitem><para>KDE has been updated to 4.14.</para></listitem>
<listitem><para>The default Linux kernel has been updated to 3.14.</para></listitem>
<listitem><para><option>users.mutableUsers</option> set to <literal>true</literal> now respects any changes
made after initial creation of a user or a group.
</para></listitem>
</itemizedlist></para>
<para>The following new services were added since the last release:
<itemizedlist>
<listitem><para>parallels-guest</para></listitem>
<listitem><para>docker</para></listitem>
<listitem><para>lxc</para></listitem>
<listitem><para>openvswitch</para></listitem>
<listitem><para>fluxbox</para></listitem>
<listitem><para>bspwm</para></listitem>
<listitem><para>gdm</para></listitem>
<listitem><para>fcgiwrap</para></listitem>
<listitem><para>peerflix</para></listitem>
<listitem><para>fail2ban</para></listitem>
<listitem><para>chronos</para></listitem>
<listitem><para>znc</para></listitem>
<listitem><para>unifi</para></listitem>
<listitem><para>teamspeak3</para></listitem>
<listitem><para>strongswan</para></listitem>
<listitem><para>seeks</para></listitem>
<listitem><para>radicale</para></listitem>
<listitem><para>prosody</para></listitem>
<listitem><para>polipo</para></listitem>
<listitem><para>openntpd</para></listitem>
<listitem><para>nsd</para></listitem>
<listitem><para>mailpile</para></listitem>
<listitem><para>i2pd</para></listitem>
<listitem><para>dnscrypt-proxy</para></listitem>
<listitem><para>consul</para></listitem>
<listitem><para>atftpd</para></listitem>
<listitem><para>scollector</para></listitem>
<listitem><para>collectd</para></listitem>
<listitem><para>bosun</para></listitem>
<listitem><para>riemann</para></listitem>
<listitem><para>zookeeper</para></listitem>
<listitem><para>uhub</para></listitem>
<listitem><para>siproxd</para></listitem>
<listitem><para>redmine</para></listitem>
<listitem><para>phd</para></listitem>
<listitem><para>mesos</para></listitem>
<listitem><para>gitlab</para></listitem>
<listitem><para>gitolite</para></listitem>
<listitem><para>etcd</para></listitem>
<listitem><para>docker-registry</para></listitem>
<listitem><para>cpuminer-cryptonight</para></listitem>
<listitem><para>thermald</para></listitem>
<listitem><para>mlmmj</para></listitem>
<listitem><para>tcsd</para></listitem>
<listitem><para>gnome3.seahorse</para></listitem>
<listitem><para>gnome3.gvfs</para></listitem>
<listitem><para>gnome3.gnome-online-miners</para></listitem>
<listitem><para>gnome3.gnome-documents</para></listitem>
<listitem><para>geoclue2</para></listitem>
<listitem><para>opentsdb</para></listitem>
<listitem><para>neo4j</para></listitem>
<listitem><para>monetdb</para></listitem>
<listitem><para>influxdb</para></listitem>
<listitem><para>hbase</para></listitem>
<listitem><para>torque/mrom</para></listitem>
<listitem><para>torque/server</para></listitem>
<listitem><para>kubernetes</para></listitem>
<listitem><para>fleet</para></listitem>
<listitem><para>crashplan</para></listitem>
<listitem><para>mopidy</para></listitem>
<listitem><para>liquidsoap</para></listitem>
</itemizedlist>
</para>
<para>When upgrading from a previous release, please be aware of the
following incompatible changes:
<itemizedlist>
<listitem><para>The default version of Apache httpd is now 2.4. If
you use the <option>extraConfig</option> option to pass literal
Apache configuration text, you may need to update it — see <link
xlink:href="http://httpd.apache.org/docs/2.4/upgrading.html">Apaches
documentation</link> for details. If you wish to continue to use
httpd 2.2, add the following line to your NixOS configuration:
<programlisting>
services.httpd.package = pkgs.apacheHttpd_2_2;
</programlisting>
</para></listitem>
<listitem><para>PHP 5.3 has been removed because it is no longer
supported by the PHP project. A <link
xlink:href="http://php.net/migration54">migration guide</link> is
available.</para></listitem>
<listitem><para>The host side of a container virtual Ethernet pair
is now called <literal>ve-<replaceable>container-name</replaceable></literal>
rather than <literal>c-<replaceable>container-name</replaceable></literal>.</para></listitem>
<listitem><para>GNOME 3.10 support has been dropped. The default GNOME version is now 3.12.</para></listitem>
<listitem><para>VirtualBox has been upgraded to 4.3.20 release. Users may be required to run
<command>rm -rf /tmp.vbox*</command>. <literal>imports = [ &lt;nixpkgs/nixos/modules/programs/virtualbox.nix&gt; ]</literal>
is no longer necessary, use <literal>services.virtualboxHost.enable = true</literal> instead.
</para>
<para>Also, hardening mode is now enabled by default, which means that unless you want to use
USB support, you no longer need to be a member of the <literal>vboxusers</literal> group.
</para></listitem>
<listitem><para>Chromium has been updated to 39.0.2171.65. <option>enablePepperPDF</option> is now enabled by default.
<literal>chromium*Wrapper</literal> packages no longer exist, because upstream removed NSAPI support.
<literal>chromium-stable</literal> has been renamed to <literal>chromium</literal>.
</para></listitem>
<listitem><para>Python packaging documentation is now part of nixpkgs manual. To override
the python packages available to a custom python you now use <literal>pkgs.pythonFull.buildEnv.override</literal>
instead of <literal>pkgs.pythonFull.override</literal>.
</para></listitem>
<listitem><para><literal>boot.resumeDevice = "8:6"</literal> is no longer supported. Most users will
want to leave it undefined, which uses the swap partitions automatically. There is an evaluation
assertion to ensure that the string starts with a slash.
</para></listitem>
<listitem><para>The system-wide default timezone for NixOS installations
changed from <literal>CET</literal> to <literal>UTC</literal>. To choose
a different timezone for your system, configure
<literal>time.timeZone</literal> in
<literal>configuration.nix</literal>. A fairly complete list of possible
values for that setting is available at <link
xlink:href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"/>.</para></listitem>
<listitem><para>GNU screen has been updated to 4.2.1, which breaks
the ability to connect to sessions created by older versions of
screen.</para></listitem>
</itemizedlist>
</para>
</chapter>
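
For illustration, the Python packaging change above might look roughly like this
in a user's ~/.nixpkgs/config.nix (a minimal sketch; the package names are
examples, not taken from this changeset):

  {
    packageOverrides = pkgs: {
      # buildEnv.override replaces the old pkgs.pythonFull.override idiom
      myPython = pkgs.pythonFull.buildEnv.override {
        extraLibs = with pkgs.pythonPackages; [ requests ];
      };
    };
  }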

View file

@ -11,15 +11,16 @@
, prefix ? []
}:
let extraArgs_ = extraArgs; pkgs_ = pkgs; system_ = system; in
rec {
let extraArgs_ = extraArgs; pkgs_ = pkgs; system_ = system;
extraModules = let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
in if e == "" then [] else [(import (builtins.toPath e))];
in rec {
# Merge the option definitions in all modules, forming the full
# system configuration.
inherit (pkgs.lib.evalModules {
inherit prefix;
modules = modules ++ baseModules;
modules = modules ++ extraModules ++ baseModules;
args = extraArgs;
check = check && options.environment.checkConfigurationOptions.value;
}) config options;
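
The extraModules hook above merges one additional module into every evaluation
when NIXOS_EXTRA_MODULE_PATH points at a Nix file. A minimal sketch of such a
module (path and contents are hypothetical):

  # e.g. NIXOS_EXTRA_MODULE_PATH=/etc/nixos/extra.nix
  { config, pkgs, ... }:
  {
    # Anything an ordinary NixOS module may set; merged into every evaluation.
    environment.systemPackages = [ pkgs.vim ];
  }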

View file

@ -16,6 +16,9 @@
# symlink to `object' that will be added to the tarball.
storeContents ? []
# Extra commands to be executed before archiving files
, extraCommands ? ""
# Extra tar arguments
, extraArgs ? ""
}:
@ -25,7 +28,7 @@ stdenv.mkDerivation {
builder = ./make-system-tarball.sh;
buildInputs = [perl xz];
inherit fileName pathsFromGraph extraArgs;
inherit fileName pathsFromGraph extraArgs extraCommands;
# !!! should use XML.
sources = map (x: x.source) contents;

View file

@ -33,7 +33,7 @@ for i in $storePaths; do
done
# TODO tar ruxo
# TODO tar ruxo
# Also include a manifest of the closures in a format suitable for
# nix-store --load-db.
printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration
@ -48,6 +48,8 @@ for ((n = 0; n < ${#objects[*]}; n++)); do
fi
done
$extraCommands
mkdir -p $out/tarball
tar cvJf $out/tarball/$fileName.tar.xz * $extraArgs

View file

@ -19,7 +19,8 @@ in
{
imports = [ ./amazon-base-config.nix ];
ec2.hvm = true;
boot.loader.grub.device = lib.mkOverride 0 "nodev";
boot.loader.grub.device = lib.mkOverride 0 "/dev/xvdg";
boot.kernelParams = [ "console=ttyS0" ];
boot.initrd.extraUtilsCommands = ''
cp -v ${pkgs.gawk}/bin/gawk $out/bin/gawk

View file

@ -19,8 +19,17 @@ parser.add_argument('--key', dest='key_name', action='store_true', help='Keypair
args = parser.parse_args()
instance_type = "m3.medium" if args.hvm else "m1.small"
ebs_size = 8 if args.hvm else 20
if args.hvm:
virtualization_type = "hvm"
root_block = "/dev/sda1"
image_type = 'hvm'
else:
virtualization_type = "paravirtual"
root_block = "/dev/sda"
image_type = 'ebs'
ebs_size = 20
# Start a NixOS machine in the given region.
f = open("ebs-creator-config.nix", "w")
@ -66,7 +75,7 @@ m.run_command("mount {0} /mnt".format(device))
m.run_command("touch /mnt/.ebs")
m.run_command("mkdir -p /mnt/etc/nixos")
m.run_command("nix-channel --add http://nixos.org/channels/nixos-{} nixos".format(args.channel))
m.run_command("nix-channel --add https://nixos.org/channels/nixos-{} nixos".format(args.channel))
m.run_command("nix-channel --update")
version = m.run_command("nix-instantiate --eval-only -A lib.nixpkgsVersion '<nixpkgs>'", capture_stdout=True).split(' ')[0].replace('"','').strip()
@ -76,10 +85,6 @@ if args.hvm:
m.upload_file("./amazon-hvm-config.nix", "/mnt/etc/nixos/configuration.nix")
m.upload_file("./amazon-hvm-install-config.nix", "/mnt/etc/nixos/amazon-hvm-install-config.nix")
m.run_command("NIXOS_CONFIG=/etc/nixos/amazon-hvm-install-config.nix nixos-install")
m.run_command('nix-env -iA nixos.pkgs.grub')
m.run_command('cp /nix/store/*-grub-0.97*/lib/grub/i386-pc/* /mnt/boot/grub')
m.run_command('echo "(hd1) /dev/xvdg" > device.map')
m.run_command('echo -e "root (hd1,0)\nsetup (hd1)" | grub --device-map=device.map --batch')
else:
m.upload_file("./amazon-base-config.nix", "/mnt/etc/nixos/configuration.nix")
m.run_command("nixos-install")
@ -87,7 +92,7 @@ else:
m.run_command("umount /mnt")
if args.hvm:
ami_name = "nixos-{0}-x86_64-ebs-hvm".format(version)
ami_name = "nixos-{0}-x86_64-hvm".format(version)
description = "NixOS {0} (x86_64; EBS root; hvm)".format(version)
else:
ami_name = "nixos-{0}-x86_64-ebs".format(version)
@ -102,58 +107,40 @@ def check():
m.connect()
volume = m._conn.get_all_volumes([], filters={'attachment.instance-id': m.resource_id, 'attachment.device': "/dev/sdg"})[0]
if args.hvm:
instance = m._conn.run_instances( image_id="ami-5f491f36"
, instance_type=instance_type
, key_name=args.key_name
, placement=m.zone
, security_groups=["eelco-test"]).instances[0]
nixops.util.check_wait(lambda: instance.update() == 'running', max_tries=120)
instance.stop()
nixops.util.check_wait(lambda: instance.update() == 'stopped', max_tries=120)
old_root_volume = m._conn.get_all_volumes([], filters={'attachment.instance-id': instance.id, 'attachment.device': "/dev/sda1"})[0]
old_root_volume.detach()
volume.detach()
nixops.util.check_wait(lambda: volume.update() == 'available', max_tries=120)
nixops.util.check_wait(lambda: old_root_volume.update() == 'available', max_tries=120)
volume.attach(instance.id, '/dev/sda1')
nixops.util.check_wait(lambda: volume.update() == 'in-use', max_tries=120)
ami_id = m._conn.create_image(instance.id, ami_name, description)
time.sleep(5)
image = m._conn.get_all_images([ami_id])[0]
nixops.util.check_wait(lambda: image.update() == 'available', max_tries=120)
instance.terminate()
# Create a snapshot.
snapshot = volume.create_snapshot(description=description)
print >> sys.stderr, "created snapshot {0}".format(snapshot.id)
else:
# Create a snapshot.
snapshot = volume.create_snapshot(description=description)
print >> sys.stderr, "created snapshot {0}".format(snapshot.id)
nixops.util.check_wait(check, max_tries=120)
nixops.util.check_wait(check, max_tries=120)
m._conn.create_tags([snapshot.id], {'Name': ami_name})
m._conn.create_tags([snapshot.id], {'Name': ami_name})
if not args.keep: depl.destroy_resources()
if not args.keep: depl.destroy_resources()
# Register the image.
aki = m._conn.get_all_images(filters={'manifest-location': 'ec2*pv-grub-hd0_1.03-x86_64*'})[0]
print >> sys.stderr, "using kernel image {0} - {1}".format(aki.id, aki.location)
# Register the image.
aki = m._conn.get_all_images(filters={'manifest-location': '*pv-grub-hd0_1.03-x86_64*'})[0]
print >> sys.stderr, "using kernel image {0} - {1}".format(aki.id, aki.location)
block_map = BlockDeviceMapping()
block_map[root_block] = BlockDeviceType(snapshot_id=snapshot.id, delete_on_termination=True, size=ebs_size, volume_type="gp2")
block_map['/dev/sdb'] = BlockDeviceType(ephemeral_name="ephemeral0")
block_map['/dev/sdc'] = BlockDeviceType(ephemeral_name="ephemeral1")
block_map['/dev/sdd'] = BlockDeviceType(ephemeral_name="ephemeral2")
block_map['/dev/sde'] = BlockDeviceType(ephemeral_name="ephemeral3")
block_map = BlockDeviceMapping()
block_map['/dev/sda'] = BlockDeviceType(snapshot_id=snapshot.id, delete_on_termination=True)
block_map['/dev/sdb'] = BlockDeviceType(ephemeral_name="ephemeral0")
block_map['/dev/sdc'] = BlockDeviceType(ephemeral_name="ephemeral1")
block_map['/dev/sdd'] = BlockDeviceType(ephemeral_name="ephemeral2")
block_map['/dev/sde'] = BlockDeviceType(ephemeral_name="ephemeral3")
ami_id = m._conn.register_image(
common_args = dict(
name=ami_name,
description=description,
architecture="x86_64",
root_device_name="/dev/sda",
kernel_id=aki.id,
block_device_map=block_map)
root_device_name=root_block,
block_device_map=block_map,
virtualization_type=virtualization_type,
delete_root_volume_on_termination=True
)
if not args.hvm:
common_args['kernel_id']=aki.id
ami_id = m._conn.register_image(**common_args)
print >> sys.stderr, "registered AMI {0}".format(ami_id)
@ -197,17 +184,12 @@ test_depl.nix_exprs = [os.path.abspath("./ebs-test.nix")]
test_depl.deploy(create_only=True)
test_depl.machines['machine'].run_command("nixos-version")
if args.hvm:
image_type = 'hvm'
else:
image_type = 'ebs'
# Log the AMI ID.
f = open("{0}.{1}.ami-id".format(args.region, image_type), "w")
f.write("{0}".format(ami_id))
f.close()
for dest in [ 'us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'sa-east-1']:
for dest in [ 'us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'eu-central-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'sa-east-1']:
if args.region != dest:
print >> sys.stderr, "copying image from region {0} to {1}".format(args.region, dest)
conn = boto.ec2.connect_to_region(dest)

View file

@ -0,0 +1,193 @@
{ config, pkgs, ... }:
with pkgs.lib;
let fcBool = x: if x then "<bool>true</bool>" else "<bool>false</bool>";
in
{
options = {
fonts = {
fontconfig = {
ultimate = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Enable fontconfig-ultimate settings (formerly known as
Infinality). Besides the customizable settings in this NixOS
module, fontconfig-ultimate also provides many font-specific
rendering tweaks.
'';
};
allowBitmaps = mkOption {
type = types.bool;
default = true;
description = ''
Allow bitmap fonts. Set to <literal>false</literal> to ban all
bitmap fonts.
'';
};
allowType1 = mkOption {
type = types.bool;
default = false;
description = ''
Allow Type-1 fonts. Default is <literal>false</literal> because of
poor rendering.
'';
};
useEmbeddedBitmaps = mkOption {
type = types.bool;
default = false;
description = ''Use embedded bitmaps in fonts like Calibri.'';
};
forceAutohint = mkOption {
type = types.bool;
default = false;
description = ''
Force use of the TrueType Autohinter. Useful for debugging or
free-software purists.
'';
};
renderMonoTTFAsBitmap = mkOption {
type = types.bool;
default = false;
description = ''Render some monospace TTF fonts as bitmaps.'';
};
substitutions = mkOption {
type = types.str // {
check = flip elem ["none" "free" "combi" "ms"];
};
default = "free";
description = ''
Font substitutions to replace common Type 1 fonts with nicer
TrueType fonts. <literal>free</literal> uses free fonts,
<literal>ms</literal> uses Microsoft fonts,
<literal>combi</literal> uses a combination, and
<literal>none</literal> disables the substitutions.
'';
};
rendering = mkOption {
type = types.attrs;
default = pkgs.fontconfig-ultimate.rendering.ultimate;
description = ''
FreeType rendering settings presets. The default is
<literal>pkgs.fontconfig-ultimate.rendering.ultimate</literal>.
The other available styles are:
<literal>ultimate-lighter</literal>,
<literal>ultimate-darker</literal>,
<literal>ultimate-lightest</literal>,
<literal>ultimate-darkest</literal>,
<literal>default</literal> (the original Infinality default),
<literal>osx</literal>,
<literal>ipad</literal>,
<literal>ubuntu</literal>,
<literal>linux</literal>,
<literal>winxplight</literal>,
<literal>win7light</literal>,
<literal>winxp</literal>,
<literal>win7</literal>,
<literal>vanilla</literal>,
<literal>classic</literal>,
<literal>nudge</literal>,
<literal>push</literal>,
<literal>shove</literal>,
<literal>sharpened</literal>,
<literal>infinality</literal>. Any of the presets may be
customized by editing the attributes. To disable, set this option
to the empty attribute set <literal>{}</literal>.
'';
};
};
};
};
};
config =
let ultimate = config.fonts.fontconfig.ultimate;
fontconfigUltimateConf = ''
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
${optionalString (!ultimate.allowBitmaps) ''
<!-- Reject bitmap fonts -->
<selectfont>
<rejectfont>
<pattern>
<patelt name="scalable"><bool>false</bool></patelt>
</pattern>
</rejectfont>
</selectfont>
''}
${optionalString (!ultimate.allowType1) ''
<!-- Reject Type 1 fonts -->
<selectfont>
<rejectfont>
<pattern>
<patelt name="fontformat">
<string>Type 1</string>
</patelt>
</pattern>
</rejectfont>
</selectfont>
''}
<!-- Use embedded bitmaps in fonts like Calibri? -->
<match target="font">
<edit name="embeddedbitmap" mode="assign">
${fcBool ultimate.useEmbeddedBitmaps}
</edit>
</match>
<!-- Force autohint always -->
<match target="font">
<edit name="force_autohint" mode="assign">
${fcBool ultimate.forceAutohint}
</edit>
</match>
<!-- Render some monospace TTF fonts as bitmaps -->
<match target="pattern">
<edit name="bitmap_monospace" mode="assign">
${fcBool ultimate.renderMonoTTFAsBitmap}
</edit>
</match>
${optionalString (ultimate.substitutions != "none") ''
<!-- Type 1 font substitutions -->
<include ignore_missing="yes">${pkgs.fontconfig-ultimate.confd}/etc/fonts/presets/${ultimate.substitutions}</include>
''}
<include ignore_missing="yes">${pkgs.fontconfig-ultimate.confd}/etc/fonts/conf.d</include>
</fontconfig>
'';
in mkIf (config.fonts.fontconfig.enable && ultimate.enable) {
environment.etc."fonts/conf.d/52-fontconfig-ultimate.conf" = {
text = fontconfigUltimateConf;
};
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/52-fontconfig-ultimate.conf" = {
text = fontconfigUltimateConf;
};
environment.variables = ultimate.rendering;
};
}
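
A sketch of how these options might be set in configuration.nix (values are
illustrative; the rendering preset is one of those listed above):

  { pkgs, ... }:
  {
    fonts.fontconfig.ultimate = {
      enable = true;
      allowBitmaps = false;                 # reject all bitmap fonts
      substitutions = "combi";              # free + Microsoft substitutions
      rendering = pkgs.fontconfig-ultimate.rendering.ultimate-lighter;
    };
  }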

View file

@ -8,47 +8,250 @@ with lib;
fonts = {
enableFontConfig = mkOption { # !!! should be enableFontconfig
type = types.bool;
default = true;
description = ''
If enabled, a Fontconfig configuration file will be built
pointing to a set of default fonts. If you don't care about
running X11 applications or any other program that uses
Fontconfig, you can turn this option off and prevent a
dependency on all those fonts.
'';
fontconfig = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
If enabled, a Fontconfig configuration file will be built
pointing to a set of default fonts. If you don't care about
running X11 applications or any other program that uses
Fontconfig, you can turn this option off and prevent a
dependency on all those fonts.
'';
};
antialias = mkOption {
type = types.bool;
default = true;
description = "Enable font antialiasing.";
};
dpi = mkOption {
type = types.int;
default = 0;
description = ''
Force DPI setting. Setting to <literal>0</literal> disables DPI
forcing; the DPI detected for the display will be used.
'';
};
defaultFonts = {
monospace = mkOption {
type = types.listOf types.str;
default = ["DejaVu Sans Mono"];
description = ''
System-wide default monospace font(s). Multiple fonts may be
listed in case multiple languages must be supported.
'';
};
sansSerif = mkOption {
type = types.listOf types.str;
default = ["DejaVu Sans"];
description = ''
System-wide default sans serif font(s). Multiple fonts may be
listed in case multiple languages must be supported.
'';
};
serif = mkOption {
type = types.listOf types.str;
default = ["DejaVu Serif"];
description = ''
System-wide default serif font(s). Multiple fonts may be listed
in case multiple languages must be supported.
'';
};
};
hinting = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable TrueType hinting.";
};
autohint = mkOption {
type = types.bool;
default = true;
description = ''
Enable the autohinter, which provides hinting for otherwise
un-hinted fonts. The results are usually lower quality than
correctly-hinted fonts.
'';
};
style = mkOption {
type = types.str // {
check = flip elem ["none" "slight" "medium" "full"];
};
default = "full";
description = ''
TrueType hinting style, one of <literal>none</literal>,
<literal>slight</literal>, <literal>medium</literal>, or
<literal>full</literal>.
'';
};
};
includeUserConf = mkOption {
type = types.bool;
default = true;
description = ''
Include the user configuration from
<filename>~/.config/fontconfig/fonts.conf</filename> or
<filename>~/.config/fontconfig/conf.d</filename>.
'';
};
subpixel = {
rgba = mkOption {
type = types.string // {
check = flip elem ["rgb" "bgr" "vrgb" "vbgr" "none"];
};
default = "rgb";
description = ''
Subpixel order, one of <literal>none</literal>,
<literal>rgb</literal>, <literal>bgr</literal>,
<literal>vrgb</literal>, or <literal>vbgr</literal>.
'';
};
lcdfilter = mkOption {
type = types.str // {
check = flip elem ["none" "default" "light" "legacy"];
};
default = "default";
description = ''
FreeType LCD filter, one of <literal>none</literal>,
<literal>default</literal>, <literal>light</literal>, or
<literal>legacy</literal>.
'';
};
};
};
};
};
config =
let fontconfig = config.fonts.fontconfig;
fcBool = x: "<bool>" + (if x then "true" else "false") + "</bool>";
nixosConf = ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
config = mkIf config.fonts.enableFontConfig {
<!-- Default rendering settings -->
<match target="font">
<edit mode="assign" name="hinting">
${fcBool fontconfig.hinting.enable}
</edit>
<edit mode="assign" name="autohint">
${fcBool fontconfig.hinting.autohint}
</edit>
<edit mode="assign" name="hintstyle">
<const>hint${fontconfig.hinting.style}</const>
</edit>
<edit mode="assign" name="antialias">
${fcBool fontconfig.antialias}
</edit>
<edit mode="assign" name="rgba">
<const>${fontconfig.subpixel.rgba}</const>
</edit>
<edit mode="assign" name="lcdfilter">
<const>lcd${fontconfig.subpixel.lcdfilter}</const>
</edit>
</match>
# Bring in the default (upstream) fontconfig configuration.
environment.etc."fonts/fonts.conf".source =
pkgs.makeFontsConf { fontDirectories = config.fonts.fonts; };
<!-- Default fonts -->
${optionalString (fontconfig.defaultFonts.sansSerif != []) ''
<alias>
<family>sans-serif</family>
<prefer>
${concatStringsSep "\n"
(map (font: "<family>${font}</family>")
fontconfig.defaultFonts.sansSerif)}
</prefer>
</alias>
''}
${optionalString (fontconfig.defaultFonts.serif != []) ''
<alias>
<family>serif</family>
<prefer>
${concatStringsSep "\n"
(map (font: "<family>${font}</family>")
fontconfig.defaultFonts.serif)}
</prefer>
</alias>
''}
${optionalString (fontconfig.defaultFonts.monospace != []) ''
<alias>
<family>monospace</family>
<prefer>
${concatStringsSep "\n"
(map (font: "<family>${font}</family>")
fontconfig.defaultFonts.monospace)}
</prefer>
</alias>
''}
environment.etc."fonts/conf.d/00-nixos.conf".text =
''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
${optionalString (fontconfig.dpi != 0) ''
<match target="pattern">
<edit name="dpi" mode="assign">
<double>${toString fontconfig.dpi}</double>
</edit>
</match>
''}
<!-- Set the default hinting style to "slight". -->
<match target="font">
<edit mode="assign" name="hintstyle">
<const>hintslight</const>
</edit>
</match>
</fontconfig>
'';
in mkIf fontconfig.enable {
</fontconfig>
'';
# Fontconfig 2.10 backward compatibility
environment.systemPackages = [ pkgs.fontconfig ];
# Bring in the default (upstream) fontconfig configuration, only for fontconfig 2.10
environment.etc."fonts/fonts.conf".source =
pkgs.makeFontsConf { fontconfig = pkgs.fontconfig_210; fontDirectories = config.fonts.fonts; };
};
environment.etc."fonts/conf.d/98-nixos.conf".text = nixosConf;
# Versioned fontconfig > 2.10. Take shared fonts.conf from fontconfig.
# Otherwise specify only font directories.
environment.etc."fonts/${pkgs.fontconfig.configVersion}/fonts.conf".source =
"${pkgs.fontconfig}/etc/fonts/fonts.conf";
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/00-nixos.conf".text =
''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Font directories -->
${concatStringsSep "\n" (map (font: "<dir>${font}</dir>") config.fonts.fonts)}
</fontconfig>
'';
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/98-nixos.conf".text = nixosConf;
environment.etc."fonts/${pkgs.fontconfig.configVersion}/conf.d/99-user.conf" = {
enable = fontconfig.includeUserConf;
text = ''
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<include ignore_missing="yes" prefix="xdg">fontconfig/conf.d</include>
<include ignore_missing="yes" prefix="xdg">fontconfig/fonts.conf</include>
</fontconfig>
'';
};
environment.systemPackages = [ pkgs.fontconfig ];
};
}
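
Roughly, the new fonts.fontconfig options could be combined like this in
configuration.nix (all values are examples):

  {
    fonts.fontconfig = {
      antialias = true;
      dpi = 96;                             # 0 keeps automatic detection
      hinting = { enable = true; autohint = false; style = "slight"; };
      subpixel = { rgba = "rgb"; lcdfilter = "default"; };
      defaultFonts.monospace = [ "DejaVu Sans Mono" ];
      includeUserConf = true;
    };
  }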

View file

@ -13,14 +13,6 @@ with lib;
type = types.listOf types.path;
example = literalExample "[ pkgs.dejavu_fonts ]";
description = "List of primary font paths.";
apply = list: list ++
[ # - the user's current profile
"~/.nix-profile/lib/X11/fonts"
"~/.nix-profile/share/fonts"
# - the default profile
"/nix/var/nix/profiles/default/lib/X11/fonts"
"/nix/var/nix/profiles/default/share/fonts"
];
};
};
@ -33,7 +25,7 @@ with lib;
[ pkgs.xorg.fontbhttf
pkgs.xorg.fontbhlucidatypewriter100dpi
pkgs.xorg.fontbhlucidatypewriter75dpi
pkgs.ttf_bitstream_vera
pkgs.dejavu_fonts
pkgs.freefont_ttf
pkgs.liberation_ttf
pkgs.xorg.fontbh100dpi

View file

@ -39,6 +39,73 @@ in
'';
};
networking.proxy = {
default = lib.mkOption {
type = types.nullOr types.str;
default = null;
description = ''
This option specifies the default value for httpProxy, httpsProxy, ftpProxy and rsyncProxy.
'';
example = "http://127.0.0.1:3128";
};
httpProxy = lib.mkOption {
type = types.nullOr types.str;
default = cfg.proxy.default;
description = ''
This option specifies the http_proxy environment variable.
'';
example = "http://127.0.0.1:3128";
};
httpsProxy = lib.mkOption {
type = types.nullOr types.str;
default = cfg.proxy.default;
description = ''
This option specifies the https_proxy environment variable.
'';
example = "http://127.0.0.1:3128";
};
ftpProxy = lib.mkOption {
type = types.nullOr types.str;
default = cfg.proxy.default;
description = ''
This option specifies the ftp_proxy environment variable.
'';
example = "http://127.0.0.1:3128";
};
rsyncProxy = lib.mkOption {
type = types.nullOr types.str;
default = cfg.proxy.default;
description = ''
This option specifies the rsync_proxy environment variable.
'';
example = "http://127.0.0.1:3128";
};
noProxy = lib.mkOption {
type = types.nullOr types.str;
default = null;
description = ''
This option specifies the no_proxy environment variable.
If a default proxy is used and noProxy is null,
then noProxy will be set to 127.0.0.1,localhost.
'';
example = "127.0.0.1,localhost,.localdomain";
};
envVars = lib.mkOption {
type = types.attrs;
internal = true;
default = {};
description = ''
Environment variables used for the network proxy.
'';
};
};
};
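
The declarations above suggest usage along these lines in configuration.nix
(the addresses are the examples from the option descriptions, not real
endpoints):

  {
    networking.proxy = {
      default = "http://127.0.0.1:3128";    # applied to http/https/ftp/rsync
      noProxy = "127.0.0.1,localhost,.localdomain";
    };
  }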
config = {
@ -73,7 +140,7 @@ in
'' + optionalString config.services.nscd.enable ''
# Invalidate the nscd cache whenever resolv.conf is
# regenerated.
libc_restart='${pkgs.systemd}/bin/systemctl try-restart --no-block nscd.service'
libc_restart='${pkgs.systemd}/bin/systemctl try-restart --no-block nscd.service 2> /dev/null'
'' + optionalString cfg.dnsSingleRequest ''
# only send one DNS request at a time
resolv_conf_options='single-request'
@ -84,13 +151,59 @@ in
dnsmasq_conf=/etc/dnsmasq-conf.conf
dnsmasq_resolv=/etc/dnsmasq-resolv.conf
'';
};
} // (optionalAttrs config.services.resolved.enable (
if dnsmasqResolve then {
"dnsmasq-resolv.conf".source = "/run/systemd/resolve/resolv.conf";
} else {
"resolv.conf".source = "/run/systemd/resolve/resolv.conf";
}
));
networking.proxy.envVars =
optionalAttrs (cfg.proxy.default != null) {
# other options already fallback to proxy.default
no_proxy = "127.0.0.1,localhost";
} // optionalAttrs (cfg.proxy.httpProxy != null) {
http_proxy = cfg.proxy.httpProxy;
} // optionalAttrs (cfg.proxy.httpsProxy != null) {
https_proxy = cfg.proxy.httpsProxy;
} // optionalAttrs (cfg.proxy.rsyncProxy != null) {
rsync_proxy = cfg.proxy.rsyncProxy;
} // optionalAttrs (cfg.proxy.ftpProxy != null) {
ftp_proxy = cfg.proxy.ftpProxy;
} // optionalAttrs (cfg.proxy.noProxy != null) {
no_proxy = cfg.proxy.noProxy;
};
# Install the proxy environment variables
environment.sessionVariables = cfg.proxy.envVars;
# The ip-up target is started when we have IP connectivity. So
# services that depend on IP connectivity (like ntpd) should be
# pulled in by this target.
systemd.targets.ip-up.description = "Services Requiring IP Connectivity";
# This is needed when /etc/resolv.conf is being overridden by networkd
# and other configurations. If the file is destroyed by an environment
# activation then it must be rebuilt so that applications which interface
# with /etc/resolv.conf directly don't break.
system.activationScripts.resolvconf = stringAfter [ "etc" "tmpfs" "var" ]
''
# Systemd resolved controls its own resolv.conf
rm -f /run/resolvconf/interfaces/systemd
${optionalString config.services.resolved.enable ''
rm -rf /run/resolvconf/interfaces
mkdir -p /run/resolvconf/interfaces
ln -s /run/systemd/resolve/resolv.conf /run/resolvconf/interfaces/systemd
''}
# Make sure resolv.conf is up to date if not managed by systemd
${optionalString (!config.services.resolved.enable) ''
${pkgs.openresolv}/bin/resolvconf -u
''}
'';
};
}
}

View file

@ -24,7 +24,7 @@ with lib;
programs.ssh.setXAuthLocation = false;
security.pam.services.su.forwardXAuth = lib.mkForce false;
fonts.enableFontConfig = false;
fonts.fontconfig.enable = false;
nixpkgs.config.packageOverrides = pkgs:
{ dbus = pkgs.dbus.override { useX11 = false; }; };

View file

@ -35,29 +35,27 @@ in
config = {
environment.etc =
[ # Name Service Switch configuration file. Required by the C library.
# !!! Factor out the mdns stuff. The avahi module should define
# an option used by this module.
{ source = pkgs.writeText "nsswitch.conf"
''
passwd: files ldap
group: files ldap
shadow: files ldap
hosts: files ${optionalString nssmdns "mdns_minimal [NOTFOUND=return]"} dns ${optionalString nssmdns "mdns"} ${optionalString nsswins "wins"} myhostname
networks: files dns
ethers: files
services: files
protocols: files
'';
target = "nsswitch.conf";
}
];
# Name Service Switch configuration file. Required by the C
# library. !!! Factor out the mdns stuff. The avahi module
# should define an option used by this module.
environment.etc."nsswitch.conf".text =
''
passwd: files ldap
group: files ldap
shadow: files ldap
hosts: files ${optionalString nssmdns "mdns_minimal [NOTFOUND=return]"} dns ${optionalString nssmdns "mdns"} ${optionalString nsswins "wins"} myhostname mymachines
networks: files dns
ethers: files
services: files
protocols: files
'';
# Use nss-myhostname to ensure that our hostname always resolves to
# a valid IP address. It returns all locally configured IP
# addresses, or ::1 and 127.0.0.2 as fallbacks.
system.nssModules = [ pkgs.systemd ];
# Systemd provides nss-myhostname to ensure that our hostname
# always resolves to a valid IP address. It returns all locally
# configured IP addresses, or ::1 and 127.0.0.2 as
# fallbacks. Systemd also provides nss-mymachines to return IP
# addresses of local containers.
system.nssModules = [ config.systemd.package ];
};
}

View file

@ -14,10 +14,14 @@ in
time = {
timeZone = mkOption {
default = "CET";
default = "UTC";
type = types.str;
example = "America/New_York";
description = "The time zone used when displaying times and dates.";
description = ''
The time zone used when displaying times and dates. See <link
xlink:href="https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"/>
for a comprehensive list of possible values for this setting.
'';
};
hardwareClockInLocalTime = mkOption {

View file

@ -6,6 +6,15 @@ use JSON;
make_path("/var/lib/nixos", { mode => 0755 });
sub hashPassword {
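# Generate a SHA-512 crypt(3) hash ("$6$" prefix) with a random 8-character salt.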
my ($password) = @_;
my $salt = "";
my @chars = ('.', '/', 0..9, 'A'..'Z', 'a'..'z');
$salt .= $chars[rand 64] for (1..8);
return crypt($password, '$6$' . $salt . '$');
}
# Functions for allocating free GIDs/UIDs. FIXME: respect ID ranges in
# /etc/login.defs.
sub allocId {
@ -114,7 +123,7 @@ foreach my $g (@{$spec->{groups}}) {
}
# Update the persistent list of declarative groups.
write_file($declGroupsFile, join(" ", sort(keys %groupsOut)));
write_file($declGroupsFile, { binmode => ':utf8' }, join(" ", sort(keys %groupsOut)));
# Merge in the existing /etc/group.
foreach my $name (keys %groupsCur) {
@ -131,7 +140,7 @@ foreach my $name (keys %groupsCur) {
# Rewrite /etc/group. FIXME: acquire lock.
my @lines = map { join(":", $_->{name}, $_->{password}, $_->{gid}, $_->{members}) . "\n" }
(sort { $a->{gid} <=> $b->{gid} } values(%groupsOut));
write_file("/etc/group.tmp", @lines);
write_file("/etc/group.tmp", { binmode => ':utf8' }, @lines);
rename("/etc/group.tmp", "/etc/group") or die;
system("nscd --invalidate group");
@ -160,6 +169,12 @@ foreach my $u (@{$spec->{users}}) {
} else {
$u->{uid} = allocUid($u->{isSystemUser}) if !defined $u->{uid};
if (defined $u->{initialPassword}) {
$u->{hashedPassword} = hashPassword($u->{initialPassword});
} elsif (defined $u->{initialHashedPassword}) {
$u->{hashedPassword} = $u->{initialHashedPassword};
}
# Create a home directory.
if ($u->{createHome}) {
make_path($u->{home}, { mode => 0700 }) if ! -e $u->{home};
@ -174,6 +189,8 @@ foreach my $u (@{$spec->{users}}) {
} else {
warn "warning: password file $u->{passwordFile} does not exist\n";
}
} elsif (defined $u->{password}) {
$u->{hashedPassword} = hashPassword($u->{password});
}
$u->{fakePassword} = $existing->{fakePassword} // "x";
@ -181,7 +198,7 @@ foreach my $u (@{$spec->{users}}) {
}
# Update the persistent list of declarative users.
write_file($declUsersFile, join(" ", sort(keys %usersOut)));
write_file($declUsersFile, { binmode => ':utf8' }, join(" ", sort(keys %usersOut)));
# Merge in the existing /etc/passwd.
foreach my $name (keys %usersCur) {
@ -197,7 +214,7 @@ foreach my $name (keys %usersCur) {
# Rewrite /etc/passwd. FIXME: acquire lock.
@lines = map { join(":", $_->{name}, $_->{fakePassword}, $_->{uid}, $_->{gid}, $_->{description}, $_->{home}, $_->{shell}) . "\n" }
(sort { $a->{uid} <=> $b->{uid} } (values %usersOut));
write_file("/etc/passwd.tmp", @lines);
write_file("/etc/passwd.tmp", { binmode => ':utf8' }, @lines);
rename("/etc/passwd.tmp", "/etc/passwd") or die;
system("nscd --invalidate passwd");
@ -208,32 +225,22 @@ my %shadowSeen;
foreach my $line (-f "/etc/shadow" ? read_file("/etc/shadow") : ()) {
chomp $line;
my ($name, $password, @rest) = split(':', $line, -9);
my ($name, $hashedPassword, @rest) = split(':', $line, -9);
my $u = $usersOut{$name};;
next if !defined $u;
$password = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
push @shadowNew, join(":", $name, $password, @rest) . "\n";
$hashedPassword = "!" if !$spec->{mutableUsers};
$hashedPassword = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
push @shadowNew, join(":", $name, $hashedPassword, @rest) . "\n";
$shadowSeen{$name} = 1;
}
foreach my $u (values %usersOut) {
next if defined $shadowSeen{$u->{name}};
my $password = "!";
$password = $u->{hashedPassword} if defined $u->{hashedPassword};
my $hashedPassword = "!";
$hashedPassword = $u->{hashedPassword} if defined $u->{hashedPassword};
# FIXME: set correct value for sp_lstchg.
push @shadowNew, join(":", $u->{name}, $password, "1::::::") . "\n";
push @shadowNew, join(":", $u->{name}, $hashedPassword, "1::::::") . "\n";
}
write_file("/etc/shadow.tmp", { perms => 0600 }, @shadowNew);
write_file("/etc/shadow.tmp", { binmode => ':utf8', perms => 0600 }, @shadowNew);
rename("/etc/shadow.tmp", "/etc/shadow") or die;
# Call chpasswd to apply password. FIXME: generate the hashes directly
# and merge into the /etc/shadow updating above.
foreach my $u (@{$spec->{users}}) {
if (defined $u->{password}) {
my $pid = open(PW, "| chpasswd") or die;
print PW "$u->{name}:$u->{password}\n";
close PW or die "unable to change password of user $u->{name}: $?\n";
}
}

View file

@ -8,19 +8,19 @@ let
cfg = config.users;
passwordDescription = ''
The options <literal>hashedPassword</literal>,
<literal>password</literal> and <literal>passwordFile</literal>
The options <option>hashedPassword</option>,
<option>password</option> and <option>passwordFile</option>
controls what password is set for the user.
<literal>hashedPassword</literal> overrides both
<literal>password</literal> and <literal>passwordFile</literal>.
<literal>password</literal> overrides <literal>passwordFile</literal>.
<option>hashedPassword</option> overrides both
<option>password</option> and <option>passwordFile</option>.
<option>password</option> overrides <option>passwordFile</option>.
If none of these three options are set, no password is assigned to
the user, and the user will not be able to do password logins.
If the option <literal>users.mutableUsers</literal> is true, the
If the option <option>users.mutableUsers</option> is true, the
password defined in one of the three options will only be set when
the user is created for the first time. After that, you are free to
change the password with the ordinary user management commands. If
<literal>users.mutableUsers</literal> is false, you cannot change
<option>users.mutableUsers</option> is false, you cannot change
user passwords, they will always be set according to the password
options.
'';
@ -155,7 +155,7 @@ let
default = false;
description = ''
If true, the user's shell will be set to
<literal>cfg.defaultUserShell</literal>.
<option>users.defaultUserShell</option>.
'';
};
@ -163,7 +163,7 @@ let
type = with types; uniq (nullOr str);
default = null;
description = ''
Specifies the (hashed) password for the user.
Specifies the hashed password for the user.
${passwordDescription}
'';
};
@ -191,6 +191,37 @@ let
${passwordDescription}
'';
};
initialHashedPassword = mkOption {
type = with types; uniq (nullOr str);
default = null;
description = ''
Specifies the initial hashed password for the user, i.e. the
hashed password assigned if the user does not already
exist. If <option>users.mutableUsers</option> is true, the
password can be changed subsequently using the
<command>passwd</command> command. Otherwise, it's
equivalent to setting the <option>password</option> option.
'';
};
initialPassword = mkOption {
type = with types; uniq (nullOr str);
default = null;
description = ''
Specifies the initial password for the user, i.e. the
password assigned if the user does not already exist. If
<option>users.mutableUsers</option> is true, the password
can be changed subsequently using the
<command>passwd</command> command. Otherwise, it's
equivalent to setting the <option>password</option>
option. The same caveat applies: the password specified here
is world-readable in the Nix store, so it should only be
used for guest accounts or passwords that will be changed
promptly.
'';
};
};
config = mkMerge
@ -204,6 +235,14 @@ let
useDefaultShell = mkDefault true;
isSystemUser = mkDefault false;
})
# If !mutableUsers, setting initialPassword is equivalent to
# setting password (and similarly for hashed passwords).
(mkIf (!cfg.mutableUsers && config.initialPassword != null) {
password = mkDefault config.initialPassword;
})
(mkIf (!cfg.mutableUsers && config.initialHashedPassword != null) {
hashedPassword = mkDefault config.initialHashedPassword;
})
];
};
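
A sketch of how the new initialPassword/initialHashedPassword options declared
above might be used (user name, uid and password are placeholders):

  {
    users.mutableUsers = true;
    users.extraUsers.guest = {
      uid = 1001;
      createHome = true;
      home = "/home/guest";
      description = "Guest account";
      # Applied only when the user is first created; world-readable in the
      # Nix store, so suitable for guest accounts only.
      initialPassword = "changeme";
    };
  }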
@ -306,7 +345,8 @@ let
users = mapAttrsToList (n: u:
{ inherit (u)
name uid group description home shell createHome isSystemUser
password passwordFile hashedPassword;
password passwordFile hashedPassword
initialPassword initialHashedPassword;
}) cfg.extraUsers;
groups = mapAttrsToList (n: g:
{ inherit (g) name gid;
@ -386,24 +426,12 @@ in {
options = [ groupOpts ];
};
# FIXME: obsolete - will remove.
security.initialRootPassword = mkOption {
type = types.str;
default = "!";
example = "";
description = ''
The (hashed) password for the root account set on initial
installation. The empty string denotes that root can login
locally without a password (but not via remote services such
as SSH, or indirectly via <command>su</command> or
<command>sudo</command>). The string <literal>!</literal>
prevents root from logging in using a password.
Note that setting this option sets
<literal>users.extraUsers.root.hashedPassword</literal>.
Also, if <literal>users.mutableUsers</literal> is false
you cannot change the root password manually, so in that case
the name of this option is a bit misleading, since it will define
the root password beyond the user initialisation phase.
'';
visible = false;
};
};
@ -421,7 +449,7 @@ in {
shell = mkDefault cfg.defaultUserShell;
group = "root";
extraGroups = [ "grsecurity" ];
hashedPassword = mkDefault config.security.initialRootPassword;
initialHashedPassword = mkDefault config.security.initialRootPassword;
};
nobody = {
uid = ids.uids.nobody;

View file

@ -12,7 +12,8 @@ with lib;
default = false;
type = types.bool;
description = ''
Turn on this option if you want to enable all the firmware shipped with Debian/Ubuntu.
Turn on this option if you want to enable all the firmware shipped with Debian/Ubuntu
and iwlwifi.
'';
};
@ -22,7 +23,11 @@ with lib;
###### implementation
config = mkIf config.hardware.enableAllFirmware {
hardware.firmware = [ "${pkgs.firmwareLinuxNonfree}/lib/firmware" ];
hardware.firmware = [
"${pkgs.firmwareLinuxNonfree}/lib/firmware"
"${pkgs.iwlegacy}/lib/firmware"
"${pkgs.iwlwifi}/lib/firmware"
];
};
}

View file

@ -16,7 +16,6 @@ let
[ p.mesa_drivers
p.mesa_noglu # mainly for libGL
(if cfg.s3tcSupport then p.libtxc_dxtn else p.libtxc_dxtn_s2tc)
p.udev
];
};

View file

@ -30,7 +30,7 @@ with lib;
boot.kernelModules = [ "bbswitch" ];
boot.extraModulePackages = [ kernel.bbswitch kernel.nvidia_x11 ];
environment.systemPackages = [ pkgs.bumblebee ];
environment.systemPackages = [ pkgs.bumblebee pkgs.primus ];
systemd.services.bumblebeed = {
description = "Bumblebee Hybrid Graphics Switcher";

View file

@ -11,7 +11,8 @@ let
# FIXME: should introduce an option like
# hardware.video.nvidia.package for overriding the default NVIDIA
# driver.
enabled = elem "nvidia" drivers || elem "nvidiaLegacy173" drivers || elem "nvidiaLegacy304" drivers;
enabled = elem "nvidia" drivers || elem "nvidiaLegacy173" drivers
|| elem "nvidiaLegacy304" drivers || elem "nvidiaLegacy340" drivers;
nvidia_x11 =
if elem "nvidia" drivers then
@ -20,6 +21,8 @@ let
config.boot.kernelPackages.nvidia_x11_legacy173
else if elem "nvidiaLegacy304" drivers then
config.boot.kernelPackages.nvidia_x11_legacy304
else if elem "nvidiaLegacy340" drivers then
config.boot.kernelPackages.nvidia_x11_legacy340
else throw "impossible";
in
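
Presumably the new driver name is selected the same way as the existing legacy
entries, e.g. (illustrative only):

  {
    services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
  }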

View file

@ -45,6 +45,9 @@ with lib;
# Add support for cow filesystems and their utilities
boot.supportedFilesystems = [ "zfs" "btrfs" ];
# Configure host id for ZFS to work
networking.hostId = "8425e349";
# Allow the user to log in as root without a password.
security.initialRootPassword = "";
users.extraUsers.root.initialHashedPassword = "";
}

View file

@ -14,6 +14,10 @@ with lib;
# Include gparted for partitioning disks
environment.systemPackages = [ pkgs.gparted ];
# Provide networkmanager for easy wireless configuration.
networking.networkmanager.enable = true;
networking.wireless.enable = mkForce false;
# KDE complains if power management is disabled (to be precise, if
# there is no power management backend such as upower).
powerManagement.enable = true;

View file

@ -176,7 +176,10 @@ in
# UUID of the USB stick. It would be nicer to write
# `root=/dev/disk/by-label/...' here, but UNetbootin doesn't
# recognise that.
boot.kernelParams = [ "root=LABEL=${config.isoImage.volumeID}" ];
boot.kernelParams =
[ "root=LABEL=${config.isoImage.volumeID}"
"boot.shell_on_fail"
];
fileSystems."/" =
{ fsType = "tmpfs";

View file

@ -76,7 +76,6 @@ in
pkgs.ntfsprogs # for resizing NTFS partitions
pkgs.btrfsProgs
pkgs.jfsutils
pkgs.jfsrec
# Some compression/archiver tools.
pkgs.unzip

View file

@ -80,7 +80,7 @@ had booted this nixos. Run:
* `grep local-cmds run/current-system/init`
Then you can proceed normally subscribing to a nixos channel:
nix-channel --add http://nixos.org/channels/nixos-unstable
nix-channel --add https://nixos.org/channels/nixos-unstable
nix-channel --update
Testing:

View file

@ -235,7 +235,7 @@ chomp $virt;
# Check if we're a VirtualBox guest. If so, enable the guest
# additions.
if ($virt eq "oracle") {
push @attrs, "services.virtualbox.enable = true;"
push @attrs, "services.virtualboxGuest.enable = true;"
}
@ -430,7 +430,7 @@ my $hwConfig = <<EOF;
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, pkgs, ... }:
{ config, lib, pkgs, ... }:
{
imports =${\multiLineList(" ", @imports)};
@ -491,7 +491,7 @@ EOF
$bootLoaderConfig
# networking.hostName = "nixos"; # Define your hostname.
# networking.wireless.enable = true; # Enables wireless.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
# Select internationalisation properties.
# i18n = {
@ -500,6 +500,9 @@ $bootLoaderConfig
# defaultLocale = "en_US.UTF-8";
# };
# Set your time zone.
# time.timeZone = "Europe/Amsterdam";
# List packages installed in system profile. To search by name, run:
# \$ nix-env -qaP | grep wget
# environment.systemPackages = with pkgs; [

View file

@ -30,8 +30,7 @@ while [ "$#" -gt 0 ]; do
case "$i" in
-I)
given_path="$1"; shift 1
absolute_path=$(readlink -m $given_path)
extraBuildFlags+=("$i" "/mnt$absolute_path")
extraBuildFlags+=("$i" "$given_path")
;;
--root)
mountPoint="$1"; shift 1
@ -89,6 +88,12 @@ ln -s /run $mountPoint/var/run
rm -f $mountPoint/etc/{resolv.conf,hosts}
cp -Lf /etc/resolv.conf /etc/hosts $mountPoint/etc/
if [ -e "$SSL_CERT_FILE" ]; then
cp -Lf "$SSL_CERT_FILE" "$mountPoint/tmp/ca-cert.crt"
export SSL_CERT_FILE=/tmp/ca-cert.crt
# For Nix 1.7
export CURL_CA_BUNDLE=/tmp/ca-cert.crt
fi
if [ -n "$runChroot" ]; then
if ! [ -L $mountPoint/nix/var/nix/profiles/system ]; then
@ -244,7 +249,7 @@ chroot $mountPoint /nix/var/nix/profiles/system/activate
# Ask the user to set a root password.
if [ -t 0 ] ; then
if [ "$(chroot $mountPoint nix-instantiate --eval '<nixos>' -A config.users.mutableUsers)" = true ] && [ -t 0 ] ; then
echo "setting root password..."
chroot $mountPoint /var/setuid-wrappers/passwd
fi

View file

@ -13,6 +13,7 @@ usage () {
xml=false
verbose=false
nixPath=""
option=""
@ -26,6 +27,7 @@ for arg; do
while test "$sarg" != "-"; do
case $sarg in
--*) longarg=$arg; sarg="--";;
-I) argfun="include_nixpath";;
-*) usage;;
esac
# remove the first letter option
@ -53,6 +55,9 @@ for arg; do
var=$(echo $argfun | sed 's,^set_,,')
eval $var=$arg
;;
include_nixpath)
nixPath="-I $arg $nixPath"
;;
esac
argfun=""
fi
@ -69,18 +74,114 @@ fi
#############################
evalNix(){
nix-instantiate - --eval-only "$@"
result=$(nix-instantiate ${nixPath:+$nixPath} - --eval-only "$@" 2>&1)
if test $? -eq 0; then
cat <<EOF
$result
EOF
return 0;
else
sed -n '
/^error/ { s/, at (string):[0-9]*:[0-9]*//; p; };
/^warning: Nix search path/ { p; };
' <<EOF
$result
EOF
return 1;
fi
}
header="let
nixos = import <nixpkgs/nixos> {};
nixpkgs = import <nixpkgs> {};
in with nixpkgs.lib;
"
# This function is used for converting the option definition path given by
# the user into accessors for reaching the definition and the declaration
# corresponding to this option.
generateAccessors(){
if result=$(evalNix --strict --show-trace <<EOF
$header
let
path = "${option:+$option}";
pathList = splitString "." path;
walkOptions = attrsNames: result:
if attrsNames == [] then
result
else
let name = head attrsNames; rest = tail attrsNames; in
if isOption result.options then
walkOptions rest {
options = result.options.type.getSubOptions "";
opt = ''(\${result.opt}.type.getSubOptions "")'';
cfg = ''\${result.cfg}."\${name}"'';
}
else
walkOptions rest {
options = result.options.\${name};
opt = ''\${result.opt}."\${name}"'';
cfg = ''\${result.cfg}."\${name}"'';
}
;
walkResult = (if path == "" then x: x else walkOptions pathList) {
options = nixos.options;
opt = ''nixos.options'';
cfg = ''nixos.config'';
};
in
''let option = \${walkResult.opt}; config = \${walkResult.cfg}; in''
EOF
)
then
echo $result
else
# In case of error we want to ignore the error message produced by the
# script above, as it is iterating over each attribute, which does not
# produce a nice error message. The following code is a fallback
# solution which causes a nicer error message in the next
# evaluation.
echo "\"let option = nixos.options${option:+.$option}; config = nixos.config${option:+.$option}; in\""
fi
}
header="$header
$(eval echo $(generateAccessors))
"
evalAttr(){
local prefix="$1"
local strict="$2"
local suffix="$3"
echo "(import <nixos> {}).$prefix${option:+.$option}${suffix:+.$suffix}" | evalNix ${strict:+--strict}
# If strict is set, then set it to "true".
test -n "$strict" && strict=true
evalNix ${strict:+--strict} <<EOF
$header
let
value = $prefix${suffix:+.$suffix};
strict = ${strict:-false};
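# cleanOutput: print derivations as their store path, functions as "<CODE>",
# and recurse into attribute sets/lists only in --strict mode.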
cleanOutput = x: with nixpkgs.lib;
if isDerivation x then x.outPath
else if isFunction x then "<CODE>"
else if strict then
if isAttrs x then mapAttrs (n: cleanOutput) x
else if isList x then map cleanOutput x
else x
else x;
in
cleanOutput value
EOF
}
evalOpt(){
evalAttr "options" "" "$@"
evalAttr "option" "" "$@"
}
evalCfg(){
@ -90,8 +191,11 @@ evalCfg(){
findSources(){
local suffix=$1
echo "(import <nixos> {}).options${option:+.$option}.$suffix" |
evalNix --strict
evalNix --strict <<EOF
$header
option.$suffix
EOF
}
# Given a result from nix-instantiate, recover the list of attributes it
@ -121,13 +225,12 @@ nixMap() {
# the output of nixos-option with other tools such as nixos-gui.
if $xml; then
evalNix --xml --no-location <<EOF
$header
let
reach = attrs: attrs${option:+.$option};
nixos = import <nixos> {};
nixpkgs = import <nixpkgs> {};
sources = builtins.map (f: f.source);
opt = reach nixos.options;
cfg = reach nixos.config;
opt = option;
cfg = config;
in
with nixpkgs.lib;

View file

@ -156,7 +156,7 @@ if [ -n "$buildNix" ]; then
exit 1
fi
if ! nix-store -r $nixStorePath --add-root $tmpDir/nix --indirect \
--option extra-binary-caches http://cache.nixos.org/; then
--option extra-binary-caches https://cache.nixos.org/; then
echo "warning: don't know how to get latest Nix" >&2
fi
# Older version of nix-store -r don't support --add-root.

View file

@ -157,6 +157,23 @@
redmine = 147;
seeks = 148;
prosody = 149;
i2pd = 150;
dnscrypt-proxy = 151;
systemd-network = 152;
systemd-resolve = 153;
systemd-timesync = 154;
liquidsoap = 155;
etcd = 156;
docker-registry = 157;
hbase = 158;
opentsdb = 159;
scollector = 160;
bosun = 161;
kubernetes = 162;
peerflix = 163;
chronos = 164;
gitlab = 165;
tox-bootstrapd = 166;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -197,6 +214,7 @@
privoxy = 32;
disnix = 33;
osgi = 34;
tor = 35;
ghostOne = 40;
git = 41;
fourstore = 42;
@ -276,11 +294,23 @@
mlmmj = 135;
riemann = 137;
riemanndash = 138;
hbase = 139;
opentsdb = 140;
uhub = 142;
mailpile = 146;
redmine = 147;
seeks = 148;
prosody = 149;
i2pd = 150;
systemd-network = 152;
systemd-resolve = 153;
systemd-timesync = 154;
liquidsoap = 155;
scollector = 156;
bosun = 157;
kubernetes = 158;
fleet = 159;
gitlab = 160;
# When adding a gid, make sure it doesn't match an existing uid. And don't use gids above 399!

View file

@ -53,7 +53,7 @@ with lib;
mkDefault (if pathExists fn then readFile fn else "master");
# Note: code names must only increase in alphabetical order.
system.nixosCodeName = "Caterpillar";
system.nixosCodeName = "Dingo";
# Generate /etc/os-release. See
# http://0pointer.de/public/systemd-man/os-release.html for the

View file

@ -1,6 +1,7 @@
[
./config/fonts/corefonts.nix
./config/fonts/fontconfig.nix
./config/fonts/fontconfig-ultimate.nix
./config/fonts/fontdir.nix
./config/fonts/fonts.nix
./config/fonts/ghostscript.nix
@ -58,6 +59,7 @@
./programs/dconf.nix
./programs/environment.nix
./programs/info.nix
./programs/light.nix
./programs/nano.nix
./programs/screen.nix
./programs/shadow.nix
@ -66,6 +68,7 @@
./programs/ssmtp.nix
./programs/uim.nix
./programs/venus.nix
./programs/virtualbox-host.nix
./programs/wvdial.nix
./programs/freetds.nix
./programs/zsh/zsh.nix
@ -88,15 +91,19 @@
./services/audio/alsa.nix
# Disabled, as fuppes no longer builds.
# ./services/audio/fuppes.nix
./services/audio/liquidsoap.nix
./services/audio/mpd.nix
./services/audio/mopidy.nix
./services/backup/almir.nix
./services/backup/bacula.nix
./services/backup/crashplan.nix
./services/backup/mysql-backup.nix
./services/backup/postgresql-backup.nix
./services/backup/rsnapshot.nix
./services/backup/sitecopy-backup.nix
./services/backup/tarsnap.nix
./services/cluster/fleet.nix
./services/cluster/kubernetes.nix
./services/computing/torque/server.nix
./services/computing/torque/mom.nix
./services/continuous-integration/jenkins/default.nix
@ -105,13 +112,15 @@
./services/databases/4store.nix
./services/databases/couchdb.nix
./services/databases/firebird.nix
./services/databases/hbase.nix
./services/databases/influxdb.nix
./services/databases/memcached.nix
./services/databases/monetdb.nix
./services/databases/mongodb.nix
./services/databases/mysql.nix
./services/databases/neo4j.nix
./services/databases/neo4j.nix
./services/databases/openldap.nix
./services/databases/opentsdb.nix
./services/databases/postgresql.nix
./services/databases/redis.nix
./services/databases/virtuoso.nix
@ -128,6 +137,7 @@
./services/desktops/gnome3/seahorse.nix
./services/desktops/gnome3/sushi.nix
./services/desktops/gnome3/tracker.nix
./services/desktops/profile-sync-daemon.nix
./services/desktops/telepathy.nix
./services/games/ghost-one.nix
./services/games/minecraft-server.nix
@ -163,8 +173,11 @@
./services/misc/cgminer.nix
./services/misc/dictd.nix
./services/misc/disnix.nix
./services/misc/docker-registry.nix
./services/misc/etcd.nix
./services/misc/felix.nix
./services/misc/folding-at-home.nix
./services/misc/gitlab.nix
./services/misc/gitolite.nix
./services/misc/gpsd.nix
./services/misc/mesos-master.nix
@ -183,6 +196,7 @@
./services/misc/uhub.nix
./services/misc/zookeeper.nix
./services/monitoring/apcupsd.nix
./services/monitoring/bosun.nix
./services/monitoring/collectd.nix
./services/monitoring/dd-agent.nix
./services/monitoring/graphite.nix
@ -191,6 +205,7 @@
./services/monitoring/nagios.nix
./services/monitoring/riemann.nix
./services/monitoring/riemann-dash.nix
./services/monitoring/scollector.nix
./services/monitoring/smartd.nix
./services/monitoring/statsd.nix
./services/monitoring/systemhealth.nix
@ -219,6 +234,7 @@
./services/networking/ddclient.nix
./services/networking/dhcpcd.nix
./services/networking/dhcpd.nix
./services/networking/dnscrypt-proxy.nix
./services/networking/dnsmasq.nix
./services/networking/ejabberd.nix
./services/networking/firewall.nix
@ -230,6 +246,7 @@
./services/networking/gvpe.nix
./services/networking/haproxy.nix
./services/networking/hostapd.nix
./services/networking/i2pd.nix
./services/networking/ifplugd.nix
./services/networking/iodined.nix
./services/networking/ircd-hybrid/default.nix
@ -263,11 +280,13 @@
./services/networking/spiped.nix
./services/networking/ssh/lshd.nix
./services/networking/ssh/sshd.nix
./services/networking/strongswan.nix
./services/networking/supybot.nix
./services/networking/syncthing.nix
./services/networking/tcpcrypt.nix
./services/networking/teamspeak3.nix
./services/networking/tftpd.nix
./services/networking/tox-bootstrapd.nix
./services/networking/unbound.nix
./services/networking/unifi.nix
./services/networking/vsftpd.nix
@ -279,6 +298,7 @@
./services/networking/znc.nix
./services/printing/cupsd.nix
./services/scheduling/atd.nix
./services/scheduling/chronos.nix
./services/scheduling/cron.nix
./services/scheduling/fcron.nix
./services/search/elasticsearch.nix
@ -291,11 +311,13 @@
./services/security/torify.nix
./services/security/tor.nix
./services/security/torsocks.nix
./services/system/cloud-init.nix
./services/system/dbus.nix
./services/system/kerberos.nix
./services/system/nscd.nix
./services/system/uptimed.nix
./services/torrent/deluge.nix
./services/torrent/peerflix.nix
./services/torrent/transmission.nix
./services/ttys/agetty.nix
./services/ttys/gpm.nix
@ -344,6 +366,7 @@
./system/boot/loader/efi.nix
./system/boot/loader/generations-dir/generations-dir.nix
./system/boot/loader/grub/grub.nix
./system/boot/loader/grub/ipxe.nix
./system/boot/loader/grub/memtest.nix
./system/boot/loader/gummiboot/gummiboot.nix
./system/boot/loader/init-script/init-script.nix
@ -357,6 +380,7 @@
./system/boot/tmp.nix
./system/etc/etc.nix
./system/upstart/upstart.nix
./tasks/bcache.nix
./tasks/cpu-freq.nix
./tasks/encrypted-devices.nix
./tasks/filesystems.nix
@ -374,6 +398,8 @@
./tasks/kbd.nix
./tasks/lvm.nix
./tasks/network-interfaces.nix
./tasks/network-interfaces-systemd.nix
./tasks/network-interfaces-scripted.nix
./tasks/scsi-link-power-management.nix
./tasks/swraid.nix
./tasks/trackpoint.nix
@ -382,8 +408,10 @@
./virtualisation/containers.nix
./virtualisation/docker.nix
./virtualisation/libvirtd.nix
./virtualisation/lxc.nix
#./virtualisation/nova.nix
./virtualisation/openvswitch.nix
./virtualisation/parallels-guest.nix
./virtualisation/virtualbox-guest.nix
#./virtualisation/xen-dom0.nix
]

View file

@ -34,7 +34,6 @@
pkgs.xfsprogs
pkgs.jfsutils
pkgs.f2fs-tools
#pkgs.jfsrec # disabled because of Boost dependency
# Some compression/archiver tools.
pkgs.unzip

View file

@ -0,0 +1,56 @@
{ config, lib, pkgs, ... }:
with lib;
let
pkgs2storeContents = l : map (x: { object = x; symlink = "none"; }) l;
in {
# Docker image config.
imports = [
../installer/cd-dvd/channel.nix
./minimal.nix
./clone-config.nix
];
# Create the tarball
system.build.tarball = import ../../lib/make-system-tarball.nix {
inherit (pkgs) stdenv perl xz pathsFromGraph;
contents = [];
extraArgs = "--owner=0";
# Add init script to image
storeContents = [
{ object = config.system.build.toplevel + "/init";
symlink = "/init";
}
] ++ (pkgs2storeContents [ pkgs.stdenv ]);
# Some container managers like lxc need these
extraCommands = "mkdir -p proc sys dev";
};
boot.isContainer = true;
boot.postBootCommands =
''
# After booting, register the contents of the Nix store in the Nix
# database.
if [ -f /nix-path-registration ]; then
${config.nix.package}/bin/nix-store --load-db < /nix-path-registration &&
rm /nix-path-registration
fi
# nixos-rebuild also requires a "system" profile
${config.nix.package}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
'';
# Disable some features that are not useful in a container.
sound.enable = mkDefault false;
services.udisks2.enable = mkDefault false;
# Install new init script
system.activationScripts.installInitScript = ''
ln -fs $systemConfig/init /init
'';
}

View file

@ -40,7 +40,6 @@ in
# TODO: move most of these elsewhere
environment.profileRelativeEnvVars =
{ PATH = [ "/bin" "/sbin" "/lib/kde4/libexec" ];
MANPATH = [ "/man" "/share/man" ];
INFOPATH = [ "/info" "/share/info" ];
PKG_CONFIG_PATH = [ "/lib/pkgconfig" ];
TERMINFO_DIRS = [ "/share/terminfo" ];

View file

@ -0,0 +1,26 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.light;
in
{
options = {
programs.light = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Whether to install the Light backlight control utility with a setuid wrapper.
'';
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.light ];
security.setuidPrograms = [ "light" ];
};
}
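For reference, a minimal sketch of using this module from a NixOS configuration (illustrative only, not part of this commit):

{ ... }:
{
  # Installs light plus a setuid wrapper so unprivileged users can change the backlight.
  programs.light.enable = true;
}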

View file

@ -59,6 +59,15 @@ in
'';
};
agentTimeout = mkOption {
type = types.nullOr types.string;
default = null;
example = "1h";
description = ''
How long to keep the private keys in memory. Use null to keep them forever.
'';
};
package = mkOption {
default = pkgs.openssh;
description = ''
@ -99,7 +108,10 @@ in
wantedBy = [ "default.target" ];
serviceConfig =
{ ExecStartPre = "${pkgs.coreutils}/bin/rm -f %t/ssh-agent";
ExecStart = "${cfg.package}/bin/ssh-agent -a %t/ssh-agent";
ExecStart =
"${cfg.package}/bin/ssh-agent " +
optionalString (cfg.agentTimeout != null) ("-t ${cfg.agentTimeout} ") +
"-a %t/ssh-agent";
StandardOutput = "null";
Type = "forking";
Restart = "on-failure";

View file

@ -20,6 +20,7 @@ in
networking.defaultMailServer = {
directDelivery = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
@ -35,6 +36,7 @@ in
};
hostName = mkOption {
type = types.str;
example = "mail.example.org";
description = ''
The host name of the default mail server to use to deliver
@ -42,7 +44,17 @@ in
'';
};
root = mkOption {
type = types.str;
default = "";
example = "root@example.org";
description = ''
The e-mail to which mail for users with UID &lt; 1000 is forwarded.
'';
};
domain = mkOption {
type = types.str;
default = "";
example = "example.org";
description = ''
@ -51,6 +63,7 @@ in
};
useTLS = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
@ -60,6 +73,7 @@ in
};
useSTARTTLS = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
@ -70,6 +84,7 @@ in
};
authUser = mkOption {
type = types.str;
default = "";
example = "foo@example.org";
description = ''
@ -78,6 +93,7 @@ in
};
authPass = mkOption {
type = types.str;
default = "";
example = "correctHorseBatteryStaple";
description = ''
@ -96,6 +112,7 @@ in
''
MailHub=${cfg.hostName}
FromLineOverride=YES
${if cfg.root != "" then "root=${cfg.root}" else ""}
${if cfg.domain != "" then "rewriteDomain=${cfg.domain}" else ""}
UseTLS=${if cfg.useTLS then "YES" else "NO"}
UseSTARTTLS=${if cfg.useSTARTTLS then "YES" else "NO"}
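A minimal sketch of the resulting interface; host and domain names are placeholders:

{ ... }:
{
  networking.defaultMailServer = {
    directDelivery = true;
    hostName = "mail.example.org";
    domain = "example.org";       # new option: rewrite the sender domain
    root = "root@example.org";    # new option: forward mail for users with UID < 1000
    useSTARTTLS = true;
  };
}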

View file

@ -0,0 +1,115 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.virtualboxHost;
virtualbox = config.boot.kernelPackages.virtualbox.override {
inherit (cfg) enableHardening;
};
in
{
options.services.virtualboxHost = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable host-side support for VirtualBox.
<note><para>
In order to pass USB devices from the host to the guests, the user
needs to be in the <literal>vboxusers</literal> group.
</para></note>
'';
};
addNetworkInterface = mkOption {
type = types.bool;
default = true;
description = ''
Automatically set up a vboxnet0 host-only network interface.
'';
};
enableHardening = mkOption {
type = types.bool;
default = true;
description = ''
Enable hardened VirtualBox, which ensures that only the binaries in the
system path get access to the devices exposed by the kernel modules
instead of all users in the vboxusers group.
<important><para>
Disabling this can put your system's security at risk, as local users
in the vboxusers group can tamper with the VirtualBox device files.
</para></important>
'';
};
};
config = mkIf cfg.enable (mkMerge [{
boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
boot.extraModulePackages = [ virtualbox ];
environment.systemPackages = [ virtualbox ];
security.setuidOwners = let
mkSuid = program: {
inherit program;
source = "${virtualbox}/libexec/virtualbox/${program}";
owner = "root";
group = "vboxusers";
setuid = true;
};
in mkIf cfg.enableHardening (map mkSuid [
"VBoxHeadless"
"VBoxNetAdpCtl"
"VBoxNetDHCP"
"VBoxNetNAT"
"VBoxSDL"
"VBoxVolInfo"
"VirtualBox"
]);
users.extraGroups.vboxusers.gid = config.ids.gids.vboxusers;
services.udev.extraRules =
''
KERNEL=="vboxdrv", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
KERNEL=="vboxdrvu", OWNER="root", GROUP="root", MODE="0666", TAG+="systemd"
KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
'';
# Since we lack the right setuid binaries, set up a host-only network by default.
} (mkIf cfg.addNetworkInterface {
systemd.services."vboxnet0" =
{ description = "VirtualBox vboxnet0 Interface";
requires = [ "dev-vboxnetctl.device" ];
after = [ "dev-vboxnetctl.device" ];
wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
path = [ virtualbox ];
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
serviceConfig.PrivateTmp = true;
environment.VBOX_USER_HOME = "/tmp";
script =
''
if ! [ -e /sys/class/net/vboxnet0 ]; then
VBoxManage hostonlyif create
cat /tmp/VBoxSVC.log >&2
fi
'';
postStop =
''
VBoxManage hostonlyif remove vboxnet0
'';
};
networking.interfaces.vboxnet0.ip4 = [ { address = "192.168.56.1"; prefixLength = 24; } ];
})]);
}
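A minimal sketch of enabling the new host-side module; the user name is a placeholder:

{ ... }:
{
  services.virtualboxHost.enable = true;
  # Hardening is on by default; only the wrapped setuid binaries reach the kernel modules.
  services.virtualboxHost.enableHardening = true;
  # Membership in vboxusers is needed to pass USB devices to guests.
  users.extraUsers.alice.extraGroups = [ "vboxusers" ];
}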

View file

@ -1,48 +1,8 @@
{ config, lib, pkgs, ... }:
with lib;
let virtualbox = config.boot.kernelPackages.virtualbox; in
{
boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
boot.extraModulePackages = [ virtualbox ];
environment.systemPackages = [ virtualbox ];
users.extraGroups.vboxusers.gid = config.ids.gids.vboxusers;
services.udev.extraRules =
''
KERNEL=="vboxdrv", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
KERNEL=="vboxdrvu", OWNER="root", GROUP="root", MODE="0666", TAG+="systemd"
KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
'';
# Since we lack the right setuid binaries, set up a host-only network by default.
systemd.services."vboxnet0" =
{ description = "VirtualBox vboxnet0 Interface";
requires = [ "dev-vboxnetctl.device" ];
after = [ "dev-vboxnetctl.device" ];
wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
path = [ virtualbox ];
serviceConfig.RemainAfterExit = true;
serviceConfig.Type = "oneshot";
script =
''
if ! [ -e /sys/class/net/vboxnet0 ]; then
VBoxManage hostonlyif create
fi
'';
postStop =
''
VBoxManage hostonlyif remove vboxnet0
'';
};
networking.interfaces.vboxnet0.ip4 = [ { address = "192.168.56.1"; prefixLength = 24; } ];
let
msg = "Importing <nixpkgs/nixos/modules/programs/virtualbox.nix> is "
+ "deprecated, please use `services.virtualboxHost.enable = true' "
+ "instead.";
in {
config.warnings = [ msg ];
config.services.virtualboxHost.enable = true;
}

View file

@ -74,6 +74,7 @@ in zipModules ([]
++ obsolete [ "environment" "x11Packages" ] [ "environment" "systemPackages" ]
++ obsolete [ "environment" "enableBashCompletion" ] [ "programs" "bash" "enableCompletion" ]
++ obsolete [ "environment" "nix" ] [ "nix" "package" ]
++ obsolete [ "fonts" "enableFontConfig" ] [ "fonts" "fontconfig" "enable" ]
++ obsolete [ "fonts" "extraFonts" ] [ "fonts" "fonts" ]
++ obsolete [ "security" "extraSetuidPrograms" ] [ "security" "setuidPrograms" ]
@ -107,6 +108,12 @@ in zipModules ([]
++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ]
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "xbmc" ]
# VirtualBox
++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ]
# proxy
++ obsolete [ "nix" "proxy" ] [ "networking" "proxy" "default" ]
# KDE
++ deprecated [ "kde" "extraPackages" ] [ "environment" "kdePackages" ]
# ++ obsolete [ "environment" "kdePackages" ] [ "environment" "systemPackages" ] # !!! doesn't work!
@ -131,5 +138,6 @@ in zipModules ([]
++ obsolete' [ "programs" "bash" "enable" ]
++ obsolete' [ "services" "samba" "defaultShare" ]
++ obsolete' [ "services" "syslog-ng" "serviceName" ]
++ obsolete' [ "services" "syslog-ng" "listenToJournal" ]
)

View file

@ -16,6 +16,7 @@ with lib;
{ SSL_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt";
# FIXME: unneeded - remove eventually.
OPENSSL_X509_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt";
# FIXME: unneeded - remove eventually.
GIT_SSL_CAINFO = "/etc/ssl/certs/ca-bundle.crt";
};

View file

@ -226,8 +226,8 @@ in
[ { assertion = cfg.stable || cfg.testing;
message = ''
If grsecurity is enabled, you must select either the
stable patch (with kernel 3.2), or the testing patch (with
kernel 3.13) to continue.
stable patch (with kernel 3.14), or the testing patch (with
kernel 3.17) to continue.
'';
}
{ assertion = (cfg.stable -> !cfg.testing) || (cfg.testing -> !cfg.stable);

View file

@ -46,6 +46,14 @@ in
<filename>sudoers</filename> file.
'';
};
security.sudo.extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Extra configuration text appended to <filename>sudoers</filename>.
'';
};
};
@ -55,7 +63,8 @@ in
security.sudo.configFile =
''
# Don't edit this file. Set the NixOS option security.sudo.configFile instead.
# Don't edit this file. Set the NixOS options security.sudo.configFile
# or security.sudo.extraConfig instead.
# Environment variables to keep for root and %wheel.
Defaults:root,%wheel env_keep+=TERMINFO_DIRS
@ -69,6 +78,7 @@ in
# Users in the "wheel" group can do anything.
%wheel ALL=(ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
${cfg.extraConfig}
'';
security.setuidPrograms = [ "sudo" "sudoedit" ];
@ -80,11 +90,10 @@ in
environment.etc = singleton
{ source =
pkgs.runCommand "sudoers"
{src = pkgs.writeText "sudoers-in" cfg.configFile; }
{ src = pkgs.writeText "sudoers-in" cfg.configFile; }
# Make sure that the sudoers file is syntactically valid.
# (currently disabled - NIXOS-66)
"${pkgs.sudo}/sbin/visudo -f $src -c &&
cp $src $out";
"${pkgs.sudo}/sbin/visudo -f $src -c && cp $src $out";
target = "sudoers";
mode = "0440";
};
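A small sketch of the new extraConfig option; the Defaults line is illustrative:

{ ... }:
{
  security.sudo.wheelNeedsPassword = true;   # existing option referenced above
  security.sudo.extraConfig = ''
    Defaults timestamp_timeout=30
  '';
}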

View file

@ -0,0 +1,74 @@
{ config, lib, pkgs, ... }:
with lib;
let
streams = builtins.attrNames config.services.liquidsoap.streams;
streamService =
name:
let stream = builtins.getAttr name config.services.liquidsoap.streams; in
{ inherit name;
value = {
after = [ "network-online.target" "sound.target" ];
description = "${name} liquidsoap stream";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.wget ];
preStart =
''
mkdir -p /var/log/liquidsoap
chown liquidsoap -R /var/log/liquidsoap
'';
serviceConfig = {
PermissionsStartOnly="true";
ExecStart = "${pkgs.liquidsoap}/bin/liquidsoap ${stream}";
User = "liquidsoap";
};
};
};
in
{
##### interface
options = {
services.liquidsoap.streams = mkOption {
description =
''
Set of Liquidsoap streams to start,
one systemd service per stream.
'';
default = {};
example = {
myStream1 = literalExample "\"/etc/liquidsoap/myStream1.liq\"";
myStream2 = literalExample "./myStream2.liq";
myStream3 = literalExample "\"out(playlist(\"/srv/music/\"))\"";
};
type = types.attrsOf (types.either types.path types.str);
};
};
##### implementation
config = mkIf (builtins.length streams != 0) {
users.extraUsers.liquidsoap = {
uid = config.ids.uids.liquidsoap;
group = "liquidsoap";
extraGroups = [ "audio" ];
description = "Liquidsoap streaming user";
home = "/var/lib/liquidsoap";
createHome = true;
};
users.extraGroups.liquidsoap.gid = config.ids.gids.liquidsoap;
systemd.services = builtins.listToAttrs ( map streamService streams );
};
}
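A sketch of the streams option with one script path and one inline snippet; paths and Liquidsoap code are illustrative:

{ ... }:
{
  services.liquidsoap.streams = {
    radio   = "/etc/liquidsoap/radio.liq";       # path to an existing script
    ambient = ''out(playlist("/srv/music/"))'';  # inline Liquidsoap code
  };
}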

View file

@ -15,7 +15,6 @@ let
state_file "${cfg.dataDir}/state"
sticker_file "${cfg.dataDir}/sticker.sql"
log_file "syslog"
user "mpd"
${if cfg.network.host != "any" then
"bind_to_address ${cfg.network.host}" else ""}
${if cfg.network.port != 6600 then
@ -99,6 +98,9 @@ in {
path = [ pkgs.mpd ];
preStart = "mkdir -p ${cfg.dataDir} && chown -R mpd:mpd ${cfg.dataDir}";
script = "exec mpd --no-daemon ${mpdConf}";
serviceConfig = {
User = "mpd";
};
};
users.extraUsers.mpd = {

View file

@ -109,6 +109,7 @@ in {
};
sqlalchemy_engine_url = mkOption {
default = "postgresql:///bacula";
example = ''
postgresql://bacula:bacula@localhost:5432/bacula
mysql+mysqlconnector://<user>:<password>@<hostname>/<database>'

View file

@ -0,0 +1,63 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.crashplan;
crashplan = pkgs.crashplan;
varDir = "/var/lib/crashplan";
in
with lib;
{
options = {
services.crashplan = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Whether to start the CrashPlan background service.
'';
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ crashplan ];
systemd.services.crashplan = {
description = "CrashPlan Backup Engine";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart = ''
ensureDir() {
dir=$1
mode=$2
if ! test -e $dir; then
${pkgs.coreutils}/bin/mkdir -m $mode -p $dir
elif [ "$(${pkgs.coreutils}/bin/stat -c %a $dir)" != "$mode" ]; then
${pkgs.coreutils}/bin/chmod $mode $dir
fi
}
ensureDir ${crashplan.vardir} 755
ensureDir ${crashplan.vardir}/conf 700
ensureDir ${crashplan.manifestdir} 700
ensureDir ${crashplan.vardir}/cache 700
ensureDir ${crashplan.vardir}/backupArchives 700
ensureDir ${crashplan.vardir}/log 777
'';
serviceConfig = {
Type = "forking";
EnvironmentFile = "${crashplan}/bin/run.conf";
ExecStart = "${crashplan}/bin/CrashPlanEngine start";
ExecStop = "${crashplan}/bin/CrashPlanEngine stop";
PIDFile = "${crashplan.vardir}/CrashPlanEngine.pid";
WorkingDirectory = crashplan;
};
};
};
}
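Enabling the service is a one-liner; a sketch:

{ ... }:
{
  # Installs the crashplan package and starts the CrashPlan engine at boot.
  services.crashplan.enable = true;
}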

View file

@ -39,11 +39,20 @@ in
as retain options.
'';
};
package = mkOption {
type = types.package;
default = pkgs.rsnapshot;
example = literalExample "pkgs.rsnapshotGit";
description = ''
RSnapshot package to use.
'';
};
};
};
config = mkIf cfg.enable (let
myRsnapshot = pkgs.rsnapshot.override { configFile = rsnapshotCfg; };
myRsnapshot = cfg.package.override { configFile = rsnapshotCfg; };
rsnapshotCfg = with pkgs; writeText "gen-rsnapshot.conf" (''
config_version 1.2
cmd_cp ${coreutils}/bin/cp

View file

@ -0,0 +1,150 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.fleet;
in {
##### Interface
options.services.fleet = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable fleet service.
'';
};
listen = mkOption {
type = types.listOf types.str;
default = [ "/var/run/fleet.sock" ];
example = [ "/var/run/fleet.sock" "127.0.0.1:49153" ];
description = ''
Fleet listening addresses.
'';
};
etcdServers = mkOption {
type = types.listOf types.str;
default = [ "http://127.0.0.1:4001" ];
description = ''
Fleet list of etcd endpoints to use.
'';
};
publicIp = mkOption {
type = types.nullOr types.str;
default = "";
description = ''
Fleet IP address that should be published with the local Machine's
state and any socket information. If not set, fleetd will attempt
to detect the IP it should publish based on the machine's IP
routing information.
'';
};
etcdCafile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Fleet TLS ca file when SSL certificate authentication is enabled
in etcd endpoints.
'';
};
etcdKeyfile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Fleet TLS key file when SSL certificate authentication is enabled
in etcd endpoints.
'';
};
etcdCertfile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Fleet TLS cert file when SSL certificate authentication is enabled
in etcd endpoints.
'';
};
metadata = mkOption {
type = types.attrsOf types.str;
default = {};
apply = attrs: concatMapStringsSep "," (n: "${n}=${attrs."${n}"}") (attrNames attrs);
example = literalExample ''
{
region = "us-west";
az = "us-west-1";
}
'';
description = ''
Key/value pairs that are published with the local machine to the fleet registry.
This data can be used directly by a client of fleet to make scheduling decisions.
'';
};
extraConfig = mkOption {
type = types.attrsOf types.str;
apply = mapAttrs' (n: v: nameValuePair ("ETCD_" + n) v);
default = {};
example = literalExample ''
{
VERBOSITY = 1;
ETCD_REQUEST_TIMEOUT = "2.0";
AGENT_TTL = "40s";
}
'';
description = ''
Fleet extra config. See
<link xlink:href="https://github.com/coreos/fleet/blob/master/Documentation/deployment-and-configuration.md"/>
for configuration options.
'';
};
};
##### Implementation
config = mkIf cfg.enable {
systemd.services.fleet = {
description = "Fleet Init System Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "fleet.socket" "etcd.service" "docker.service" ];
requires = [ "fleet.socket" ];
environment = {
FLEET_ETCD_SERVERS = concatStringsSep "," cfg.etcdServers;
FLEET_PUBLIC_IP = cfg.publicIp;
FLEET_ETCD_CAFILE = cfg.etcdCafile;
FLEET_ETCD_KEYFILE = cfg.etcdKeyfile;
FLEET_ETCD_CERTFILE = cfg.etcdCertfile;
FLEET_METADATA = cfg.metadata;
} // cfg.extraConfig;
serviceConfig = {
ExecStart = "${pkgs.fleet}/bin/fleetd";
Group = "fleet";
};
};
systemd.sockets.fleet = {
description = "Fleet Socket for the API";
wantedBy = [ "sockets.target" ];
listenStreams = cfg.listen;
socketConfig = {
ListenStream = "/var/run/fleet.sock";
SocketMode = "0660";
SocketUser = "root";
SocketGroup = "fleet";
};
};
services.etcd.enable = mkDefault true;
virtualisation.docker.enable = mkDefault true;
environment.systemPackages = [ pkgs.fleet ];
users.extraGroups.fleet.gid = config.ids.gids.fleet;
};
}
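A minimal sketch of a fleet node configuration; the etcd endpoint and metadata values are illustrative:

{ ... }:
{
  services.fleet = {
    enable = true;
    etcdServers = [ "http://127.0.0.1:4001" ];   # matches the etcd default above
    metadata = { region = "us-west"; };          # published to the fleet registry
  };
}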

View file

@ -0,0 +1,462 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.kubernetes;
in {
###### interface
options.services.kubernetes = {
package = mkOption {
description = "Kubernetes package to use.";
type = types.package;
};
verbose = mkOption {
description = "Kubernetes enable verbose mode for debugging";
default = false;
type = types.bool;
};
etcdServers = mkOption {
description = "Kubernetes list of etcd servers to watch.";
default = [ "127.0.0.1:4001" ];
type = types.listOf types.str;
};
roles = mkOption {
description = ''
Kubernetes role that this machine should take.
Master role will enable etcd, apiserver, scheduler and controller manager
services. Node role will enable etcd, docker, kubelet and proxy services.
'';
default = [];
type = types.listOf (types.enum ["master" "node"]);
};
dataDir = mkOption {
description = "Kubernetes root directory for managing kubelet files.";
default = "/var/lib/kubernetes";
type = types.path;
};
apiserver = {
enable = mkOption {
description = "Whether to enable kubernetes apiserver.";
default = false;
type = types.bool;
};
address = mkOption {
description = "Kubernetes apiserver listening address.";
default = "127.0.0.1";
type = types.str;
};
publicAddress = mkOption {
description = ''
Kubernetes apiserver public listening address used for the read-only and
secure ports.
'';
default = cfg.apiserver.address;
type = types.str;
};
port = mkOption {
description = "Kubernets apiserver listening port.";
default = 8080;
type = types.int;
};
readOnlyPort = mkOption {
description = "Kubernets apiserver read-only port.";
default = 7080;
type = types.int;
};
securePort = mkOption {
description = "Kubernetes apiserver secure port.";
default = 6443;
type = types.int;
};
tlsCertFile = mkOption {
description = "Kubernetes apiserver certificate file.";
default = "";
type = types.str;
};
tlsPrivateKeyFile = mkOption {
description = "Kubernetes apiserver private key file.";
default = "";
type = types.str;
};
tokenAuth = mkOption {
description = ''
Kubernetes apiserver token authentication file. See
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authentication.md"/>
'';
default = {};
example = literalExample ''
{
alice = "abc123";
bob = "xyz987";
}
'';
type = types.attrsOf types.str;
};
authorizationMode = mkOption {
description = ''
Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC). See
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authorization.md"/>
'';
default = "AlwaysAllow";
type = types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC"];
};
authorizationPolicy = mkOption {
description = ''
Kubernetes apiserver authorization policy file. See
<link xlink:href="https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/authorization.md"/>
'';
default = [];
example = literalExample ''
[
{user = "admin";}
{user = "scheduler"; readonly = true; kind= "pods";}
{user = "scheduler"; kind = "bindings";}
{user = "kubelet"; readonly = true; kind = "bindings";}
{user = "kubelet"; kind = "events";}
{user= "alice"; ns = "projectCaribou";}
{user = "bob"; readonly = true; ns = "projectCaribou";}
]
'';
type = types.listOf types.attrs;
};
allowPrivileged = mkOption {
description = "Whether to allow privileged containers on kubernetes.";
default = false;
type = types.bool;
};
portalNet = mkOption {
description = "Kubernetes CIDR notation IP range from which to assign portal IPs";
default = "10.10.10.10/16";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes apiserver extra command line options.";
default = "";
type = types.str;
};
};
scheduler = {
enable = mkOption {
description = "Whether to enable kubernetes scheduler.";
default = false;
type = types.bool;
};
address = mkOption {
description = "Kubernetes scheduler listening address.";
default = "127.0.0.1";
type = types.str;
};
port = mkOption {
description = "Kubernets scheduler listening port.";
default = 10251;
type = types.int;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes scheduler extra command line options.";
default = "";
type = types.str;
};
};
controllerManager = {
enable = mkOption {
description = "Whether to enable kubernetes controller manager.";
default = false;
type = types.bool;
};
address = mkOption {
description = "Kubernetes controller manager listening address.";
default = "127.0.0.1";
type = types.str;
};
port = mkOption {
description = "Kubernets controller manager listening port.";
default = 10252;
type = types.int;
};
master = mkOption {
description = "Kubernetes apiserver address";
default = "${cfg.apiserver.address}:${toString cfg.apiserver.port}";
type = types.str;
};
machines = mkOption {
description = "Kubernetes apiserver list of machines to schedule to schedule onto";
default = [];
type = types.listOf types.str;
};
extraOpts = mkOption {
description = "Kubernetes scheduler extra command line options.";
default = "";
type = types.str;
};
};
kubelet = {
enable = mkOption {
description = "Whether to enable kubernetes kubelet.";
default = false;
type = types.bool;
};
address = mkOption {
description = "Kubernetes kubelet info server listening address.";
default = "0.0.0.0";
type = types.str;
};
port = mkOption {
description = "Kubernets kubelet info server listening port.";
default = 10250;
type = types.int;
};
hostname = mkOption {
description = "Kubernetes kubelet hostname override";
default = config.networking.hostName;
type = types.str;
};
allowPrivileged = mkOption {
description = "Whether to allow kubernetes containers to request privileged mode.";
default = false;
type = types.bool;
};
extraOpts = mkOption {
description = "Kubernetes kubelet extra command line options.";
default = "";
type = types.str;
};
};
proxy = {
enable = mkOption {
description = "Whether to enable kubernetes proxy.";
default = false;
type = types.bool;
};
address = mkOption {
description = "Kubernetes proxy listening address.";
default = "0.0.0.0";
type = types.str;
};
extraOpts = mkOption {
description = "Kubernetes proxy extra command line options.";
default = "";
type = types.str;
};
};
};
###### implementation
config = mkMerge [
(mkIf cfg.apiserver.enable {
systemd.services.kubernetes-apiserver = {
description = "Kubernetes Api Server";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "etcd.service" ];
serviceConfig = {
ExecStart = let
authorizationPolicyFile =
pkgs.writeText "kubernetes-policy"
(builtins.toJSON cfg.apiserver.authorizationPolicy);
tokenAuthFile =
pkgs.writeText "kubernetes-auth"
(concatImapStringsSep "\n" (i: v: v + "," + (toString i))
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
in ''${cfg.package}/bin/kube-apiserver \
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
-address=${cfg.apiserver.address} \
-port=${toString cfg.apiserver.port} \
-read_only_port=${toString cfg.apiserver.readOnlyPort} \
-public_address_override=${cfg.apiserver.publicAddress} \
-allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
${optionalString (cfg.apiserver.tlsCertFile!="")
"-tls_cert_file=${cfg.apiserver.tlsCertFile}"} \
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
"-tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \
${optionalString (cfg.apiserver.tokenAuth!=[])
"-token_auth_file=${tokenAuthFile}"} \
-authorization_mode=${cfg.apiserver.authorizationMode} \
${optionalString (cfg.apiserver.authorizationMode == "ABAC")
"-authorization_policy_file=${authorizationPolicyFile}"} \
${optionalString (cfg.apiserver.tlsCertFile!="" && cfg.apiserver.tlsCertFile!="")
"-secure_port=${toString cfg.apiserver.securePort}"} \
-portal_net=${cfg.apiserver.portalNet} \
-logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.apiserver.extraOpts}
'';
User = "kubernetes";
};
postStart = ''
until ${pkgs.curl}/bin/curl -s -o /dev/null 'http://${cfg.apiserver.address}:${toString cfg.apiserver.port}/'; do
sleep 1;
done
'';
};
})
(mkIf cfg.scheduler.enable {
systemd.services.kubernetes-scheduler = {
description = "Kubernetes Scheduler Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-scheduler \
-address=${cfg.scheduler.address} \
-port=${toString cfg.scheduler.port} \
-master=${cfg.scheduler.master} \
-logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.scheduler.extraOpts}
'';
User = "kubernetes";
};
};
})
(mkIf cfg.controllerManager.enable {
systemd.services.kubernetes-controller-manager = {
description = "Kubernetes Controller Manager Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-controller-manager \
-address=${cfg.controllerManager.address} \
-port=${toString cfg.controllerManager.port} \
-master=${cfg.controllerManager.master} \
${optionalString (cfg.controllerManager.machines != [])
"-machines=${concatStringsSep "," cfg.controllerManager.machines}"} \
-logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.controllerManager.extraOpts}
'';
User = "kubernetes";
};
};
})
(mkIf cfg.kubelet.enable {
systemd.services.kubernetes-kubelet = {
description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "etcd.service" "docker.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kubelet \
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
-address=${cfg.kubelet.address} \
-port=${toString cfg.kubelet.port} \
-hostname_override=${cfg.kubelet.hostname} \
-allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
-root_dir=${cfg.dataDir} \
-logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.kubelet.extraOpts}
'';
User = "kubernetes";
PermissionsStartOnly = true;
WorkingDirectory = cfg.dataDir;
};
};
})
(mkIf cfg.proxy.enable {
systemd.services.kubernetes-proxy = {
description = "Kubernetes Proxy Service";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" "etcd.service" ];
serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-proxy \
-etcd_servers=${concatMapStringsSep "," (s: "http://${s}") cfg.etcdServers} \
-bind_address=${cfg.proxy.address} \
-logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.proxy.extraOpts}
'';
};
};
})
(mkIf (any (el: el == "master") cfg.roles) {
services.kubernetes.apiserver.enable = mkDefault true;
services.kubernetes.scheduler.enable = mkDefault true;
services.kubernetes.controllerManager.enable = mkDefault true;
})
(mkIf (any (el: el == "node") cfg.roles) {
virtualisation.docker.enable = mkDefault true;
services.kubernetes.kubelet.enable = mkDefault true;
services.kubernetes.proxy.enable = mkDefault true;
})
(mkIf (any (el: el == "node" || el == "master") cfg.roles) {
services.etcd.enable = mkDefault true;
})
(mkIf (
cfg.apiserver.enable ||
cfg.scheduler.enable ||
cfg.controllerManager.enable ||
cfg.kubelet.enable ||
cfg.proxy.enable
) {
services.kubernetes.package = mkDefault pkgs.kubernetes;
environment.systemPackages = [ cfg.package ];
users.extraUsers = singleton {
name = "kubernetes";
uid = config.ids.uids.kubernetes;
description = "Kubernetes user";
extraGroups = [ "docker" ];
group = "kubernetes";
home = cfg.dataDir;
createHome = true;
};
users.extraGroups.kubernetes.gid = config.ids.gids.kubernetes;
})
];
}
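A minimal sketch of a single machine acting as both master and node; the apiserver address is illustrative:

{ ... }:
{
  services.kubernetes = {
    # "master" pulls in apiserver, scheduler and controller manager;
    # "node" pulls in docker, kubelet and proxy; etcd is enabled for both.
    roles = [ "master" "node" ];
    apiserver.address = "0.0.0.0";
  };
}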

View file

@ -56,14 +56,13 @@ with lib;
{ name = endpointUser;
uid = config.ids.uids.fourstorehttp;
description = "4Store SPARQL endpoint user";
# home = stateDir;
};
services.avahi.enable = true;
jobs.fourStoreEndpoint = {
name = "4store-endpoint";
startOn = "filesystem";
startOn = "ip-up";
exec = ''
${run} '${pkgs.rdf4store}/bin/4s-httpd -D ${cfg.options} ${if cfg.listenAddress!=null then "-H ${cfg.listenAddress}" else "" } -p ${toString cfg.port} ${cfg.database}'

View file

@ -54,7 +54,7 @@ with lib;
jobs.fourStore = {
name = "4store";
startOn = "filesystem";
startOn = "ip-up";
preStart = ''
mkdir -p ${stateDir}/

View file

@ -0,0 +1,133 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.hbase;
configFile = pkgs.writeText "hbase-site.xml" ''
<configuration>
<property>
<name>hbase.rootdir</name>
<value>file://${cfg.dataDir}/hbase</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>${cfg.dataDir}/zookeeper</value>
</property>
</configuration>
'';
configDir = pkgs.runCommand "hbase-config-dir" {} ''
mkdir -p $out
cp ${cfg.package}/conf/* $out/
rm $out/hbase-site.xml
ln -s ${configFile} $out/hbase-site.xml
'' ;
in {
###### interface
options = {
services.hbase = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run HBase.
'';
};
package = mkOption {
type = types.package;
default = pkgs.hbase;
example = literalExample "pkgs.hbase";
description = ''
HBase package to use.
'';
};
user = mkOption {
type = types.string;
default = "hbase";
description = ''
User account under which HBase runs.
'';
};
group = mkOption {
type = types.string;
default = "hbase";
description = ''
Group account under which HBase runs.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/hbase";
description = ''
Specifies location of HBase database files. This location should be
writable and readable for the user the HBase service runs as
(hbase by default).
'';
};
logDir = mkOption {
type = types.path;
default = "/var/log/hbase";
description = ''
Specifies the location of HBase log files.
'';
};
};
};
###### implementation
config = mkIf config.services.hbase.enable {
systemd.services.hbase = {
description = "HBase Server";
wantedBy = [ "multi-user.target" ];
environment = {
JAVA_HOME = "${pkgs.jre}";
HBASE_LOG_DIR = cfg.logDir;
};
preStart =
''
mkdir -p ${cfg.dataDir};
mkdir -p ${cfg.logDir};
if [ "$(id -u)" = 0 ]; then
chown ${cfg.user}:${cfg.group} ${cfg.dataDir}
chown ${cfg.user}:${cfg.group} ${cfg.logDir}
fi
'';
serviceConfig = {
PermissionsStartOnly = true;
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.package}/bin/hbase --config ${configDir} master start";
};
};
users.extraUsers.hbase = {
description = "HBase Server user";
group = "hbase";
uid = config.ids.uids.hbase;
};
users.extraGroups.hbase.gid = config.ids.gids.hbase;
};
}
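A sketch of enabling HBase with the defaults shown above:

{ ... }:
{
  services.hbase = {
    enable = true;
    dataDir = "/var/lib/hbase";   # default; must be writable by the hbase user
  };
}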

View file

@ -0,0 +1,100 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.opentsdb;
configFile = pkgs.writeText "opentsdb.conf" ''
tsd.core.auto_create_metrics = true
tsd.http.request.enable_chunked = true
'';
in {
###### interface
options = {
services.opentsdb = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to run OpenTSDB.
'';
};
package = mkOption {
type = types.package;
default = pkgs.opentsdb;
example = literalExample "pkgs.opentsdb";
description = ''
OpenTSDB package to use.
'';
};
user = mkOption {
type = types.string;
default = "opentsdb";
description = ''
User account under which OpenTSDB runs.
'';
};
group = mkOption {
type = types.string;
default = "opentsdb";
description = ''
Group account under which OpenTSDB runs.
'';
};
port = mkOption {
type = types.int;
default = 4242;
description = ''
Which port OpenTSDB listens on.
'';
};
};
};
###### implementation
config = mkIf config.services.opentsdb.enable {
systemd.services.opentsdb = {
description = "OpenTSDB Server";
wantedBy = [ "multi-user.target" ];
requires = [ "hbase.service" ];
environment.JAVA_HOME = "${pkgs.jre}";
path = [ pkgs.gnuplot ];
preStart =
''
COMPRESSION=NONE HBASE_HOME=${config.services.hbase.package} ${cfg.package}/share/opentsdb/tools/create_table.sh
'';
serviceConfig = {
PermissionsStartOnly = true;
User = cfg.user;
Group = cfg.group;
ExecStart = "${cfg.package}/bin/tsdb tsd --staticroot=${cfg.package}/share/opentsdb/static --cachedir=/tmp/opentsdb --port=${toString cfg.port} --config=${configFile}";
};
};
users.extraUsers.opentsdb = {
description = "OpenTSDB Server user";
group = "opentsdb";
uid = config.ids.uids.opentsdb;
};
users.extraGroups.opentsdb.gid = config.ids.gids.opentsdb;
};
}
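A sketch of running OpenTSDB on top of the HBase module above; the port is the default:

{ ... }:
{
  services.hbase.enable = true;   # OpenTSDB requires hbase.service (see requires above)
  services.opentsdb = {
    enable = true;
    port = 4242;
  };
}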

View file

@ -63,7 +63,7 @@ with lib;
jobs.virtuoso = {
name = "virtuoso";
startOn = "filesystem";
startOn = "ip-up";
preStart = ''
mkdir -p ${stateDir}

View file

@ -1,6 +1,6 @@
# gvfs backends
{ config, lib, ... }:
{ config, lib, pkgs, ... }:
with lib;
@ -37,6 +37,8 @@ in
services.dbus.packages = [ gnome3.gvfs ];
services.udev.packages = [ pkgs.libmtp ];
};
}

View file

@ -0,0 +1,139 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.psd;
configFile = ''
${optionalString (cfg.users != [ ]) ''
USERS="${concatStringsSep " " cfg.users}"
''}
${optionalString (cfg.browsers != [ ]) ''
BROWSERS="${concatStringsSep " " cfg.browsers}"
''}
${optionalString (cfg.volatile != "") "VOLATILE=${cfg.volatile}"}
${optionalString (cfg.daemonFile != "") "DAEMON_FILE=${cfg.daemonFile}"}
'';
in {
options.services.psd = with types; {
enable = mkOption {
type = bool;
default = false;
description = ''
Whether to enable the Profile Sync daemon.
'';
};
users = mkOption {
type = listOf str;
default = [ ];
example = [ "demo" ];
description = ''
A list of users whose browser profiles should be sync'd to tmpfs.
'';
};
browsers = mkOption {
type = listOf str;
default = [ ];
example = [ "chromium" "firefox" ];
description = ''
A list of browsers to sync. Available choices are:
chromium chromium-dev conkeror.mozdev.org epiphany firefox
firefox-trunk google-chrome google-chrome-beta google-chrome-unstable
heftig-aurora icecat luakit midori opera opera-developer opera-beta
qupzilla palemoon rekonq seamonkey
An empty list will enable all browsers.
'';
};
resyncTimer = mkOption {
type = str;
default = "1h";
example = "1h 30min";
description = ''
The amount of time to wait before syncing browser profiles back to the
disk.
Takes a systemd.unit time span. The time unit defaults to seconds if
omitted.
'';
};
volatile = mkOption {
type = str;
default = "/run/psd-profiles";
description = ''
The directory where browser profiles should reside (this should be
mounted as a tmpfs). Do not include a trailing backslash.
'';
};
daemonFile = mkOption {
type = str;
default = "/run/psd";
description = ''
Where the pid and backup configuration files will be stored.
'';
};
};
config = mkIf cfg.enable {
systemd = {
services = {
psd = {
description = "Profile Sync daemon";
wants = [ "psd-resync.service" "local-fs.target" ];
wantedBy = [ "multi-user.target" ];
preStart = "mkdir -p ${cfg.volatile}";
path = with pkgs; [ glibc rsync gawk ];
unitConfig = {
RequiresMountsFor = [ "/home/" ];
};
serviceConfig = {
Type = "oneshot";
RemainAfterExit = "yes";
ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon sync";
ExecStop = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon unsync";
};
};
psd-resync = {
description = "Timed profile resync";
after = [ "psd.service" ];
wants = [ "psd-resync.timer" ];
partOf = [ "psd.service" ];
path = with pkgs; [ glibc rsync gawk ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon resync";
};
};
};
timers.psd-resync = {
description = "Timer for profile sync daemon - ${cfg.resyncTimer}";
partOf = [ "psd-resync.service" "psd.service" ];
timerConfig = {
OnUnitActiveSec = "${cfg.resyncTimer}";
};
};
};
environment.etc."psd.conf".text = configFile;
};
}
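A sketch of the profile-sync-daemon options; the user name is a placeholder:

{ ... }:
{
  services.psd = {
    enable = true;
    users = [ "alice" ];         # placeholder; these users' profiles are synced to tmpfs
    browsers = [ "firefox" ];    # an empty list would enable all supported browsers
    resyncTimer = "30min";
  };
}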

View file

@ -19,6 +19,8 @@ in {
###### implementation
config = mkIf cfg.enable {
services.dbus.packages = [ pkgs.thermald ];
systemd.services.thermald = {
description = "Thermal Daemon Service";
wantedBy = [ "multi-user.target" ];

View file

@ -31,6 +31,7 @@ let
buildCommand = ''
mkdir -p $out
shopt -s nullglob
set +o pipefail
# Set a reasonable $PATH for programs called by udev rules.
echo 'ENV{PATH}="${udevPath}/bin:${udevPath}/sbin"' > $out/00-path.rules
@ -87,7 +88,7 @@ let
done
${optionalString config.networking.usePredictableInterfaceNames ''
cp ${./80-net-name-slot.rules} $out/80-net-name-slot.rules
cp ${./80-net-setup-link.rules} $out/80-net-setup-link.rules
''}
# If auto-configuration is disabled, then remove

View file

@ -7,6 +7,13 @@ let
pluginPath = lib.concatStringsSep ":" cfg.plugins;
havePluginPath = lib.length cfg.plugins > 0;
ops = lib.optionalString;
verbosityFlag = {
debug = "--debug";
info = "--verbose";
warn = ""; # intentionally empty
error = "--quiet";
fatal = "--silent";
}."${cfg.logLevel}";
in
@ -37,6 +44,12 @@ in
description = "The paths to find other logstash plugins in.";
};
logLevel = mkOption {
type = types.enum [ "debug" "info" "warn" "error" "fatal" ];
default = "warn";
description = "Logging verbosity level.";
};
watchdogTimeout = mkOption {
type = types.int;
default = 10;
@ -124,6 +137,7 @@ in
"${cfg.package}/bin/logstash agent " +
"-w ${toString cfg.filterWorkers} " +
ops havePluginPath "--pluginpath ${pluginPath} " +
"${verbosityFlag} " +
"--watchdog-timeout ${toString cfg.watchdogTimeout} " +
"-f ${writeText "logstash.conf" ''
input {

View file

@ -43,15 +43,6 @@ in {
The package providing syslog-ng binaries.
'';
};
listenToJournal = mkOption {
type = types.bool;
default = true;
description = ''
Whether syslog-ng should listen to the syslog socket used
by journald, and therefore receive all logs that journald
produces.
'';
};
extraModulePaths = mkOption {
type = types.listOf types.str;
default = [];
@ -74,7 +65,7 @@ in {
configHeader = mkOption {
type = types.lines;
default = ''
@version: 3.5
@version: 3.6
@include "scl.conf"
'';
description = ''
@ -86,18 +77,13 @@ in {
};
config = mkIf cfg.enable {
systemd.sockets.syslog = mkIf cfg.listenToJournal {
wantedBy = [ "sockets.target" ];
socketConfig.Service = "syslog-ng.service";
};
systemd.services.syslog-ng = {
description = "syslog-ng daemon";
preStart = "mkdir -p /{var,run}/syslog-ng";
wantedBy = optional (!cfg.listenToJournal) "multi-user.target";
wantedBy = [ "multi-user.target" ];
after = [ "multi-user.target" ]; # makes sure hostname etc is set
serviceConfig = {
Type = "notify";
Sockets = if cfg.listenToJournal then "syslog.socket" else null;
StandardOutput = "null";
Restart = "on-failure";
ExecStart = "${cfg.package}/sbin/syslog-ng ${concatStringsSep " " syslogngOptions}";

View file

@ -84,7 +84,7 @@ in
startOn = "started network-interfaces";
stopOn = "stopping network-interfaces";
path = [ pkgs.nfsUtils pkgs.sshfsFuse ];
path = [ pkgs.nfs-utils pkgs.sshfsFuse ];
preStop =
''

View file

@ -0,0 +1,206 @@
# The following was taken from github.com/crohr/syslogger and is BSD
# licensed.
require 'syslog'
require 'logger'
require 'thread'
class Syslogger
VERSION = "1.6.0"
attr_reader :level, :ident, :options, :facility, :max_octets
attr_accessor :formatter
MAPPING = {
Logger::DEBUG => Syslog::LOG_DEBUG,
Logger::INFO => Syslog::LOG_INFO,
Logger::WARN => Syslog::LOG_WARNING,
Logger::ERROR => Syslog::LOG_ERR,
Logger::FATAL => Syslog::LOG_CRIT,
Logger::UNKNOWN => Syslog::LOG_ALERT
}
#
# Initializes default options for the logger
# <tt>ident</tt>:: the name of your program [default=$0].
# <tt>options</tt>:: syslog options [default=<tt>Syslog::LOG_PID | Syslog::LOG_CONS</tt>].
# Correct values are:
# LOG_CONS : writes the message on the console if an error occurs when sending the message;
# LOG_NDELAY : no delay before sending the message;
# LOG_PERROR : messages will also be written on STDERR;
# LOG_PID : adds the process number to the message (just after the program name)
# <tt>facility</tt>:: the syslog facility [default=nil] Correct values include:
# Syslog::LOG_DAEMON
# Syslog::LOG_USER
# Syslog::LOG_SYSLOG
# Syslog::LOG_LOCAL2
# Syslog::LOG_NEWS
# etc.
#
# Usage:
# logger = Syslogger.new("my_app", Syslog::LOG_PID | Syslog::LOG_CONS, Syslog::LOG_LOCAL0)
# logger.level = Logger::INFO # use Logger levels
# logger.warn "warning message"
# logger.debug "debug message"
#
def initialize(ident = $0, options = Syslog::LOG_PID | Syslog::LOG_CONS, facility = nil)
@ident = ident
@options = options || (Syslog::LOG_PID | Syslog::LOG_CONS)
@facility = facility
@level = Logger::INFO
@mutex = Mutex.new
@formatter = Logger::Formatter.new
end
%w{debug info warn error fatal unknown}.each do |logger_method|
# Accepting *args as message could be nil.
# Default params not supported in ruby 1.8.7
define_method logger_method.to_sym do |*args, &block|
return true if @level > Logger.const_get(logger_method.upcase)
message = args.first || block && block.call
add(Logger.const_get(logger_method.upcase), message)
end
unless logger_method == 'unknown'
define_method "#{logger_method}?".to_sym do
@level <= Logger.const_get(logger_method.upcase)
end
end
end
# Log a message at the Logger::INFO level. Useful for use with Rack::CommonLogger
def write(msg)
add(Logger::INFO, msg)
end
# Logs a message at the Logger::INFO level.
def <<(msg)
add(Logger::INFO, msg)
end
# Low level method to add a message.
# +severity+:: the level of the message. One of Logger::DEBUG, Logger::INFO, Logger::WARN, Logger::ERROR, Logger::FATAL, Logger::UNKNOWN
# +message+:: the message string.
# If nil, the method will call the block and use the result as the message string.
# If both are nil or no block is given, it will use the progname as per the behaviour of both the standard Ruby logger, and the Rails BufferedLogger.
# +progname+:: optionally, overwrite the program name that appears in the log message.
def add(severity, message = nil, progname = nil, &block)
if message.nil? && block.nil? && !progname.nil?
message, progname = progname, nil
end
progname ||= @ident
@mutex.synchronize do
Syslog.open(progname, @options, @facility) do |s|
s.mask = Syslog::LOG_UPTO(MAPPING[@level])
communication = clean(message || block && block.call)
if self.max_octets
buffer = "#{tags_text}"
communication.bytes do |byte|
buffer.concat(byte)
# if the last byte we added is potentially part of an escape, we'll go ahead and add another byte
if buffer.bytesize >= self.max_octets && !['%'.ord,'\\'.ord].include?(byte)
s.log(MAPPING[severity],buffer)
buffer = ""
end
end
s.log(MAPPING[severity],buffer) unless buffer.empty?
else
s.log(MAPPING[severity],"#{tags_text}#{communication}")
end
end
end
end
# Set the max octets of the messages written to the log
def max_octets=(max_octets)
@max_octets = max_octets
end
# Sets the minimum level for messages to be written in the log.
# +level+:: one of <tt>Logger::DEBUG</tt>, <tt>Logger::INFO</tt>, <tt>Logger::WARN</tt>, <tt>Logger::ERROR</tt>, <tt>Logger::FATAL</tt>, <tt>Logger::UNKNOWN</tt>
def level=(level)
level = Logger.const_get(level.to_s.upcase) if level.is_a?(Symbol)
unless level.is_a?(Fixnum)
raise ArgumentError.new("Invalid logger level `#{level.inspect}`")
end
@level = level
end
# Sets the ident string passed along to Syslog
def ident=(ident)
@ident = ident
end
# Tagging code borrowed from ActiveSupport gem
def tagged(*tags)
new_tags = push_tags(*tags)
yield self
ensure
pop_tags(new_tags.size)
end
def push_tags(*tags)
tags.flatten.reject{ |i| i.respond_to?(:empty?) ? i.empty? : !i }.tap do |new_tags|
current_tags.concat new_tags
end
end
def pop_tags(size = 1)
current_tags.pop size
end
def clear_tags!
current_tags.clear
end
protected
# Borrowed from SyslogLogger.
def clean(message)
message = message.to_s.dup
message.strip! # remove whitespace
message.gsub!(/\n/, '\\n') # escape newlines
message.gsub!(/%/, '%%') # syslog(3) freaks on % (printf)
message.gsub!(/\e\[[^m]*m/, '') # remove useless ansi color codes
message
end
private
def tags_text
tags = current_tags
if tags.any?
tags.collect { |tag| "[#{tag}] " }.join
end
end
def current_tags
Thread.current[:syslogger_tagged_logging_tags] ||= []
end
end
worker_processes 2
working_directory ENV["GITLAB_PATH"]
pid ENV["UNICORN_PATH"] + "/tmp/pids/unicorn.pid"
listen ENV["UNICORN_PATH"] + "/tmp/sockets/gitlab.socket", :backlog => 1024
listen "127.0.0.1:8080", :tcp_nopush => true
timeout 60
logger Syslogger.new
preload_app true
GC.respond_to?(:copy_on_write_friendly=) and
GC.copy_on_write_friendly = true
check_client_connection false
after_fork do |server, worker|
defined?(ActiveRecord::Base) and
ActiveRecord::Base.establish_connection
end

View file

@ -0,0 +1,82 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.dockerRegistry;
in {
###### interface
options.services.dockerRegistry = {
enable = mkOption {
description = "Whether to enable docker registry server.";
default = false;
type = types.bool;
};
host = mkOption {
description = "Docker registry host or ip to bind to.";
default = "127.0.0.1";
type = types.str;
};
port = mkOption {
description = "Docker registry port to bind to.";
default = 5000;
type = types.int;
};
storagePath = mkOption {
type = types.path;
default = "/var/lib/docker/registry";
description = "Docker registry strorage path.";
};
extraConfig = mkOption {
description = ''
Extra Docker registry configuration. See
<link xlink:href="https://github.com/docker/docker-registry/blob/master/config/config_sample.yml"/>
'';
default = {};
type = types.attrsOf types.str;
};
};
config = mkIf cfg.enable {
systemd.services.docker-registry = {
description = "Docker Container Registry";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
environment = {
REGISTRY_HOST = cfg.host;
REGISTRY_PORT = toString cfg.port;
GUNICORN_OPTS = "[--preload]"; # see https://github.com/docker/docker-registry#sqlalchemy
STORAGE_PATH = cfg.storagePath;
} // cfg.extraConfig;
serviceConfig = {
ExecStart = "${pkgs.pythonPackages.docker_registry}/bin/docker-registry";
User = "docker-registry";
Group = "docker";
PermissionsStartOnly = true;
};
preStart = ''
mkdir -p ${cfg.storagePath}
if [ "$(id -u)" = 0 ]; then
chown -R docker-registry:docker ${cfg.storagePath}
fi
'';
postStart = ''
until ${pkgs.curl}/bin/curl -s -o /dev/null 'http://${cfg.host}:${toString cfg.port}/'; do
sleep 1;
done
'';
};
users.extraGroups.docker.gid = mkDefault config.ids.gids.docker;
users.extraUsers.docker-registry.uid = config.ids.uids.docker-registry;
};
}
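A sketch of a local registry with the defaults shown above:

{ ... }:
{
  services.dockerRegistry = {
    enable = true;
    host = "127.0.0.1";
    port = 5000;
    storagePath = "/var/lib/docker/registry";
  };
}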

View file

@ -0,0 +1,144 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.etcd;
in {
options.services.etcd = {
enable = mkOption {
description = "Whether to enable etcd.";
default = false;
type = types.uniq types.bool;
};
name = mkOption {
description = "Etcd unique node name.";
default = config.networking.hostName;
type = types.str;
};
advertiseClientUrls = mkOption {
description = "Etcd list of this member's client URLs to advertise to the rest of the cluster.";
default = cfg.listenClientUrls;
type = types.listOf types.str;
};
listenClientUrls = mkOption {
description = "Etcd list of URLs to listen on for client traffic.";
default = ["http://localhost:4001"];
type = types.listOf types.str;
};
listenPeerUrls = mkOption {
description = "Etcd list of URLs to listen on for peer traffic.";
default = ["http://localhost:7001"];
type = types.listOf types.str;
};
initialAdvertisePeerUrls = mkOption {
description = "Etcd list of this member's peer URLs to advertise to rest of the cluster.";
default = cfg.listenPeerUrls;
type = types.listOf types.str;
};
initialCluster = mkOption {
description = "Etcd initial cluster configuration for bootstrapping.";
default = ["${cfg.name}=http://localhost:7001"];
type = types.listOf types.str;
};
initialClusterState = mkOption {
description = "Etcd initial cluster configuration for bootstrapping.";
default = "new";
type = types.enum ["new" "existing"];
};
initialClusterToken = mkOption {
description = "Etcd initial cluster token for etcd cluster during bootstrap.";
default = "etcd-cluster";
type = types.str;
};
discovery = mkOption {
description = "Etcd discovery url";
default = "";
type = types.str;
};
extraConf = mkOption {
description = ''
Etcd extra configuration. See
<link xlink:href='https://github.com/coreos/etcd/blob/master/Documentation/configuration.md#environment-variables' />
'';
type = types.attrsOf types.str;
default = {};
example = literalExample ''
{
"CORS": "*",
"NAME": "default-name",
"MAX_RESULT_BUFFER": "1024",
"MAX_CLUSTER_SIZE": "9",
"MAX_RETRY_ATTEMPTS": "3"
}
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/etcd";
description = "Etcd data directory.";
};
};
config = mkIf cfg.enable {
systemd.services.etcd = {
description = "Etcd Daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network-interfaces.target" ];
environment = {
ETCD_NAME = cfg.name;
ETCD_DISCOVERY = cfg.discovery;
ETCD_DATA_DIR = cfg.dataDir;
ETCD_ADVERTISE_CLIENT_URLS = concatStringsSep "," cfg.advertiseClientUrls;
ETCD_LISTEN_CLIENT_URLS = concatStringsSep "," cfg.listenClientUrls;
ETCD_LISTEN_PEER_URLS = concatStringsSep "," cfg.listenPeerUrls;
ETCD_INITIAL_ADVERTISE_PEER_URLS = concatStringsSep "," cfg.initialAdvertisePeerUrls;
} // (optionalAttrs (cfg.discovery == ""){
ETCD_INITIAL_CLUSTER = concatStringsSep "," cfg.initialCluster;
ETCD_INITIAL_CLUSTER_STATE = cfg.initialClusterState;
ETCD_INITIAL_CLUSTER_TOKEN = cfg.initialClusterToken;
}) // (mapAttrs' (n: v: nameValuePair "ETCD_${n}" v) cfg.extraConf);
serviceConfig = {
ExecStart = "${pkgs.etcd}/bin/etcd";
User = "etcd";
PermissionsStartOnly = true;
};
preStart = ''
mkdir -m 0700 -p ${cfg.dataDir}
if [ "$(id -u)" = 0 ]; then chown etcd ${cfg.dataDir}; fi
'';
postStart = ''
until ${pkgs.etcdctl}/bin/etcdctl set /nixos/state 'up'; do
sleep 1;
done
until ${pkgs.etcdctl}/bin/etcdctl get /nixos/state | grep up; do
sleep 1;
done
'';
};
environment.systemPackages = [ pkgs.etcdctl ];
users.extraUsers = singleton {
name = "etcd";
uid = config.ids.uids.etcd;
description = "Etcd daemon user";
home = cfg.dataDir;
};
};
}
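
A minimal usage sketch for the new etcd module, assuming a single-node setup; the node name and URLs are illustrative and the remaining options keep their defaults:

services.etcd = {
  enable = true;
  name = "node1";                                      # illustrative node name
  listenClientUrls = [ "http://0.0.0.0:4001" ];        # expose the client API beyond localhost
  initialCluster = [ "node1=http://localhost:7001" ];
};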

View file

@ -0,0 +1,295 @@
{ config, lib, pkgs, ... }:
# TODO: support non-postgresql
with lib;
let
cfg = config.services.gitlab;
ruby = pkgs.ruby;
rubyLibs = pkgs.rubyLibs;
databaseYml = ''
production:
adapter: postgresql
database: ${cfg.databaseName}
host: ${cfg.databaseHost}
password: ${cfg.databasePassword}
username: ${cfg.databaseUsername}
encoding: utf8
'';
gitlabShellYml = ''
user: gitlab
gitlab_url: "http://${cfg.host}:${toString cfg.port}/"
http_settings:
self_signed_cert: false
repos_path: "${cfg.stateDir}/repositories"
log_file: "${cfg.stateDir}/log/gitlab-shell.log"
redis:
bin: ${pkgs.redis}/bin/redis-cli
host: 127.0.0.1
port: 6379
database: 0
namespace: resque:gitlab
'';
unicornConfig = builtins.readFile ./defaultUnicornConfig.rb;
gitlab-runner = pkgs.stdenv.mkDerivation rec {
name = "gitlab-runner";
buildInputs = [ pkgs.gitlab pkgs.rubyLibs.bundler pkgs.makeWrapper ];
phases = "installPhase fixupPhase";
buildPhase = "";
installPhase = ''
mkdir -p $out/bin
makeWrapper ${rubyLibs.bundler}/bin/bundle $out/bin/gitlab-runner\
--set RAKEOPT '"-f ${pkgs.gitlab}/share/gitlab/Rakefile"'\
--set UNICORN_PATH "${cfg.stateDir}/"\
--set GITLAB_PATH "${pkgs.gitlab}/share/gitlab/"\
--set GITLAB_APPLICATION_LOG_PATH "${cfg.stateDir}/log/application.log"\
--set GITLAB_SATELLITES_PATH "${cfg.stateDir}/satellites"\
--set GITLAB_SHELL_PATH "${pkgs.gitlab-shell}"\
--set GITLAB_REPOSITORIES_PATH "${cfg.stateDir}/repositories"\
--set GITLAB_SHELL_HOOKS_PATH "${cfg.stateDir}/shell/hooks"\
--set BUNDLE_GEMFILE "${pkgs.gitlab}/share/gitlab/Gemfile"\
--set GITLAB_EMAIL_FROM "${cfg.emailFrom}"\
--set GITLAB_SHELL_CONFIG_PATH "${cfg.stateDir}/shell/config.yml"\
--set GITLAB_SHELL_SECRET_PATH "${cfg.stateDir}/config/gitlab_shell_secret"\
--set GITLAB_HOST "${cfg.host}"\
--set GITLAB_PORT "${toString cfg.port}"\
--set GITLAB_BACKUP_PATH"${cfg.backupPath}"\
--set RAILS_ENV "production"
'';
};
in {
options = {
services.gitlab = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable the gitlab service.
'';
};
satelliteDir = mkOption {
type = types.str;
default = "/var/gitlab/git-satellites";
description = "Gitlab directory to store checked out git trees requires for operation.";
};
stateDir = mkOption {
type = types.str;
default = "/var/gitlab/state";
description = "Gitlab state directory, logs are stored here.";
};
backupPath = mkOption {
type = types.str;
default = cfg.stateDir + "/backup";
description = "Gitlab path for backups.";
};
databaseHost = mkOption {
type = types.str;
default = "127.0.0.1";
description = "Gitlab database hostname.";
};
databasePassword = mkOption {
type = types.str;
default = "";
description = "Gitlab database user password.";
};
databaseName = mkOption {
type = types.str;
default = "gitlab";
description = "Gitlab database name.";
};
databaseUsername = mkOption {
type = types.str;
default = "gitlab";
description = "Gitlab database user.";
};
emailFrom = mkOption {
type = types.str;
default = "example@example.org";
description = "The source address for emails sent by gitlab.";
};
host = mkOption {
type = types.str;
default = config.networking.hostName;
description = "Gitlab host name. Used e.g. for copy-paste URLs.";
};
port = mkOption {
type = types.int;
default = 8080;
description = "Gitlab server listening port.";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ gitlab-runner pkgs.gitlab-shell ];
assertions = [
{ assertion = cfg.databasePassword != "";
message = "databasePassword must be set";
}
];
# Redis is required for the sidekiq queue runner.
services.redis.enable = mkDefault true;
# We use postgres as the main data store.
services.postgresql.enable = mkDefault true;
services.postgresql.package = mkDefault pkgs.postgresql;
# Use postfix to send out mails.
services.postfix.enable = mkDefault true;
users.extraUsers = [
{ name = "gitlab";
group = "gitlab";
home = "${cfg.stateDir}/home";
shell = "${pkgs.bash}/bin/bash";
uid = config.ids.uids.gitlab;
} ];
users.extraGroups = [
{ name = "gitlab";
gid = config.ids.gids.gitlab;
} ];
systemd.services.gitlab-sidekiq = {
after = [ "network.target" "redis.service" ];
wantedBy = [ "multi-user.target" ];
environment.HOME = "${cfg.stateDir}/home";
environment.UNICORN_PATH = "${cfg.stateDir}/";
environment.GITLAB_PATH = "${pkgs.gitlab}/share/gitlab/";
environment.GITLAB_APPLICATION_LOG_PATH = "${cfg.stateDir}/log/application.log";
environment.GITLAB_SATELLITES_PATH = "${cfg.stateDir}/satellites";
environment.GITLAB_SHELL_PATH = "${pkgs.gitlab-shell}";
environment.GITLAB_REPOSITORIES_PATH = "${cfg.stateDir}/repositories";
environment.GITLAB_SHELL_HOOKS_PATH = "${cfg.stateDir}/shell/hooks";
environment.BUNDLE_GEMFILE = "${pkgs.gitlab}/share/gitlab/Gemfile";
environment.GITLAB_EMAIL_FROM = "${cfg.emailFrom}";
environment.GITLAB_SHELL_CONFIG_PATH = "${cfg.stateDir}/shell/config.yml";
environment.GITLAB_SHELL_SECRET_PATH = "${cfg.stateDir}/config/gitlab_shell_secret";
environment.GITLAB_HOST = "${cfg.host}";
environment.GITLAB_PORT = "${toString cfg.port}";
environment.GITLAB_DATABASE_HOST = "${cfg.databaseHost}";
environment.GITLAB_DATABASE_PASSWORD = "${cfg.databasePassword}";
environment.RAILS_ENV = "production";
path = with pkgs; [
config.services.postgresql.package
gitAndTools.git
ruby
openssh
nodejs
];
serviceConfig = {
Type = "simple";
User = "gitlab";
Group = "gitlab";
TimeoutSec = "300";
WorkingDirectory = "${pkgs.gitlab}/share/gitlab";
ExecStart="${rubyLibs.bundler}/bin/bundle exec \"sidekiq -q post_receive -q mailer -q system_hook -q project_web_hook -q gitlab_shell -q common -q default -e production -P ${cfg.stateDir}/tmp/sidekiq.pid\"";
};
};
systemd.services.gitlab = {
after = [ "network.target" "postgresql.service" "redis.service" ];
wantedBy = [ "multi-user.target" ];
environment.HOME = "${cfg.stateDir}/home";
environment.UNICORN_PATH = "${cfg.stateDir}/";
environment.GITLAB_PATH = "${pkgs.gitlab}/share/gitlab/";
environment.GITLAB_APPLICATION_LOG_PATH = "${cfg.stateDir}/log/application.log";
environment.GITLAB_SATELLITES_PATH = "${cfg.stateDir}/satellites";
environment.GITLAB_SHELL_PATH = "${pkgs.gitlab-shell}";
environment.GITLAB_REPOSITORIES_PATH = "${cfg.stateDir}/repositories";
environment.GITLAB_SHELL_HOOKS_PATH = "${cfg.stateDir}/shell/hooks";
environment.BUNDLE_GEMFILE = "${pkgs.gitlab}/share/gitlab/Gemfile";
environment.GITLAB_EMAIL_FROM = "${cfg.emailFrom}";
environment.GITLAB_HOST = "${cfg.host}";
environment.GITLAB_PORT = "${toString cfg.port}";
environment.GITLAB_DATABASE_HOST = "${cfg.databaseHost}";
environment.GITLAB_DATABASE_PASSWORD = "${cfg.databasePassword}";
environment.RAILS_ENV = "production";
path = with pkgs; [
config.services.postgresql.package
gitAndTools.git
ruby
openssh
nodejs
];
preStart = ''
# TODO: use env vars
mkdir -p ${cfg.stateDir}
mkdir -p ${cfg.stateDir}/log
mkdir -p ${cfg.stateDir}/satellites
mkdir -p ${cfg.stateDir}/repositories
mkdir -p ${cfg.stateDir}/shell/hooks
mkdir -p ${cfg.stateDir}/tmp/pids
mkdir -p ${cfg.stateDir}/tmp/sockets
rm -rf ${cfg.stateDir}/config
mkdir -p ${cfg.stateDir}/config
# TODO: What exactly is gitlab-shell doing with the secret?
head -c 20 /dev/urandom > ${cfg.stateDir}/config/gitlab_shell_secret
mkdir -p ${cfg.stateDir}/home/.ssh
touch ${cfg.stateDir}/home/.ssh/authorized_keys
cp -rf ${pkgs.gitlab}/share/gitlab/config ${cfg.stateDir}/
cp ${pkgs.gitlab}/share/gitlab/VERSION ${cfg.stateDir}/VERSION
ln -fs ${pkgs.writeText "database.yml" databaseYml} ${cfg.stateDir}/config/database.yml
ln -fs ${pkgs.writeText "unicorn.rb" unicornConfig} ${cfg.stateDir}/config/unicorn.rb
chown -R gitlab:gitlab ${cfg.stateDir}/
chmod -R 755 ${cfg.stateDir}/
if [ "${cfg.databaseHost}" = "127.0.0.1" ]; then
if ! test -e "${cfg.stateDir}/db-created"; then
psql postgres -c "CREATE ROLE gitlab WITH LOGIN NOCREATEDB NOCREATEROLE NOCREATEUSER ENCRYPTED PASSWORD '${cfg.databasePassword}'"
${config.services.postgresql.package}/bin/createdb --owner gitlab gitlab || true
touch "${cfg.stateDir}/db-created"
# force=yes disables the manual-interaction yes/no prompt
# which breaks without an stdin.
force=yes ${rubyLibs.bundler}/bin/bundle exec rake -f ${pkgs.gitlab}/share/gitlab/Rakefile gitlab:setup RAILS_ENV=production
fi
fi
# Install the shell required to push repositories
ln -fs ${pkgs.writeText "config.yml" gitlabShellYml} ${cfg.stateDir}/shell/config.yml
export GITLAB_SHELL_CONFIG_PATH="${cfg.stateDir}/shell/config.yml"
${pkgs.gitlab-shell}/bin/install
# Change permissions in the last step because some of the
# intermediary scripts like to create directories as root.
chown -R gitlab:gitlab ${cfg.stateDir}/
chmod -R 755 ${cfg.stateDir}/
'';
serviceConfig = {
PermissionsStartOnly = true; # preStart must be run as root
Type = "simple";
User = "gitlab";
Group = "gitlab";
TimeoutSec = "300";
WorkingDirectory = "${pkgs.gitlab}/share/gitlab";
ExecStart="${rubyLibs.bundler}/bin/bundle exec \"unicorn -c ${cfg.stateDir}/config/unicorn.rb -E production\"";
};
};
};
}
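
A minimal sketch of enabling the new GitLab module; the password, mail address and host below are placeholders, and databasePassword must be non-empty or the assertion above will fail:

services.gitlab = {
  enable = true;
  databasePassword = "changeme";        # required; empty passwords are rejected by the assertion
  emailFrom = "gitlab@example.org";     # placeholder sender address
  host = "git.example.org";             # placeholder host used for copy-paste URLs
  port = 8080;
};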

View file

@ -15,14 +15,21 @@ in
default = false;
description = ''
Enable gitolite management under the
<literal>gitolite</literal> user. The Gitolite home
directory is <literal>/var/lib/gitolite</literal>. After
<literal>gitolite</literal> user. After
switching to a configuration with Gitolite enabled, you can
then run <literal>git clone
gitolite@host:gitolite-admin.git</literal> to manage it further.
'';
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/gitolite";
description = ''
Gitolite home directory (used to store all the repositories).
'';
};
adminPubkey = mkOption {
type = types.str;
description = ''
@ -39,13 +46,21 @@ in
A list of custom git hooks that get copied to <literal>~/.gitolite/hooks/common</literal>.
'';
};
user = mkOption {
type = types.str;
default = "gitolite";
description = ''
Gitolite user account. This is the username of the gitolite endpoint.
'';
};
};
};
config = mkIf cfg.enable {
users.extraUsers.gitolite = {
users.extraUsers.${cfg.user} = {
description = "Gitolite user";
home = "/var/lib/gitolite";
home = cfg.dataDir;
createHome = true;
uid = config.ids.uids.gitolite;
useDefaultShell = true;
@ -55,13 +70,13 @@ in
description = "Gitolite initialization";
wantedBy = [ "multi-user.target" ];
serviceConfig.User = "gitolite";
serviceConfig.User = "${cfg.user}";
serviceConfig.Type = "oneshot";
serviceConfig.RemainAfterExit = true;
path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash pkgs.openssh ];
script = ''
cd /var/lib/gitolite
cd ${cfg.dataDir}
mkdir -p .gitolite/logs
if [ ! -d repositories ]; then
gitolite setup -pk ${pubkeyFile}
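
With the new dataDir and user options, a configuration using this module might look like the sketch below; the public key is a placeholder:

services.gitolite = {
  enable = true;
  dataDir = "/var/lib/gitolite";                    # same as the default shown above
  user = "gitolite";
  adminPubkey = "ssh-rsa AAAA... admin@example";    # placeholder admin key
};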

View file

@ -4,11 +4,11 @@ with lib;
let
cfg = config.services.mesos.master;
in {
options.services.mesos = {
master = {
enable = mkOption {
description = "Whether to enable the Mesos Master.";
@ -31,36 +31,36 @@ in {
'';
type = types.str;
};
workDir = mkOption {
description = "The Mesos work directory.";
default = "/var/lib/mesos/master";
type = types.str;
};
extraCmdLineOptions = mkOption {
description = ''
Extra command line options for Mesos Master.
See https://mesos.apache.org/documentation/latest/configuration/
'';
default = [ "" ];
type = types.listOf types.string;
example = [ "--credentials=VALUE" ];
};
quorum = mkOption {
description = ''
The size of the quorum of replicas when using 'replicated_log' based
registry. It is imperative to set this value to a majority of
the masters, i.e., quorum > (number of masters)/2.
If set to 0, it falls back to --registry=in_memory.
'';
default = 0;
type = types.int;
};
logLevel = mkOption {
description = ''
The logging level used. Possible values:
@ -86,11 +86,12 @@ in {
${pkgs.mesos}/bin/mesos-master \
--port=${toString cfg.port} \
--zk=${cfg.zk} \
${if cfg.quorum == 0 then "--registry=in_memory" else "--registry=replicated_log --quorum=${cfg.quorum}"} \
${if cfg.quorum == 0 then "--registry=in_memory" else "--registry=replicated_log --quorum=${toString cfg.quorum}"} \
--work_dir=${cfg.workDir} \
--logging_level=${cfg.logLevel} \
${toString cfg.extraCmdLineOptions}
'';
Restart = "on-failure";
PermissionsStartOnly = true;
};
preStart = ''
@ -98,6 +99,6 @@ in {
'';
};
};
}
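
For reference, a sketch of how the changed quorum handling might be exercised from a configuration; the ZooKeeper URL is illustrative:

services.mesos.master = {
  enable = true;
  zk = "zk://localhost:2181/mesos";     # illustrative ZooKeeper URL
  quorum = 1;                           # any value > 0 selects the replicated_log registry
};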

Some files were not shown because too many files have changed in this diff.