Merge rename.nix changes.

This commit is contained in:
Nicolas B. Pierron 2015-04-03 23:12:12 +02:00
commit 6de931a0f8
837 changed files with 23729 additions and 8319 deletions

View file

@ -612,15 +612,45 @@ sed -i '/ = data_files/d' setup.py</programlisting>
<section xml:id="ssec-language-ruby"><title>Ruby</title> <section xml:id="ssec-language-ruby"><title>Ruby</title>
<para>For example, to package yajl-ruby package, use gem-nix:</para> <para>There currently is support to bundle applications that are packaged as Ruby gems. The utility "bundix" allows you to write a <filename>Gemfile</filename>, let bundler create a <filename>Gemfile.lock</filename>, and then convert
this into a nix expression that contains all Gem dependencies automatically.</para>
<para>For example, to package sensu, we did:</para>
<screen> <screen>
$ nix-env -i gem-nix <![CDATA[$ cd pkgs/servers/monitoring
$ gem-nix --no-user-install --nix-file=pkgs/development/interpreters/ruby/generated.nix yajl-ruby $ mkdir sensu
$ nix-build -A rubyPackages.yajl-ruby $ cat > Gemfile
</screen> source 'https://rubygems.org'
</section> gem 'sensu'
$ bundler package --path /tmp/vendor/bundle
$ $(nix-build '&nixpkgs>' -A bundix)/bin/bundix
$ cat > default.nix
{ lib, bundlerEnv, ruby }:
bundlerEnv {
name = "sensu-0.17.1";
inherit ruby;
gemfile = ./Gemfile;
lockfile = ./Gemfile.lock;
gemset = ./gemset.nix;
meta = with lib; {
description = "A monitoring framework that aims to be simple, malleable,
and scalable.";
homepage = http://sensuapp.org/;
license = with licenses; mit;
maintainers = with maintainers; [ theuni ];
platforms = platforms.unix;
};
}]]>
</screen>
<para>Please check in the <filename>Gemfile</filename>, <filename>Gemfile.lock</filename> and the <filename>gemset.nix</filename> so future updates can be run easily.
</para>
</section>
<section xml:id="ssec-language-go"><title>Go</title> <section xml:id="ssec-language-go"><title>Go</title>

View file

@ -29,8 +29,8 @@ rec {
For another application, see build-support/vm, where this For another application, see build-support/vm, where this
function is used to build arbitrary derivations inside a QEMU function is used to build arbitrary derivations inside a QEMU
virtual machine. */ virtual machine.
*/
overrideDerivation = drv: f: overrideDerivation = drv: f:
let let
newDrv = derivation (drv.drvAttrs // (f drv)); newDrv = derivation (drv.drvAttrs // (f drv));
@ -56,18 +56,17 @@ rec {
makeOverridable = f: origArgs: makeOverridable = f: origArgs:
let let
ff = f origArgs; ff = f origArgs;
overrideWith = newArgs: origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs);
in in
if builtins.isAttrs ff then (ff // if builtins.isAttrs ff then (ff //
{ override = newArgs: { override = newArgs: makeOverridable f (overrideWith newArgs);
makeOverridable f (origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs));
deepOverride = newArgs: deepOverride = newArgs:
makeOverridable f (lib.overrideExisting (lib.mapAttrs (deepOverrider newArgs) origArgs) newArgs); makeOverridable f (lib.overrideExisting (lib.mapAttrs (deepOverrider newArgs) origArgs) newArgs);
overrideDerivation = fdrv: overrideDerivation = fdrv:
makeOverridable (args: overrideDerivation (f args) fdrv) origArgs; makeOverridable (args: overrideDerivation (f args) fdrv) origArgs;
}) })
else if builtins.isFunction ff then else if builtins.isFunction ff then
{ override = newArgs: { override = newArgs: makeOverridable f (overrideWith newArgs);
makeOverridable f (origArgs // (if builtins.isFunction newArgs then newArgs origArgs else newArgs));
__functor = self: ff; __functor = self: ff;
deepOverride = throw "deepOverride not yet supported for functors"; deepOverride = throw "deepOverride not yet supported for functors";
overrideDerivation = throw "overrideDerivation not yet supported for functors"; overrideDerivation = throw "overrideDerivation not yet supported for functors";
@ -102,8 +101,11 @@ rec {
}; };
*/ */
callPackageWith = autoArgs: fn: args: callPackageWith = autoArgs: fn: args:
let f = if builtins.isFunction fn then fn else import fn; in let
makeOverridable f ((builtins.intersectAttrs (builtins.functionArgs f) autoArgs) // args); f = if builtins.isFunction fn then fn else import fn;
auto = builtins.intersectAttrs (builtins.functionArgs f) autoArgs;
in makeOverridable f (auto // args);
/* Add attributes to each output of a derivation without changing the derivation itself */ /* Add attributes to each output of a derivation without changing the derivation itself */
addPassthru = drv: passthru: addPassthru = drv: passthru:
@ -122,4 +124,38 @@ rec {
outputsList = map outputToAttrListElement outputs; outputsList = map outputToAttrListElement outputs;
in commonAttrs.${drv.outputName}; in commonAttrs.${drv.outputName};
/* Strip a derivation of all non-essential attributes, returning
only those needed by hydra-eval-jobs. Also strictly evaluate the
result to ensure that there are no thunks kept alive to prevent
garbage collection. */
hydraJob = drv:
let
outputs = drv.outputs or ["out"];
commonAttrs =
{ inherit (drv) name system meta; inherit outputs; }
// lib.optionalAttrs (drv._hydraAggregate or false) {
_hydraAggregate = true;
constituents = map hydraJob (lib.flatten drv.constituents);
}
// (lib.listToAttrs outputsList);
makeOutput = outputName:
let output = drv.${outputName}; in
{ name = outputName;
value = commonAttrs // {
outPath = output.outPath;
drvPath = output.drvPath;
type = "derivation";
inherit outputName;
};
};
outputsList = map makeOutput outputs;
drv' = (lib.head outputsList).value;
in lib.deepSeq drv' drv';
} }

View file

@ -13,10 +13,11 @@ rec {
addErrorContextToAttrs = lib.mapAttrs (a: v: lib.addErrorContext "while evaluating ${a}" v); addErrorContextToAttrs = lib.mapAttrs (a: v: lib.addErrorContext "while evaluating ${a}" v);
traceIf = p: msg: x: if p then trace msg x else x;
traceVal = x: builtins.trace x x; traceVal = x: trace x x;
traceXMLVal = x: builtins.trace (builtins.toXML x) x; traceXMLVal = x: trace (builtins.toXML x) x;
traceXMLValMarked = str: x: builtins.trace (str + builtins.toXML x) x; traceXMLValMarked = str: x: trace (str + builtins.toXML x) x;
# this can help debug your code as well - designed to not produce thousands of lines # this can help debug your code as well - designed to not produce thousands of lines
traceShowVal = x : trace (showVal x) x; traceShowVal = x : trace (showVal x) x;
@ -42,6 +43,7 @@ rec {
traceCall2 = n : f : a : b : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b)); traceCall2 = n : f : a : b : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b));
traceCall3 = n : f : a : b : c : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c)); traceCall3 = n : f : a : b : c : let t = n2 : x : traceShowValMarked "${n} ${n2}:" x; in t "result" (f (t "arg 1" a) (t "arg 2" b) (t "arg 3" c));
# FIXME: rename this?
traceValIfNot = c: x: traceValIfNot = c: x:
if c x then true else trace (showVal x) false; if c x then true else trace (showVal x) false;
@ -106,6 +108,6 @@ rec {
) )
else else
let r = strict expr; let r = strict expr;
in builtins.trace "${str}\n result:\n${builtins.toXML r}" r in trace "${str}\n result:\n${builtins.toXML r}" r
); );
} }

View file

@ -100,6 +100,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Creative Commons Attribution 4.0"; fullName = "Creative Commons Attribution 4.0";
}; };
cc-by-sa-40 = spdx {
spdxId = "CC-BY-SA-4.0";
fullName = "Creative Commons Attribution Share Alike 4.0";
};
cddl = spdx { cddl = spdx {
spdxId = "CDDL-1.0"; spdxId = "CDDL-1.0";
fullName = "Common Development and Distribution License 1.0"; fullName = "Common Development and Distribution License 1.0";
@ -125,6 +130,16 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "Common Public License 1.0"; fullName = "Common Public License 1.0";
}; };
efl10 = spdx {
spdxId = "EFL-1.0";
fullName = "Eiffel Forum License v1.0";
};
efl20 = spdx {
spdxId = "EFL-2.0";
fullName = "Eiffel Forum License v2.0";
};
epl10 = spdx { epl10 = spdx {
spdxId = "EPL-1.0"; spdxId = "EPL-1.0";
fullName = "Eclipse Public License 1.0"; fullName = "Eclipse Public License 1.0";
@ -282,6 +297,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) rec {
fullName = "MIT License"; fullName = "MIT License";
}; };
mpl10 = spdx {
spdxId = "MPL-1.0";
fullName = "Mozilla Public License 1.0";
};
mpl11 = spdx { mpl11 = spdx {
spdxId = "MPL-1.1"; spdxId = "MPL-1.1";
fullName = "Mozilla Public License 1.1"; fullName = "Mozilla Public License 1.1";

View file

@ -5,6 +5,7 @@
alphabetically sorted. */ alphabetically sorted. */
_1126 = "Christian Lask <mail@elfsechsundzwanzig.de>"; _1126 = "Christian Lask <mail@elfsechsundzwanzig.de>";
abaldeau = "Andreas Baldeau <andreas@baldeau.net>";
abbradar = "Nikolay Amiantov <ab@fmap.me>"; abbradar = "Nikolay Amiantov <ab@fmap.me>";
aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>"; aforemny = "Alexander Foremny <alexanderforemny@googlemail.com>";
aherrmann = "Andreas Herrmann <andreash87@gmx.ch>"; aherrmann = "Andreas Herrmann <andreash87@gmx.ch>";
@ -81,6 +82,7 @@
garrison = "Jim Garrison <jim@garrison.cc>"; garrison = "Jim Garrison <jim@garrison.cc>";
gavin = "Gavin Rogers <gavin@praxeology.co.uk>"; gavin = "Gavin Rogers <gavin@praxeology.co.uk>";
gebner = "Gabriel Ebner <gebner@gebner.org>"; gebner = "Gabriel Ebner <gebner@gebner.org>";
giogadi = "Luis G. Torres <lgtorres42@gmail.com>";
globin = "Robin Gloster <robin@glob.in>"; globin = "Robin Gloster <robin@glob.in>";
goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>"; goibhniu = "Cillian de Róiste <cillian.deroiste@gmail.com>";
gridaphobe = "Eric Seidel <eric@seidel.io>"; gridaphobe = "Eric Seidel <eric@seidel.io>";
@ -94,11 +96,13 @@
iyzsong = "Song Wenwu <iyzsong@gmail.com>"; iyzsong = "Song Wenwu <iyzsong@gmail.com>";
j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>"; j-keck = "Jürgen Keck <jhyphenkeck@gmail.com>";
jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>"; jagajaga = "Arseniy Seroka <ars.seroka@gmail.com>";
jb55 = "William Casarin <bill@casarin.me>";
jcumming = "Jack Cummings <jack@mudshark.org>"; jcumming = "Jack Cummings <jack@mudshark.org>";
jgeerds = "Jascha Geerds <jg@ekby.de>"; jgeerds = "Jascha Geerds <jg@ekby.de>";
jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>"; jirkamarsik = "Jirka Marsik <jiri.marsik89@gmail.com>";
joachifm = "Joachim Fasting <joachifm@fastmail.fm>"; joachifm = "Joachim Fasting <joachifm@fastmail.fm>";
joamaki = "Jussi Maki <joamaki@gmail.com>"; joamaki = "Jussi Maki <joamaki@gmail.com>";
joelmo = "Joel Moberg <joel.moberg@gmail.com>";
joelteon = "Joel Taylor <me@joelt.io>"; joelteon = "Joel Taylor <me@joelt.io>";
jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>"; jpbernardy = "Jean-Philippe Bernardy <jeanphilippe.bernardy@gmail.com>";
jwiegley = "John Wiegley <johnw@newartisans.com>"; jwiegley = "John Wiegley <johnw@newartisans.com>";
@ -141,6 +145,7 @@
orbitz = "Malcolm Matalka <mmatalka@gmail.com>"; orbitz = "Malcolm Matalka <mmatalka@gmail.com>";
page = "Carles Pagès <page@cubata.homelinux.net>"; page = "Carles Pagès <page@cubata.homelinux.net>";
paholg = "Paho Lurie-Gregg <paho@paholg.com>"; paholg = "Paho Lurie-Gregg <paho@paholg.com>";
pakhfn = "Fedor Pakhomov <pakhfn@gmail.com>";
pashev = "Igor Pashev <pashev.igor@gmail.com>"; pashev = "Igor Pashev <pashev.igor@gmail.com>";
phausmann = "Philipp Hausmann <nix@314.ch>"; phausmann = "Philipp Hausmann <nix@314.ch>";
phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>"; phreedom = "Evgeny Egorochkin <phreedom@yandex.ru>";
@ -185,9 +190,11 @@
tailhook = "Paul Colomiets <paul@colomiets.name>"; tailhook = "Paul Colomiets <paul@colomiets.name>";
thammers = "Tobias Hammerschmidt <jawr@gmx.de>"; thammers = "Tobias Hammerschmidt <jawr@gmx.de>";
the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>"; the-kenny = "Moritz Ulrich <moritz@tarn-vedra.de>";
theuni = "Christian Theune <ct@flyingcircus.io>";
thoughtpolice = "Austin Seipp <aseipp@pobox.com>"; thoughtpolice = "Austin Seipp <aseipp@pobox.com>";
titanous = "Jonathan Rudenberg <jonathan@titanous.com>"; titanous = "Jonathan Rudenberg <jonathan@titanous.com>";
tomberek = "Thomas Bereknyei <tomberek@gmail.com>"; tomberek = "Thomas Bereknyei <tomberek@gmail.com>";
trino = "Hubert Mühlhans <muehlhans.hubert@ekodia.de>";
tstrobel = "Thomas Strobel <ts468@cam.ac.uk>"; tstrobel = "Thomas Strobel <ts468@cam.ac.uk>";
ttuegel = "Thomas Tuegel <ttuegel@gmail.com>"; ttuegel = "Thomas Tuegel <ttuegel@gmail.com>";
tv = "Tomislav Viljetić <tv@shackspace.de>"; tv = "Tomislav Viljetić <tv@shackspace.de>";

View file

@ -1,8 +1,3 @@
with {
inherit (import ./lists.nix) deepSeqList;
inherit (import ./attrsets.nix) deepSeqAttrs;
};
rec { rec {
# Identity function. # Identity function.
@ -23,23 +18,11 @@ rec {
# Flip the order of the arguments of a binary function. # Flip the order of the arguments of a binary function.
flip = f: a: b: f b a; flip = f: a: b: f b a;
# `seq x y' evaluates x, then returns y. That is, it forces strict
# evaluation of its first argument.
seq = x: y: if x == null then y else y;
# Like `seq', but recurses into lists and attribute sets to force evaluation
# of all list elements/attributes.
deepSeq = x: y:
if builtins.isList x
then deepSeqList x y
else if builtins.isAttrs x
then deepSeqAttrs x y
else seq x y;
# Pull in some builtins not included elsewhere. # Pull in some builtins not included elsewhere.
inherit (builtins) inherit (builtins)
pathExists readFile isBool isFunction pathExists readFile isBool isFunction
isInt add sub lessThan; isInt add sub lessThan
seq deepSeq;
# Return the Nixpkgs version number. # Return the Nixpkgs version number.
nixpkgsVersion = nixpkgsVersion =

View file

@ -25,6 +25,22 @@
<arg choice='plain'><option>--root</option></arg> <arg choice='plain'><option>--root</option></arg>
<replaceable>root</replaceable> <replaceable>root</replaceable>
</arg> </arg>
<arg>
<group choice='req'>
<arg choice='plain'><option>--max-jobs</option></arg>
<arg choice='plain'><option>-j</option></arg>
</group>
<replaceable>number</replaceable>
</arg>
<arg>
<option>--cores</option>
<replaceable>number</replaceable>
</arg>
<arg>
<option>--option</option>
<replaceable>name</replaceable>
<replaceable>value</replaceable>
</arg>
<arg> <arg>
<arg choice='plain'><option>--show-trace</option></arg> <arg choice='plain'><option>--show-trace</option></arg>
</arg> </arg>
@ -96,6 +112,37 @@ it.</para>
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry><term><option>--max-jobs</option></term>
<term><option>-j</option></term>
<listitem><para>Sets the maximum number of build jobs that Nix will
perform in parallel to the specified number. The default is <literal>1</literal>.
A higher value is useful on SMP systems or to exploit I/O latency.</para></listitem>
</varlistentry>
<varlistentry><term><option>--cores</option></term>
<listitem><para>Sets the value of the <envar>NIX_BUILD_CORES</envar>
environment variable in the invocation of builders. Builders can
use this variable at their discretion to control the maximum amount
of parallelism. For instance, in Nixpkgs, if the derivation
attribute <varname>enableParallelBuilding</varname> is set to
<literal>true</literal>, the builder passes the
<option>-j<replaceable>N</replaceable></option> flag to GNU Make.
The value <literal>0</literal> means that the builder should use all
available CPU cores in the system.</para></listitem>
</varlistentry>
<varlistentry><term><option>--option</option> <replaceable>name</replaceable> <replaceable>value</replaceable></term>
<listitem><para>Set the Nix configuration option
<replaceable>name</replaceable> to <replaceable>value</replaceable>.</para></listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term><option>--show-trace</option></term> <term><option>--show-trace</option></term>
<listitem> <listitem>

View file

@ -1,4 +1,4 @@
{ stdenv, perl, cdrkit, pathsFromGraph { stdenv, perl, pathsFromGraph, xorriso, syslinux
, # The file name of the resulting ISO image. , # The file name of the resulting ISO image.
isoName ? "cd.iso" isoName ? "cd.iso"
@ -22,12 +22,18 @@
, # Whether this should be an efi-bootable El-Torito CD. , # Whether this should be an efi-bootable El-Torito CD.
efiBootable ? false efiBootable ? false
, # Wheter this should be an hybrid CD (bootable from USB as well as CD).
usbBootable ? false
, # The path (in the ISO file system) of the boot image. , # The path (in the ISO file system) of the boot image.
bootImage ? "" bootImage ? ""
, # The path (in the ISO file system) of the efi boot image. , # The path (in the ISO file system) of the efi boot image.
efiBootImage ? "" efiBootImage ? ""
, # The path (outside the ISO file system) of the isohybrid-mbr image.
isohybridMbrImage ? ""
, # Whether to compress the resulting ISO image with bzip2. , # Whether to compress the resulting ISO image with bzip2.
compressImage ? false compressImage ? false
@ -38,13 +44,14 @@
assert bootable -> bootImage != ""; assert bootable -> bootImage != "";
assert efiBootable -> efiBootImage != ""; assert efiBootable -> efiBootImage != "";
assert usbBootable -> isohybridMbrImage != "";
stdenv.mkDerivation { stdenv.mkDerivation {
name = "iso9660-image"; name = "iso9660-image";
builder = ./make-iso9660-image.sh; builder = ./make-iso9660-image.sh;
buildInputs = [perl cdrkit]; buildInputs = [perl xorriso syslinux];
inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable; inherit isoName bootable bootImage compressImage volumeID pathsFromGraph efiBootImage efiBootable isohybridMbrImage usbBootable;
# !!! should use XML. # !!! should use XML.
sources = map (x: x.source) contents; sources = map (x: x.source) contents;

View file

@ -13,6 +13,20 @@ stripSlash() {
if test "${res:0:1}" = /; then res=${res:1}; fi if test "${res:0:1}" = /; then res=${res:1}; fi
} }
# Escape potential equal signs (=) with backslash (\=)
escapeEquals() {
echo "$1" | sed -e 's/\\/\\\\/g' -e 's/=/\\=/g'
}
# Queues an file/directory to be placed on the ISO.
# An entry consists of a local source path (2) and
# a destination path on the ISO (1).
addPath() {
target="$1"
source="$2"
echo "$(escapeEquals "$target")=$(escapeEquals "$source")" >> pathlist
}
stripSlash "$bootImage"; bootImage="$res" stripSlash "$bootImage"; bootImage="$res"
@ -31,11 +45,20 @@ if test -n "$bootable"; then
fi fi
done done
bootFlags="-b $bootImage -c .boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table" isoBootFlags="-eltorito-boot ${bootImage}
-eltorito-catalog .boot.cat
-no-emul-boot -boot-load-size 4 -boot-info-table"
fi
if test -n "$usbBootable"; then
usbBootFlags="-isohybrid-mbr ${isohybridMbrImage}"
fi fi
if test -n "$efiBootable"; then if test -n "$efiBootable"; then
bootFlags="$bootFlags -eltorito-alt-boot -e $efiBootImage -no-emul-boot" efiBootFlags="-eltorito-alt-boot
-e $efiBootImage
-no-emul-boot
-isohybrid-gpt-basdat"
fi fi
touch pathlist touch pathlist
@ -44,14 +67,14 @@ touch pathlist
# Add the individual files. # Add the individual files.
for ((i = 0; i < ${#targets_[@]}; i++)); do for ((i = 0; i < ${#targets_[@]}; i++)); do
stripSlash "${targets_[$i]}" stripSlash "${targets_[$i]}"
echo "$res=${sources_[$i]}" >> pathlist addPath "$res" "${sources_[$i]}"
done done
# Add the closures of the top-level store objects. # Add the closures of the top-level store objects.
storePaths=$(perl $pathsFromGraph closure-*) storePaths=$(perl $pathsFromGraph closure-*)
for i in $storePaths; do for i in $storePaths; do
echo "${i:1}=$i" >> pathlist addPath "${i:1}" "$i"
done done
@ -59,7 +82,7 @@ done
# nix-store --load-db. # nix-store --load-db.
if [ -n "$object" ]; then if [ -n "$object" ]; then
printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration printRegistration=1 perl $pathsFromGraph closure-* > nix-path-registration
echo "nix-path-registration=nix-path-registration" >> pathlist addPath "nix-path-registration" "nix-path-registration"
fi fi
@ -70,22 +93,39 @@ for ((n = 0; n < ${#objects[*]}; n++)); do
if test "$symlink" != "none"; then if test "$symlink" != "none"; then
mkdir -p $(dirname ./$symlink) mkdir -p $(dirname ./$symlink)
ln -s $object ./$symlink ln -s $object ./$symlink
echo "$symlink=./$symlink" >> pathlist addPath "$symlink" "./$symlink"
fi fi
done done
# !!! what does this do?
cat pathlist | sed -e 's/=\(.*\)=\(.*\)=/\\=\1=\2\\=/' | tee pathlist.safer
mkdir -p $out/iso mkdir -p $out/iso
genCommand="genisoimage -iso-level 4 -r -J $bootFlags -hide-rr-moved -graft-points -path-list pathlist.safer ${volumeID:+-V $volumeID}"
if test -z "$compressImage"; then xorriso="xorriso
$genCommand -o $out/iso/$isoName -as mkisofs
else -iso-level 3
$genCommand | bzip2 > $out/iso/$isoName.bz2 -volid ${volumeID}
-appid nixos
-publisher nixos
-graft-points
-full-iso9660-filenames
${isoBootFlags}
${usbBootFlags}
${efiBootFlags}
-r
-path-list pathlist
--sort-weight 0 /
--sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
$xorriso -output $out/iso/$isoName
if test -n "$usbBootable"; then
echo "Making image hybrid..."
isohybrid --uefi $out/iso/$isoName
fi fi
if test -n "$compressImage"; then
echo "Compressing image..."
bzip2 $out/iso/$isoName
fi
mkdir -p $out/nix-support mkdir -p $out/nix-support
echo $system > $out/nix-support/system echo $system > $out/nix-support/system

View file

@ -37,6 +37,10 @@ sub new {
if defined $args->{hda}; if defined $args->{hda};
$startCommand .= "-cdrom $args->{cdrom} " $startCommand .= "-cdrom $args->{cdrom} "
if defined $args->{cdrom}; if defined $args->{cdrom};
$startCommand .= "-device piix3-usb-uhci -drive id=usbdisk,file=$args->{usb},if=none,readonly -device usb-storage,drive=usbdisk "
if defined $args->{usb};
$startCommand .= "-bios $args->{bios} "
if defined $args->{bios};
$startCommand .= $args->{qemuFlags} || ""; $startCommand .= $args->{qemuFlags} || "";
} else { } else {
$startCommand = Cwd::abs_path $startCommand; $startCommand = Cwd::abs_path $startCommand;

View file

@ -23,9 +23,9 @@ in
boot.kernelParams = [ "console=ttyS0" ]; boot.kernelParams = [ "console=ttyS0" ];
boot.initrd.extraUtilsCommands = '' boot.initrd.extraUtilsCommands = ''
cp -v ${pkgs.gawk}/bin/gawk $out/bin/gawk copy_bin_and_libs ${pkgs.gawk}/bin/gawk
cp -v ${pkgs.gnused}/bin/sed $out/bin/gnused copy_bin_and_libs ${pkgs.gnused}/bin/sed
cp -v ${pkgs.utillinux}/sbin/sfdisk $out/bin/sfdisk copy_bin_and_libs ${pkgs.utillinux}/sbin/sfdisk
cp -v ${growpart} $out/bin/growpart cp -v ${growpart} $out/bin/growpart
''; '';
boot.initrd.postDeviceCommands = '' boot.initrd.postDeviceCommands = ''

View file

@ -27,6 +27,6 @@ with lib;
fonts.fontconfig.enable = false; fonts.fontconfig.enable = false;
nixpkgs.config.packageOverrides = pkgs: nixpkgs.config.packageOverrides = pkgs:
{ dbus = pkgs.dbus.override { useX11 = false; }; }; { dbus = pkgs.dbus.override { x11Support = false; }; };
}; };
} }

View file

@ -124,9 +124,7 @@ in {
} }
(mkIf cfg.enable { (mkIf cfg.enable {
environment.systemPackages = [ environment.systemPackages = [ cfg.package ];
cfg.package
] ++ lib.optionals enable32BitAlsaPlugins [ pkgs_i686.pulseaudio ];
environment.etc = singleton { environment.etc = singleton {
target = "asound.conf"; target = "asound.conf";

View file

@ -64,6 +64,6 @@ in
# #
# Removed under grsecurity. # Removed under grsecurity.
boot.kernel.sysctl."kernel.kptr_restrict" = boot.kernel.sysctl."kernel.kptr_restrict" =
if config.security.grsecurity.enable then null else 1; if (config.boot.kernelPackages.kernel.features.grsecurity or false) then null else 1;
}; };
} }

View file

@ -110,7 +110,7 @@ let
shell = mkOption { shell = mkOption {
type = types.str; type = types.str;
default = "/run/current-system/sw/sbin/nologin"; default = "/run/current-system/sw/bin/nologin";
description = "The path to the user's shell."; description = "The path to the user's shell.";
}; };

View file

@ -22,8 +22,7 @@ with lib;
###### implementation ###### implementation
config = mkIf config.hardware.cpu.amd.updateMicrocode { config = mkIf config.hardware.cpu.amd.updateMicrocode {
hardware.firmware = [ "${pkgs.amdUcode}/lib/firmware" ]; boot.initrd.prepend = [ "${pkgs.microcodeAmd}/amd-ucode.img" ];
boot.kernelModules = [ "microcode" ];
}; };
} }

View file

@ -22,8 +22,7 @@ with lib;
###### implementation ###### implementation
config = mkIf config.hardware.cpu.intel.updateMicrocode { config = mkIf config.hardware.cpu.intel.updateMicrocode {
hardware.firmware = [ "${pkgs.microcodeIntel}/lib/firmware" ]; boot.initrd.prepend = [ "${pkgs.microcodeIntel}/intel-ucode.img" ];
boot.kernelModules = [ "microcode" ];
}; };
} }

View file

@ -0,0 +1,18 @@
{ config, lib, ... }:
{
options.hardware.enableKSM = lib.mkEnableOption "Kernel Same-Page Merging";
config = lib.mkIf config.hardware.enableKSM {
systemd.services.enable-ksm = {
description = "Enable Kernel Same-Page Merging";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
script = ''
if [ -e /sys/kernel/mm/ksm ]; then
echo 1 > /sys/kernel/mm/ksm/run
fi
'';
};
};
}

View file

@ -36,6 +36,9 @@ with lib;
# EFI booting # EFI booting
isoImage.makeEfiBootable = true; isoImage.makeEfiBootable = true;
# USB booting
isoImage.makeUsbBootable = true;
# Add Memtest86+ to the CD. # Add Memtest86+ to the CD.
boot.loader.grub.memtest86.enable = true; boot.loader.grub.memtest86.enable = true;

View file

@ -7,66 +7,89 @@
with lib; with lib;
let let
# Timeout in syslinux is in units of 1/10 of a second.
# 0 is used to disable timeouts.
syslinuxTimeout = if config.boot.loader.timeout == null then
0
else
max (config.boot.loader.timeout * 10) 1;
# The Grub image.
grubImage = pkgs.runCommand "grub_eltorito" {} max = x: y: if x > y then x else y;
# The configuration file for syslinux.
# Notes on syslinux configuration and UNetbootin compatiblity:
# * Do not use '/syslinux/syslinux.cfg' as the path for this
# configuration. UNetbootin will not parse the file and use it as-is.
# This results in a broken configuration if the partition label does
# not match the specified config.isoImage.volumeID. For this reason
# we're using '/isolinux/isolinux.cfg'.
# * Use APPEND instead of adding command-line arguments directly after
# the LINUX entries.
# * COM32 entries (chainload, reboot, poweroff) are not recognized. They
# result in incorrect boot entries.
baseIsolinuxCfg =
'' ''
${pkgs.grub2}/bin/grub-mkimage -p /boot/grub -O i386-pc -o tmp biosdisk iso9660 help linux linux16 chain png jpeg echo gfxmenu reboot SERIAL 0 38400
cat ${pkgs.grub2}/lib/grub/*/cdboot.img tmp > $out TIMEOUT ${builtins.toString syslinuxTimeout}
''; # */ UI vesamenu.c32
MENU TITLE NixOS
MENU BACKGROUND /isolinux/background.png
DEFAULT boot
LABEL boot
# The configuration file for Grub. MENU LABEL NixOS ${config.system.nixosVersion} Installer
grubCfg = LINUX /boot/bzImage
'' APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
set default=${builtins.toString config.boot.loader.grub.default} INITRD /boot/initrd
set timeout=${builtins.toString config.boot.loader.grub.timeout}
if loadfont /boot/grub/unicode.pf2; then
set gfxmode=640x480
insmod gfxterm
insmod vbe
terminal_output gfxterm
insmod png
if background_image /boot/grub/splash.png; then
set color_normal=white/black
set color_highlight=black/white
else
set menu_color_normal=cyan/blue
set menu_color_highlight=white/blue
fi
fi
${config.boot.loader.grub.extraEntries}
''; '';
isolinuxMemtest86Entry = ''
LABEL memtest
MENU LABEL Memtest86+
LINUX /boot/memtest.bin
APPEND ${toString config.boot.loader.grub.memtest86.params}
'';
isolinuxCfg = baseIsolinuxCfg + (optionalString config.boot.loader.grub.memtest86.enable isolinuxMemtest86Entry);
# The efi boot image # The efi boot image
efiDir = pkgs.runCommand "efi-directory" {} '' efiDir = pkgs.runCommand "efi-directory" {} ''
mkdir -p $out/efi/boot mkdir -p $out/EFI/boot
cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/efi/boot/boot${targetArch}.efi cp -v ${pkgs.gummiboot}/lib/gummiboot/gummiboot${targetArch}.efi $out/EFI/boot/boot${targetArch}.efi
mkdir -p $out/loader/entries mkdir -p $out/loader/entries
echo "title NixOS LiveCD" > $out/loader/entries/nixos-livecd.conf echo "title NixOS LiveCD" > $out/loader/entries/nixos-livecd.conf
echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf echo "linux /boot/bzImage" >> $out/loader/entries/nixos-livecd.conf
echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf echo "initrd /boot/initrd" >> $out/loader/entries/nixos-livecd.conf
echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf echo "options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}" >> $out/loader/entries/nixos-livecd.conf
echo "default nixos-livecd" > $out/loader/loader.conf echo "default nixos-livecd" > $out/loader/loader.conf
echo "timeout 5" >> $out/loader/loader.conf echo "timeout ${builtins.toString config.boot.loader.gummiboot.timeout}" >> $out/loader/loader.conf
''; '';
efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools ]; } efiImg = pkgs.runCommand "efi-image_eltorito" { buildInputs = [ pkgs.mtools pkgs.libfaketime ]; }
# Be careful about determinism: du --apparent-size,
# dates (cp -p, touch, mcopy -m, faketime for label), IDs (mkfs.vfat -i)
'' ''
#Let's hope 15M is enough mkdir ./contents && cd ./contents
dd bs=2048 count=7680 if=/dev/zero of="$out" cp -rp "${efiDir}"/* .
${pkgs.dosfstools}/sbin/mkfs.vfat "$out" mkdir ./boot
mcopy -svi "$out" ${efiDir}/* :: cp -p "${config.boot.kernelPackages.kernel}/bzImage" \
mmd -i "$out" boot "${config.system.build.initialRamdisk}/initrd" ./boot/
mcopy -v -i "$out" \ touch --date=@0 ./*
${config.boot.kernelPackages.kernel}/bzImage ::boot/bzImage
mcopy -v -i "$out" \ usage_size=$(du -sb --apparent-size . | tr -cd '[:digit:]')
${config.system.build.initialRamdisk}/initrd ::boot/initrd # Make the image 110% as big as the files need to make up for FAT overhead
image_size=$(( ($usage_size * 110) / 100 ))
# Make the image fit blocks of 1M
block_size=$((1024*1024))
image_size=$(( ($image_size / $block_size + 1) * $block_size ))
echo "Usage size: $usage_size"
echo "Image size: $image_size"
truncate --size=$image_size "$out"
${pkgs.libfaketime}/bin/faketime "2000-01-01 00:00:00" ${pkgs.dosfstools}/sbin/mkfs.vfat -i 12345678 -n EFIBOOT "$out"
mcopy -bpsvm -i "$out" ./* ::
''; # */ ''; # */
targetArch = if pkgs.stdenv.isi686 then targetArch = if pkgs.stdenv.isi686 then
@ -152,10 +175,25 @@ in
''; '';
}; };
isoImage.makeUsbBootable = mkOption {
default = false;
description = ''
Whether the ISO image should be bootable from CD as well as USB.
'';
};
isoImage.splashImage = mkOption {
default = pkgs.fetchurl {
url = https://raw.githubusercontent.com/NixOS/nixos-artwork/5729ab16c6a5793c10a2913b5a1b3f59b91c36ee/ideas/grub-splash/grub-nixos-1.png;
sha256 = "43fd8ad5decf6c23c87e9026170a13588c2eba249d9013cb9f888da5e2002217";
};
description = ''
The splash image to use in the bootloader.
'';
};
}; };
config = { config = {
boot.loader.grub.version = 2; boot.loader.grub.version = 2;
@ -166,7 +204,7 @@ in
# !!! Hack - attributes expected by other modules. # !!! Hack - attributes expected by other modules.
system.boot.loader.kernelFile = "bzImage"; system.boot.loader.kernelFile = "bzImage";
environment.systemPackages = [ pkgs.grub2 ]; environment.systemPackages = [ pkgs.grub2 pkgs.syslinux ];
# In stage 1 of the boot, mount the CD as the root FS by label so # In stage 1 of the boot, mount the CD as the root FS by label so
# that we don't need to know its device. We pass the label of the # that we don't need to know its device. We pass the label of the
@ -216,7 +254,7 @@ in
options = "allow_other,cow,nonempty,chroot=/mnt-root,max_files=32768,hide_meta_files,dirs=/nix/.rw-store=rw:/nix/.ro-store=ro"; options = "allow_other,cow,nonempty,chroot=/mnt-root,max_files=32768,hide_meta_files,dirs=/nix/.rw-store=rw:/nix/.ro-store=ro";
}; };
boot.initrd.availableKernelModules = [ "squashfs" "iso9660" ]; boot.initrd.availableKernelModules = [ "squashfs" "iso9660" "usb-storage" ];
boot.initrd.kernelModules = [ "loop" ]; boot.initrd.kernelModules = [ "loop" ];
@ -236,15 +274,12 @@ in
# Individual files to be included on the CD, outside of the Nix # Individual files to be included on the CD, outside of the Nix
# store on the CD. # store on the CD.
isoImage.contents = isoImage.contents =
[ { source = grubImage; [ { source = pkgs.substituteAll {
target = "/boot/grub/grub_eltorito"; name = "isolinux.cfg";
} src = pkgs.writeText "isolinux.cfg-in" isolinuxCfg;
{ source = pkgs.substituteAll {
name = "grub.cfg";
src = pkgs.writeText "grub.cfg-in" grubCfg;
bootRoot = "/boot"; bootRoot = "/boot";
}; };
target = "/boot/grub/grub.cfg"; target = "/isolinux/isolinux.cfg";
} }
{ source = config.boot.kernelPackages.kernel + "/bzImage"; { source = config.boot.kernelPackages.kernel + "/bzImage";
target = "/boot/bzImage"; target = "/boot/bzImage";
@ -252,51 +287,44 @@ in
{ source = config.system.build.initialRamdisk + "/initrd"; { source = config.system.build.initialRamdisk + "/initrd";
target = "/boot/initrd"; target = "/boot/initrd";
} }
{ source = "${pkgs.grub2}/share/grub/unicode.pf2";
target = "/boot/grub/unicode.pf2";
}
{ source = config.boot.loader.grub.splashImage;
target = "/boot/grub/splash.png";
}
{ source = config.system.build.squashfsStore; { source = config.system.build.squashfsStore;
target = "/nix-store.squashfs"; target = "/nix-store.squashfs";
} }
{ source = "${pkgs.syslinux}/share/syslinux";
target = "/isolinux";
}
{ source = config.isoImage.splashImage;
target = "/isolinux/background.png";
}
] ++ optionals config.isoImage.makeEfiBootable [ ] ++ optionals config.isoImage.makeEfiBootable [
{ source = efiImg; { source = efiImg;
target = "/boot/efi.img"; target = "/boot/efi.img";
} }
{ source = "${efiDir}/efi"; { source = "${efiDir}/EFI";
target = "/efi"; target = "/EFI";
} }
{ source = "${efiDir}/loader"; { source = "${efiDir}/loader";
target = "/loader"; target = "/loader";
} }
] ++ mapAttrsToList (n: v: { source = v; target = "/boot/${n}"; }) config.boot.loader.grub.extraFiles; ] ++ optionals config.boot.loader.grub.memtest86.enable [
{ source = "${pkgs.memtest86plus}/memtest.bin";
# The Grub menu. target = "/boot/memtest.bin";
boot.loader.grub.extraEntries =
''
menuentry "NixOS ${config.system.nixosVersion} Installer" {
linux /boot/bzImage init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
initrd /boot/initrd
} }
];
menuentry "Boot from hard disk" { boot.loader.timeout = 10;
set root=(hd0)
chainloader +1
}
'';
boot.loader.grub.timeout = 10;
# Create the ISO image. # Create the ISO image.
system.build.isoImage = import ../../../lib/make-iso9660-image.nix ({ system.build.isoImage = import ../../../lib/make-iso9660-image.nix ({
inherit (pkgs) stdenv perl cdrkit pathsFromGraph; inherit (pkgs) stdenv perl pathsFromGraph xorriso syslinux;
inherit (config.isoImage) isoName compressImage volumeID contents; inherit (config.isoImage) isoName compressImage volumeID contents;
bootable = true; bootable = true;
bootImage = "/boot/grub/grub_eltorito"; bootImage = "/isolinux/isolinux.bin";
} // optionalAttrs config.isoImage.makeUsbBootable {
usbBootable = true;
isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
} // optionalAttrs config.isoImage.makeEfiBootable { } // optionalAttrs config.isoImage.makeEfiBootable {
efiBootable = true; efiBootable = true;
efiBootImage = "boot/efi.img"; efiBootImage = "boot/efi.img";

View file

@ -98,7 +98,7 @@ in
boot.initrd.extraUtilsCommands = boot.initrd.extraUtilsCommands =
'' ''
cp ${pkgs.utillinux}/sbin/hwclock $out/bin copy_bin_and_libs ${pkgs.utillinux}/sbin/hwclock
''; '';
boot.initrd.postDeviceCommands = boot.initrd.postDeviceCommands =

View file

@ -28,9 +28,14 @@ chrootCommand=(/run/current-system/sw/bin/bash)
while [ "$#" -gt 0 ]; do while [ "$#" -gt 0 ]; do
i="$1"; shift 1 i="$1"; shift 1
case "$i" in case "$i" in
-I) --max-jobs|-j|--cores|-I)
given_path="$1"; shift 1 j="$1"; shift 1
extraBuildFlags+=("$i" "$given_path") extraBuildFlags+=("$i" "$j")
;;
--option)
j="$1"; shift 1
k="$1"; shift 1
extraBuildFlags+=("$i" "$j" "$k")
;; ;;
--root) --root)
mountPoint="$1"; shift 1 mountPoint="$1"; shift 1
@ -128,7 +133,7 @@ mkdir -m 0755 -p \
$mountPoint/nix/var/nix/db \ $mountPoint/nix/var/nix/db \
$mountPoint/nix/var/log/nix/drvs $mountPoint/nix/var/log/nix/drvs
mkdir -m 1775 -p $mountPoint/nix/store mkdir -m 1735 -p $mountPoint/nix/store
chown root:nixbld $mountPoint/nix/store chown root:nixbld $mountPoint/nix/store

View file

@ -27,28 +27,38 @@
ids.uids = { ids.uids = {
root = 0; root = 0;
nscd = 1; #wheel = 1; # unused
sshd = 2; #kmem = 2; # unused
ntp = 3; #tty = 3; # unused
messagebus = 4; # D-Bus messagebus = 4; # D-Bus
haldaemon = 5; haldaemon = 5;
nagios = 6; #disk = 6; # unused
vsftpd = 7; vsftpd = 7;
ftp = 8; ftp = 8;
bitlbee = 9; bitlbee = 9;
avahi = 10; avahi = 10;
nagios = 11;
atd = 12; atd = 12;
zabbix = 13; postfix = 13;
postfix = 14; #postdrop = 14; # unused
dovecot = 15; dovecot = 15;
tomcat = 16; tomcat = 16;
#audio = 17; # unused
#floppy = 18; # unused
#uucp = 19; # unused
#lp = 20; # unused
pulseaudio = 22; # must match `pulseaudio' GID pulseaudio = 22; # must match `pulseaudio' GID
gpsd = 23; gpsd = 23;
#cdrom = 24; # unused
#tape = 25; # unused
#video = 26; # unused
#dialout = 27; # unused
polkituser = 28; polkituser = 28;
uptimed = 29; #utmp = 29; # unused
ddclient = 30; ddclient = 30;
davfs2 = 31; davfs2 = 31;
privoxy = 32; privoxy = 32;
#disnix = 33; # unused
osgi = 34; osgi = 34;
tor = 35; tor = 35;
cups = 36; cups = 36;
@ -70,18 +80,25 @@
fprot = 52; fprot = 52;
bind = 53; bind = 53;
wwwrun = 54; wwwrun = 54;
#adm = 55; # unused
spamd = 56; spamd = 56;
#networkmanager = 57; # unused
nslcd = 58; nslcd = 58;
#scanner = 59; # unused
nginx = 60; nginx = 60;
chrony = 61; chrony = 61;
#systemd-journal = 62; # unused
smtpd = 63; smtpd = 63;
smtpq = 64; smtpq = 64;
supybot = 65; supybot = 65;
iodined = 66; iodined = 66;
#libvirtd = 67; # unused
graphite = 68; graphite = 68;
statsd = 69; statsd = 69;
transmission = 70; transmission = 70;
postgres = 71; postgres = 71;
#vboxusers = 72; # unused
#vboxsf = 73; # unused
smbguest = 74; # unused smbguest = 74; # unused
varnish = 75; varnish = 75;
datadog = 76; datadog = 76;
@ -102,13 +119,13 @@
minidlna = 91; minidlna = 91;
elasticsearch = 92; elasticsearch = 92;
tcpcryptd = 93; # tcpcryptd uses a hard-coded uid. We patch it in Nixpkgs to match this choice. tcpcryptd = 93; # tcpcryptd uses a hard-coded uid. We patch it in Nixpkgs to match this choice.
zope2 = 94; #connman = 94; # unused
firebird = 95; firebird = 95;
redis = 96; #keys = 96; # unused
haproxy = 97; haproxy = 97;
mongodb = 98; mongodb = 98;
openldap = 99; openldap = 99;
memcached = 100; #users = 100; # unused
cgminer = 101; cgminer = 101;
munin = 102; munin = 102;
logcheck = 103; logcheck = 103;
@ -129,6 +146,7 @@
foundationdb = 118; foundationdb = 118;
newrelic = 119; newrelic = 119;
starbound = 120; starbound = 120;
#grsecurity = 121; # unused
hydra = 122; hydra = 122;
spiped = 123; spiped = 123;
teamspeak = 124; teamspeak = 124;
@ -138,7 +156,7 @@
znc = 128; znc = 128;
polipo = 129; polipo = 129;
mopidy = 130; mopidy = 130;
unifi = 131; #docker = 131; # unused
gdm = 132; gdm = 132;
dhcpd = 133; dhcpd = 133;
siproxd = 134; siproxd = 134;
@ -180,7 +198,21 @@
panamax = 170; panamax = 170;
marathon = 171; marathon = 171;
exim = 172; exim = 172;
#fleet = 173; # unused
#input = 174; # unused
sddm = 175; sddm = 175;
tss = 176;
memcached = 177;
nscd = 178;
ntp = 179;
zabbix = 180;
redis = 181;
sshd = 182;
unifi = 183;
uptimed = 184;
zope2 = 185;
ripple-data-api = 186;
mediatomb = 187;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399! # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -200,15 +232,16 @@
ftp = 8; ftp = 8;
bitlbee = 9; bitlbee = 9;
avahi = 10; avahi = 10;
#nagios = 11; # unused
atd = 12; atd = 12;
postfix = 13; postfix = 13;
postdrop = 14; postdrop = 14;
dovecot = 15; dovecot = 15;
tomcat = 16;
audio = 17; audio = 17;
floppy = 18; floppy = 18;
uucp = 19; uucp = 19;
lp = 20; lp = 20;
tomcat = 21;
pulseaudio = 22; # must match `pulseaudio' UID pulseaudio = 22; # must match `pulseaudio' UID
gpsd = 23; gpsd = 23;
cdrom = 24; cdrom = 24;
@ -217,21 +250,30 @@
dialout = 27; dialout = 27;
#polkituser = 28; # currently unused, polkitd doesn't need a group #polkituser = 28; # currently unused, polkitd doesn't need a group
utmp = 29; utmp = 29;
#ddclient = 30; # unused
davfs2 = 31; davfs2 = 31;
privoxy = 32; privoxy = 32;
disnix = 33; disnix = 33;
osgi = 34; osgi = 34;
tor = 35; tor = 35;
ghostOne = 40; #cups = 36; # unused
#foldingathome = 37; # unused
#sabnzd = 38; # unused
#kdm = 39; # unused
ghostone = 40;
git = 41; git = 41;
fourstore = 42; fourstore = 42;
fourstorehttpd = 43; fourstorehttp = 43;
virtuoso = 44; virtuoso = 44;
#rtkit = 45; # unused
dovecot2 = 46; dovecot2 = 46;
#dovenull = 47; # unused
#unbound = 48; # unused
prayer = 49; prayer = 49;
mpd = 50; mpd = 50;
clamav = 51; clamav = 51;
fprot = 52; fprot = 52;
#bind = 53; # unused
wwwrun = 54; wwwrun = 54;
adm = 55; adm = 55;
spamd = 56; spamd = 56;
@ -239,6 +281,7 @@
nslcd = 58; nslcd = 58;
scanner = 59; scanner = 59;
nginx = 60; nginx = 60;
#chrony = 61; # unused
systemd-journal = 62; systemd-journal = 62;
smtpd = 63; smtpd = 63;
smtpq = 64; smtpq = 64;
@ -246,6 +289,7 @@
iodined = 66; iodined = 66;
libvirtd = 67; libvirtd = 67;
graphite = 68; graphite = 68;
#statsd = 69; # unused
transmission = 70; transmission = 70;
postgres = 71; postgres = 71;
vboxusers = 72; vboxusers = 72;
@ -268,11 +312,17 @@
quassel = 89; quassel = 89;
amule = 90; amule = 90;
minidlna = 91; minidlna = 91;
haproxy = 92; #elasticsearch = 92; # unused
openldap = 93; #tcpcryptd = 93; # unused
connman = 94; connman = 94;
munin = 95; firebird = 95;
keys = 96; keys = 96;
haproxy = 97;
#mongodb = 98; # unused
openldap = 99;
munin = 102;
#logcheck = 103; # unused
#nix-ssh = 104; # unused
dictd = 105; dictd = 105;
couchdb = 106; couchdb = 106;
searx = 107; searx = 107;
@ -280,8 +330,12 @@
jenkins = 109; jenkins = 109;
systemd-journal-gateway = 110; systemd-journal-gateway = 110;
notbit = 111; notbit = 111;
#ngircd = 112; # unused
btsync = 113; btsync = 113;
#minecraft = 114; # unused
monetdb = 115; monetdb = 115;
#ripped = 116; # unused
#murmur = 117; # unused
foundationdb = 118; foundationdb = 118;
newrelic = 119; newrelic = 119;
starbound = 120; starbound = 120;
@ -291,39 +345,64 @@
teamspeak = 124; teamspeak = 124;
influxdb = 125; influxdb = 125;
nsd = 126; nsd = 126;
firebird = 127; #gitolite = 127; # unused
znc = 128; znc = 128;
polipo = 129; polipo = 129;
mopidy = 130; mopidy = 130;
docker = 131; docker = 131;
gdm = 132; gdm = 132;
tss = 133; #dhcpcd = 133; # unused
siproxd = 134; siproxd = 134;
mlmmj = 135; mlmmj = 135;
#neo4j = 136; # unused
riemann = 137; riemann = 137;
riemanndash = 138; riemanndash = 138;
#radvd = 139; # unused
#zookeeper = 140; # unused
#dnsmasq = 141; # unused
uhub = 142; uhub = 142;
#yandexdisk = 143; # unused
#collectd = 144; # unused
#consul = 145; # unused
mailpile = 146; mailpile = 146;
redmine = 147; redmine = 147;
seeks = 148; seeks = 148;
prosody = 149; prosody = 149;
i2pd = 150; i2pd = 150;
#dnscrypt-proxy = 151; # unused
systemd-network = 152; systemd-network = 152;
systemd-resolve = 153; systemd-resolve = 153;
systemd-timesync = 154; systemd-timesync = 154;
liquidsoap = 155; liquidsoap = 155;
#etcd = 156; # unused
#docker-registry = 157; # unused
hbase = 158; hbase = 158;
opentsdb = 159; opentsdb = 159;
scollector = 160; scollector = 160;
bosun = 161; bosun = 161;
kubernetes = 162; kubernetes = 162;
#peerflix = 163; # unused
#chronos = 164; # unused
gitlab = 165; gitlab = 165;
nylon = 168; nylon = 168;
panamax = 170; panamax = 170;
#marathon = 171; # unused
exim = 172; exim = 172;
fleet = 173; fleet = 173;
input = 174; input = 174;
sddm = 175; sddm = 175;
tss = 176;
#memcached = 177; # unused
#nscd = 178; # unused
#ntp = 179; # unused
#zabbix = 180; # unused
#redis = 181; # unused
#sshd = 182; # unused
#unifi = 183; # unused
#uptimed = 184; # unused
#zope2 = 185; # unused
#ripple-data-api = 186; #unused
mediatomb = 187;
# When adding a gid, make sure it doesn't match an existing # When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal # uid. Users and groups with the same name should have equal

View file

@ -29,6 +29,7 @@
./hardware/all-firmware.nix ./hardware/all-firmware.nix
./hardware/cpu/amd-microcode.nix ./hardware/cpu/amd-microcode.nix
./hardware/cpu/intel-microcode.nix ./hardware/cpu/intel-microcode.nix
./hardware/ksm.nix
./hardware/network/b43.nix ./hardware/network/b43.nix
./hardware/network/intel-2100bg.nix ./hardware/network/intel-2100bg.nix
./hardware/network/intel-2200bg.nix ./hardware/network/intel-2200bg.nix
@ -91,8 +92,9 @@
./services/amqp/activemq/default.nix ./services/amqp/activemq/default.nix
./services/amqp/rabbitmq.nix ./services/amqp/rabbitmq.nix
./services/audio/alsa.nix ./services/audio/alsa.nix
# Disabled as fuppes it does no longer builds. # Disabled as fuppes no longer builds.
# ./services/audio/fuppes.nix # ./services/audio/fuppes.nix
./services/audio/icecast.nix
./services/audio/liquidsoap.nix ./services/audio/liquidsoap.nix
./services/audio/mpd.nix ./services/audio/mpd.nix
./services/audio/mopidy.nix ./services/audio/mopidy.nix
@ -109,6 +111,7 @@
./services/cluster/panamax.nix ./services/cluster/panamax.nix
./services/computing/torque/server.nix ./services/computing/torque/server.nix
./services/computing/torque/mom.nix ./services/computing/torque/mom.nix
./services/computing/slurm/slurm.nix
./services/continuous-integration/jenkins/default.nix ./services/continuous-integration/jenkins/default.nix
./services/continuous-integration/jenkins/slave.nix ./services/continuous-integration/jenkins/slave.nix
./services/databases/4store-endpoint.nix ./services/databases/4store-endpoint.nix
@ -159,6 +162,7 @@
./services/hardware/udisks2.nix ./services/hardware/udisks2.nix
./services/hardware/upower.nix ./services/hardware/upower.nix
./services/hardware/thermald.nix ./services/hardware/thermald.nix
./services/logging/fluentd.nix
./services/logging/klogd.nix ./services/logging/klogd.nix
./services/logging/logcheck.nix ./services/logging/logcheck.nix
./services/logging/logrotate.nix ./services/logging/logrotate.nix
@ -188,6 +192,7 @@
./services/misc/gitlab.nix ./services/misc/gitlab.nix
./services/misc/gitolite.nix ./services/misc/gitolite.nix
./services/misc/gpsd.nix ./services/misc/gpsd.nix
./services/misc/mediatomb.nix
./services/misc/mesos-master.nix ./services/misc/mesos-master.nix
./services/misc/mesos-slave.nix ./services/misc/mesos-slave.nix
./services/misc/nix-daemon.nix ./services/misc/nix-daemon.nix
@ -198,6 +203,7 @@
./services/misc/phd.nix ./services/misc/phd.nix
./services/misc/redmine.nix ./services/misc/redmine.nix
./services/misc/rippled.nix ./services/misc/rippled.nix
./services/misc/ripple-data-api.nix
./services/misc/rogue.nix ./services/misc/rogue.nix
./services/misc/siproxd.nix ./services/misc/siproxd.nix
./services/misc/svnserve.nix ./services/misc/svnserve.nix
@ -231,6 +237,7 @@
./services/network-filesystems/diod.nix ./services/network-filesystems/diod.nix
./services/network-filesystems/u9fs.nix ./services/network-filesystems/u9fs.nix
./services/network-filesystems/yandex-disk.nix ./services/network-filesystems/yandex-disk.nix
./services/networking/aiccu.nix
./services/networking/amuled.nix ./services/networking/amuled.nix
./services/networking/atftpd.nix ./services/networking/atftpd.nix
./services/networking/avahi-daemon.nix ./services/networking/avahi-daemon.nix
@ -327,6 +334,7 @@
./services/security/fprot.nix ./services/security/fprot.nix
./services/security/frandom.nix ./services/security/frandom.nix
./services/security/haveged.nix ./services/security/haveged.nix
./services/security/munge.nix
./services/security/torify.nix ./services/security/torify.nix
./services/security/tor.nix ./services/security/tor.nix
./services/security/torsocks.nix ./services/security/torsocks.nix

View file

@ -100,7 +100,7 @@ in
chgpasswd = { rootOK = true; }; chgpasswd = { rootOK = true; };
}; };
security.setuidPrograms = [ "passwd" "chfn" "su" "newgrp" security.setuidPrograms = [ "passwd" "chfn" "su" "sg" "newgrp"
"newuidmap" "newgidmap" # new in shadow 4.2.x "newuidmap" "newgidmap" # new in shadow 4.2.x
]; ];

View file

@ -107,7 +107,6 @@ in zipModules ([]
++ obsolete [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ] ++ obsolete [ "services" "sshd" "permitRootLogin" ] [ "services" "openssh" "permitRootLogin" ]
++ obsolete [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ] ++ obsolete [ "services" "xserver" "startSSHAgent" ] [ "services" "xserver" "startOpenSSHAgent" ]
++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ] ++ obsolete [ "services" "xserver" "startOpenSSHAgent" ] [ "programs" "ssh" "startAgent" ]
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "xbmc" ]
# VirtualBox # VirtualBox
++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ] ++ obsolete [ "services" "virtualbox" "enable" ] [ "services" "virtualboxGuest" "enable" ]
@ -138,6 +137,10 @@ in zipModules ([]
++ obsolete [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ] ++ obsolete [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ]
# XBMC
++ obsolete [ "services" "xserver" "windowManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
++ obsolete [ "services" "xserver" "desktopManager" "xbmc" ] [ "services" "xserver" "desktopManager" "kodi" ]
# Options that are obsolete and have no replacement. # Options that are obsolete and have no replacement.
++ obsolete' [ "boot" "loader" "grub" "bootDevice" ] ++ obsolete' [ "boot" "loader" "grub" "bootDevice" ]
++ obsolete' [ "boot" "initrd" "luks" "enable" ] ++ obsolete' [ "boot" "initrd" "luks" "enable" ]

View file

@ -1,43 +1,49 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
let let
inherit (lib) mkIf mkOption types concatMapStrings;
cfg = config.security.apparmor; cfg = config.security.apparmor;
in in
{ {
options = { options = {
security.apparmor = { security.apparmor = {
enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = "Enable the AppArmor Mandatory Access Control system."; description = "Enable the AppArmor Mandatory Access Control system.";
}; };
profiles = mkOption {
type = types.listOf types.path;
default = [];
description = "List of files containing AppArmor profiles.";
};
};
};
profiles = mkOption { config = mkIf cfg.enable {
type = types.listOf types.path; environment.systemPackages = [ pkgs.apparmor-utils ];
default = [];
description = "List of files containing AppArmor profiles.";
};
};
};
config = mkIf cfg.enable { systemd.services.apparmor = {
environment.systemPackages = [ pkgs.apparmor ]; wantedBy = [ "local-fs.target" ];
systemd.services.apparmor = { serviceConfig = {
wantedBy = [ "local-fs.target" ]; Type = "oneshot";
path = [ pkgs.apparmor ]; RemainAfterExit = "yes";
ExecStart = concatMapStrings (p:
''${pkgs.apparmor-parser}/bin/apparmor_parser -rKv -I ${pkgs.apparmor-profiles}/etc/apparmor.d "${p}" ; ''
) cfg.profiles;
ExecStop = concatMapStrings (p:
''${pkgs.apparmor-parser}/bin/apparmor_parser -Rv "${p}" ; ''
) cfg.profiles;
};
};
serviceConfig = { security.pam.services.apparmor.text = ''
Type = "oneshot"; ## AppArmor changes hats according to `order`: first try user, then
RemainAfterExit = "yes"; ## group, and finally fall back to a hat called "DEFAULT"
ExecStart = concatMapStrings (profile: ##
''${pkgs.apparmor}/sbin/apparmor_parser -rKv -I ${pkgs.apparmor}/etc/apparmor.d/ "${profile}" ; '' ## For now, enable debugging as this is an experimental feature.
) cfg.profiles; session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug
ExecStop = concatMapStrings (profile: '';
''${pkgs.apparmor}/sbin/apparmor_parser -Rv -I ${pkgs.apparmor}/etc/apparmor.d/ "${profile}" ; '' };
) cfg.profiles;
};
};
};
} }

View file

@ -38,59 +38,47 @@ in
type = types.bool; type = types.bool;
default = false; default = false;
description = '' description = ''
Enable the testing grsecurity patch, based on Linux 3.18. Enable the testing grsecurity patch, based on Linux 3.19.
''; '';
}; };
config = { config = {
mode = mkOption { mode = mkOption {
type = types.str; type = types.enum [ "auto" "custom" ];
default = "auto"; default = "auto";
example = "custom";
description = '' description = ''
grsecurity configuration mode. This specifies whether grsecurity configuration mode. This specifies whether
grsecurity is auto-configured or otherwise completely grsecurity is auto-configured or otherwise completely
manually configured. Can either be manually configured.
<literal>custom</literal> or <literal>auto</literal>.
<literal>auto</literal> is recommended.
''; '';
}; };
priority = mkOption { priority = mkOption {
type = types.str; type = types.enum [ "security" "performance" ];
default = "security"; default = "security";
example = "performance";
description = '' description = ''
grsecurity configuration priority. This specifies whether grsecurity configuration priority. This specifies whether
the kernel configuration should emphasize speed or the kernel configuration should emphasize speed or
security. Can either be <literal>security</literal> or security.
<literal>performance</literal>.
''; '';
}; };
system = mkOption { system = mkOption {
type = types.str; type = types.enum [ "desktop" "server" ];
default = ""; default = "desktop";
example = "desktop";
description = '' description = ''
grsecurity system configuration. This specifies whether grsecurity system configuration.
the kernel configuration should be suitable for a Desktop
or a Server. Can either be <literal>server</literal> or
<literal>desktop</literal>.
''; '';
}; };
virtualisationConfig = mkOption { virtualisationConfig = mkOption {
type = types.str; type = types.nullOr (types.enum [ "host" "guest" ]);
default = "none"; default = null;
example = "host";
description = '' description = ''
grsecurity virtualisation configuration. This specifies grsecurity virtualisation configuration. This specifies
the virtualisation role of the machine - that is, whether the virtualisation role of the machine - that is, whether
it will be a virtual machine guest, a virtual machine it will be a virtual machine guest, a virtual machine
host, or neither. Can be one of <literal>none</literal>, host, or neither.
<literal>host</literal>, or <literal>guest</literal>.
''; '';
}; };
@ -106,17 +94,10 @@ in
}; };
virtualisationSoftware = mkOption { virtualisationSoftware = mkOption {
type = types.str; type = types.nullOr (types.enum [ "kvm" "xen" "vmware" "virtualbox" ]);
default = ""; default = null;
example = "kvm";
description = '' description = ''
grsecurity virtualisation software. Set this to the Configure grsecurity for use with this virtualisation software.
specified virtual machine technology if the machine is
running as a guest, or a host.
Can be one of <literal>kvm</literal>,
<literal>xen</literal>, <literal>vmware</literal> or
<literal>virtualbox</literal>.
''; '';
}; };
@ -245,7 +226,7 @@ in
message = '' message = ''
If grsecurity is enabled, you must select either the If grsecurity is enabled, you must select either the
stable patch (with kernel 3.14), or the testing patch (with stable patch (with kernel 3.14), or the testing patch (with
kernel 3.18) to continue. kernel 3.19) to continue.
''; '';
} }
{ assertion = (cfg.stable -> !cfg.testing) || (cfg.testing -> !cfg.stable); { assertion = (cfg.stable -> !cfg.testing) || (cfg.testing -> !cfg.stable);
@ -262,25 +243,13 @@ in
&& config.boot.kernelPackages.kernel.features.grsecurity; && config.boot.kernelPackages.kernel.features.grsecurity;
message = "grsecurity enabled, but kernel doesn't have grsec support"; message = "grsecurity enabled, but kernel doesn't have grsec support";
} }
{ assertion = elem cfg.config.mode [ "auto" "custom" ]; { assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
message = "grsecurity mode must either be 'auto' or 'custom'.";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.system [ "desktop" "server" ];
message = "when using auto grsec mode, system must be either 'desktop' or 'server'";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.priority [ "performance" "security" ];
message = "when using auto grsec mode, priority must be 'performance' or 'security'.";
}
{ assertion = cfg.config.mode == "auto" -> elem cfg.config.virtualisationConfig [ "host" "guest" "none" ];
message = "when using auto grsec mode, 'virt' must be 'host', 'guest' or 'none'.";
}
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) ->
cfg.config.hardwareVirtualisation != null; cfg.config.hardwareVirtualisation != null;
message = "when using auto grsec mode with virtualisation, you must specify if your hardware has virtualisation extensions"; message = "when using auto grsec mode with virtualisation, you must specify if your hardware has virtualisation extensions";
} }
{ assertion = (cfg.config.mode == "auto" && (elem cfg.config.virtualisationConfig [ "host" "guest" ])) -> { assertion = (cfg.config.mode == "auto" && (cfg.config.virtualisationConfig != null)) ->
elem cfg.config.virtualisationSoftware [ "kvm" "xen" "virtualbox" "vmware" ]; cfg.config.virtualisationSoftware != null;
message = "virtualisation software must be 'kvm', 'xen', 'vmware' or 'virtualbox'"; message = "grsecurity configured for virtualisation but no virtualisation software specified";
} }
]; ];

View file

@ -77,7 +77,7 @@ in
root ALL=(ALL) SETENV: ALL root ALL=(ALL) SETENV: ALL
# Users in the "wheel" group can do anything. # Users in the "wheel" group can do anything.
%wheel ALL=(ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL %wheel ALL=(ALL:ALL) ${if cfg.wheelNeedsPassword then "" else "NOPASSWD: ALL, "}SETENV: ALL
${cfg.extraConfig} ${cfg.extraConfig}
''; '';

View file

@ -0,0 +1,130 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.icecast;
configFile = pkgs.writeText "icecast.xml" ''
<icecast>
<hostname>${cfg.hostname}</hostname>
<authentication>
<admin-user>${cfg.admin.user}</admin-user>
<admin-password>${cfg.admin.password}</admin-password>
</authentication>
<paths>
<logdir>${cfg.logDir}</logdir>
<adminroot>${pkgs.icecast}/share/icecast/admin</adminroot>
<webroot>${pkgs.icecast}/share/icecast/web</webroot>
<alias source="/" dest="/status.xsl"/>
</paths>
<listen-socket>
<port>${toString cfg.listen.port}</port>
<bind-address>${cfg.listen.address}</bind-address>
</listen-socket>
<security>
<chroot>0</chroot>
<changeowner>
<user>${cfg.user}</user>
<group>${cfg.group}</group>
</changeowner>
</security>
${cfg.extraConf}
</icecast>
'';
in {
###### interface
options = {
services.icecast = {
enable = mkEnableOption "Icecast server";
hostname = mkOption {
type = types.str;
description = "DNS name or IP address that will be used for the stream directory lookups or possibily the playlist generation if a Host header is not provided.";
default = config.networking.domain;
};
admin = {
user = mkOption {
type = types.str;
description = "Username used for all administration functions.";
default = "admin";
};
password = mkOption {
type = types.str;
description = "Password used for all administration functions.";
};
};
logDir = mkOption {
type = types.path;
description = "Base directory used for logging.";
default = "/var/log/icecast";
};
listen = {
port = mkOption {
type = types.int;
description = "TCP port that will be used to accept client connections.";
default = 8000;
};
address = mkOption {
type = types.str;
description = "Address Icecast will listen on.";
default = "::";
};
};
user = mkOption {
type = types.str;
description = "User privileges for the server.";
default = "nobody";
};
group = mkOption {
type = types.str;
description = "Group privileges for the server.";
default = "nogroup";
};
extraConf = mkOption {
type = types.lines;
description = "icecast.xml content.";
default = "";
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.icecast = {
after = [ "network.target" ];
description = "Icecast Network Audio Streaming Server";
wantedBy = [ "multi-user.target" ];
preStart = "mkdir -p ${cfg.logDir} && chown ${cfg.user}:${cfg.group} ${cfg.logDir}";
serviceConfig = {
Type = "simple";
ExecStart = "${pkgs.icecast}/bin/icecast -c ${configFile}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
};
};
}

View file

@ -17,10 +17,10 @@ let
log_file "syslog" log_file "syslog"
user "${cfg.user}" user "${cfg.user}"
group "${cfg.group}" group "${cfg.group}"
${if cfg.network.host != "any" then
"bind_to_address ${cfg.network.host}" else ""} ${optionalString (cfg.network.host != "any") ''bind_to_address "${cfg.network.host}"''}
${if cfg.network.port != 6600 then ${optionalString (cfg.network.port != 6600) ''port "${toString cfg.network.port}"''}
"port ${toString cfg.network.port}" else ""}
${cfg.extraConfig} ${cfg.extraConfig}
''; '';
@ -125,6 +125,7 @@ in {
}); });
users.extraGroups = optionalAttrs (cfg.group == "mpd") (singleton { users.extraGroups = optionalAttrs (cfg.group == "mpd") (singleton {
name = "mpd";
gid = gid; gid = gid;
}); });
}; };

View file

@ -44,6 +44,12 @@ in {
type = types.path; type = types.path;
}; };
dockerCfg = mkOption {
description = "Kubernetes contents of dockercfg file.";
default = "";
type = types.lines;
};
apiserver = { apiserver = {
enable = mkOption { enable = mkOption {
description = "Whether to enable kubernetes apiserver."; description = "Whether to enable kubernetes apiserver.";
@ -217,13 +223,13 @@ in {
}; };
machines = mkOption { machines = mkOption {
description = "Kubernetes apiserver list of machines to schedule to schedule onto"; description = "Kubernetes controller list of machines to schedule to schedule onto";
default = []; default = [];
type = types.listOf types.str; type = types.listOf types.str;
}; };
extraOpts = mkOption { extraOpts = mkOption {
description = "Kubernetes scheduler extra command line options."; description = "Kubernetes controller extra command line options.";
default = ""; default = "";
type = types.str; type = types.str;
}; };
@ -260,6 +266,30 @@ in {
type = types.bool; type = types.bool;
}; };
apiServers = mkOption {
description = "Kubernetes kubelet list of Kubernetes API servers for publishing events, and reading pods and services.";
default = ["${cfg.apiserver.address}:${toString cfg.apiserver.port}"];
type = types.listOf types.str;
};
cadvisorPort = mkOption {
description = "Kubernetes kubelet local cadvisor port.";
default = config.services.cadvisor.port;
type = types.int;
};
clusterDns = mkOption {
description = "Use alternative dns.";
default = "";
type = types.str;
};
clusterDomain = mkOption {
description = "Use alternative domain.";
default = "";
type = types.str;
};
extraOpts = mkOption { extraOpts = mkOption {
description = "Kubernetes kubelet extra command line options."; description = "Kubernetes kubelet extra command line options.";
default = ""; default = "";
@ -295,6 +325,7 @@ in {
systemd.services.kubernetes-apiserver = { systemd.services.kubernetes-apiserver = {
description = "Kubernetes Api Server"; description = "Kubernetes Api Server";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
after = [ "network-interfaces.target" "etcd.service" ]; after = [ "network-interfaces.target" "etcd.service" ];
serviceConfig = { serviceConfig = {
ExecStart = let ExecStart = let
@ -306,26 +337,25 @@ in {
(concatImapStringsSep "\n" (i: v: v + "," + (toString i)) (concatImapStringsSep "\n" (i: v: v + "," + (toString i))
(mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth)); (mapAttrsToList (name: token: token + "," + name) cfg.apiserver.tokenAuth));
in ''${cfg.package}/bin/kube-apiserver \ in ''${cfg.package}/bin/kube-apiserver \
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \ --etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
-address=${cfg.apiserver.address} \ --address=${cfg.apiserver.address} \
-port=${toString cfg.apiserver.port} \ --port=${toString cfg.apiserver.port} \
-read_only_port=${toString cfg.apiserver.readOnlyPort} \ --read_only_port=${toString cfg.apiserver.readOnlyPort} \
-public_address_override=${cfg.apiserver.publicAddress} \ --public_address_override=${cfg.apiserver.publicAddress} \
-allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \ --allow_privileged=${if cfg.apiserver.allowPrivileged then "true" else "false"} \
${optionalString (cfg.apiserver.tlsCertFile!="") ${optionalString (cfg.apiserver.tlsCertFile!="")
"-tls_cert_file=${cfg.apiserver.tlsCertFile}"} \ "--tls_cert_file=${cfg.apiserver.tlsCertFile}"} \
${optionalString (cfg.apiserver.tlsPrivateKeyFile!="") ${optionalString (cfg.apiserver.tlsPrivateKeyFile!="")
"-tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \ "--tls_private_key_file=${cfg.apiserver.tlsPrivateKeyFile}"} \
${optionalString (cfg.apiserver.tokenAuth!=[]) ${optionalString (cfg.apiserver.tokenAuth!=[])
"-token_auth_file=${tokenAuthFile}"} \ "--token_auth_file=${tokenAuthFile}"} \
-authorization_mode=${cfg.apiserver.authorizationMode} \ --authorization_mode=${cfg.apiserver.authorizationMode} \
${optionalString (cfg.apiserver.authorizationMode == "ABAC") ${optionalString (cfg.apiserver.authorizationMode == "ABAC")
"-authorization_policy_file=${authorizationPolicyFile}"} \ "--authorization_policy_file=${authorizationPolicyFile}"} \
${optionalString (cfg.apiserver.tlsCertFile!="" && cfg.apiserver.tlsCertFile!="") --secure_port=${toString cfg.apiserver.securePort} \
"-secure_port=${toString cfg.apiserver.securePort}"} \ --portal_net=${cfg.apiserver.portalNet} \
-portal_net=${cfg.apiserver.portalNet} \ --logtostderr=true \
-logtostderr=true \ ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.apiserver.extraOpts} ${cfg.apiserver.extraOpts}
''; '';
User = "kubernetes"; User = "kubernetes";
@ -345,11 +375,11 @@ in {
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ]; after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
serviceConfig = { serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-scheduler \ ExecStart = ''${cfg.package}/bin/kube-scheduler \
-address=${cfg.scheduler.address} \ --address=${cfg.scheduler.address} \
-port=${toString cfg.scheduler.port} \ --port=${toString cfg.scheduler.port} \
-master=${cfg.scheduler.master} \ --master=${cfg.scheduler.master} \
-logtostderr=true \ --logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \ ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.scheduler.extraOpts} ${cfg.scheduler.extraOpts}
''; '';
User = "kubernetes"; User = "kubernetes";
@ -364,13 +394,12 @@ in {
after = [ "network-interfaces.target" "kubernetes-apiserver.service" ]; after = [ "network-interfaces.target" "kubernetes-apiserver.service" ];
serviceConfig = { serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-controller-manager \ ExecStart = ''${cfg.package}/bin/kube-controller-manager \
-address=${cfg.controllerManager.address} \ --address=${cfg.controllerManager.address} \
-port=${toString cfg.controllerManager.port} \ --port=${toString cfg.controllerManager.port} \
-master=${cfg.controllerManager.master} \ --master=${cfg.controllerManager.master} \
${optionalString (cfg.controllerManager.machines != []) --machines=${concatStringsSep "," cfg.controllerManager.machines} \
"-machines=${concatStringsSep "," cfg.controllerManager.machines}"} \ --logtostderr=true \
-logtostderr=true \ ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \
${cfg.controllerManager.extraOpts} ${cfg.controllerManager.extraOpts}
''; '';
User = "kubernetes"; User = "kubernetes";
@ -382,23 +411,28 @@ in {
systemd.services.kubernetes-kubelet = { systemd.services.kubernetes-kubelet = {
description = "Kubernetes Kubelet Service"; description = "Kubernetes Kubelet Service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
requires = ["kubernetes-setup.service"];
after = [ "network-interfaces.target" "etcd.service" "docker.service" ]; after = [ "network-interfaces.target" "etcd.service" "docker.service" ];
serviceConfig = { script = ''
ExecStart = ''${cfg.package}/bin/kubelet \ export PATH="/bin:/sbin:/usr/bin:/usr/sbin:$PATH"
-etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \ exec ${cfg.package}/bin/kubelet \
-address=${cfg.kubelet.address} \ --etcd_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.etcdServers} \
-port=${toString cfg.kubelet.port} \ --api_servers=${concatMapStringsSep "," (f: "http://${f}") cfg.kubelet.apiServers} \
-hostname_override=${cfg.kubelet.hostname} \ --address=${cfg.kubelet.address} \
-allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \ --port=${toString cfg.kubelet.port} \
-root_dir=${cfg.dataDir} \ --hostname_override=${cfg.kubelet.hostname} \
-logtostderr=true \ --allow_privileged=${if cfg.kubelet.allowPrivileged then "true" else "false"} \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \ --root_dir=${cfg.dataDir} \
--cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
${optionalString (cfg.kubelet.clusterDns != "")
''--cluster_dns=${cfg.kubelet.clusterDns}''} \
${optionalString (cfg.kubelet.clusterDomain != "")
''--cluster_domain=${cfg.kubelet.clusterDomain}''} \
--logtostderr=true \
${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.kubelet.extraOpts} ${cfg.kubelet.extraOpts}
''; '';
User = "kubernetes"; serviceConfig.WorkingDirectory = cfg.dataDir;
PermissionsStartOnly = true;
WorkingDirectory = cfg.dataDir;
};
}; };
}) })
@ -409,10 +443,10 @@ in {
after = [ "network-interfaces.target" "etcd.service" ]; after = [ "network-interfaces.target" "etcd.service" ];
serviceConfig = { serviceConfig = {
ExecStart = ''${cfg.package}/bin/kube-proxy \ ExecStart = ''${cfg.package}/bin/kube-proxy \
-etcd_servers=${concatMapStringsSep "," (s: "http://${s}") cfg.etcdServers} \ --etcd_servers=${concatMapStringsSep "," (s: "http://${s}") cfg.etcdServers} \
-bind_address=${cfg.proxy.address} \ --bind_address=${cfg.proxy.address} \
-logtostderr=true \ --logtostderr=true \
${optionalString cfg.verbose "-v=6 -log_flush_frequency=1s"} \ ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \
${cfg.proxy.extraOpts} ${cfg.proxy.extraOpts}
''; '';
}; };
@ -427,6 +461,8 @@ in {
(mkIf (any (el: el == "node") cfg.roles) { (mkIf (any (el: el == "node") cfg.roles) {
virtualisation.docker.enable = mkDefault true; virtualisation.docker.enable = mkDefault true;
services.cadvisor.enable = mkDefault true;
services.cadvisor.port = mkDefault 4194;
services.kubernetes.kubelet.enable = mkDefault true; services.kubernetes.kubelet.enable = mkDefault true;
services.kubernetes.proxy.enable = mkDefault true; services.kubernetes.proxy.enable = mkDefault true;
}) })
@ -442,6 +478,16 @@ in {
cfg.kubelet.enable || cfg.kubelet.enable ||
cfg.proxy.enable cfg.proxy.enable
) { ) {
systemd.services.kubernetes-setup = {
description = "Kubernetes setup.";
serviceConfig.Type = "oneshot";
script = ''
mkdir -p /var/run/kubernetes
chown kubernetes /var/run/kubernetes
ln -fs ${pkgs.writeText "kubernetes-dockercfg" cfg.dockerCfg} /var/run/kubernetes/.dockercfg
'';
};
services.kubernetes.package = mkDefault pkgs.kubernetes; services.kubernetes.package = mkDefault pkgs.kubernetes;
environment.systemPackages = [ cfg.package ]; environment.systemPackages = [ cfg.package ];

View file

@ -124,14 +124,15 @@ in {
}; };
preStart = '' preStart = ''
rm -rf ${cfg.dataDir}/state/tmp
mkdir -p ${cfg.dataDir}/ui/state/{log,tmp} mkdir -p ${cfg.dataDir}/ui/state/{log,tmp}
chown -R panamax:panamax ${cfg.dataDir}
''; '';
serviceConfig = { serviceConfig = {
ExecStart = "${panamax_ui}/bin/bundle exec rails server --binding 127.0.0.1 --port ${toString cfg.UIPort}"; ExecStart = "${panamax_ui}/bin/bundle exec rails server --binding 127.0.0.1 --port ${toString cfg.UIPort}";
User = "panamax"; User = "panamax";
Group = "panamax"; Group = "panamax";
PermissionsStartOnly = true;
}; };
}; };
@ -145,6 +146,8 @@ in {
services.journald.enableHttpGateway = mkDefault true; services.journald.enableHttpGateway = mkDefault true;
services.fleet.enable = mkDefault true; services.fleet.enable = mkDefault true;
services.cadvisor.enable = mkDefault true;
services.cadvisor.port = mkDefault 3002;
virtualisation.docker.enable = mkDefault true; virtualisation.docker.enable = mkDefault true;
environment.systemPackages = [ panamax_api panamax_ui ]; environment.systemPackages = [ panamax_api panamax_ui ];

View file

@ -0,0 +1,130 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.slurm;

  # A starting configuration can be generated with
  # http://slurm.schedmd.com/configurator.html
  # Options that are null are simply omitted from the file.
  configFile = pkgs.writeText "slurm.conf"
    ''
      ${optionalString (cfg.controlMachine != null) ''controlMachine=${cfg.controlMachine}''}
      ${optionalString (cfg.controlAddr != null) ''controlAddr=${cfg.controlAddr}''}
      ${optionalString (cfg.nodeName != null) ''nodeName=${cfg.nodeName}''}
      ${optionalString (cfg.partitionName != null) ''partitionName=${cfg.partitionName}''}
      ${cfg.extraConfig}
    '';
in

{

  ###### interface

  options = {

    services.slurm = {

      server = {
        # Runs slurmctld, the central management daemon.
        enable = mkEnableOption "slurm control daemon";
      };

      client = {
        # Runs slurmd, the per-node compute daemon.
        enable = mkEnableOption "slurm client daemon";
      };

      controlMachine = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = null;
        description = ''
          The short hostname of the machine where SLURM control functions are
          executed (i.e. the name returned by the command "hostname -s", use "tux001"
          rather than "tux001.my.com").
        '';
      };

      controlAddr = mkOption {
        type = types.nullOr types.str;
        default = cfg.controlMachine;
        example = null;
        description = ''
          Name that ControlMachine should be referred to in establishing a
          communications path.
        '';
      };

      nodeName = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = "linux[1-32] CPUs=1 State=UNKNOWN";
        description = ''
          Name that SLURM uses to refer to a node (or base partition for BlueGene
          systems). Typically this would be the string that "/bin/hostname -s"
          returns. Note that now you have to write node's parameters after the name.
        '';
      };

      partitionName = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP";
        description = ''
          Name by which the partition may be referenced. Note that now you have
          to write partition's parameters after the name.
        '';
      };

      extraConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Extra configuration options that will be added verbatim at
          the end of the slurm configuration file.
        '';
      };

    };

  };

  ###### implementation

  config = mkIf (cfg.client.enable || cfg.server.enable) {

    environment.systemPackages = [ pkgs.slurm-llnl ];

    # Per-node compute daemon; the config file is passed explicitly via -f.
    systemd.services.slurmd = mkIf (cfg.client.enable) {
      path = with pkgs; [ slurm-llnl coreutils ];

      wantedBy = [ "multi-user.target" ];
      after = [ "systemd-tmpfiles-clean.service" ];

      serviceConfig = {
        Type = "forking";
        ExecStart = "${pkgs.slurm-llnl}/bin/slurmd -f ${configFile}";
        PIDFile = "/run/slurmd.pid";
        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
      };
    };

    # Central management daemon; picks up the config via SLURM_CONF.
    systemd.services.slurmctld = mkIf (cfg.server.enable) {
      path = with pkgs; [ slurm-llnl munge coreutils ];

      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" "auditd.service" "munged.service" "slurmdbd.service" ];
      requires = [ "munged.service" ];

      serviceConfig = {
        Type = "forking";
        ExecStart = "${pkgs.slurm-llnl}/bin/slurmctld";
        PIDFile = "/run/slurmctld.pid";
        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
      };

      environment = { SLURM_CONF = "${configFile}"; };
    };

  };

}

View file

@ -8,9 +8,7 @@ let
mysql = cfg.package; mysql = cfg.package;
is55 = mysql.mysqlVersion == "5.5"; atLeast55 = versionAtLeast mysql.mysqlVersion "5.5";
mysqldDir = if is55 then "${mysql}/bin" else "${mysql}/libexec";
pidFile = "${cfg.pidDir}/mysqld.pid"; pidFile = "${cfg.pidDir}/mysqld.pid";
@ -24,7 +22,7 @@ let
port = ${toString cfg.port} port = ${toString cfg.port}
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"} ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "log-bin=mysql-bin"}
${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"} ${optionalString (cfg.replication.role == "master" || cfg.replication.role == "slave") "server-id = ${toString cfg.replication.serverId}"}
${optionalString (cfg.replication.role == "slave" && !is55) ${optionalString (cfg.replication.role == "slave" && !atLeast55)
'' ''
master-host = ${cfg.replication.masterHost} master-host = ${cfg.replication.masterHost}
master-user = ${cfg.replication.masterUser} master-user = ${cfg.replication.masterUser}
@ -75,7 +73,7 @@ in
}; };
pidDir = mkOption { pidDir = mkOption {
default = "/var/run/mysql"; default = "/run/mysqld";
description = "Location of the file which stores the PID of the MySQL server"; description = "Location of the file which stores the PID of the MySQL server";
}; };
@ -180,15 +178,19 @@ in
mkdir -m 0700 -p ${cfg.pidDir} mkdir -m 0700 -p ${cfg.pidDir}
chown -R ${cfg.user} ${cfg.pidDir} chown -R ${cfg.user} ${cfg.pidDir}
# Make the socket directory
mkdir -m 0700 -p /run/mysqld
chown -R ${cfg.user} /run/mysqld
''; '';
serviceConfig.ExecStart = "${mysqldDir}/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}"; serviceConfig.ExecStart = "${mysql}/bin/mysqld --defaults-extra-file=${myCnf} ${mysqldOptions}";
postStart = postStart =
'' ''
# Wait until the MySQL server is available for use # Wait until the MySQL server is available for use
count=0 count=0
while [ ! -e /tmp/mysql.sock ] while [ ! -e /run/mysqld/mysqld.sock ]
do do
if [ $count -eq 30 ] if [ $count -eq 30 ]
then then
@ -222,7 +224,7 @@ in
fi fi
'') cfg.initialDatabases} '') cfg.initialDatabases}
${optionalString (cfg.replication.role == "slave" && is55) ${optionalString (cfg.replication.role == "slave" && atLeast55)
'' ''
# Set up the replication master # Set up the replication master

View file

@ -128,12 +128,12 @@ in
users.extraUsers = optionalAttrs (cfg.user == "tss") (singleton users.extraUsers = optionalAttrs (cfg.user == "tss") (singleton
{ name = "tss"; { name = "tss";
group = "tss"; group = "tss";
uid = config.ids.uids.nginx; uid = config.ids.uids.tss;
}); });
users.extraGroups = optionalAttrs (cfg.group == "tss") (singleton users.extraGroups = optionalAttrs (cfg.group == "tss") (singleton
{ name = "tss"; { name = "tss";
gid = config.ids.gids.nginx; gid = config.ids.gids.tss;
}); });
}; };
} }

View file

@ -28,6 +28,7 @@ let
# Perform substitutions in all udev rules files. # Perform substitutions in all udev rules files.
udevRules = stdenv.mkDerivation { udevRules = stdenv.mkDerivation {
name = "udev-rules"; name = "udev-rules";
preferLocalBuild = true;
buildCommand = '' buildCommand = ''
mkdir -p $out mkdir -p $out
shopt -s nullglob shopt -s nullglob

View file

@ -0,0 +1,39 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.fluentd;
in {
###### interface
options = {
services.fluentd = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable fluentd.";
};
config = mkOption {
type = types.lines;
default = "";
description = "Fluentd config.";
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.fluentd = with pkgs; {
description = "Fluentd Daemon";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${pkgs.fluentd}/bin/fluentd -c ${pkgs.writeText "fluentd.conf" cfg.config}";
};
};
};
}

View file

@ -0,0 +1,282 @@
{ config, lib, pkgs, ... }:

with lib;

let
  gid = config.ids.gids.mediatomb;
  cfg = config.services.mediatomb;

  # Generated MediaTomb configuration; used unless customCfg is set.
  # NOTE: the scripting paths below previously pointed at a literal
  # /nix/store/...-mediatomb-0.12.1 path, which only exists on the machine
  # that originally built that derivation; they now reference the package.
  mtConf = pkgs.writeText "config.xml" ''
  <?xml version="1.0" encoding="UTF-8"?>
  <config version="2" xmlns="http://mediatomb.cc/config/2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://mediatomb.cc/config/2 http://mediatomb.cc/config/2.xsd">
    <server>
      <ui enabled="yes" show-tooltips="yes">
        <accounts enabled="no" session-timeout="30">
          <account user="mediatomb" password="mediatomb"/>
        </accounts>
      </ui>
      <name>${cfg.serverName}</name>
      <udn>uuid:${cfg.uuid}</udn>
      <home>${cfg.dataDir}</home>
      <webroot>${pkgs.mediatomb}/share/mediatomb/web</webroot>
      <storage>
        <sqlite3 enabled="yes">
          <database-file>mediatomb.db</database-file>
        </sqlite3>
      </storage>
      <protocolInfo extend="${if cfg.ps3Support then "yes" else "no"}"/>
      ${if cfg.dsmSupport then ''
      <custom-http-headers>
        <add header="X-User-Agent: redsonic"/>
      </custom-http-headers>
      <manufacturerURL>redsonic.com</manufacturerURL>
      <modelNumber>105</modelNumber>
      '' else ""}
      ${if cfg.tg100Support then ''
      <upnp-string-limit>101</upnp-string-limit>
      '' else ""}
      <extended-runtime-options>
        <mark-played-items enabled="yes" suppress-cds-updates="yes">
          <string mode="prepend">*</string>
          <mark>
            <content>video</content>
          </mark>
        </mark-played-items>
      </extended-runtime-options>
    </server>
    <import hidden-files="no">
      <scripting script-charset="UTF-8">
        <common-script>${pkgs.mediatomb}/share/mediatomb/js/common.js</common-script>
        <playlist-script>${pkgs.mediatomb}/share/mediatomb/js/playlists.js</playlist-script>
        <virtual-layout type="builtin">
          <import-script>${pkgs.mediatomb}/share/mediatomb/js/import.js</import-script>
        </virtual-layout>
      </scripting>
      <mappings>
        <extension-mimetype ignore-unknown="no">
          <map from="mp3" to="audio/mpeg"/>
          <map from="ogx" to="application/ogg"/>
          <map from="ogv" to="video/ogg"/>
          <map from="oga" to="audio/ogg"/>
          <map from="ogg" to="audio/ogg"/>
          <map from="ogm" to="video/ogg"/>
          <map from="asf" to="video/x-ms-asf"/>
          <map from="asx" to="video/x-ms-asf"/>
          <map from="wma" to="audio/x-ms-wma"/>
          <map from="wax" to="audio/x-ms-wax"/>
          <map from="wmv" to="video/x-ms-wmv"/>
          <map from="wvx" to="video/x-ms-wvx"/>
          <map from="wm" to="video/x-ms-wm"/>
          <map from="wmx" to="video/x-ms-wmx"/>
          <map from="m3u" to="audio/x-mpegurl"/>
          <map from="pls" to="audio/x-scpls"/>
          <map from="flv" to="video/x-flv"/>
          <map from="mkv" to="video/x-matroska"/>
          <map from="mka" to="audio/x-matroska"/>
          ${if cfg.ps3Support then ''
          <map from="avi" to="video/divx"/>
          '' else ""}
          ${if cfg.dsmSupport then ''
          <map from="avi" to="video/avi"/>
          '' else ""}
        </extension-mimetype>
        <mimetype-upnpclass>
          <map from="audio/*" to="object.item.audioItem.musicTrack"/>
          <map from="video/*" to="object.item.videoItem"/>
          <map from="image/*" to="object.item.imageItem"/>
        </mimetype-upnpclass>
        <mimetype-contenttype>
          <treat mimetype="audio/mpeg" as="mp3"/>
          <treat mimetype="application/ogg" as="ogg"/>
          <treat mimetype="audio/ogg" as="ogg"/>
          <treat mimetype="audio/x-flac" as="flac"/>
          <treat mimetype="audio/x-ms-wma" as="wma"/>
          <treat mimetype="audio/x-wavpack" as="wv"/>
          <treat mimetype="image/jpeg" as="jpg"/>
          <treat mimetype="audio/x-mpegurl" as="playlist"/>
          <treat mimetype="audio/x-scpls" as="playlist"/>
          <treat mimetype="audio/x-wav" as="pcm"/>
          <treat mimetype="audio/L16" as="pcm"/>
          <treat mimetype="video/x-msvideo" as="avi"/>
          <treat mimetype="video/mp4" as="mp4"/>
          <treat mimetype="audio/mp4" as="mp4"/>
          <treat mimetype="application/x-iso9660" as="dvd"/>
          <treat mimetype="application/x-iso9660-image" as="dvd"/>
        </mimetype-contenttype>
      </mappings>
      <online-content>
        <YouTube enabled="no" refresh="28800" update-at-start="no" purge-after="604800" racy-content="exclude" format="mp4" hd="no">
          <favorites user="mediatomb"/>
          <standardfeed feed="most_viewed" time-range="today"/>
          <playlists user="mediatomb"/>
          <uploads user="mediatomb"/>
          <standardfeed feed="recently_featured" time-range="today"/>
        </YouTube>
      </online-content>
    </import>
    <transcoding enabled="${if cfg.transcoding then "yes" else "no"}">
      <mimetype-profile-mappings>
        <transcode mimetype="video/x-flv" using="vlcmpeg"/>
        <transcode mimetype="application/ogg" using="vlcmpeg"/>
        <transcode mimetype="application/ogg" using="oggflac2raw"/>
        <transcode mimetype="audio/x-flac" using="oggflac2raw"/>
      </mimetype-profile-mappings>
      <profiles>
        <profile name="oggflac2raw" enabled="no" type="external">
          <mimetype>audio/L16</mimetype>
          <accept-url>no</accept-url>
          <first-resource>yes</first-resource>
          <accept-ogg-theora>no</accept-ogg-theora>
          <agent command="ogg123" arguments="-d raw -o byteorder:big -f %out %in"/>
          <buffer size="1048576" chunk-size="131072" fill-size="262144"/>
        </profile>
        <profile name="vlcmpeg" enabled="no" type="external">
          <mimetype>video/mpeg</mimetype>
          <accept-url>yes</accept-url>
          <first-resource>yes</first-resource>
          <accept-ogg-theora>yes</accept-ogg-theora>
          <agent command="vlc" arguments="-I dummy %in --sout #transcode{venc=ffmpeg,vcodec=mp2v,vb=4096,fps=25,aenc=ffmpeg,acodec=mpga,ab=192,samplerate=44100,channels=2}:standard{access=file,mux=ps,dst=%out} vlc:quit"/>
          <buffer size="14400000" chunk-size="512000" fill-size="120000"/>
        </profile>
      </profiles>
    </transcoding>
  </config>
  '';

in {

  ###### interface

  options = {

    services.mediatomb = {

      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable the mediatomb DLNA server.
        '';
      };

      serverName = mkOption {
        type = types.str;
        default = "mediatomb";
        description = ''
          How to identify the server on the network.
        '';
      };

      ps3Support = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable ps3 specific tweaks.
          WARNING: incompatible with DSM 320 support.
        '';
      };

      dsmSupport = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable D-Link DSM 320 specific tweaks.
          WARNING: incompatible with ps3 support.
        '';
      };

      tg100Support = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable Telegent TG100 specific tweaks.
        '';
      };

      transcoding = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to enable transcoding.
        '';
      };

      dataDir = mkOption {
        type = types.path;
        default = "/var/lib/mediatomb";
        description = ''
          The directory where mediatomb stores its state, data, etc.
        '';
      };

      user = mkOption {
        type = types.str;
        default = "mediatomb";
        description = "User account under which mediatomb runs.";
      };

      group = mkOption {
        type = types.str;
        default = "mediatomb";
        description = "Group account under which mediatomb runs.";
      };

      port = mkOption {
        type = types.int;
        default = 49152;
        description = ''
          The network port to listen on.
        '';
      };

      uuid = mkOption {
        type = types.str;
        default = "fdfc8a4e-a3ad-4c1d-b43d-a2eedb03a687";
        description = ''
          A unique (on your network) identifier for the server.
        '';
      };

      customCfg = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Allow mediatomb to create and use its own config file inside ${cfg.dataDir}.
        '';
      };
    };
  };

  ###### implementation

  config = mkIf cfg.enable {

    systemd.services.mediatomb = {
      description = "MediaTomb media Server";
      after = [ "local-fs.target" "network.target" ];
      wantedBy = [ "multi-user.target" ];
      path = [ pkgs.mediatomb ];
      # With customCfg, mediatomb manages its own config in dataDir;
      # otherwise the generated config above is passed with -c.
      serviceConfig.ExecStart = "${pkgs.mediatomb}/bin/mediatomb -p ${toString cfg.port} ${if cfg.customCfg then "" else "-c ${mtConf}"} -m ${cfg.dataDir}";
      serviceConfig.User = "${cfg.user}";
    };

    users.extraGroups = optionalAttrs (cfg.group == "mediatomb") (singleton {
      name = "mediatomb";
      gid = gid;
    });

    users.extraUsers = optionalAttrs (cfg.user == "mediatomb") (singleton {
      name = "mediatomb";
      isSystemUser = true;
      group = cfg.group;
      home = "${cfg.dataDir}";
      createHome = true;
      description = "Mediatomb DLNA Server User";
    });

    # 1900/UDP is the SSDP discovery port; cfg.port carries HTTP and streaming.
    networking.firewall = {
      allowedUDPPorts = [ 1900 cfg.port ];
      allowedTCPPorts = [ cfg.port ];
    };
  };
}

View file

@ -379,9 +379,6 @@ in
/nix/var/nix/gcroots/per-user \ /nix/var/nix/gcroots/per-user \
/nix/var/nix/profiles/per-user \ /nix/var/nix/profiles/per-user \
/nix/var/nix/gcroots/tmp /nix/var/nix/gcroots/tmp
ln -sf /nix/var/nix/profiles /nix/var/nix/gcroots/
ln -sf /nix/var/nix/manifests /nix/var/nix/gcroots/
''; '';
}; };

View file

@ -0,0 +1,168 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.rippleDataApi;

  # Environment configuration consumed by the node.js service.
  deployment_env_config = builtins.toJSON {
    production = {
      port = toString cfg.port;
      maxSockets = 150;
      batchSize = 100;
      startIndex = 32570;
      rippleds = cfg.rippleds;
      redis = {
        enable = cfg.redis.enable;
        host = cfg.redis.host;
        port = cfg.redis.port;
        options.auth_pass = null;
      };
    };
  };

  db_config = builtins.toJSON {
    production = {
      # NOTE(review): `optional` yields a JSON list ([] or [value]) rather
      # than a plain string for username/password — verify the consumer
      # accepts that, or switch to null/optionalString.
      username = optional (cfg.couchdb.pass != "") cfg.couchdb.user;
      password = optional (cfg.couchdb.pass != "") cfg.couchdb.pass;
      host = cfg.couchdb.host;
      port = cfg.couchdb.port;
      database = cfg.couchdb.db;
      protocol = "http";
    };
  };
in {
  options = {
    services.rippleDataApi = {
      # mkEnableOption already prefixes "Whether to enable", so only the
      # service name goes here.
      enable = mkEnableOption "ripple data api";

      port = mkOption {
        description = "Ripple data api port";
        default = 5993;
        type = types.int;
      };

      redis = {
        enable = mkOption {
          description = "Whether to enable caching of ripple data to redis.";
          default = true;
          type = types.bool;
        };

        host = mkOption {
          description = "Ripple data api redis host.";
          default = "localhost";
          type = types.str;
        };

        port = mkOption {
          description = "Ripple data api redis port.";
          # Redis' default port; 5984 (the previous default) is CouchDB's.
          default = 6379;
          type = types.int;
        };
      };

      couchdb = {
        host = mkOption {
          description = "Ripple data api couchdb host.";
          default = "localhost";
          type = types.str;
        };

        port = mkOption {
          description = "Ripple data api couchdb port.";
          default = 5984;
          type = types.int;
        };

        db = mkOption {
          description = "Ripple data api couchdb database.";
          default = "rippled";
          type = types.str;
        };

        user = mkOption {
          description = "Ripple data api couchdb username.";
          default = "rippled";
          type = types.str;
        };

        pass = mkOption {
          description = "Ripple data api couchdb password.";
          default = "";
          type = types.str;
        };

        create = mkOption {
          description = "Whether to create couchdb database needed by ripple data api.";
          type = types.bool;
          default = true;
        };
      };

      rippleds = mkOption {
        description = "List of rippleds to be used by ripple data api.";
        default = [
          "http://s_east.ripple.com:51234"
          "http://s_west.ripple.com:51234"
        ];
        type = types.listOf types.str;
      };
    };
  };

  config = mkIf (cfg.enable) {
    services.couchdb.enable = mkDefault true;
    services.couchdb.bindAddress = mkDefault "0.0.0.0";
    services.redis.enable = mkDefault true;

    systemd.services.ripple-data-api = {
      after = [ "couchdb.service" "redis.service" "ripple-data-api-importer.service" ];
      wantedBy = [ "multi-user.target" ];

      environment = {
        NODE_ENV = "production";
        DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
        DB_CONFIG = pkgs.writeText "db.config.json" db_config;
      };

      serviceConfig = {
        ExecStart = "${pkgs.ripple-data-api}/bin/api";
        User = "ripple-data-api";
      };
    };

    systemd.services.ripple-data-importer = {
      after = [ "couchdb.service" ];
      wantedBy = [ "multi-user.target" ];
      path = [ pkgs.curl ];

      environment = {
        NODE_ENV = "production";
        DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
        DB_CONFIG = pkgs.writeText "db.config.json" db_config;
        LOG_FILE = "/dev/null";
      };

      serviceConfig = {
        ExecStart = "${pkgs.ripple-data-api}/bin/importer live debug2";
        User = "ripple-data-api";
      };

      preStart = mkMerge [
        (mkIf (cfg.couchdb.create) ''
          HOST="http://${optionalString (cfg.couchdb.pass != "") "${cfg.couchdb.user}:${cfg.couchdb.pass}@"}${cfg.couchdb.host}:${toString cfg.couchdb.port}"
          curl -X PUT $HOST/${cfg.couchdb.db} || true
        '')
        "${pkgs.ripple-data-api}/bin/update-views"
      ];
    };

    users.extraUsers = singleton
      { name = "ripple-data-api";
        description = "Ripple data api user";
        uid = config.ids.uids.ripple-data-api;
      };
  };
}

View file

@ -1,5 +1,3 @@
# configuration building is commented out until better tested.
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
@ -7,29 +5,189 @@ with lib;
let let
cfg = config.services.rippled; cfg = config.services.rippled;
rippledStateCfgFile = "/var/lib/rippled/rippled.cfg"; b2i = val: if val then "1" else "0";
dbCfg = db: ''
type=${db.type}
path=${db.path}
${optionalString (db.compression != null) ("compression=${b2i db.compression}") }
${optionalString (db.onlineDelete != null) ("online_delete=${toString db.onlineDelete}")}
${optionalString (db.advisoryDelete != null) ("advisory_delete=${toString db.advisoryDelete}")}
${db.extraOpts}
'';
rippledCfg = '' rippledCfg = ''
[server]
${concatMapStringsSep "\n" (n: "port_${n}") (attrNames cfg.ports)}
${concatMapStrings (p: ''
[port_${p.name}]
ip=${p.ip}
port=${toString p.port}
protocol=${concatStringsSep "," p.protocol}
${optionalString (p.user != "") "user=${p.user}"}
${optionalString (p.password != "") "user=${p.password}"}
admin=${if p.admin then "allow" else "no"}
${optionalString (p.ssl.key != null) "ssl_key=${p.ssl.key}"}
${optionalString (p.ssl.cert != null) "ssl_cert=${p.ssl.cert}"}
${optionalString (p.ssl.chain != null) "ssl_chain=${p.ssl.chain}"}
'') (attrValues cfg.ports)}
[database_path]
${cfg.databasePath}
[node_db] [node_db]
type=HyperLevelDB ${dbCfg cfg.nodeDb}
path=/var/lib/rippled/db/hyperldb
[debug_logfile] ${optionalString (cfg.tempDb != null) ''
/var/log/rippled/debug.log [temp_db]
${dbCfg cfg.tempDb}''}
'' ${optionalString (cfg.importDb != null) ''
+ optionalString (cfg.peerIp != null) '' [import_db]
[peer_ip] ${dbCfg cfg.importDb}''}
${cfg.peerIp}
[peer_port] [ips]
${toString cfg.peerPort} ${concatStringsSep "\n" cfg.ips}
'' [ips_fixed]
+ cfg.extraConfig; ${concatStringsSep "\n" cfg.ipsFixed}
[validators]
${concatStringsSep "\n" cfg.validators}
[node_size]
${cfg.nodeSize}
[ledger_history]
${toString cfg.ledgerHistory}
[fetch_depth]
${toString cfg.fetchDepth}
[validation_quorum]
${toString cfg.validationQuorum}
[sntp_servers]
${concatStringsSep "\n" cfg.sntpServers}
[rpc_startup]
{ "command": "log_level", "severity": "${cfg.logLevel}" }
'' + cfg.extraConfig;
portOptions = { name, ...}: {
options = {
name = mkOption {
internal = true;
default = name;
};
ip = mkOption {
default = "127.0.0.1";
description = "Ip where rippled listens.";
type = types.str;
};
port = mkOption {
description = "Port where rippled listens.";
type = types.int;
};
protocol = mkOption {
description = "Protocols expose by rippled.";
type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
};
user = mkOption {
description = "When set, these credentials will be required on HTTP/S requests.";
type = types.str;
default = "";
};
password = mkOption {
description = "When set, these credentials will be required on HTTP/S requests.";
type = types.str;
default = "";
};
admin = mkOption {
description = "Controls whether or not administrative commands are allowed.";
type = types.bool;
default = false;
};
ssl = {
key = mkOption {
description = ''
Specifies the filename holding the SSL key in PEM format.
'';
default = null;
type = types.nullOr types.path;
};
cert = mkOption {
description = ''
Specifies the path to the SSL certificate file in PEM format.
This is not needed if the chain includes it.
'';
default = null;
type = types.nullOr types.path;
};
chain = mkOption {
description = ''
If you need a certificate chain, specify the path to the
certificate chain here. The chain may include the end certificate.
'';
default = null;
type = types.nullOr types.path;
};
};
};
};
dbOptions = {
type = mkOption {
description = "Rippled database type.";
type = types.enum ["rocksdb" "nudb" "sqlite"];
default = "rocksdb";
};
path = mkOption {
description = "Location to store the database.";
type = types.path;
default = cfg.databasePath;
};
compression = mkOption {
description = "Whether to enable snappy compression.";
type = types.nullOr types.bool;
default = null;
};
onlineDelete = mkOption {
description = "Enable automatic purging of older ledger information.";
type = types.addCheck (types.nullOr types.int) (v: v > 256);
default = cfg.ledgerHistory;
};
advisoryDelete = mkOption {
description = ''
If set, then require administrative RPC call "can_delete"
to enable online deletion of ledger records.
'';
type = types.nullOr types.bool;
default = null;
};
extraOpts = mkOption {
description = "Extra database options.";
type = types.lines;
default = "";
};
};
rippledCfgFile = pkgs.writeText "rippled.cfg" rippledCfg;
in in
{ {
@ -37,236 +195,176 @@ in
###### interface ###### interface
options = { options = {
services.rippled = { services.rippled = {
enable = mkEnableOption "Whether to enable rippled";
enable = mkOption { package = mkOption {
default = false; description = "Which rippled package to use.";
description = "Whether to enable rippled"; type = types.package;
default = pkgs.rippled;
}; };
# ports = mkOption {
# Rippled has a simple configuration file layout that is easy to description = "Ports exposed by rippled";
# build with nix. Many of the options are defined here but are type = types.attrsOf types.optionSet;
# commented out until the code to append them to the config above options = [portOptions];
# is written and they are tested. default = {
# rpc = {
# If you find a yourself implementing more options, please submit a port = 5005;
# pull request. admin = true;
# protocol = ["http"];
};
peer = {
port = 51235;
ip = "0.0.0.0";
protocol = ["peer"];
};
ws_public = {
port = 5006;
ip = "0.0.0.0";
protocol = ["ws" "wss"];
};
};
};
nodeDb = mkOption {
description = "Rippled main database options.";
type = types.nullOr types.optionSet;
options = [dbOptions];
default = {
type = "rocksdb";
extraOpts = ''
open_files=2000
filter_bits=12
cache_mb=256
file_size_pb=8
file_size_mult=2;
'';
};
};
tempDb = mkOption {
description = "Rippled temporary database options.";
type = types.nullOr types.optionSet;
options = [dbOptions];
default = null;
};
importDb = mkOption {
description = "Settings for performing a one-time import.";
type = types.nullOr types.optionSet;
options = [dbOptions];
default = null;
};
nodeSize = mkOption {
description = ''
Rippled size of the node you are running.
"tiny", "small", "medium", "large", and "huge"
'';
type = types.enum ["tiny" "small" "medium" "large" "huge"];
default = "small";
};
/*
ips = mkOption { ips = mkOption {
default = [ "r.ripple.com 51235" ];
example = [ "192.168.0.1" "192.168.0.1 3939" "r.ripple.com 51235" ];
description = '' description = ''
List of hostnames or ips where the Ripple protocol is served. List of hostnames or ips where the Ripple protocol is served.
For a starter list, you can either copy entries from: For a starter list, you can either copy entries from:
https://ripple.com/ripple.txt or if you prefer you can let it https://ripple.com/ripple.txt or if you prefer you can let it
default to r.ripple.com 51235 default to r.ripple.com 51235
A port may optionally be specified after adding a space to the A port may optionally be specified after adding a space to the
address. By convention, if known, IPs are listed in from most address. By convention, if known, IPs are listed in from most
to least trusted. to least trusted.
''; '';
type = types.listOf types.str;
default = ["r.ripple.com 51235"];
}; };
ipsFixed = mkOption { ipsFixed = mkOption {
default = null;
example = [ "192.168.0.1" "192.168.0.1 3939" "r.ripple.com 51235" ];
description = '' description = ''
List of IP addresses or hostnames to which rippled should always List of IP addresses or hostnames to which rippled should always
attempt to maintain peer connections with. This is useful for attempt to maintain peer connections with. This is useful for
manually forming private networks, for example to configure a manually forming private networks, for example to configure a
validation server that connects to the Ripple network through a validation server that connects to the Ripple network through a
public-facing server, or for building a set of cluster peers. public-facing server, or for building a set of cluster peers.
A port may optionally be specified after adding a space to the address A port may optionally be specified after adding a space to the address
''; '';
}; type = types.listOf types.str;
*/ default = [];
peerIp = mkOption {
default = null;
example = "0.0.0.0";
description = ''
IP address or domain to bind to allow external connections from peers.
Defaults to not binding, which disallows external connections from peers.
'';
}; };
peerPort = mkOption { validators = mkOption {
default = 51235;
description = '' description = ''
If peerIp is supplied, corresponding port to bind to for peer connections. List of nodes to always accept as validators. Nodes are specified by domain
or public key.
''; '';
type = types.listOf types.str;
default = [
"n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 RL1"
"n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj RL2"
"n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C RL3"
"n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS RL4"
"n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA RL5"
];
}; };
/* databasePath = mkOption {
peerPortProxy = mkOption {
type = types.int;
example = 51236;
description = '' description = ''
An optional, additional listening port number for peers. Incoming Path to the ripple database.
connections on this port will be required to provide a PROXY Protocol
handshake, described in this document (external link):
http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
The PROXY Protocol is a popular method used by elastic load balancing
service providers such as Amazon, to identify the true IP address and
port number of external incoming connections.
In addition to enabling this setting, it will also be required to
use your provider-specific control panel or administrative web page
to configure your server instance to receive PROXY Protocol handshakes,
and also to restrict access to your instance to the Elastic Load Balancer.
''; '';
type = types.path;
default = "/var/lib/rippled/db";
}; };
peerPrivate = mkOption { validationQuorum = mkOption {
default = null;
example = 0;
description = '' description = ''
0: Request peers to broadcast your address. Normal outbound peer connections [default] The minimum number of trusted validations a ledger must have before
1: Request peers not broadcast your address. Only connect to configured peers. the server considers it fully validated.
'';
};
peerSslCipherList = mkOption {
default = null;
example = "ALL:!LOW:!EXP:!MD5:@STRENGTH";
description = ''
A colon delimited string with the allowed SSL cipher modes for peer. The
choices for for ciphers are defined by the OpenSSL API function
SSL_CTX_set_cipher_list, documented here (external link):
http://pic.dhe.ibm.com/infocenter/tpfhelp/current/index.jsp?topic=%2Fcom.ibm.ztpf-ztpfdf.doc_put.cur%2Fgtpc2%2Fcpp_ssl_ctx_set_cipher_list.html
The default setting of "ALL:!LOW:!EXP:!MD5:@STRENGTH", which allows
non-authenticated peer connections (they are, however, secure).
'';
};
nodeSeed = mkOption {
default = null;
example = "RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE";
description = ''
This is used for clustering. To force a particular node seed or key, the
key can be set here. The format is the same as the validation_seed field.
To obtain a validation seed, use the rippled validation_create command.
'';
};
clusterNodes = mkOption {
default = null;
example = [ "n9KorY8QtTdRx7TVDpwnG9NvyxsDwHUKUEeDLY3AkiGncVaSXZi5" ];
description = ''
To extend full trust to other nodes, place their node public keys here.
Generally, you should only do this for nodes under common administration.
Node public keys start with an 'n'. To give a node a name for identification
place a space after the public key and then the name.
'';
};
sntpServers = mkOption {
default = null;
example = [ "time.nist.gov" "pool.ntp.org" ];
description = ''
IP address or domain of NTP servers to use for time synchronization.
'';
};
# TODO: websocket options
rpcAllowRemote = mkOption {
default = false;
description = ''
false: Allow RPC connections only from 127.0.0.1. [default]
true: Allow RPC connections from any IP.
'';
};
rpcAdminAllow = mkOption {
example = [ "10.0.0.4" ];
description = ''
List of IP addresses allowed to have admin access.
'';
};
rpcAdminUser = mkOption {
type = types.str;
description = ''
As a server, require this as the admin user to be specified. Also, require
rpc_admin_user and rpc_admin_password to be checked for RPC admin functions.
The request must specify these as the admin_user and admin_password in the
request object.
'';
};
rpcAdminPassword = mkOption {
type = types.str;
description = ''
As a server, require this as the admin pasword to be specified. Also,
require rpc_admin_user and rpc_admin_password to be checked for RPC admin
functions. The request must specify these as the admin_user and
admin_password in the request object.
'';
};
rpcIp = mkOption {
type = types.str;
description = ''
IP address or domain to bind to allow insecure RPC connections.
Defaults to not binding, which disallows RPC connections.
''; '';
type = types.int;
default = 3;
}; };
rpcPort = mkOption { ledgerHistory = mkOption {
type = types.int;
description = ''
If rpcIp is supplied, corresponding port to bind to for peer connections.
'';
};
rpcUser = mkOption {
type = types.str;
description = '' description = ''
Require a this user to specified and require rpcPassword to The number of past ledgers to acquire on server startup and the minimum
be checked for RPC access via the rpcIp and rpcPort. The user and password to maintain while running.
must be specified via HTTP's basic authentication method.
As a client, supply this to the server via HTTP's basic authentication
method.
''; '';
type = types.either types.int (types.enum ["full"]);
default = 1296000; # 1 month
}; };
rpcPassword = mkOption { fetchDepth = mkOption {
type = types.str;
description = '' description = ''
Require a this password to specified and require rpc_user to The number of past ledgers to serve to other peers that request historical
be checked for RPC access via the rpcIp and rpcPort. The user and password ledger data (or "full" for no limit).
must be specified via HTTP's basic authentication method.
As a client, supply this to the server via HTTP's basic authentication
method.
''; '';
type = types.either types.int (types.enum ["full"]);
default = "full";
}; };
rpcStartup = mkOption { sntpServers = mkOption {
example = [ ''"command" : "log_level"'' ''"partition" : "ripplecalc"'' ''"severity" : "trace"'' ];
description = "List of RPC commands to run at startup.";
};
rpcSecure = mkOption {
default = false;
description = '' description = ''
false: Server certificates are not provided for RPC clients using SSL [default] IP address or domain of NTP servers to use for time synchronization.;
true: Client RPC connections wil be provided with SSL certificates.
Note that if rpc_secure is enabled, it will also be necessasry to configure the
certificate file settings located in rpcSslCert, rpcSslChain, and rpcSslKey
''; '';
type = types.listOf types.str;
default = [
"time.windows.com"
"time.apple.com"
"time.nist.gov"
"pool.ntp.org"
];
};
logLevel = mkOption {
description = "Logging verbosity.";
type = types.enum ["debug" "error" "info"];
default = "error";
}; };
*/
extraConfig = mkOption { extraConfig = mkOption {
default = ""; default = "";
@ -275,8 +373,11 @@ in
''; '';
}; };
config = mkOption {
internal = true;
default = pkgs.writeText "rippled.conf" rippledCfg;
};
}; };
}; };
@ -288,27 +389,21 @@ in
{ name = "rippled"; { name = "rippled";
description = "Ripple server user"; description = "Ripple server user";
uid = config.ids.uids.rippled; uid = config.ids.uids.rippled;
home = "/var/lib/rippled"; home = cfg.databasePath;
createHome = true;
}; };
systemd.services.rippled = { systemd.services.rippled = {
path = [ pkgs.rippled ];
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.rippled}/bin/rippled --fg -q --conf ${rippledStateCfgFile}"; ExecStart = "${cfg.package}/bin/rippled --fg --conf ${cfg.config}";
WorkingDirectory = "/var/lib/rippled"; User = "rippled";
}; };
}; };
networking.firewall.allowedTCPPorts = mkIf (cfg.peerIp != null) [ cfg.peerPort ]; environment.systemPackages = [ cfg.package ];
system.activationScripts.rippled = ''
mkdir -p /var/{lib,log}/rippled
chown -R rippled /var/{lib,log}/rippled
ln -sf ${rippledCfgFile} ${rippledStateCfgFile}
'';
}; };
} }

View file

@ -90,17 +90,9 @@ in {
${optionalString cfg.storageDriverSecure "-storage_driver_secure"} ${optionalString cfg.storageDriverSecure "-storage_driver_secure"}
''} ''}
''; '';
User = "cadvisor";
}; };
}; };
virtualisation.docker.enable = true; virtualisation.docker.enable = mkDefault true;
users.extraUsers = singleton {
name = "cadvisor";
uid = config.ids.uids.cadvisor;
description = "Cadvisor user";
extraGroups = [ "docker" ];
};
}; };
} }

View file

@ -34,7 +34,7 @@ let
cap=$(sed -nr 's/.*#%#\s+capabilities\s*=\s*(.+)/\1/p' $file) cap=$(sed -nr 's/.*#%#\s+capabilities\s*=\s*(.+)/\1/p' $file)
wrapProgram $file \ wrapProgram $file \
--set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" \ --set PATH "/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" \
--set MUNIN_LIBDIR "${pkgs.munin}/lib" \ --set MUNIN_LIBDIR "${pkgs.munin}/lib" \
--set MUNIN_PLUGSTATE "/var/run/munin" --set MUNIN_PLUGSTATE "/var/run/munin"
@ -194,7 +194,7 @@ in
mkdir -p /etc/munin/plugins mkdir -p /etc/munin/plugins
rm -rf /etc/munin/plugins/* rm -rf /etc/munin/plugins/*
PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/sbin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash PATH="/var/setuid-wrappers:/run/current-system/sw/bin:/run/current-system/sw/bin" ${pkgs.munin}/sbin/munin-node-configure --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${muninPlugins} --servicedir=/etc/munin/plugins 2>/dev/null | ${pkgs.bash}/bin/bash
''; '';
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/"; ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/";

View file

@ -20,6 +20,10 @@ let
cfg.collectors)} cfg.collectors)}
''; '';
cmdLineOpts = concatStringsSep " " (
[ "-h=${cfg.bosunHost}" "-c=${collectors}" ] ++ cfg.extraOpts
);
in { in {
options = { options = {
@ -79,6 +83,15 @@ in {
''; '';
}; };
extraOpts = mkOption {
type = with types; listOf str;
default = [];
example = [ "-d" ];
description = ''
Extra scollector command line options
'';
};
}; };
}; };
@ -95,9 +108,7 @@ in {
PermissionsStartOnly = true; PermissionsStartOnly = true;
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
ExecStart = '' ExecStart = "${cfg.package}/bin/scollector ${cmdLineOpts}";
${cfg.package}/bin/scollector -h=${cfg.bosunHost} -c=${collectors}
'';
}; };
}; };

View file

@ -44,7 +44,7 @@ let cfg = config.services.drbd; in
boot.extraModprobeConfig = boot.extraModprobeConfig =
'' ''
options drbd usermode_helper=/run/current-system/sw/sbin/drbdadm options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
''; '';
environment.etc = singleton environment.etc = singleton

View file

@ -0,0 +1,195 @@
{ config, lib, pkgs, ... }:

with lib;

let

  cfg = config.services.aiccu;

  # Render a Nix boolean as the lowercase true/false aiccu.conf expects.
  showBool = b: if b then "true" else "false";
  notNull = a: ! isNull a;

  # aiccu.conf for the daemon.  Optional directives are emitted only when the
  # corresponding option is non-null; a null option yields an empty line,
  # which aiccu ignores.
  configFile = pkgs.writeText "aiccu.conf" ''
    ${if notNull cfg.username then "username " + cfg.username else ""}
    ${if notNull cfg.password then "password " + cfg.password else ""}
    protocol ${cfg.protocol}
    server ${cfg.server}
    ipv6_interface ${cfg.interfaceName}
    ${if notNull cfg.tunnelID then "tunnel_id " + cfg.tunnelID else ""}
    verbose ${showBool cfg.verbose}
    daemonize true
    automatic ${showBool cfg.automatic}
    requiretls ${showBool cfg.requireTLS}
    pidfile ${cfg.pidFile}
    defaultroute ${showBool cfg.defaultRoute}
    ${if notNull cfg.setupScript then "setupscript ${cfg.setupScript}" else ""}
    makebeats ${showBool cfg.makeHeartBeats}
    noconfigure ${showBool cfg.noConfigure}
    behindnat ${showBool cfg.behindNAT}
    ${if cfg.localIPv4Override then "local_ipv4_override" else ""}
  ''; # NOTE(review): tunnelID was previously never written to the config
      # (dead option) and setupScript was emitted without its "setupscript"
      # directive keyword — both per the aiccu example configuration; verify
      # against the aiccu.conf documentation.

in {

  options = {

    services.aiccu = {

      enable = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = "Enable aiccu IPv6 over IPv4 SiXXs tunnel";
      };

      username = mkOption {
        type = with types; nullOr str;
        default = null;
        example = "FAB5-SIXXS";
        description = "Login credential";
      };

      password = mkOption {
        type = with types; nullOr str;
        default = null;
        example = "TmAkRbBEr0";
        description = "Login credential. Warning: this is stored in cleartext in the Nix store!";
      };

      protocol = mkOption {
        type = types.str;
        default = "tic";
        example = "tic|tsp|l2tp";
        description = "Protocol to use for setting up the tunnel";
      };

      server = mkOption {
        type = types.str;
        default = "tic.sixxs.net";
        example = "enabled.ipv6server.net";
        description = "Server to use for setting up the tunnel";
      };

      interfaceName = mkOption {
        type = types.str;
        default = "aiccu";
        example = "sixxs";
        description = ''
          The name of the interface that will be used as a tunnel interface.
          On *BSD the ipv6_interface should be set to gifX (eg gif0) for proto-41 tunnels
          or tunX (eg tun0) for AYIYA tunnels.
        '';
      };

      tunnelID = mkOption {
        type = with types; nullOr str;
        default = null;
        example = "T12345";
        description = "The tunnel id to use, only required when there are multiple tunnels in the list";
      };

      verbose = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = "Be verbose?";
      };

      automatic = mkOption {
        type = types.bool;
        default = true;
        example = false;
        description = "Automatic Login and Tunnel activation";
      };

      requireTLS = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = ''
          When set to true, if TLS is not supported on the server
          the TIC transaction will fail.
          When set to false, it will try a starttls, when that is
          not supported it will continue.
          In any case if AICCU is build with TLS support it will
          try to do a 'starttls' to the TIC server to see if that
          is supported.
        '';
      };

      pidFile = mkOption {
        type = types.path;
        default = "/run/aiccu.pid";
        example = "/var/lib/aiccu/aiccu.pid";
        description = "Location of PID File";
      };

      defaultRoute = mkOption {
        type = types.bool;
        default = true;
        example = false;
        description = "Add a default route";
      };

      setupScript = mkOption {
        type = with types; nullOr path;
        default = null;
        example = "/var/lib/aiccu/fix-subnets.sh";
        description = "Script to run after setting up the interfaces";
      };

      makeHeartBeats = mkOption {
        type = types.bool;
        default = true;
        example = false;
        description = ''
          In general you don't want to turn this off
          Of course only applies to AYIYA and heartbeat tunnels not to static ones
        '';
      };

      noConfigure = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = "Don't configure anything";
      };

      behindNAT = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = "Notify the user that a NAT-kind network is detected";
      };

      localIPv4Override = mkOption {
        type = types.bool;
        default = false;
        example = true;
        description = ''
          Overrides the IPv4 parameter received from TIC
          This allows one to configure a NAT into "DMZ" mode and then
          forwarding the proto-41 packets to an internal host.
          This is only needed for static proto-41 tunnels!
          AYIYA and heartbeat tunnels don't require this.
        '';
      };

    };
  };

  config = mkIf cfg.enable {

    systemd.services.aiccu = {
      description = "Automatic IPv6 Connectivity Client Utility";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        ExecStart = "${pkgs.aiccu}/bin/aiccu start ${configFile}";
        ExecStop = "${pkgs.aiccu}/bin/aiccu stop";
        Type = "forking";
        PIDFile = cfg.pidFile;
        Restart = "no"; # aiccu startup errors are serious, do not pound the tic server or be banned.
      };
    };

  };
}

View file

@ -3,24 +3,22 @@
let let
inherit (lib) mkOption mkIf singleton; inherit (lib) mkOption mkIf singleton;
inherit (pkgs) ddclient; inherit (pkgs) ddclient;
stateDir = "/var/spool/ddclient"; stateDir = "/var/spool/ddclient";
ddclientUser = "ddclient"; ddclientUser = "ddclient";
ddclientFlags = "-foreground -verbose -noquiet -file ${ddclientCfg}";
ddclientFlags = "-foreground -file ${ddclientCfg}"; ddclientPIDFile = "${stateDir}/ddclient.pid";
ddclientCfg = pkgs.writeText "ddclient.conf" '' ddclientCfg = pkgs.writeText "ddclient.conf" ''
daemon=600 daemon=600
cache=${stateDir}/ddclient.cache cache=${stateDir}/ddclient.cache
pid=${stateDir}/ddclient.pid pid=${ddclientPIDFile}
use=${config.services.ddclient.web} use=${config.services.ddclient.use}
login=${config.services.ddclient.username} login=${config.services.ddclient.username}
password=${config.services.ddclient.password} password=${config.services.ddclient.password}
protocol=${config.services.ddclient.protocol} protocol=${config.services.ddclient.protocol}
server=${config.services.ddclient.server} server=${config.services.ddclient.server}
ssl=${if config.services.ddclient.ssl then "yes" else "yes"}
wildcard=YES wildcard=YES
${config.services.ddclient.domain} ${config.services.ddclient.domain}
${config.services.ddclient.extraConfig} ${config.services.ddclient.extraConfig}
@ -34,10 +32,11 @@ in
options = { options = {
services.ddclient = { services.ddclient = with lib.types; {
enable = mkOption { enable = mkOption {
default = false; default = false;
type = bool;
description = '' description = ''
Whether to synchronise your machine's IP address with a dynamic DNS provider (e.g. dyndns.org). Whether to synchronise your machine's IP address with a dynamic DNS provider (e.g. dyndns.org).
''; '';
@ -45,6 +44,7 @@ in
domain = mkOption { domain = mkOption {
default = ""; default = "";
type = str;
description = '' description = ''
Domain name to synchronize. Domain name to synchronize.
''; '';
@ -52,76 +52,93 @@ in
username = mkOption { username = mkOption {
default = ""; default = "";
type = str;
description = '' description = ''
Username. Username.
''; '';
}; };
password = mkOption { password = mkOption {
default = "" ; default = "";
type = str;
description = '' description = ''
Password. Password.
''; '';
}; };
protocol = mkOption { protocol = mkOption {
default = "dyndns2" ; default = "dyndns2";
type = str;
description = '' description = ''
Protocol to use with dynamic DNS provider. (see also, http://sourceforge.net/apps/trac/ddclient/wiki/Protocols) Protocol to use with dynamic DNS provider (see http://sourceforge.net/apps/trac/ddclient/wiki/Protocols).
''; '';
}; };
server = mkOption { server = mkOption {
default = "members.dyndns.org" ; default = "";
type = str;
description = '' description = ''
Server Server address.
'';
};
ssl = mkOption {
default = true;
type = bool;
description = ''
Whether to use to use SSL/TLS to connect to dynamic DNS provider.
''; '';
}; };
extraConfig = mkOption { extraConfig = mkOption {
default = "" ; default = "";
type = str;
description = '' description = ''
Extra configuration. Contents will be added verbatim to the configuration file. Extra configuration. Contents will be added verbatim to the configuration file.
''; '';
}; };
web = mkOption { use = mkOption {
default = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '" ; default = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '";
description = ""; type = str;
description = ''
Method to determine the IP address to send to the dymanic DNS provider.
'';
}; };
}; };
}; };
###### implementation ###### implementation
config = mkIf config.services.ddclient.enable { config = mkIf config.services.ddclient.enable {
environment.systemPackages = [ ddclient ]; environment.systemPackages = [ ddclient ];
users.extraUsers = singleton users.extraUsers = singleton {
{ name = ddclientUser; name = ddclientUser;
uid = config.ids.uids.ddclient; uid = config.ids.uids.ddclient;
description = "ddclient daemon user"; description = "ddclient daemon user";
home = stateDir; home = stateDir;
};
systemd.services.ddclient = {
description = "Dynamic DNS Client";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
# This may change back to forking if too many problems occur:
type = "simple";
User = ddclientUser;
Group = "nogroup"; #TODO get this to work
PermissionsStartOnly = "true";
PIDFile = ddclientPIDFile;
ExecStartPre = ''
${pkgs.stdenv.shell} -c "${pkgs.coreutils}/bin/mkdir -m 0755 -p ${stateDir} && ${pkgs.coreutils}/bin/chown ${ddclientUser} ${stateDir}"
'';
ExecStart = "${ddclient}/bin/ddclient ${ddclientFlags}";
#ExecStartPost = "${pkgs.coreutils}/bin/rm -r ${stateDir}"; # Should we have this?
}; };
};
jobs.ddclient =
{ name = "ddclient";
startOn = "startup";
preStart =
''
mkdir -m 0755 -p ${stateDir}
chown ${ddclientUser} ${stateDir}
'';
exec = "${ddclient}/bin/ddclient ${ddclientFlags}";
};
}; };
} }

View file

@ -183,6 +183,9 @@ in {
{ source = "${networkmanager_pptp}/etc/NetworkManager/VPN/nm-pptp-service.name"; { source = "${networkmanager_pptp}/etc/NetworkManager/VPN/nm-pptp-service.name";
target = "NetworkManager/VPN/nm-pptp-service.name"; target = "NetworkManager/VPN/nm-pptp-service.name";
} }
{ source = "${networkmanager_l2tp}/etc/NetworkManager/VPN/nm-l2tp-service.name";
target = "NetworkManager/VPN/nm-l2tp-service.name";
}
] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == []) ] ++ optional (cfg.appendNameservers == [] || cfg.insertNameservers == [])
{ source = overrideNameserversScript; { source = overrideNameserversScript;
target = "NetworkManager/dispatcher.d/02overridedns"; target = "NetworkManager/dispatcher.d/02overridedns";
@ -197,6 +200,7 @@ in {
networkmanager_vpnc networkmanager_vpnc
networkmanager_openconnect networkmanager_openconnect
networkmanager_pptp networkmanager_pptp
networkmanager_l2tp
modemmanager modemmanager
]; ];
@ -240,6 +244,7 @@ in {
networkmanager_vpnc networkmanager_vpnc
networkmanager_openconnect networkmanager_openconnect
networkmanager_pptp networkmanager_pptp
networkmanager_l2tp
modemmanager modemmanager
]; ];

View file

@ -9,6 +9,14 @@ let
stateDir = "/var/lib/nsd"; stateDir = "/var/lib/nsd";
pidFile = stateDir + "/var/nsd.pid"; pidFile = stateDir + "/var/nsd.pid";
nsdPkg = pkgs.nsd.override {
bind8Stats = cfg.bind8Stats;
ipv6 = cfg.ipv6;
ratelimit = cfg.ratelimit.enable;
rootServer = cfg.rootServer;
zoneStats = length (collect (x: (x.zoneStats or null) != null) cfg.zones) > 0;
};
zoneFiles = pkgs.stdenv.mkDerivation { zoneFiles = pkgs.stdenv.mkDerivation {
preferLocalBuild = true; preferLocalBuild = true;
name = "nsd-env"; name = "nsd-env";
@ -107,6 +115,7 @@ let
zone: zone:
name: "${name}" name: "${name}"
zonefile: "${stateDir}/zones/${name}" zonefile: "${stateDir}/zones/${name}"
${maybeString "zonestats: " zone.zoneStats}
${maybeString "outgoing-interface: " zone.outgoingInterface} ${maybeString "outgoing-interface: " zone.outgoingInterface}
${forEach " rrl-whitelist: " zone.rrlWhitelist} ${forEach " rrl-whitelist: " zone.rrlWhitelist}
@ -270,6 +279,19 @@ let
Use imports or pkgs.lib.readFile if you don't want this data in your config file. Use imports or pkgs.lib.readFile if you don't want this data in your config file.
''; '';
}; };
zoneStats = mkOption {
type = types.nullOr types.str;
default = null;
example = "%s";
description = ''
When set to something distinct to null NSD is able to collect
statistics per zone. All statistics of this zone(s) will be added
to the group specified by this given name. Use "%s" to use the zones
name as the group. The groups are output from nsd-control stats
and stats_noreset.
'';
};
}; };
}; };
@ -286,6 +308,15 @@ in
''; '';
}; };
bind8Stats = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
Wheter to enable BIND8 like statisics.
'';
};
rootServer = mkOption { rootServer = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -659,13 +690,6 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
# this is not working :(
nixpkgs.config.nsd = {
ipv6 = cfg.ipv6;
ratelimit = cfg.ratelimit.enable;
rootServer = cfg.rootServer;
};
users.extraGroups = singleton { users.extraGroups = singleton {
name = username; name = username;
gid = config.ids.gids.nsd; gid = config.ids.gids.nsd;
@ -688,7 +712,7 @@ in
serviceConfig = { serviceConfig = {
PIDFile = pidFile; PIDFile = pidFile;
Restart = "always"; Restart = "always";
ExecStart = "${pkgs.nsd}/sbin/nsd -d -c ${configFile}"; ExecStart = "${nsdPkg}/sbin/nsd -d -c ${configFile}";
}; };
preStart = '' preStart = ''

View file

@ -9,12 +9,6 @@ let
nssModulesPath = config.system.nssModules.path; nssModulesPath = config.system.nssModules.path;
permitRootLoginCheck = v:
v == "yes" ||
v == "without-password" ||
v == "forced-commands-only" ||
v == "no";
knownHosts = map (h: getAttr h cfg.knownHosts) (attrNames cfg.knownHosts); knownHosts = map (h: getAttr h cfg.knownHosts) (attrNames cfg.knownHosts);
knownHostsText = flip (concatMapStringsSep "\n") knownHosts knownHostsText = flip (concatMapStringsSep "\n") knownHosts
@ -116,12 +110,9 @@ in
permitRootLogin = mkOption { permitRootLogin = mkOption {
default = "without-password"; default = "without-password";
type = types.addCheck types.str permitRootLoginCheck; type = types.enum ["yes" "without-password" "forced-commands-only" "no"];
description = '' description = ''
Whether the root user can login using ssh. Valid values are Whether the root user can login using ssh.
<literal>yes</literal>, <literal>without-password</literal>,
<literal>forced-commands-only</literal> or
<literal>no</literal>.
''; '';
}; };

View file

@ -0,0 +1,61 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.munge;
in

{

  ###### interface

  options = {
    services.munge = {
      enable = mkEnableOption "munge service";

      password = mkOption {
        default = "/etc/munge/munge.key";
        # types.str rather than the deprecated types.string alias, matching
        # the other modules in this tree.
        type = types.str;
        description = ''
          The path to a daemon's secret key.
        '';
      };

    };
  };

  ###### implementation

  config = mkIf cfg.enable {

    environment.systemPackages = [ pkgs.munge ];

    systemd.services.munged = {
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ];
      path = [ pkgs.munge pkgs.coreutils ];

      # Lock down the key file and create the state/log/run directories
      # munged expects before it starts.
      preStart = ''
        chmod 0700 ${cfg.password}
        mkdir -p /var/lib/munge -m 0711
        mkdir -p /var/log/munge -m 0700
        mkdir -p /run/munge -m 0755
      '';

      serviceConfig = {
        ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
        PIDFile = "/run/munge/munged.pid";
        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
      };
    };

  };
}

View file

@ -130,6 +130,9 @@ in
config.system.path config.system.path
]; ];
# Don't restart dbus-daemon. Bad things tend to happen if we do.
systemd.services.dbus.reloadIfChanged = true;
environment.pathsToLink = [ "/etc/dbus-1" "/share/dbus-1" ]; environment.pathsToLink = [ "/etc/dbus-1" "/share/dbus-1" ];
}; };

View file

@ -384,8 +384,7 @@ rec {
}; };
adminPassword = mkOption { adminPassword = mkOption {
description = "The admin password for accessing owncloud. description = "The admin password for accessing owncloud.";
Warning: this is stored in cleartext in the Nix store!";
}; };
dbType = mkOption { dbType = mkOption {
@ -571,7 +570,7 @@ rec {
chown wwwrun:wwwrun ${config.dataDir}/owncloud.log || true chown wwwrun:wwwrun ${config.dataDir}/owncloud.log || true
QUERY="INSERT INTO groups (gid) values('admin'); INSERT INTO users (uid,password) values('${config.adminUser}','`echo -n "${config.adminPassword}" | ${pkgs.openssl}/bin/openssl dgst -sha1 | ${pkgs.gawk}/bin/awk '{print $2}'`'); INSERT INTO group_user (gid,uid) values('admin','${config.adminUser}');" QUERY="INSERT INTO groups (gid) values('admin'); INSERT INTO users (uid,password) values('${config.adminUser}','${builtins.hashString "sha1" config.adminPassword}'); INSERT INTO group_user (gid,uid) values('admin','${config.adminUser}');"
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true ${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/psql -h "/tmp" -U postgres -d ${config.dbName} -Atw -c "$QUERY" || true
''; '';
} }

View file

@ -102,6 +102,9 @@ in
''; '';
serviceConfig = { serviceConfig = {
ExecStart = "${nginx}/bin/nginx -c ${configFile} -p ${cfg.stateDir}"; ExecStart = "${nginx}/bin/nginx -c ${configFile} -p ${cfg.stateDir}";
Restart = "on-failure";
RestartSec = "10s";
StartLimitInterval = "1min";
}; };
}; };

View file

@ -19,7 +19,7 @@ in
# E.g., if KDE is enabled, it supersedes xterm. # E.g., if KDE is enabled, it supersedes xterm.
imports = [ imports = [
./none.nix ./xterm.nix ./xfce.nix ./kde4.nix ./kde5.nix ./none.nix ./xterm.nix ./xfce.nix ./kde4.nix ./kde5.nix
./e19.nix ./gnome3.nix ./xbmc.nix ./kodi.nix ./e19.nix ./gnome3.nix ./kodi.nix
]; ];
options = { options = {

View file

@ -1,31 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.desktopManager.xbmc;
in
{
options = {
services.xserver.desktopManager.xbmc = {
enable = mkOption {
default = false;
example = true;
description = "Enable the xbmc multimedia center.";
};
};
};
config = mkIf cfg.enable {
services.xserver.desktopManager.session = [{
name = "xbmc";
start = ''
${pkgs.xbmc}/bin/xbmc --lircdev /var/run/lirc/lircd --standalone &
waitPID=$!
'';
}];
environment.systemPackages = [ pkgs.xbmc ];
};
}

View file

@ -55,7 +55,7 @@ let
[UserList] [UserList]
minimum-uid=500 minimum-uid=500
hidden-users=${concatStringsSep " " dmcfg.hiddenUsers} hidden-users=${concatStringsSep " " dmcfg.hiddenUsers}
hidden-shells=/run/current-system/sw/sbin/nologin hidden-shells=/run/current-system/sw/bin/nologin
''; '';
lightdmConf = writeText "lightdm.conf" lightdmConf = writeText "lightdm.conf"

View file

@ -26,7 +26,7 @@ let
[Users] [Users]
MaximumUid=${toString config.ids.uids.nixbld} MaximumUid=${toString config.ids.uids.nixbld}
HideUsers=${concatStringsSep "," dmcfg.hiddenUsers} HideUsers=${concatStringsSep "," dmcfg.hiddenUsers}
HideShells=/run/current-system/sw/sbin/nologin HideShells=/run/current-system/sw/bin/nologin
[XDisplay] [XDisplay]
MinimumVT=${toString xcfg.tty} MinimumVT=${toString xcfg.tty}

View file

@ -18,6 +18,7 @@ in
./openbox.nix ./openbox.nix
./sawfish.nix ./sawfish.nix
./stumpwm.nix ./stumpwm.nix
./spectrwm.nix
./twm.nix ./twm.nix
./windowmaker.nix ./windowmaker.nix
./wmii.nix ./wmii.nix

View file

@ -0,0 +1,33 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.xserver.windowManager.spectrwm;
in
{
options = {
services.xserver.windowManager.spectrwm = {
enable = mkOption {
default = false;
example = true;
description = "Enable the spectrwm window manager.";
};
};
};
config = mkIf cfg.enable {
services.xserver.windowManager = {
session = [{
name = "spectrwm";
start = ''
${pkgs.spectrwm}/bin/spectrwm &
waitPID=$!
'';
}];
};
environment.systemPackages = [ pkgs.spectrwm ];
};
}

View file

@ -384,9 +384,13 @@ system("@systemd@/bin/systemctl", "reset-failed");
# Make systemd reload its units. # Make systemd reload its units.
system("@systemd@/bin/systemctl", "daemon-reload") == 0 or $res = 3; system("@systemd@/bin/systemctl", "daemon-reload") == 0 or $res = 3;
# Signal dbus to reload its configuration before starting other units. # Reload units that need it. This includes remounting changed mount
# Other units may rely on newly installed policy files under /etc/dbus-1 # units.
system("@systemd@/bin/systemctl", "reload-or-restart", "dbus.service"); if (scalar(keys %unitsToReload) > 0) {
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
unlink($reloadListFile);
}
# Restart changed services (those that have to be restarted rather # Restart changed services (those that have to be restarted rather
# than stopped and started). # than stopped and started).
@ -407,14 +411,6 @@ print STDERR "starting the following units: ", join(", ", @unitsToStartFiltered)
system("@systemd@/bin/systemctl", "start", "--", sort(keys %unitsToStart)) == 0 or $res = 4; system("@systemd@/bin/systemctl", "start", "--", sort(keys %unitsToStart)) == 0 or $res = 4;
unlink($startListFile); unlink($startListFile);
# Reload units that need it. This includes remounting changed mount
# units.
if (scalar(keys %unitsToReload) > 0) {
print STDERR "reloading the following units: ", join(", ", sort(keys %unitsToReload)), "\n";
system("@systemd@/bin/systemctl", "reload", "--", sort(keys %unitsToReload)) == 0 or $res = 4;
unlink($reloadListFile);
}
# Print failed and new units. # Print failed and new units.
my (@failed, @new, @restarting); my (@failed, @new, @restarting);

View file

@ -405,29 +405,19 @@ in
# copy the cryptsetup binary and it's dependencies # copy the cryptsetup binary and it's dependencies
boot.initrd.extraUtilsCommands = '' boot.initrd.extraUtilsCommands = ''
cp -pdv ${pkgs.cryptsetup}/sbin/cryptsetup $out/bin copy_bin_and_libs ${pkgs.cryptsetup}/bin/cryptsetup
cp -pdv ${pkgs.libgcrypt}/lib/libgcrypt*.so.* $out/lib
cp -pdv ${pkgs.libgpgerror}/lib/libgpg-error*.so.* $out/lib
cp -pdv ${pkgs.cryptsetup}/lib/libcryptsetup*.so.* $out/lib
cp -pdv ${pkgs.popt}/lib/libpopt*.so.* $out/lib
${optionalString luks.yubikeySupport '' ${optionalString luks.yubikeySupport ''
cp -pdv ${pkgs.ykpers}/bin/ykchalresp $out/bin copy_bin_and_libs ${pkgs.ykpers}/bin/ykchalresp
cp -pdv ${pkgs.ykpers}/bin/ykinfo $out/bin copy_bin_and_libs ${pkgs.ykpers}/bin/ykinfo
cp -pdv ${pkgs.openssl}/bin/openssl $out/bin copy_bin_and_libs ${pkgs.openssl}/bin/openssl
cc -O3 -I${pkgs.openssl}/include -L${pkgs.openssl}/lib ${./pbkdf2-sha512.c} -o $out/bin/pbkdf2-sha512 -lcrypto cc -O3 -I${pkgs.openssl}/include -L${pkgs.openssl}/lib ${./pbkdf2-sha512.c} -o pbkdf2-sha512 -lcrypto
strip -s $out/bin/pbkdf2-sha512 strip -s pbkdf2-sha512
copy_bin_and_libs pbkdf2-sha512
cp -pdv ${pkgs.libusb1}/lib/libusb*.so.* $out/lib mkdir -p $out/etc/ssl
cp -pdv ${pkgs.ykpers}/lib/libykpers*.so.* $out/lib cp -pdv ${pkgs.openssl}/etc/ssl/openssl.cnf $out/etc/ssl
cp -pdv ${pkgs.libyubikey}/lib/libyubikey*.so.* $out/lib
cp -pdv ${pkgs.openssl}/lib/libssl*.so.* $out/lib
cp -pdv ${pkgs.openssl}/lib/libcrypto*.so.* $out/lib
mkdir -p $out/etc/ssl
cp -pdv ${pkgs.openssl}/etc/ssl/openssl.cnf $out/etc/ssl
cat > $out/bin/openssl-wrap <<EOF cat > $out/bin/openssl-wrap <<EOF
#!$out/bin/sh #!$out/bin/sh

View file

@ -39,46 +39,60 @@ let
mkdir -p $out/bin $out/lib mkdir -p $out/bin $out/lib
ln -s $out/bin $out/sbin ln -s $out/bin $out/sbin
# Copy what we need from Glibc. copy_bin_and_libs () {
cp -pv ${pkgs.glibc}/lib/ld*.so.? $out/lib [ -f "$out/bin/$(basename $1)" ] && rm "$out/bin/$(basename $1)"
cp -pv ${pkgs.glibc}/lib/libc.so.* $out/lib cp -pdv $1 $out/bin
cp -pv ${pkgs.glibc}/lib/libm.so.* $out/lib }
cp -pv ${pkgs.glibc}/lib/libpthread.so.* $out/lib
cp -pv ${pkgs.glibc}/lib/librt.so.* $out/lib
cp -pv ${pkgs.glibc}/lib/libdl.so.* $out/lib
cp -pv ${pkgs.gcc.cc}/lib*/libgcc_s.so.* $out/lib
# Copy BusyBox. # Copy BusyBox.
cp -pvd ${pkgs.busybox}/bin/* ${pkgs.busybox}/sbin/* $out/bin/ for BIN in ${pkgs.busybox}/{s,}bin/*; do
copy_bin_and_libs $BIN
done
# Copy some utillinux stuff. # Copy some utillinux stuff.
cp -vf --remove-destination ${pkgs.utillinux}/sbin/blkid $out/bin copy_bin_and_libs ${pkgs.utillinux}/sbin/blkid
cp -pdv ${pkgs.utillinux}/lib/libblkid*.so.* $out/lib
cp -pdv ${pkgs.utillinux}/lib/libuuid*.so.* $out/lib
# Copy dmsetup and lvm. # Copy dmsetup and lvm.
cp -v ${pkgs.lvm2}/sbin/dmsetup $out/bin/dmsetup copy_bin_and_libs ${pkgs.lvm2}/sbin/dmsetup
cp -v ${pkgs.lvm2}/sbin/lvm $out/bin/lvm copy_bin_and_libs ${pkgs.lvm2}/sbin/lvm
cp -v ${pkgs.lvm2}/lib/libdevmapper.so.*.* $out/lib
cp -v ${pkgs.systemd}/lib/libsystemd.so.* $out/lib
# Add RAID mdadm tool. # Add RAID mdadm tool.
cp -v ${pkgs.mdadm}/sbin/mdadm $out/bin/mdadm copy_bin_and_libs ${pkgs.mdadm}/sbin/mdadm
# Copy udev. # Copy udev.
cp -v ${udev}/lib/systemd/systemd-udevd ${udev}/bin/udevadm $out/bin copy_bin_and_libs ${udev}/lib/systemd/systemd-udevd
cp -v ${udev}/lib/udev/*_id $out/bin copy_bin_and_libs ${udev}/bin/udevadm
cp -pdv ${udev}/lib/libudev.so.* $out/lib for BIN in ${udev}/lib/udev/*_id; do
cp -v ${pkgs.kmod}/lib/libkmod.so.* $out/lib copy_bin_and_libs $BIN
cp -v ${pkgs.acl}/lib/libacl.so.* $out/lib done
cp -v ${pkgs.attr}/lib/libattr.so.* $out/lib
# Copy modprobe. # Copy modprobe.
cp -v ${pkgs.kmod}/bin/kmod $out/bin/ copy_bin_and_libs ${pkgs.kmod}/bin/kmod
ln -sf kmod $out/bin/modprobe ln -sf kmod $out/bin/modprobe
${config.boot.initrd.extraUtilsCommands} ${config.boot.initrd.extraUtilsCommands}
# Copy ld manually since it isn't detected correctly
cp -pv ${pkgs.glibc}/lib/ld*.so.? $out/lib
# Copy all of the needed libraries for the binaries
for BIN in $(find $out/{bin,sbin} -type f); do
echo "Copying libs for bin $BIN"
LDD="$(ldd $BIN)" || continue
LIBS="$(echo "$LDD" | awk '{print $3}' | sed '/^$/d')"
for LIB in $LIBS; do
[ ! -f "$out/lib/$(basename $LIB)" ] && cp -pdv $LIB $out/lib
while [ "$(readlink $LIB)" != "" ]; do
LINK="$(readlink $LIB)"
if [ "${LINK:0:1}" != "/" ]; then
LINK="$(dirname $LIB)/$LINK"
fi
LIB="$LINK"
[ ! -f "$out/lib/$(basename $LIB)" ] && cp -pdv $LIB $out/lib
done
done
done
# Strip binaries further than normal. # Strip binaries further than normal.
chmod -R u+w $out chmod -R u+w $out
stripDirs "lib bin" "-s" stripDirs "lib bin" "-s"
@ -100,10 +114,11 @@ let
echo "testing patched programs..." echo "testing patched programs..."
$out/bin/ash -c 'echo hello world' | grep "hello world" $out/bin/ash -c 'echo hello world' | grep "hello world"
export LD_LIBRARY_PATH=$out/lib export LD_LIBRARY_PATH=$out/lib
$out/bin/mount --help 2>&1 | grep "BusyBox" $out/bin/mount --help 2>&1 | grep -q "BusyBox"
$out/bin/blkid --help 2>&1 | grep -q 'libblkid'
$out/bin/udevadm --version $out/bin/udevadm --version
$out/bin/dmsetup --version 2>&1 | tee -a log | grep "version:" $out/bin/dmsetup --version 2>&1 | tee -a log | grep -q "version:"
LVM_SYSTEM_DIR=$out $out/bin/lvm version 2>&1 | tee -a log | grep "LVM" LVM_SYSTEM_DIR=$out $out/bin/lvm version 2>&1 | tee -a log | grep -q "LVM"
$out/bin/mdadm --version $out/bin/mdadm --version
${config.boot.initrd.extraUtilsCommandsTest} ${config.boot.initrd.extraUtilsCommandsTest}
@ -205,7 +220,7 @@ let
# The closure of the init script of boot stage 1 is what we put in # The closure of the init script of boot stage 1 is what we put in
# the initial RAM disk. # the initial RAM disk.
initialRamdisk = pkgs.makeInitrd { initialRamdisk = pkgs.makeInitrd {
inherit (config.boot.initrd) compressor; inherit (config.boot.initrd) compressor prepend;
contents = contents =
[ { object = bootStage1; [ { object = bootStage1;
@ -247,6 +262,14 @@ in
''; '';
}; };
boot.initrd.prepend = mkOption {
default = [ ];
type = types.listOf types.str;
description = ''
Other initrd files to prepend to the final initrd we are building.
'';
};
boot.initrd.checkJournalingFS = mkOption { boot.initrd.checkJournalingFS = mkOption {
default = true; default = true;
type = types.bool; type = types.bool;

View file

@ -53,7 +53,7 @@ echo "booting system configuration $systemConfig" > /dev/kmsg
# Silence chown/chmod to fail gracefully on a readonly filesystem # Silence chown/chmod to fail gracefully on a readonly filesystem
# like squashfs. # like squashfs.
chown -f 0:30000 /nix/store chown -f 0:30000 /nix/store
chmod -f 1775 /nix/store chmod -f 1735 /nix/store
if [ -n "@readOnlyStore@" ]; then if [ -n "@readOnlyStore@" ]; then
if ! readonly-mountpoint /nix/store; then if ! readonly-mountpoint /nix/store; then
mount --bind /nix/store /nix/store mount --bind /nix/store /nix/store

View file

@ -13,7 +13,7 @@ let
makeUnit = name: unit: makeUnit = name: unit:
let let
pathSafeName = lib.replaceChars ["@" "\\"] ["-" "-"] name; pathSafeName = lib.replaceChars ["@" ":" "\\"] ["-" "-" "-"] name;
in in
if unit.enable then if unit.enable then
pkgs.runCommand "unit-${pathSafeName}" { preferLocalBuild = true; inherit (unit) text; } pkgs.runCommand "unit-${pathSafeName}" { preferLocalBuild = true; inherit (unit) text; }

View file

@ -17,13 +17,9 @@ in
boot.initrd.extraUtilsCommands = mkIf inInitrd boot.initrd.extraUtilsCommands = mkIf inInitrd
'' ''
mkdir -p $out/bin copy_bin_and_libs ${pkgs.btrfsProgs}/bin/btrfs
cp -v ${pkgs.btrfsProgs}/bin/btrfs $out/bin
ln -sv btrfs $out/bin/btrfsck ln -sv btrfs $out/bin/btrfsck
ln -sv btrfsck $out/bin/fsck.btrfs ln -sv btrfsck $out/bin/fsck.btrfs
# !!! Increases uncompressed initrd by 240k
cp -pv ${pkgs.zlib}/lib/libz.so* $out/lib
cp -pv ${pkgs.lzo}/lib/liblzo2.so* $out/lib
''; '';
boot.initrd.extraUtilsCommandsTest = mkIf inInitrd boot.initrd.extraUtilsCommandsTest = mkIf inInitrd

View file

@ -18,7 +18,7 @@ in
boot.initrd.extraUtilsCommands = mkIf inInitrd boot.initrd.extraUtilsCommands = mkIf inInitrd
'' ''
cp -v ${pkgs.cifs_utils}/sbin/mount.cifs $out/bin copy_bin_and_libs ${pkgs.cifs_utils}/sbin/mount.cifs
''; '';
}; };

View file

@ -10,12 +10,11 @@
boot.initrd.extraUtilsCommands = boot.initrd.extraUtilsCommands =
'' ''
# Copy e2fsck and friends. # Copy e2fsck and friends.
cp -v ${pkgs.e2fsprogs}/sbin/e2fsck $out/bin copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/e2fsck
cp -v ${pkgs.e2fsprogs}/sbin/tune2fs $out/bin copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/tune2fs
ln -sv e2fsck $out/bin/fsck.ext2 ln -sv e2fsck $out/bin/fsck.ext2
ln -sv e2fsck $out/bin/fsck.ext3 ln -sv e2fsck $out/bin/fsck.ext3
ln -sv e2fsck $out/bin/fsck.ext4 ln -sv e2fsck $out/bin/fsck.ext4
cp -pdv ${pkgs.e2fsprogs}/lib/lib*.so.* $out/lib
''; '';
}; };

View file

@ -13,9 +13,7 @@ in
boot.initrd.availableKernelModules = mkIf inInitrd [ "f2fs" ]; boot.initrd.availableKernelModules = mkIf inInitrd [ "f2fs" ];
boot.initrd.extraUtilsCommands = mkIf inInitrd '' boot.initrd.extraUtilsCommands = mkIf inInitrd ''
mkdir -p $out/bin $out/lib copy_bin_and_libs ${pkgs.f2fs-tools}/sbin/fsck.f2fs
cp -v ${pkgs.f2fs-tools}/sbin/fsck.f2fs $out/bin
cp -pdv ${pkgs.f2fs-tools}/lib/lib*.so.* $out/lib
''; '';
}; };
} }

View file

@ -13,7 +13,7 @@ in
boot.initrd.kernelModules = mkIf inInitrd [ "jfs" ]; boot.initrd.kernelModules = mkIf inInitrd [ "jfs" ];
boot.initrd.extraUtilsCommands = mkIf inInitrd '' boot.initrd.extraUtilsCommands = mkIf inInitrd ''
cp -v ${pkgs.jfsutils}/sbin/fsck.jfs "$out/bin/" copy_bin_and_libs ${pkgs.jfsutils}/sbin/fsck.jfs
''; '';
}; };
} }

View file

@ -17,8 +17,8 @@ in
boot.initrd.extraUtilsCommands = mkIf inInitrd boot.initrd.extraUtilsCommands = mkIf inInitrd
'' ''
cp -v ${pkgs.reiserfsprogs}/sbin/reiserfsck $out/bin copy_bin_and_libs ${pkgs.reiserfsprogs}/sbin/reiserfsck
ln -sv reiserfsck $out/bin/fsck.reiserfs ln -s reiserfsck $out/bin/fsck.reiserfs
''; '';
}; };

View file

@ -7,9 +7,8 @@
boot.initrd.kernelModules = [ "fuse" ]; boot.initrd.kernelModules = [ "fuse" ];
boot.initrd.extraUtilsCommands = '' boot.initrd.extraUtilsCommands = ''
cp -v ${pkgs.fuse}/lib/libfuse* $out/lib copy_bin_and_libs ${pkgs.fuse}/sbin/mount.fuse
cp -v ${pkgs.fuse}/sbin/mount.fuse $out/bin copy_bin_and_libs ${pkgs.unionfs-fuse}/bin/unionfs
cp -v ${pkgs.unionfs-fuse}/bin/unionfs $out/bin
substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out/bin/mount.unionfs-fuse \ substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out/bin/mount.unionfs-fuse \
--replace '${pkgs.bash}/bin/bash' /bin/sh \ --replace '${pkgs.bash}/bin/bash' /bin/sh \
--replace '${pkgs.fuse}/sbin' /bin \ --replace '${pkgs.fuse}/sbin' /bin \

View file

@ -17,7 +17,7 @@ in
boot.initrd.extraUtilsCommands = mkIf inInitrd boot.initrd.extraUtilsCommands = mkIf inInitrd
'' ''
cp -v ${pkgs.dosfstools}/sbin/dosfsck $out/bin copy_bin_and_libs ${pkgs.dosfstools}/sbin/dosfsck
ln -sv dosfsck $out/bin/fsck.vfat ln -sv dosfsck $out/bin/fsck.vfat
''; '';

View file

@ -17,7 +17,7 @@ in
boot.initrd.extraUtilsCommands = mkIf inInitrd boot.initrd.extraUtilsCommands = mkIf inInitrd
'' ''
cp -v ${pkgs.xfsprogs}/sbin/fsck.xfs $out/bin copy_bin_and_libs ${pkgs.xfsprogs}/sbin/fsck.xfs
''; '';
# Trick just to set 'sh' after the extraUtils nuke-refs. # Trick just to set 'sh' after the extraUtils nuke-refs.

View file

@ -55,8 +55,7 @@ in
boot.zfs = { boot.zfs = {
useGit = mkOption { useGit = mkOption {
type = types.bool; type = types.bool;
# TODO(wkennington): Revert when 0.6.4 is out default = false;
default = versionAtLeast config.boot.kernelPackages.kernel.version "3.19";
example = true; example = true;
description = '' description = ''
Use the git version of the SPL and ZFS packages. Use the git version of the SPL and ZFS packages.
@ -204,11 +203,14 @@ in
kernelModules = [ "spl" "zfs" ]; kernelModules = [ "spl" "zfs" ];
extraUtilsCommands = extraUtilsCommands =
'' ''
cp -v ${zfsUserPkg}/sbin/zfs $out/bin copy_bin_and_libs ${zfsUserPkg}/sbin/zfs
cp -v ${zfsUserPkg}/sbin/zdb $out/bin copy_bin_and_libs ${zfsUserPkg}/sbin/zdb
cp -v ${zfsUserPkg}/sbin/zpool $out/bin copy_bin_and_libs ${zfsUserPkg}/sbin/zpool
cp -pdv ${zfsUserPkg}/lib/lib*.so* $out/lib '';
cp -pdv ${pkgs.zlib}/lib/lib*.so* $out/lib extraUtilsCommandsTest = mkIf inInitrd
''
$out/bin/zfs --help >/dev/null 2>&1
$out/bin/zpool --help >/dev/null 2>&1
''; '';
postDeviceCommands = concatStringsSep "\n" (['' postDeviceCommands = concatStringsSep "\n" ([''
ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}" ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"

View file

@ -16,7 +16,7 @@ with lib;
Enable sensitivity and speed configuration for trackpoints. Enable sensitivity and speed configuration for trackpoints.
''; '';
}; };
sensitivity = mkOption { sensitivity = mkOption {
default = 128; default = 128;
example = 255; example = 255;
@ -44,7 +44,7 @@ with lib;
Enable scrolling while holding the middle mouse button. Enable scrolling while holding the middle mouse button.
''; '';
}; };
}; };
}; };
@ -70,7 +70,7 @@ with lib;
'' ''
Section "InputClass" Section "InputClass"
Identifier "Trackpoint Wheel Emulation" Identifier "Trackpoint Wheel Emulation"
MatchProduct "TPPS/2 IBM TrackPoint|DualPoint Stick|Synaptics Inc. Composite TouchPad / TrackPoint|ThinkPad USB Keyboard with TrackPoint|USB Trackpoint pointing device|Composite TouchPad / TrackPoint" MatchProduct "Elantech PS/2 TrackPoint|TPPS/2 IBM TrackPoint|DualPoint Stick|Synaptics Inc. Composite TouchPad / TrackPoint|ThinkPad USB Keyboard with TrackPoint|USB Trackpoint pointing device|Composite TouchPad / TrackPoint"
MatchDevicePath "/dev/input/event*" MatchDevicePath "/dev/input/event*"
Option "EmulateWheel" "true" Option "EmulateWheel" "true"
Option "EmulateWheelButton" "2" Option "EmulateWheelButton" "2"

View file

@ -165,7 +165,7 @@ in
boot.initrd.extraUtilsCommands = boot.initrd.extraUtilsCommands =
'' ''
# We need swapon in the initrd. # We need swapon in the initrd.
cp --remove-destination ${pkgs.utillinux}/sbin/swapon $out/bin copy_bin_and_libs ${pkgs.utillinux}/sbin/swapon
''; '';
# Don't put old configurations in the GRUB menu. The user has no # Don't put old configurations in the GRUB menu. The user has no

View file

@ -7,6 +7,9 @@ in
{ {
imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ]; imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
# https://cloud.google.com/compute/docs/tutorials/building-images
networking.firewall.enable = mkDefault false;
system.build.googleComputeImage = system.build.googleComputeImage =
pkgs.vmTools.runInLinuxVM ( pkgs.vmTools.runInLinuxVM (
pkgs.runCommand "google-compute-image" pkgs.runCommand "google-compute-image"
@ -95,6 +98,7 @@ in
boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ]; boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
boot.initrd.kernelModules = [ "virtio_scsi" ]; boot.initrd.kernelModules = [ "virtio_scsi" ];
boot.kernelModules = [ "virtio_pci" "virtio_net" ];
# Generate a GRUB menu. Amazon's pv-grub uses this to boot our kernel/initrd. # Generate a GRUB menu. Amazon's pv-grub uses this to boot our kernel/initrd.
boot.loader.grub.device = "/dev/sda"; boot.loader.grub.device = "/dev/sda";
@ -108,6 +112,7 @@ in
# at instance creation time. # at instance creation time.
services.openssh.enable = true; services.openssh.enable = true;
services.openssh.permitRootLogin = "without-password"; services.openssh.permitRootLogin = "without-password";
services.openssh.passwordAuthentication = mkDefault false;
# Force getting the hostname from Google Compute. # Force getting the hostname from Google Compute.
networking.hostName = mkDefault ""; networking.hostName = mkDefault "";
@ -178,5 +183,79 @@ in
serviceConfig.RemainAfterExit = true; serviceConfig.RemainAfterExit = true;
serviceConfig.StandardError = "journal+console"; serviceConfig.StandardError = "journal+console";
serviceConfig.StandardOutput = "journal+console"; serviceConfig.StandardOutput = "journal+console";
}; };
# Setings taken from https://cloud.google.com/compute/docs/tutorials/building-images#providedkernel
boot.kernel.sysctl = {
# enables syn flood protection
"net.ipv4.tcp_syncookies" = mkDefault "1";
# ignores source-routed packets
"net.ipv4.conf.all.accept_source_route" = mkDefault "0";
# ignores source-routed packets
"net.ipv4.conf.default.accept_source_route" = mkDefault "0";
# ignores ICMP redirects
"net.ipv4.conf.all.accept_redirects" = mkDefault "0";
# ignores ICMP redirects
"net.ipv4.conf.default.accept_redirects" = mkDefault "0";
# ignores ICMP redirects from non-GW hosts
"net.ipv4.conf.all.secure_redirects" = mkDefault "1";
# ignores ICMP redirects from non-GW hosts
"net.ipv4.conf.default.secure_redirects" = mkDefault "1";
# don't allow traffic between networks or act as a router
"net.ipv4.ip_forward" = mkDefault "0";
# don't allow traffic between networks or act as a router
"net.ipv4.conf.all.send_redirects" = mkDefault "0";
# don't allow traffic between networks or act as a router
"net.ipv4.conf.default.send_redirects" = mkDefault "0";
# reverse path filtering - IP spoofing protection
"net.ipv4.conf.all.rp_filter" = mkDefault "1";
# reverse path filtering - IP spoofing protection
"net.ipv4.conf.default.rp_filter" = mkDefault "1";
# ignores ICMP broadcasts to avoid participating in Smurf attacks
"net.ipv4.icmp_echo_ignore_broadcasts" = mkDefault "1";
# ignores bad ICMP errors
"net.ipv4.icmp_ignore_bogus_error_responses" = mkDefault "1";
# logs spoofed, source-routed, and redirect packets
"net.ipv4.conf.all.log_martians" = mkDefault "1";
# log spoofed, source-routed, and redirect packets
"net.ipv4.conf.default.log_martians" = mkDefault "1";
# implements RFC 1337 fix
"net.ipv4.tcp_rfc1337" = mkDefault "1";
# randomizes addresses of mmap base, heap, stack and VDSO page
"kernel.randomize_va_space" = mkDefault "2";
# provides protection from ToCToU races
"fs.protected_hardlinks" = mkDefault "1";
# provides protection from ToCToU races
"fs.protected_symlinks" = mkDefault "1";
# makes locating kernel addresses more difficult
"kernel.kptr_restrict" = mkDefault "1";
# set ptrace protections
"kernel.yama.ptrace_scope" = mkDefault "1";
# set perf only available to root
"kernel.perf_event_paranoid" = mkDefault "2";
};
} }

View file

@ -23,6 +23,7 @@ Usage: nixos-container list
nixos-container start <container-name> nixos-container start <container-name>
nixos-container stop <container-name> nixos-container stop <container-name>
nixos-container status <container-name> nixos-container status <container-name>
nixos-container update <container-name> [--config <string>]
nixos-container login <container-name> nixos-container login <container-name>
nixos-container root-login <container-name> nixos-container root-login <container-name>
nixos-container run <container-name> -- args... nixos-container run <container-name> -- args...

View file

@ -346,7 +346,7 @@ in
boot.initrd.extraUtilsCommands = boot.initrd.extraUtilsCommands =
'' ''
# We need mke2fs in the initrd. # We need mke2fs in the initrd.
cp -vf --remove-destination ${pkgs.e2fsprogs}/sbin/mke2fs $out/bin copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/mke2fs
''; '';
boot.initrd.postDeviceCommands = boot.initrd.postDeviceCommands =

View file

@ -31,7 +31,7 @@ in rec {
nixpkgs = nixpkgsSrc; nixpkgs = nixpkgsSrc;
})) [ "unstable" ]; })) [ "unstable" ];
tested = pkgs.releaseTools.aggregate { tested = pkgs.lib.hydraJob (pkgs.releaseTools.aggregate {
name = "nixos-${nixos.channel.version}"; name = "nixos-${nixos.channel.version}";
meta = { meta = {
description = "Release-critical builds for the NixOS channel"; description = "Release-critical builds for the NixOS channel";
@ -57,6 +57,7 @@ in rec {
(all nixos.tests.installer.simple) (all nixos.tests.installer.simple)
(all nixos.tests.installer.simpleLabels) (all nixos.tests.installer.simpleLabels)
(all nixos.tests.installer.simpleProvided) (all nixos.tests.installer.simpleProvided)
(all nixos.tests.installer.swraid)
(all nixos.tests.installer.btrfsSimple) (all nixos.tests.installer.btrfsSimple)
(all nixos.tests.installer.btrfsSubvols) (all nixos.tests.installer.btrfsSubvols)
(all nixos.tests.installer.btrfsSubvolDefault) (all nixos.tests.installer.btrfsSubvolDefault)
@ -85,6 +86,6 @@ in rec {
nixpkgs.tarball nixpkgs.tarball
(all nixpkgs.emacs) (all nixpkgs.emacs)
]; ];
}; });
} }

View file

@ -79,7 +79,7 @@ in rec {
vim; vim;
}; };
tested = pkgs.releaseTools.aggregate { tested = lib.hydraJob (pkgs.releaseTools.aggregate {
name = "nixos-${nixos.channel.version}"; name = "nixos-${nixos.channel.version}";
meta = { meta = {
description = "Release-critical builds for the NixOS channel"; description = "Release-critical builds for the NixOS channel";
@ -88,6 +88,6 @@ in rec {
constituents = constituents =
let all = x: map (system: x.${system}) supportedSystems; in let all = x: map (system: x.${system}) supportedSystems; in
[ nixpkgs.tarball ] ++ lib.collect lib.isDerivation nixos; [ nixpkgs.tarball ] ++ lib.collect lib.isDerivation nixos;
}; });
} }

View file

@ -3,22 +3,20 @@
, supportedSystems ? [ "x86_64-linux" "i686-linux" ] , supportedSystems ? [ "x86_64-linux" "i686-linux" ]
}: }:
with import ../lib;
let let
version = builtins.readFile ../.version; version = builtins.readFile ../.version;
versionSuffix = versionSuffix =
(if stableBranch then "." else "pre") + "${toString nixpkgs.revCount}.${nixpkgs.shortRev}"; (if stableBranch then "." else "pre") + "${toString nixpkgs.revCount}.${nixpkgs.shortRev}";
forAllSystems = pkgs.lib.genAttrs supportedSystems; forAllSystems = genAttrs supportedSystems;
scrubDrv = drv: let res = { inherit (drv) drvPath outPath type name system meta; outputName = "out"; out = res; }; in res; callTest = fn: args: forAllSystems (system: hydraJob (import fn ({ inherit system; } // args)));
callTest = fn: args: forAllSystems (system: scrubDrv (import fn ({ inherit system; } // args)));
pkgs = import nixpkgs { system = "x86_64-linux"; }; pkgs = import nixpkgs { system = "x86_64-linux"; };
lib = pkgs.lib;
versionModule = versionModule =
{ system.nixosVersionSuffix = versionSuffix; { system.nixosVersionSuffix = versionSuffix;
@ -42,10 +40,10 @@ let
in in
# Declare the ISO as a build product so that it shows up in Hydra. # Declare the ISO as a build product so that it shows up in Hydra.
scrubDrv (runCommand "nixos-iso-${config.system.nixosVersion}" hydraJob (runCommand "nixos-iso-${config.system.nixosVersion}"
{ meta = { { meta = {
description = "NixOS installation CD (${description}) - ISO image for ${system}"; description = "NixOS installation CD (${description}) - ISO image for ${system}";
maintainers = map (x: lib.getAttr x lib.maintainers) maintainers; maintainers = map (x: lib.maintainers.${x}) maintainers;
}; };
inherit iso; inherit iso;
passthru = { inherit config; }; passthru = { inherit config; };
@ -74,7 +72,7 @@ let
tarball // tarball //
{ meta = { { meta = {
description = "NixOS system tarball for ${system} - ${stdenv.platform.name}"; description = "NixOS system tarball for ${system} - ${stdenv.platform.name}";
maintainers = map (x: lib.getAttr x lib.maintainers) maintainers; maintainers = map (x: lib.maintainers.${x}) maintainers;
}; };
inherit config; inherit config;
}; };
@ -83,12 +81,12 @@ let
makeClosure = module: buildFromConfig module (config: config.system.build.toplevel); makeClosure = module: buildFromConfig module (config: config.system.build.toplevel);
buildFromConfig = module: sel: forAllSystems (system: scrubDrv (sel (import ./lib/eval-config.nix { buildFromConfig = module: sel: forAllSystems (system: hydraJob (sel (import ./lib/eval-config.nix {
inherit system; inherit system;
modules = [ module versionModule ] ++ lib.singleton modules = [ module versionModule ] ++ singleton
({ config, lib, ... }: ({ config, lib, ... }:
{ fileSystems."/".device = lib.mkDefault "/dev/sda1"; { fileSystems."/".device = mkDefault "/dev/sda1";
boot.loader.grub.device = lib.mkDefault "/dev/sda"; boot.loader.grub.device = mkDefault "/dev/sda";
}); });
}).config)); }).config));
@ -175,10 +173,10 @@ in rec {
in in
# Declare the OVA as a build product so that it shows up in Hydra. # Declare the OVA as a build product so that it shows up in Hydra.
scrubDrv (runCommand "nixos-ova-${config.system.nixosVersion}-${system}" hydraJob (runCommand "nixos-ova-${config.system.nixosVersion}-${system}"
{ meta = { { meta = {
description = "NixOS VirtualBox appliance (${system})"; description = "NixOS VirtualBox appliance (${system})";
maintainers = lib.maintainers.eelco; maintainers = maintainers.eelco;
}; };
ova = config.system.build.virtualBoxOVA; ova = config.system.build.virtualBoxOVA;
} }
@ -195,9 +193,9 @@ in rec {
dummy = forAllSystems (system: pkgs.runCommand "dummy" dummy = forAllSystems (system: pkgs.runCommand "dummy"
{ toplevel = (import lib/eval-config.nix { { toplevel = (import lib/eval-config.nix {
inherit system; inherit system;
modules = lib.singleton ({ config, pkgs, ... }: modules = singleton ({ config, pkgs, ... }:
{ fileSystems."/".device = lib.mkDefault "/dev/sda1"; { fileSystems."/".device = mkDefault "/dev/sda1";
boot.loader.grub.device = lib.mkDefault "/dev/sda"; boot.loader.grub.device = mkDefault "/dev/sda";
}); });
}).config.system.build.toplevel; }).config.system.build.toplevel;
} }
@ -242,34 +240,35 @@ in rec {
tests.avahi = callTest tests/avahi.nix {}; tests.avahi = callTest tests/avahi.nix {};
tests.bittorrent = callTest tests/bittorrent.nix {}; tests.bittorrent = callTest tests/bittorrent.nix {};
tests.blivet = callTest tests/blivet.nix {}; tests.blivet = callTest tests/blivet.nix {};
tests.cadvisor = scrubDrv (import tests/cadvisor.nix { system = "x86_64-linux"; }); tests.cadvisor = hydraJob (import tests/cadvisor.nix { system = "x86_64-linux"; });
tests.chromium = callTest tests/chromium.nix {}; tests.chromium = callTest tests/chromium.nix {};
#tests.cjdns = callTest tests/cjdns.nix {}; tests.cjdns = callTest tests/cjdns.nix {};
tests.containers = callTest tests/containers.nix {}; tests.containers = callTest tests/containers.nix {};
tests.docker = scrubDrv (import tests/docker.nix { system = "x86_64-linux"; }); tests.docker = hydraJob (import tests/docker.nix { system = "x86_64-linux"; });
tests.dockerRegistry = scrubDrv (import tests/docker-registry.nix { system = "x86_64-linux"; }); tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; });
tests.etcd = scrubDrv (import tests/etcd.nix { system = "x86_64-linux"; }); tests.etcd = hydraJob (import tests/etcd.nix { system = "x86_64-linux"; });
tests.firefox = callTest tests/firefox.nix {}; tests.firefox = callTest tests/firefox.nix {};
tests.firewall = callTest tests/firewall.nix {}; tests.firewall = callTest tests/firewall.nix {};
tests.fleet = scrubDrv (import tests/fleet.nix { system = "x86_64-linux"; }); tests.fleet = hydraJob (import tests/fleet.nix { system = "x86_64-linux"; });
#tests.gitlab = callTest tests/gitlab.nix {}; #tests.gitlab = callTest tests/gitlab.nix {};
tests.gnome3 = callTest tests/gnome3.nix {}; tests.gnome3 = callTest tests/gnome3.nix {};
tests.i3wm = callTest tests/i3wm.nix {}; tests.i3wm = callTest tests/i3wm.nix {};
tests.installer.grub1 = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).grub1.test); tests.installer.grub1 = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).grub1.test);
tests.installer.lvm = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).lvm.test); tests.installer.lvm = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).lvm.test);
tests.installer.rebuildCD = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).rebuildCD.test); tests.installer.rebuildCD = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).rebuildCD.test);
tests.installer.separateBoot = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).separateBoot.test); tests.installer.separateBoot = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).separateBoot.test);
tests.installer.simple = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simple.test); tests.installer.simple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simple.test);
tests.installer.simpleLabels = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simpleLabels.test); tests.installer.simpleLabels = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleLabels.test);
tests.installer.simpleProvided = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).simpleProvided.test); tests.installer.simpleProvided = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).simpleProvided.test);
tests.installer.btrfsSimple = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSimple.test); tests.installer.swraid = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).swraid.test);
tests.installer.btrfsSubvols = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSubvols.test); tests.installer.btrfsSimple = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSimple.test);
tests.installer.btrfsSubvolDefault = forAllSystems (system: scrubDrv (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test); tests.installer.btrfsSubvols = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvols.test);
tests.installer.btrfsSubvolDefault = forAllSystems (system: hydraJob (import tests/installer.nix { inherit system; }).btrfsSubvolDefault.test);
tests.influxdb = callTest tests/influxdb.nix {}; tests.influxdb = callTest tests/influxdb.nix {};
tests.ipv6 = callTest tests/ipv6.nix {}; tests.ipv6 = callTest tests/ipv6.nix {};
tests.jenkins = callTest tests/jenkins.nix {}; tests.jenkins = callTest tests/jenkins.nix {};
tests.kde4 = callTest tests/kde4.nix {}; tests.kde4 = callTest tests/kde4.nix {};
tests.kubernetes = scrubDrv (import tests/kubernetes.nix { system = "x86_64-linux"; }); tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; });
tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; }; tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; };
tests.login = callTest tests/login.nix {}; tests.login = callTest tests/login.nix {};
#tests.logstash = callTest tests/logstash.nix {}; #tests.logstash = callTest tests/logstash.nix {};
@ -299,9 +298,10 @@ in rec {
# TODO: put in networking.nix after the test becomes more complete # TODO: put in networking.nix after the test becomes more complete
tests.networkingProxy = callTest tests/networking-proxy.nix {}; tests.networkingProxy = callTest tests/networking-proxy.nix {};
tests.nfs3 = callTest tests/nfs.nix { version = 3; }; tests.nfs3 = callTest tests/nfs.nix { version = 3; };
tests.nfs4 = callTest tests/nfs.nix { version = 4; };
tests.nsd = callTest tests/nsd.nix {}; tests.nsd = callTest tests/nsd.nix {};
tests.openssh = callTest tests/openssh.nix {}; tests.openssh = callTest tests/openssh.nix {};
tests.panamax = scrubDrv (import tests/panamax.nix { system = "x86_64-linux"; }); tests.panamax = hydraJob (import tests/panamax.nix { system = "x86_64-linux"; });
tests.peerflix = callTest tests/peerflix.nix {}; tests.peerflix = callTest tests/peerflix.nix {};
tests.printing = callTest tests/printing.nix {}; tests.printing = callTest tests/printing.nix {};
tests.proxy = callTest tests/proxy.nix {}; tests.proxy = callTest tests/proxy.nix {};
@ -312,6 +312,10 @@ in rec {
tests.udisks2 = callTest tests/udisks2.nix {}; tests.udisks2 = callTest tests/udisks2.nix {};
tests.virtualbox = callTest tests/virtualbox.nix {}; tests.virtualbox = callTest tests/virtualbox.nix {};
tests.xfce = callTest tests/xfce.nix {}; tests.xfce = callTest tests/xfce.nix {};
tests.bootBiosCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosCdrom);
tests.bootBiosUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootBiosUsb);
tests.bootUefiCdrom = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiCdrom);
tests.bootUefiUsb = forAllSystems (system: hydraJob (import tests/boot.nix { inherit system; }).bootUefiUsb);
/* Build a bunch of typical closures so that Hydra can keep track of /* Build a bunch of typical closures so that Hydra can keep track of

63
nixos/tests/boot.nix Normal file
View file

@ -0,0 +1,63 @@
{ system ? builtins.currentSystem }:
with import ../lib/testing.nix { inherit system; };
with import ../lib/qemu-flags.nix;
with pkgs.lib;
let
iso =
(import ../lib/eval-config.nix {
inherit system;
modules =
[ ../modules/installer/cd-dvd/installation-cd-minimal.nix
../modules/testing/test-instrumentation.nix
{ key = "serial";
boot.loader.grub.timeout = mkOverride 0 0;
# The test cannot access the network, so any sources we
# need must be included in the ISO.
isoImage.storeContents =
[ pkgs.glibcLocales
pkgs.sudo
pkgs.docbook5
pkgs.docbook5_xsl
pkgs.grub
pkgs.perlPackages.XMLLibXML
pkgs.unionfs-fuse
pkgs.gummiboot
];
}
];
}).config.system.build.isoImage;
makeBootTest = name: machineConfig:
makeTest {
inherit iso;
name = "boot-" + name;
nodes = { };
testScript =
''
my $machine = createMachine({ ${machineConfig}, qemuFlags => '-m 768' });
$machine->start;
$machine->waitForUnit("multi-user.target");
$machine->shutdown;
'';
};
in {
bootBiosCdrom = makeBootTest "bios-cdrom" ''
cdrom => glob("${iso}/iso/*.iso")
'';
bootBiosUsb = makeBootTest "bios-usb" ''
usb => glob("${iso}/iso/*.iso")
'';
bootUefiCdrom = makeBootTest "uefi-cdrom" ''
cdrom => glob("${iso}/iso/*.iso"),
bios => '${pkgs.OVMF}/FV/OVMF.fd'
'';
bootUefiUsb = makeBootTest "uefi-usb" ''
usb => glob("${iso}/iso/*.iso"),
bios => '${pkgs.OVMF}/FV/OVMF.fd'
'';
}

View file

@ -109,7 +109,12 @@ import ./make-test.nix (
$machine->waitUntilSucceeds("${xdo "check-startup" '' $machine->waitUntilSucceeds("${xdo "check-startup" ''
search --sync --onlyvisible --name "startup done" search --sync --onlyvisible --name "startup done"
# close first start help popup # close first start help popup
key Escape key -delay 1000 Escape
# XXX: This is to make sure the popup is closed, but we better do
# screenshots to detect visual changes.
key -delay 2000 Escape
key -delay 3000 Escape
key -delay 4000 Escape
windowfocus --sync windowfocus --sync
windowactivate --sync windowactivate --sync
''}"); ''}");

View file

@ -3,15 +3,15 @@ let
carolPubKey = "n932l3pjvmhtxxcdrqq2qpw5zc58f01vvjx01h4dtd1bb0nnu2h0.k"; carolPubKey = "n932l3pjvmhtxxcdrqq2qpw5zc58f01vvjx01h4dtd1bb0nnu2h0.k";
carolPassword = "678287829ce4c67bc8b227e56d94422ee1b85fa11618157b2f591de6c6322b52"; carolPassword = "678287829ce4c67bc8b227e56d94422ee1b85fa11618157b2f591de6c6322b52";
carolIp4 = "192.168.0.9"; carolIp4 = "192.168.0.9";
basicConfig = basicConfig =
{ config, pkgs, ... }: { config, pkgs, ... }:
{ services.cjdns.enable = true; { services.cjdns.enable = true;
# Turning off DHCP isn't very realistic but makes # Turning off DHCP isn't very realistic but makes
# the sequence of address assignment less stochastic. # the sequence of address assignment less stochastic.
networking.useDHCP = false; networking.useDHCP = false;
networking.interfaces.eth1.prefixLength = 24; networking.interfaces.eth1.prefixLength = 24;
# CJDNS output is incompatible with the XML log. # CJDNS output is incompatible with the XML log.
systemd.services.cjdns.serviceConfig.StandardOutput = "null"; systemd.services.cjdns.serviceConfig.StandardOutput = "null";
@ -41,19 +41,18 @@ import ./make-test.nix {
# Bob explicitly connects to Carol over UDPInterface. # Bob explicitly connects to Carol over UDPInterface.
bob = bob =
{ config, lib, nodes, ... }: { config, lib, nodes, ... }:
let carolIp4 = lib.mkForce nodes.carol.config.networking.interfaces.eth1; in let carolIp4 = lib.mkForce nodes.carol.config.networking.interfaces.eth1; in
{ imports = [ basicConfig ]; { imports = [ basicConfig ];
networking.interfaces.eth1.ipAddress = "192.168.0.2"; networking.interfaces.eth1.ipAddress = "192.168.0.2";
services.cjdns = services.cjdns =
{ UDPInterface = { UDPInterface =
{ bind = "0.0.0.0:1024"; { bind = "0.0.0.0:1024";
connectTo."192.168.0.1:1024}" = connectTo."192.168.0.1:1024}" =
{ hostname = "carol.hype"; { password = carolPassword;
password = carolPassword;
publicKey = carolPubKey; publicKey = carolPubKey;
}; };
}; };
@ -75,7 +74,7 @@ import ./make-test.nix {
''; '';
networking.interfaces.eth1.ipAddress = "192.168.0.1"; networking.interfaces.eth1.ipAddress = "192.168.0.1";
services.cjdns = services.cjdns =
{ authorizedPasswords = [ carolPassword ]; { authorizedPasswords = [ carolPassword ];
ETHInterface.bind = "eth1"; ETHInterface.bind = "eth1";
@ -106,13 +105,13 @@ import ./make-test.nix {
my $carolIp6 = cjdnsIp $carol; my $carolIp6 = cjdnsIp $carol;
# ping a few times each to let the routing table establish itself # ping a few times each to let the routing table establish itself
$alice->succeed("ping6 -c 4 $carolIp6"); $alice->succeed("ping6 -c 4 $carolIp6");
$bob->succeed("ping6 -c 4 carol.hype"); $bob->succeed("ping6 -c 4 $carolIp6");
$carol->succeed("ping6 -c 4 $aliceIp6"); $carol->succeed("ping6 -c 4 $aliceIp6");
$carol->succeed("ping6 -c 4 $bobIp6"); $carol->succeed("ping6 -c 4 $bobIp6");
$alice->succeed("ping6 -c 4 $bobIp6"); $alice->succeed("ping6 -c 4 $bobIp6");
$bob->succeed("ping6 -c 4 $aliceIp6"); $bob->succeed("ping6 -c 4 $aliceIp6");

View file

@ -327,12 +327,12 @@ in {
$machine->succeed( $machine->succeed(
"parted /dev/vda --" "parted /dev/vda --"
. " mklabel msdos" . " mklabel msdos"
. " mkpart primary ext2 1M 30MB" # /boot . " mkpart primary ext2 1M 100MB" # /boot
. " mkpart extended 30M -1s" . " mkpart extended 100M -1s"
. " mkpart logical 31M 1531M" # md0 (root), first device . " mkpart logical 102M 1602M" # md0 (root), first device
. " mkpart logical 1540M 3040M" # md0 (root), second device . " mkpart logical 1603M 3103M" # md0 (root), second device
. " mkpart logical 3050M 3306M" # md1 (swap), first device . " mkpart logical 3104M 3360M" # md1 (swap), first device
. " mkpart logical 3320M 3576M", # md1 (swap), second device . " mkpart logical 3361M 3617M", # md1 (swap), second device
"udevadm settle", "udevadm settle",
"ls -l /dev/vda* >&2", "ls -l /dev/vda* >&2",
"cat /proc/partitions >&2", "cat /proc/partitions >&2",

View file

@ -13,7 +13,7 @@ import ./make-test.nix rec {
id: redis-master-pod id: redis-master-pod
containers: containers:
- name: master - name: master
image: master:5000/scratch image: master:5000/nix
cpu: 100 cpu: 100
ports: ports:
- name: redis-server - name: redis-server
@ -50,8 +50,8 @@ import ./make-test.nix rec {
virtualisation.memorySize = 768; virtualisation.memorySize = 768;
services.kubernetes = { services.kubernetes = {
roles = ["master" "node"]; roles = ["master" "node"];
dockerCfg = ''{"master:5000":{}}'';
controllerManager.machines = ["master" "node"]; controllerManager.machines = ["master" "node"];
kubelet.extraOpts = "-network_container_image=master:5000/pause";
apiserver.address = "0.0.0.0"; apiserver.address = "0.0.0.0";
verbose = true; verbose = true;
}; };
@ -94,7 +94,8 @@ import ./make-test.nix rec {
{ {
services.kubernetes = { services.kubernetes = {
roles = ["node"]; roles = ["node"];
kubelet.extraOpts = "-network_container_image=master:5000/pause"; dockerCfg = ''{"master:5000":{}}'';
kubelet.apiServers = ["master:8080"];
verbose = true; verbose = true;
}; };
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0 --insecure-registry master:5000"; virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0 --insecure-registry master:5000";
@ -155,14 +156,14 @@ import ./make-test.nix rec {
$node->waitForUnit("kubernetes-kubelet.service"); $node->waitForUnit("kubernetes-kubelet.service");
$node->waitForUnit("kubernetes-proxy.service"); $node->waitForUnit("kubernetes-proxy.service");
$master->waitUntilSucceeds("kubecfg list minions | grep master"); $master->waitUntilSucceeds("kubectl get minions | grep master");
$master->waitUntilSucceeds("kubecfg list minions | grep node"); $master->waitUntilSucceeds("kubectl get minions | grep node");
$client->waitForUnit("docker.service"); $client->waitForUnit("docker.service");
$client->succeed("tar cv --files-from /dev/null | docker import - scratch"); $client->succeed("tar cv --files-from /dev/null | docker import - nix");
$client->succeed("docker tag scratch master:5000/scratch"); $client->succeed("docker tag nix master:5000/nix");
$master->waitForUnit("docker-registry.service"); $master->waitForUnit("docker-registry.service");
$client->succeed("docker push master:5000/scratch"); $client->succeed("docker push master:5000/nix");
$client->succeed("mkdir -p /root/pause"); $client->succeed("mkdir -p /root/pause");
$client->succeed("cp /etc/test/pause /root/pause/"); $client->succeed("cp /etc/test/pause /root/pause/");
$client->succeed("cp /etc/test/Dockerfile /root/pause/"); $client->succeed("cp /etc/test/Dockerfile /root/pause/");

View file

@ -1,4 +1,4 @@
import ./make-test.nix ({ version, ... }: import ./make-test.nix ({ version ? 4, ... }:
let let

View file

@ -31,8 +31,8 @@ import ./make-test.nix ({pkgs, ... }: {
startAll; startAll;
# Make sure that cups is up on both sides. # Make sure that cups is up on both sides.
$server->waitForUnit("cupsd.service"); $server->waitForUnit("cups.service");
$client->waitForUnit("cupsd.service"); $client->waitForUnit("cups.service");
$client->succeed("lpstat -r") =~ /scheduler is running/ or die; $client->succeed("lpstat -r") =~ /scheduler is running/ or die;
$client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die; $client->succeed("lpstat -H") =~ "/var/run/cups/cups.sock" or die;
$client->succeed("curl --fail http://localhost:631/"); $client->succeed("curl --fail http://localhost:631/");

View file

@ -39,9 +39,8 @@ import ./make-test.nix ({ pkgs, ... }: with pkgs.lib; let
]; ];
boot.initrd.extraUtilsCommands = '' boot.initrd.extraUtilsCommands = ''
cp -av -t "$out/bin/" \ copy_bin_and_libs "${pkgs.linuxPackages.virtualboxGuestAdditions}/sbin/mount.vboxsf"
"${pkgs.linuxPackages.virtualboxGuestAdditions}/sbin/mount.vboxsf" \ copy_bin_and_libs "${pkgs.utillinux}/bin/unshare"
"${pkgs.utillinux}/bin/unshare"
${(attrs.extraUtilsCommands or (const "")) pkgs} ${(attrs.extraUtilsCommands or (const "")) pkgs}
''; '';

View file

@ -2,8 +2,8 @@
rec { rec {
bitcoin = callPackage ./bitcoin.nix { openssl = pkgs.openssl_1_0_1j; withGui = true; }; bitcoin = callPackage ./bitcoin.nix { withGui = true; };
bitcoind = callPackage ./bitcoin.nix { openssl = pkgs.openssl_1_0_1j; withGui = false; }; bitcoind = callPackage ./bitcoin.nix { withGui = false; };
darkcoin = callPackage ./darkcoin.nix { withGui = true; }; darkcoin = callPackage ./darkcoin.nix { withGui = true; };
darkcoind = callPackage ./darkcoin.nix { withGui = false; }; darkcoind = callPackage ./darkcoin.nix { withGui = false; };

View file

@ -1,4 +1,5 @@
{ stdenv, fetchurl, alsaLib, dbus, jack2, pkgconfig, python }: { stdenv, fetchurl, makeWrapper, pkgconfig, alsaLib, dbus, jack2
, python, pythonDBus }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "a2jmidid-${version}"; name = "a2jmidid-${version}";
@ -9,13 +10,16 @@ stdenv.mkDerivation rec {
sha256 = "0pzm0qk5ilqhwz74pydg1jwrds27vm47185dakdrxidb5bv3b5ia"; sha256 = "0pzm0qk5ilqhwz74pydg1jwrds27vm47185dakdrxidb5bv3b5ia";
}; };
buildInputs = [ alsaLib dbus jack2 pkgconfig python ]; buildInputs = [ makeWrapper pkgconfig alsaLib dbus jack2 python pythonDBus ];
configurePhase = "python waf configure --prefix=$out"; configurePhase = "python waf configure --prefix=$out";
buildPhase = "python waf"; buildPhase = "python waf";
installPhase = "python waf install"; installPhase = ''
python waf install
wrapProgram $out/bin/a2j_control --set PYTHONPATH $PYTHONPATH
'';
meta = with stdenv.lib; { meta = with stdenv.lib; {
homepage = http://home.gna.org/a2jmidid; homepage = http://home.gna.org/a2jmidid;

View file

@ -17,7 +17,7 @@ stdenv.mkDerivation rec {
QT_PLUGIN_PATH="${qtscriptgenerator}/lib/qt4/plugins"; QT_PLUGIN_PATH="${qtscriptgenerator}/lib/qt4/plugins";
buildInputs = [ qtscriptgenerator stdenv.cc.libc gettext curl buildInputs = [ qtscriptgenerator stdenv.cc.libc gettext curl
libxml2 mysql taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi libxml2 mysql.lib taglib taglib_extras loudmouth kdelibs automoc4 phonon strigi
soprano qca2 libmtp liblastfm libgpod pkgconfig qjson ffmpeg libofa nepomuk_core ]; soprano qca2 libmtp liblastfm libgpod pkgconfig qjson ffmpeg libofa nepomuk_core ];
cmakeFlags = "-DKDE4_BUILD_TESTS=OFF"; cmakeFlags = "-DKDE4_BUILD_TESTS=OFF";

View file

@ -0,0 +1,29 @@
{ stdenv, fetchurl, pkgconfig, gettext, gtk2, expat, intltool, libgcrypt,
libunique, gnutls, libxml2, curl, mpd_clientlib, dbus_glib, libnotify,
libsoup, avahi, taglib
}:
stdenv.mkDerivation rec {
version = "1.5.1";
name = "ario-${version}";
src = fetchurl {
url = "mirror://sourceforge/ario-player/${name}.tar.gz";
sha256 = "07n97618jv1ilxnm5c6qj9zjz0imw3p304mn4hjbjkk3p0d2hc88";
};
patches = [ ./glib-single-include.patch ];
buildInputs = [
pkgconfig gettext gtk2 expat intltool libgcrypt libunique gnutls
libxml2 curl mpd_clientlib dbus_glib libnotify libsoup avahi taglib
];
meta = {
description = "GTK2 client for MPD (Music player daemon)";
homepage = "http://ario-player.sourceforge.net/";
license = stdenv.lib.licenses.gpl2Plus;
maintainers = [ stdenv.lib.maintainers.garrison ];
platforms = stdenv.lib.platforms.all;
};
}

View file

@ -0,0 +1,40 @@
From: Michael Biebl <biebl@debian.org>
Origin: vendor
Bug-Debian: http://bugs.debian.org/665506
Subject: Including individual glib headers no longer supported
--- a/src/ario-profiles.h
+++ b/src/ario-profiles.h
@@ -20,7 +20,7 @@
#ifndef __ARIO_PROFILES_H
#define __ARIO_PROFILES_H
-#include <glib/gslist.h>
+#include <glib.h>
#include "servers/ario-server.h"
G_BEGIN_DECLS
--- a/src/plugins/ario-plugin-info.c
+++ b/src/plugins/ario-plugin-info.c
@@ -27,7 +27,7 @@
#include <string.h>
#include <glib/gi18n.h>
-#include <glib/gkeyfile.h>
+#include <glib.h>
#include "plugins/ario-plugin-info-priv.h"
#include "ario-debug.h"
--- a/src/ario-util.h
+++ b/src/ario-util.h
@@ -18,8 +18,8 @@
*/
#include "servers/ario-server.h"
-#include "glib/gslist.h"
-#include "gdk/gdkpixbuf.h"
+#include <glib.h>
+#include <gdk/gdkpixbuf.h>
/* Number of covers used to generate the drag & drop image */
#define MAX_COVERS_IN_DRAG 3

View file

@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
preConfigure = /* we prefer system-wide libs */ '' preConfigure = /* we prefer system-wide libs */ ''
mv lib-src lib-src-rm mv lib-src lib-src-rm
mkdir lib-src mkdir lib-src
mv lib-src-rm/{Makefile*,lib-widget-extra,portaudio-v19,portmixer,portsmf,FileDialog,sbsms} lib-src/ mv lib-src-rm/{Makefile*,lib-widget-extra,portaudio-v19,portmixer,portsmf,FileDialog,sbsms,libnyquist} lib-src/
rm -r lib-src-rm/ rm -r lib-src-rm/
''; '';

View file

@ -0,0 +1,17 @@
{ stdenv, fetchurl, pkgconfig, gtk2, libsndfile, portaudio }:
stdenv.mkDerivation rec {
name = "gnaural-1.0.20110606";
buildInputs = [ pkgconfig gtk2 libsndfile portaudio ];
src = fetchurl {
url = "mirror://sourceforge/gnaural/Gnaural/${name}.tar.gz";
sha256 = "0p9rasz1jmxf16vnpj17g3vzdjygcyz3l6nmbq6wr402l61f1vy5";
};
meta = with stdenv.lib;
{ description = "Auditory binaural-beat generator";
homepage = http://gnaural.sourceforge.net/;
licenses = licenses.gpl2;
maintainers = [ maintainers.emery ];
platforms = platforms.linux;
};
}

View file

@ -0,0 +1,35 @@
{stdenv, fetchurl, scons, boost, ladspaH, pkgconfig }:
stdenv.mkDerivation rec {
version = "0.2-2";
name = "nova-filters-${version}";
src = fetchurl {
url = http://klingt.org/~tim/nova-filters/nova-filters_0.2-2.tar.gz;
sha256 = "16064vvl2w5lz4xi3lyjk4xx7fphwsxc14ajykvndiz170q32s6i";
};
buildInputs = [ scons boost ladspaH pkgconfig ];
patchPhase = ''
# remove TERM:
sed -i -e '4d' SConstruct
sed -i "s@mfpmath=sse@mfpmath=sse -I ${boost.dev}/include@g" SConstruct
sed -i "s@ladspa.h@${ladspaH}/include/ladspa.h@g" filters.cpp
sed -i "s/= check/= detail::filter_base<internal_type, checked>::check/" nova/source/dsp/filter.hpp
'';
buildPhase = ''
scons
'';
installPhase = ''
scons $sconsFlags "prefix=$out" install
'';
meta = {
homepage = http://klingt.org/~tim/nova-filters/;
description = "LADSPA plugins based on filters of nova";
license = stdenv.lib.licenses.gpl2Plus;
};
}

Some files were not shown because too many files have changed in this diff Show more