Merge staging-next into staging

commit 06efb52369
github-actions[bot] 2021-01-27 18:28:34 +00:00, committed by GitHub
62 changed files with 4994 additions and 5009 deletions


@ -11,8 +11,7 @@
  </para>
  <para>
-   It makes virtio modules available on the initrd, sets the system time from
-   the hardware clock to work around a bug in qemu-kvm, and
-   <link linkend="opt-security.rngd.enable">enables rngd</link>.
+   It makes virtio modules available on the initrd and sets the system time from
+   the hardware clock to work around a bug in qemu-kvm.
  </para>
 </section>


@ -610,6 +610,15 @@ self: super:
    been dropped from upstream releases.
   </para>
  </listitem>
+ <listitem>
+  <para>
+   In the ACME module, the data used to build the hash for the account
+   directory has changed to accommodate new features that reduce account
+   rate limit issues. This will trigger new account creation on the first
+   rebuild following this update. No issues are expected to arise from this,
+   thanks to the new account creation handling.
+  </para>
+ </listitem>
  <listitem>
   <para>
    <xref linkend="opt-users.users._name_.createHome" /> now always ensures home directory permissions to be <literal>0700</literal>.


@ -71,7 +71,7 @@ in
   #utmp = 29; # unused
   # ddclient = 30; # converted to DynamicUser = true
   davfs2 = 31;
-  #disnix = 33; # unused
+  #disnix = 33; # module removed
   osgi = 34;
   tor = 35;
   cups = 36;
@ -387,7 +387,7 @@ in
   utmp = 29;
   # ddclient = 30; # converted to DynamicUser = true
   davfs2 = 31;
-  disnix = 33;
+  #disnix = 33; # module removed
   osgi = 34;
   tor = 35;
   #cups = 36; # unused


@ -450,8 +450,6 @@
   ./services/misc/devmon.nix
   ./services/misc/dictd.nix
   ./services/misc/dwm-status.nix
-  ./services/misc/dysnomia.nix
-  ./services/misc/disnix.nix
   ./services/misc/docker-registry.nix
   ./services/misc/domoticz.nix
   ./services/misc/errbot.nix
@ -727,6 +725,7 @@
   ./services/networking/owamp.nix
   ./services/networking/pdnsd.nix
   ./services/networking/pixiecore.nix
+  ./services/networking/pleroma.nix
   ./services/networking/polipo.nix
   ./services/networking/powerdns.nix
   ./services/networking/pdns-recursor.nix


@ -1,7 +1,7 @@
 # Common configuration for virtual machines running under QEMU (using
 # virtio).
-{ lib, ... }:
+{ ... }:
 {
   boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ];
@ -14,6 +14,4 @@
     # to the *boot time* of the host).
     hwclock -s
   '';
-
-  security.rngd.enable = lib.mkDefault false;
 }


@ -7,6 +7,11 @@ let
   numCerts = length (builtins.attrNames cfg.certs);
   _24hSecs = 60 * 60 * 24;

+  # Used to make unique paths for each cert/account config set
+  mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
+  mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
+  accountDirRoot = "/var/lib/acme/.lego/accounts/";
+
   # There are many services required to make cert renewals work.
   # They all follow a common structure:
   #   - They inherit this commonServiceConfig
@ -19,7 +24,7 @@ let
Type = "oneshot"; Type = "oneshot";
User = "acme"; User = "acme";
Group = mkDefault "acme"; Group = mkDefault "acme";
UMask = 0027; UMask = 0023;
StateDirectoryMode = 750; StateDirectoryMode = 750;
ProtectSystem = "full"; ProtectSystem = "full";
PrivateTmp = true; PrivateTmp = true;
@ -54,23 +59,35 @@ let
   '';
   };

-  # Previously, all certs were owned by whatever user was configured in
-  # config.security.acme.certs.<cert>.user. Now everything is owned by and
-  # run by the acme user.
-  userMigrationService = {
-    description = "Fix owner and group of all ACME certificates";
-
-    script = with builtins; concatStringsSep "\n" (mapAttrsToList (cert: data: ''
-      for fixpath in /var/lib/acme/${escapeShellArg cert} /var/lib/acme/.lego/${escapeShellArg cert}; do
+  # Ensures that directories which are shared across all certs
+  # exist and have the correct user and group, since group
+  # is configurable on a per-cert basis.
+  userMigrationService = let
+    script = with builtins; ''
+      chown -R acme .lego/accounts
+    '' + (concatStringsSep "\n" (mapAttrsToList (cert: data: ''
+      for fixpath in ${escapeShellArg cert} .lego/${escapeShellArg cert}; do
         if [ -d "$fixpath" ]; then
           chmod -R u=rwX,g=rX,o= "$fixpath"
           chown -R acme:${data.group} "$fixpath"
         fi
       done
-    '') certConfigs);
-
-    # We don't want this to run every time a renewal happens
-    serviceConfig.RemainAfterExit = true;
+    '') certConfigs));
+  in {
+    description = "Fix owner and group of all ACME certificates";
+
+    serviceConfig = commonServiceConfig // {
+      # We don't want this to run every time a renewal happens
+      RemainAfterExit = true;
+
+      # These StateDirectory entries negate the need for tmpfiles
+      StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
+      StateDirectoryMode = 755;
+      WorkingDirectory = "/var/lib/acme";
+
+      # Run the start script as root
+      ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
+    };
   };
certToConfig = cert: data: let certToConfig = cert: data: let
@ -101,11 +118,10 @@ let
     ${toString acmeServer} ${toString data.dnsProvider}
     ${toString data.ocspMustStaple} ${data.keyType}
   '';
-  mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
   certDir = mkHash hashData;
   domainHash = mkHash "${concatStringsSep " " extraDomains} ${data.domain}";
-  othersHash = mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
-  accountDir = "/var/lib/acme/.lego/accounts/" + othersHash;
+  accountHash = (mkAccountHash acmeServer data);
+  accountDir = accountDirRoot + accountHash;

   protocolOpts = if useDns then (
     [ "--dns" data.dnsProvider ]
@ -142,9 +158,8 @@ let
   );
   in {
-    inherit accountDir selfsignedDeps;
-    webroot = data.webroot;
+    inherit accountHash cert selfsignedDeps;
     group = data.group;

     renewTimer = {
@ -184,7 +199,10 @@ let
StateDirectory = "acme/${cert}"; StateDirectory = "acme/${cert}";
BindPaths = "/var/lib/acme/.minica:/tmp/ca /var/lib/acme/${cert}:/tmp/${keyName}"; BindPaths = [
"/var/lib/acme/.minica:/tmp/ca"
"/var/lib/acme/${cert}:/tmp/${keyName}"
];
}; };
# Working directory will be /tmp # Working directory will be /tmp
@ -222,16 +240,22 @@ let
     serviceConfig = commonServiceConfig // {
       Group = data.group;

-      # AccountDir dir will be created by tmpfiles to ensure correct permissions
-      # And to avoid deletion during systemctl clean
-      # acme/.lego/${cert} is listed so that it is deleted during systemctl clean
-      StateDirectory = "acme/${cert} acme/.lego/${cert} acme/.lego/${cert}/${certDir}";
+      # Keep in mind that these directories will be deleted if the user runs
+      # systemctl clean --what=state
+      # acme/.lego/${cert} is listed for this reason.
+      StateDirectory = [
+        "acme/${cert}"
+        "acme/.lego/${cert}"
+        "acme/.lego/${cert}/${certDir}"
+        "acme/.lego/accounts/${accountHash}"
+      ];

-      # Needs to be space separated, but can't use a multiline string because that'll include newlines
-      BindPaths =
-        "${accountDir}:/tmp/accounts " +
-        "/var/lib/acme/${cert}:/tmp/out " +
-        "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates ";
+      BindPaths = [
+        "${accountDir}:/tmp/accounts"
+        "/var/lib/acme/${cert}:/tmp/out"
+        "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
+      ];

       # Only try loading the credentialsFile if the dns challenge is enabled
       EnvironmentFile = mkIf useDns data.credentialsFile;
@ -248,13 +272,18 @@ let
     # Working directory will be /tmp
     script = ''
-      set -euo pipefail
+      set -euxo pipefail
+
+      ${optionalString (data.webroot != null) ''
+        # Ensure the webroot exists
+        mkdir -p '${data.webroot}/.well-known/acme-challenge'
+        chown 'acme:${data.group}' ${data.webroot}/{.well-known,.well-known/acme-challenge}
+      ''}

       echo '${domainHash}' > domainhash.txt

       # Check if we can renew
-      # Certificates and account credentials must exist
-      if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a "$(ls -1 accounts)" ]; then
+      if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a -n "$(ls -1 accounts)" ]; then

         # When domains are updated, there's no need to do a full
         # Lego run, but it's likely renew won't work if days is too low.
@ -664,21 +693,33 @@ in {
     systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;

-    # .lego and .lego/accounts specified to fix any incorrect permissions
-    systemd.tmpfiles.rules = [
-      "d /var/lib/acme/.lego - acme acme"
-      "d /var/lib/acme/.lego/accounts - acme acme"
-    ] ++ (unique (concatMap (conf: [
-      "d ${conf.accountDir} - acme acme"
-    ] ++ (optional (conf.webroot != null) "d ${conf.webroot}/.well-known/acme-challenge - acme ${conf.group}")
-    ) (attrValues certConfigs)));
-
-    # Create some targets which can be depended on to be "active" after cert renewals
-    systemd.targets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
-      wantedBy = [ "default.target" ];
-      requires = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
-      after = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
-    }) certConfigs;
+    systemd.targets = let
+      # Create some targets which can be depended on to be "active" after cert renewals
+      finishedTargets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
+        wantedBy = [ "default.target" ];
+        requires = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
+        after = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
+      }) certConfigs;
+
+      # Create targets to limit the number of simultaneous account creations
+      # How it works:
+      # - Pick a "leader" cert service, which will be in charge of creating the account,
+      #   and run first (requires + after)
+      # - Make all other cert services sharing the same account wait for the leader to
+      #   finish before starting (requiredBy + before).
+      # Using a target here is fine - account creation is a one time event. Even if
+      # systemd clean --what=state is used to delete the account, so long as the user
+      # then runs one of the cert services, there won't be any issues.
+      accountTargets = mapAttrs' (hash: confs: let
+        leader = "acme-${(builtins.head confs).cert}.service";
+        dependantServices = map (conf: "acme-${conf.cert}.service") (builtins.tail confs);
+      in nameValuePair "acme-account-${hash}" {
+        requiredBy = dependantServices;
+        before = dependantServices;
+        requires = [ leader ];
+        after = [ leader ];
+      }) (groupBy (conf: conf.accountHash) (attrValues certConfigs));
+    in finishedTargets // accountTargets;
   })
 ];
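
To make the grouping in the new systemd.targets block above concrete: cert configurations are bucketed by accountHash, the head of each bucket becomes the leader that creates the shared account, and the tail is ordered after it. A minimal standalone sketch, assuming a nixpkgs checkout recent enough to provide lib.groupBy; the cert names and hashes are made up:

let
  lib = import <nixpkgs/lib>;
  # Hypothetical cert configs; only the fields used by the grouping are shown
  certConfigs = {
    "a.example.test" = { cert = "a.example.test"; accountHash = "aaaaaaaaaaaaaaaaaaaa"; };
    "b.example.test" = { cert = "b.example.test"; accountHash = "aaaaaaaaaaaaaaaaaaaa"; };
    "c.example.test" = { cert = "c.example.test"; accountHash = "cccccccccccccccccccc"; };
  };
in lib.mapAttrs (hash: confs: {
  # Head of each bucket creates the account; the tail waits on the account target
  leader = "acme-${(builtins.head confs).cert}.service";
  followers = map (conf: "acme-${conf.cert}.service") (builtins.tail confs);
}) (lib.groupBy (conf: conf.accountHash) (lib.attrValues certConfigs))

Here the two certs sharing a hash produce one leader (acme-a.example.test.service) and one follower, while the third cert gets a bucket of its own.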


@ -162,6 +162,9 @@ services.httpd = {
<xref linkend="opt-security.acme.certs"/>."foo.example.com" = { <xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
<link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges"; <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
<link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com"; <link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
# Ensure that the web server you use can read the generated certs
# Take a look at the <link linkend="opt-services.nginx.group">group</link> option for the web server you choose.
<link linkend="opt-security.acme.certs._name_.group">group</link> = "nginx";
# Since we have a wildcard vhost to handle port 80, # Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything! # we can generate certs for anything!
# Just make sure your DNS resolves them. # Just make sure your DNS resolves them.
@ -257,10 +260,11 @@ chmod 400 /var/lib/secrets/certs.secret
<para> <para>
Should you need to regenerate a particular certificate in a hurry, such Should you need to regenerate a particular certificate in a hurry, such
as when a vulnerability is found in Let's Encrypt, there is now a convenient as when a vulnerability is found in Let's Encrypt, there is now a convenient
mechanism for doing so. Running <literal>systemctl clean acme-example.com.service</literal> mechanism for doing so. Running
will remove all certificate files for the given domain, allowing you to then <literal>systemctl clean --what=state acme-example.com.service</literal>
<literal>systemctl start acme-example.com.service</literal> to generate fresh will remove all certificate files and the account data for the given domain,
ones. allowing you to then <literal>systemctl start acme-example.com.service</literal>
to generate fresh ones.
</para> </para>
</section> </section>
<section xml:id="module-security-acme-fix-jws"> <section xml:id="module-security-acme-fix-jws">


@ -10,16 +10,8 @@ let
     (n: v: (if v ? program then v else v // {program=n;}))
     wrappers);

-  securityWrapper = pkgs.stdenv.mkDerivation {
-    name = "security-wrapper";
-    phases = [ "installPhase" "fixupPhase" ];
-    buildInputs = [ pkgs.libcap pkgs.libcap_ng pkgs.linuxHeaders ];
-    hardeningEnable = [ "pie" ];
-    installPhase = ''
-      mkdir -p $out/bin
-      $CC -Wall -O2 -DWRAPPER_DIR=\"${parentWrapperDir}\" \
-          -lcap-ng -lcap ${./wrapper.c} -o $out/bin/security-wrapper
-    '';
+  securityWrapper = pkgs.callPackage ./wrapper.nix {
+    inherit parentWrapperDir;
   };

 ###### Activation script for the setcap wrappers


@ -4,15 +4,17 @@
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/xattr.h>
 #include <fcntl.h>
 #include <dirent.h>
 #include <assert.h>
 #include <errno.h>
 #include <linux/capability.h>
-#include <sys/capability.h>
 #include <sys/prctl.h>
 #include <limits.h>
-#include <cap-ng.h>
+#include <stdint.h>
+#include <syscall.h>
+#include <byteswap.h>

 // Make sure assertions are not compiled out, we use them to codify
 // invariants about this program and we want it to fail fast and
@ -23,182 +25,172 @@ extern char **environ;
 // The WRAPPER_DIR macro is supplied at compile time so that it cannot
 // be changed at runtime
-static char * wrapperDir = WRAPPER_DIR;
+static char *wrapper_dir = WRAPPER_DIR;

 // Wrapper debug variable name
-static char * wrapperDebug = "WRAPPER_DEBUG";
+static char *wrapper_debug = "WRAPPER_DEBUG";

-// Update the capabilities of the running process to include the given
-// capability in the Ambient set.
-static void set_ambient_cap(cap_value_t cap)
-{
-    capng_get_caps_process();
-
-    if (capng_update(CAPNG_ADD, CAPNG_INHERITABLE, (unsigned long) cap))
-    {
-        perror("cannot raise the capability into the Inheritable set\n");
-        exit(1);
-    }
-
-    capng_apply(CAPNG_SELECT_CAPS);
-
-    if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0))
-    {
-        perror("cannot raise the capability into the Ambient set\n");
-        exit(1);
-    }
-}
+#define CAP_SETPCAP 8
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define LE32_TO_H(x) bswap_32(x)
+#else
+#define LE32_TO_H(x) (x)
+#endif
+
+int get_last_cap(unsigned *last_cap) {
+    FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
+    if (file == NULL) {
+        int saved_errno = errno;
+        fprintf(stderr, "failed to open /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
+        return -saved_errno;
+    }
+    int res = fscanf(file, "%u", last_cap);
+    if (res == EOF) {
+        int saved_errno = errno;
+        fprintf(stderr, "could not read number from /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
+        return -saved_errno;
+    }
+    fclose(file);
+    return 0;
+}

 // Given the path to this program, fetch its configured capability set
 // (as set by `setcap ... /path/to/file`) and raise those capabilities
 // into the Ambient set.
-static int make_caps_ambient(const char *selfPath)
-{
-    cap_t caps = cap_get_file(selfPath);
-
-    if(!caps)
-    {
-        if(getenv(wrapperDebug))
-            fprintf(stderr, "no caps set or could not retrieve the caps for this file, not doing anything...");
-
-        return 1;
-    }
-
-    // We use `cap_to_text` and iteration over the tokenized result
-    // string because, as of libcap's current release, there is no
-    // facility for retrieving an array of `cap_value_t`'s that can be
-    // given to `prctl` in order to lift that capability into the
-    // Ambient set.
-    //
-    // Some discussion was had around shot-gunning all of the
-    // capabilities we know about into the Ambient set but that has a
-    // security smell and I deemed the risk of the current
-    // implementation crashing the program to be lower than the risk
-    // of a privilege escalation security hole being introduced by
-    // raising all capabilities, even ones we didn't intend for the
-    // program, into the Ambient set.
-    //
-    // `cap_t` which is returned by `cap_get_*` is an opaque type and
-    // even if we could retrieve the bitmasks (which, as far as I can
-    // tell we cannot) in order to get the `cap_value_t`
-    // representation for each capability we would have to take the
-    // total number of capabilities supported and iterate over the
-    // sequence of integers up-to that maximum total, testing each one
-    // against the bitmask ((bitmask >> n) & 1) to see if it's set and
-    // aggregating each "capability integer n" that is set in the
-    // bitmask.
-    //
-    // That, combined with the fact that we can't easily get the
-    // bitmask anyway seemed much more brittle than fetching the
-    // `cap_t`, transforming it into a textual representation,
-    // tokenizing the string, and using `cap_from_name` on the token
-    // to get the `cap_value_t` that we need for `prctl`. There is
-    // indeed risk involved if the output string format of
-    // `cap_to_text` ever changes but at this time the combination of
-    // factors involving the below list have led me to the conclusion
-    // that the best implementation at this time is reading then
-    // parsing with *lots of documentation* about why we're doing it
-    // this way.
-    //
-    // 1. No explicit API for fetching an array of `cap_value_t`'s or
-    //    for transforming a `cap_t` into such a representation
-    // 2. The risk of a crash is lower than lifting all capabilities
-    //    into the Ambient set
-    // 3. libcap is depended on heavily in the Linux ecosystem so
-    //    there is a high chance that the output representation of
-    //    `cap_to_text` will not change which reduces our risk that
-    //    this parsing step will cause a crash
-    //
-    // The preferred method, should it ever be available in the
-    // future, would be to use libcap API's to transform the result
-    // from a `cap_get_*` into an array of `cap_value_t`'s that can
-    // then be given to prctl.
-    //
-    // - Parnell
-    ssize_t capLen;
-    char* capstr = cap_to_text(caps, &capLen);
-    cap_free(caps);
-
-    // TODO: For now, we assume that cap_to_text always starts its
-    // result string with " =" and that the first capability is listed
-    // immediately after that. We should verify this.
-    assert(capLen >= 2);
-    capstr += 2;
-
-    char* saveptr = NULL;
-    for(char* tok = strtok_r(capstr, ",", &saveptr); tok; tok = strtok_r(NULL, ",", &saveptr))
-    {
-        cap_value_t capnum;
-        if (cap_from_name(tok, &capnum))
-        {
-            if(getenv(wrapperDebug))
-                fprintf(stderr, "cap_from_name failed, skipping: %s", tok);
-        }
-        else if (capnum == CAP_SETPCAP)
-        {
-            // Check for the cap_setpcap capability, we set this on the
-            // wrapper so it can elevate the capabilities to the Ambient
-            // set but we do not want to propagate it down into the
-            // wrapped program.
-            //
-            // TODO: what happens if that's the behavior you want
-            // though???? I'm preferring a strict vs. loose policy here.
-            if(getenv(wrapperDebug))
-                fprintf(stderr, "cap_setpcap in set, skipping it\n");
-        }
-        else
-        {
-            set_ambient_cap(capnum);
-
-            if(getenv(wrapperDebug))
-                fprintf(stderr, "raised %s into the Ambient capability set\n", tok);
-        }
-    }
-    cap_free(capstr);
+static int make_caps_ambient(const char *self_path) {
+    struct vfs_ns_cap_data data = {};
+    int r = getxattr(self_path, "security.capability", &data, sizeof(data));
+
+    if (r < 0) {
+        if (errno == ENODATA) {
+            // no capabilities set
+            return 0;
+        }
+        fprintf(stderr, "cannot get capabilities for %s: %s", self_path, strerror(errno));
+        return 1;
+    }
+
+    size_t size;
+    uint32_t version = LE32_TO_H(data.magic_etc) & VFS_CAP_REVISION_MASK;
+    switch (version) {
+        case VFS_CAP_REVISION_1:
+            size = VFS_CAP_U32_1;
+            break;
+        case VFS_CAP_REVISION_2:
+        case VFS_CAP_REVISION_3:
+            size = VFS_CAP_U32_3;
+            break;
+        default:
+            fprintf(stderr, "BUG! Unsupported capability version 0x%x on %s. Report to NixOS bugtracker\n", version, self_path);
+            return 1;
+    }
+
+    const struct __user_cap_header_struct header = {
+        .version = _LINUX_CAPABILITY_VERSION_3,
+        .pid = getpid(),
+    };
+    struct __user_cap_data_struct user_data[2] = {};
+
+    for (size_t i = 0; i < size; i++) {
+        // merge inheritable & permitted into one
+        user_data[i].permitted = user_data[i].inheritable =
+            LE32_TO_H(data.data[i].inheritable) | LE32_TO_H(data.data[i].permitted);
+    }
+
+    if (syscall(SYS_capset, &header, &user_data) < 0) {
+        fprintf(stderr, "failed to inherit capabilities: %s", strerror(errno));
+        return 1;
+    }
+    unsigned last_cap;
+    r = get_last_cap(&last_cap);
+    if (r < 0) {
+        return 1;
+    }
+    uint64_t set = user_data[0].permitted | (uint64_t)user_data[1].permitted << 32;
+    for (unsigned cap = 0; cap < last_cap; cap++) {
+        if (!(set & (1ULL << cap))) {
+            continue;
+        }
+
+        // Check for the cap_setpcap capability, we set this on the
+        // wrapper so it can elevate the capabilities to the Ambient
+        // set but we do not want to propagate it down into the
+        // wrapped program.
+        //
+        // TODO: what happens if that's the behavior you want
+        // though???? I'm preferring a strict vs. loose policy here.
+        if (cap == CAP_SETPCAP) {
+            if(getenv(wrapper_debug)) {
+                fprintf(stderr, "cap_setpcap in set, skipping it\n");
+            }
+            continue;
+        }
+
+        if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0)) {
+            fprintf(stderr, "cannot raise the capability %d into the ambient set: %s\n", cap, strerror(errno));
+            return 1;
+        }
+
+        if (getenv(wrapper_debug)) {
+            fprintf(stderr, "raised %d into the ambient capability set\n", cap);
+        }
+    }

     return 0;
 }

-int main(int argc, char * * argv)
-{
-    // I *think* it's safe to assume that a path from a symbolic link
-    // should safely fit within the PATH_MAX system limit. Though I'm
-    // not positive it's safe...
-    char selfPath[PATH_MAX];
-    int selfPathSize = readlink("/proc/self/exe", selfPath, sizeof(selfPath));
-
-    assert(selfPathSize > 0);
-
-    // Assert we have room for the zero byte, this ensures the path
-    // isn't being truncated because it's too big for the buffer.
-    //
-    // A better way to handle this might be to use something like the
-    // whereami library (https://github.com/gpakosz/whereami) or a
-    // loop that resizes the buffer and re-reads the link if the
-    // contents are being truncated.
-    assert(selfPathSize < sizeof(selfPath));
-
-    // Set the zero byte since readlink doesn't do that for us.
-    selfPath[selfPathSize] = '\0';
+int readlink_malloc(const char *p, char **ret) {
+    size_t l = FILENAME_MAX+1;
+    int r;
+
+    for (;;) {
+        char *c = calloc(l, sizeof(char));
+        if (!c) {
+            return -ENOMEM;
+        }
+
+        ssize_t n = readlink(p, c, l-1);
+        if (n < 0) {
+            r = -errno;
+            free(c);
+            return r;
+        }
+
+        if ((size_t) n < l-1) {
+            c[n] = 0;
+            *ret = c;
+            return 0;
+        }
+
+        free(c);
+        l *= 2;
+    }
+}
+
+int main(int argc, char **argv) {
+    char *self_path = NULL;
+    int self_path_size = readlink_malloc("/proc/self/exe", &self_path);
+    if (self_path_size < 0) {
+        fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
+    }

     // Make sure that we are being executed from the right location,
-    // i.e., `safeWrapperDir'. This is to prevent someone from creating
+    // i.e., `safe_wrapper_dir'. This is to prevent someone from creating
     // hard link `X' from some other location, along with a false
     // `X.real' file, to allow arbitrary programs from being executed
     // with elevated capabilities.
-    int len = strlen(wrapperDir);
-    if (len > 0 && '/' == wrapperDir[len - 1])
+    int len = strlen(wrapper_dir);
+    if (len > 0 && '/' == wrapper_dir[len - 1])
         --len;
-    assert(!strncmp(selfPath, wrapperDir, len));
-    assert('/' == wrapperDir[0]);
-    assert('/' == selfPath[len]);
+    assert(!strncmp(self_path, wrapper_dir, len));
+    assert('/' == wrapper_dir[0]);
+    assert('/' == self_path[len]);

     // Make *really* *really* sure that we were executed as
-    // `selfPath', and not, say, as some other setuid program. That
+    // `self_path', and not, say, as some other setuid program. That
     // is, our effective uid/gid should match the uid/gid of
-    // `selfPath'.
+    // `self_path'.
     struct stat st;
-    assert(lstat(selfPath, &st) != -1);
+    assert(lstat(self_path, &st) != -1);

     assert(!(st.st_mode & S_ISUID) || (st.st_uid == geteuid()));
     assert(!(st.st_mode & S_ISGID) || (st.st_gid == getegid()));
@ -207,33 +199,35 @@ int main(int argc, char * * argv)
     assert(!(st.st_mode & (S_IWGRP | S_IWOTH)));

     // Read the path of the real (wrapped) program from <self>.real.
-    char realFN[PATH_MAX + 10];
-    int realFNSize = snprintf (realFN, sizeof(realFN), "%s.real", selfPath);
-    assert (realFNSize < sizeof(realFN));
-
-    int fdSelf = open(realFN, O_RDONLY);
-    assert (fdSelf != -1);
-
-    char sourceProg[PATH_MAX];
-    len = read(fdSelf, sourceProg, PATH_MAX);
-    assert (len != -1);
-    assert (len < sizeof(sourceProg));
-    assert (len > 0);
-    sourceProg[len] = 0;
-    close(fdSelf);
+    char real_fn[PATH_MAX + 10];
+    int real_fn_size = snprintf(real_fn, sizeof(real_fn), "%s.real", self_path);
+    assert(real_fn_size < sizeof(real_fn));
+
+    int fd_self = open(real_fn, O_RDONLY);
+    assert(fd_self != -1);
+
+    char source_prog[PATH_MAX];
+    len = read(fd_self, source_prog, PATH_MAX);
+    assert(len != -1);
+    assert(len < sizeof(source_prog));
+    assert(len > 0);
+    source_prog[len] = 0;
+    close(fd_self);

     // Read the capabilities set on the wrapper and raise them in to
-    // the Ambient set so the program we're wrapping receives the
+    // the ambient set so the program we're wrapping receives the
     // capabilities too!
-    make_caps_ambient(selfPath);
+    if (make_caps_ambient(self_path) != 0) {
+        free(self_path);
+        return 1;
+    }
+    free(self_path);

-    execve(sourceProg, argv, environ);
+    execve(source_prog, argv, environ);

     fprintf(stderr, "%s: cannot run `%s': %s\n",
-            argv[0], sourceProg, strerror(errno));
+            argv[0], source_prog, strerror(errno));

-    exit(1);
+    return 1;
 }


@ -0,0 +1,21 @@
{ stdenv, linuxHeaders, parentWrapperDir, debug ? false }:
# For testing:
# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
stdenv.mkDerivation {
name = "security-wrapper";
buildInputs = [ linuxHeaders ];
dontUnpack = true;
hardeningEnable = [ "pie" ];
CFLAGS = [
''-DWRAPPER_DIR="${parentWrapperDir}"''
] ++ (if debug then [
"-Werror" "-Og" "-g"
] else [
"-Wall" "-O2"
]);
dontStrip = debug;
installPhase = ''
mkdir -p $out/bin
$CC $CFLAGS ${./wrapper.c} -o $out/bin/security-wrapper
'';
}


@ -1,98 +0,0 @@
# Disnix server
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.disnix;
in
{
###### interface
options = {
services.disnix = {
enable = mkEnableOption "Disnix";
enableMultiUser = mkOption {
type = types.bool;
default = true;
description = "Whether to support multi-user mode by enabling the Disnix D-Bus service";
};
useWebServiceInterface = mkEnableOption "the DisnixWebService interface running on Apache Tomcat";
package = mkOption {
type = types.path;
description = "The Disnix package";
default = pkgs.disnix;
defaultText = "pkgs.disnix";
};
enableProfilePath = mkEnableOption "exposing the Disnix profiles in the system's PATH";
profiles = mkOption {
type = types.listOf types.string;
default = [ "default" ];
example = [ "default" ];
description = "Names of the Disnix profiles to expose in the system's PATH";
};
};
};
###### implementation
config = mkIf cfg.enable {
dysnomia.enable = true;
environment.systemPackages = [ pkgs.disnix ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
environment.variables.PATH = lib.optionals cfg.enableProfilePath (map (profileName: "/nix/var/nix/profiles/disnix/${profileName}/bin" ) cfg.profiles);
services.dbus.enable = true;
services.dbus.packages = [ pkgs.disnix ];
services.tomcat.enable = cfg.useWebServiceInterface;
services.tomcat.extraGroups = [ "disnix" ];
services.tomcat.javaOpts = "${optionalString cfg.useWebServiceInterface "-Djava.library.path=${pkgs.libmatthew_java}/lib/jni"} ";
services.tomcat.sharedLibs = optional cfg.useWebServiceInterface "${pkgs.DisnixWebService}/share/java/DisnixConnection.jar"
++ optional cfg.useWebServiceInterface "${pkgs.dbus_java}/share/java/dbus.jar";
services.tomcat.webapps = optional cfg.useWebServiceInterface pkgs.DisnixWebService;
users.groups.disnix.gid = config.ids.gids.disnix;
systemd.services = {
disnix = mkIf cfg.enableMultiUser {
description = "Disnix server";
wants = [ "dysnomia.target" ];
wantedBy = [ "multi-user.target" ];
after = [ "dbus.service" ]
++ optional config.services.httpd.enable "httpd.service"
++ optional config.services.mysql.enable "mysql.service"
++ optional config.services.postgresql.enable "postgresql.service"
++ optional config.services.tomcat.enable "tomcat.service"
++ optional config.services.svnserve.enable "svnserve.service"
++ optional config.services.mongodb.enable "mongodb.service"
++ optional config.services.influxdb.enable "influxdb.service";
restartIfChanged = false;
path = [ config.nix.package cfg.package config.dysnomia.package "/run/current-system/sw" ];
environment = {
HOME = "/root";
}
// (if config.environment.variables ? DYSNOMIA_CONTAINERS_PATH then { inherit (config.environment.variables) DYSNOMIA_CONTAINERS_PATH; } else {})
// (if config.environment.variables ? DYSNOMIA_MODULES_PATH then { inherit (config.environment.variables) DYSNOMIA_MODULES_PATH; } else {});
serviceConfig.ExecStart = "${cfg.package}/bin/disnix-service";
};
};
};
}


@ -1,257 +0,0 @@
{pkgs, lib, config, ...}:
with lib;
let
cfg = config.dysnomia;
printProperties = properties:
concatMapStrings (propertyName:
let
property = properties.${propertyName};
in
if isList property then "${propertyName}=(${lib.concatMapStrings (elem: "\"${toString elem}\" ") (properties.${propertyName})})\n"
else "${propertyName}=\"${toString property}\"\n"
) (builtins.attrNames properties);
properties = pkgs.stdenv.mkDerivation {
name = "dysnomia-properties";
buildCommand = ''
cat > $out << "EOF"
${printProperties cfg.properties}
EOF
'';
};
containersDir = pkgs.stdenv.mkDerivation {
name = "dysnomia-containers";
buildCommand = ''
mkdir -p $out
cd $out
${concatMapStrings (containerName:
let
containerProperties = cfg.containers.${containerName};
in
''
cat > ${containerName} <<EOF
${printProperties containerProperties}
type=${containerName}
EOF
''
) (builtins.attrNames cfg.containers)}
'';
};
linkMutableComponents = {containerName}:
''
mkdir ${containerName}
${concatMapStrings (componentName:
let
component = cfg.components.${containerName}.${componentName};
in
"ln -s ${component} ${containerName}/${componentName}\n"
) (builtins.attrNames (cfg.components.${containerName} or {}))}
'';
componentsDir = pkgs.stdenv.mkDerivation {
name = "dysnomia-components";
buildCommand = ''
mkdir -p $out
cd $out
${concatMapStrings (containerName:
linkMutableComponents { inherit containerName; }
) (builtins.attrNames cfg.components)}
'';
};
dysnomiaFlags = {
enableApacheWebApplication = config.services.httpd.enable;
enableAxis2WebService = config.services.tomcat.axis2.enable;
enableDockerContainer = config.virtualisation.docker.enable;
enableEjabberdDump = config.services.ejabberd.enable;
enableMySQLDatabase = config.services.mysql.enable;
enablePostgreSQLDatabase = config.services.postgresql.enable;
enableTomcatWebApplication = config.services.tomcat.enable;
enableMongoDatabase = config.services.mongodb.enable;
enableSubversionRepository = config.services.svnserve.enable;
enableInfluxDatabase = config.services.influxdb.enable;
};
in
{
options = {
dysnomia = {
enable = mkOption {
type = types.bool;
default = false;
description = "Whether to enable Dysnomia";
};
enableAuthentication = mkOption {
type = types.bool;
default = false;
description = "Whether to publish privacy-sensitive authentication credentials";
};
package = mkOption {
type = types.path;
description = "The Dysnomia package";
};
properties = mkOption {
description = "An attribute set in which each attribute represents a machine property. Optionally, these values can be shell substitutions.";
default = {};
};
containers = mkOption {
description = "An attribute set in which each key represents a container and each value an attribute set providing its configuration properties";
default = {};
};
components = mkOption {
description = "An atttribute set in which each key represents a container and each value an attribute set in which each key represents a component and each value a derivation constructing its initial state";
default = {};
};
extraContainerProperties = mkOption {
description = "An attribute set providing additional container settings in addition to the default properties";
default = {};
};
extraContainerPaths = mkOption {
description = "A list of paths containing additional container configurations that are added to the search folders";
default = [];
};
extraModulePaths = mkOption {
description = "A list of paths containing additional modules that are added to the search folders";
default = [];
};
enableLegacyModules = mkOption {
type = types.bool;
default = true;
description = "Whether to enable Dysnomia legacy process and wrapper modules";
};
};
};
config = mkIf cfg.enable {
environment.etc = {
"dysnomia/containers" = {
source = containersDir;
};
"dysnomia/components" = {
source = componentsDir;
};
"dysnomia/properties" = {
source = properties;
};
};
environment.variables = {
DYSNOMIA_STATEDIR = "/var/state/dysnomia-nixos";
DYSNOMIA_CONTAINERS_PATH = "${lib.concatMapStrings (containerPath: "${containerPath}:") cfg.extraContainerPaths}/etc/dysnomia/containers";
DYSNOMIA_MODULES_PATH = "${lib.concatMapStrings (modulePath: "${modulePath}:") cfg.extraModulePaths}/etc/dysnomia/modules";
};
environment.systemPackages = [ cfg.package ];
dysnomia.package = pkgs.dysnomia.override (origArgs: dysnomiaFlags // lib.optionalAttrs (cfg.enableLegacyModules) {
enableLegacy = builtins.trace ''
WARNING: Dysnomia has been configured to use the legacy 'process' and 'wrapper'
modules for compatibility reasons! If you rely on these modules, consider
migrating to better alternatives.
More information: https://raw.githubusercontent.com/svanderburg/dysnomia/f65a9a84827bcc4024d6b16527098b33b02e4054/README-legacy.md
If you have migrated already or don't rely on these Dysnomia modules, you can
disable legacy mode with the following NixOS configuration option:
dysnomia.enableLegacyModules = false;
In a future version of Dysnomia (and NixOS) the legacy option will go away!
'' true;
});
dysnomia.properties = {
hostname = config.networking.hostName;
inherit (config.nixpkgs.localSystem) system;
supportedTypes = [
"echo"
"fileset"
"process"
"wrapper"
# These are not base modules, but they are still enabled because they work with technology that are always enabled in NixOS
"systemd-unit"
"sysvinit-script"
"nixos-configuration"
]
++ optional (dysnomiaFlags.enableApacheWebApplication) "apache-webapplication"
++ optional (dysnomiaFlags.enableAxis2WebService) "axis2-webservice"
++ optional (dysnomiaFlags.enableDockerContainer) "docker-container"
++ optional (dysnomiaFlags.enableEjabberdDump) "ejabberd-dump"
++ optional (dysnomiaFlags.enableInfluxDatabase) "influx-database"
++ optional (dysnomiaFlags.enableMySQLDatabase) "mysql-database"
++ optional (dysnomiaFlags.enablePostgreSQLDatabase) "postgresql-database"
++ optional (dysnomiaFlags.enableTomcatWebApplication) "tomcat-webapplication"
++ optional (dysnomiaFlags.enableMongoDatabase) "mongo-database"
++ optional (dysnomiaFlags.enableSubversionRepository) "subversion-repository";
};
dysnomia.containers = lib.recursiveUpdate ({
process = {};
wrapper = {};
}
// lib.optionalAttrs (config.services.httpd.enable) { apache-webapplication = {
documentRoot = config.services.httpd.virtualHosts.localhost.documentRoot;
}; }
// lib.optionalAttrs (config.services.tomcat.axis2.enable) { axis2-webservice = {}; }
// lib.optionalAttrs (config.services.ejabberd.enable) { ejabberd-dump = {
ejabberdUser = config.services.ejabberd.user;
}; }
// lib.optionalAttrs (config.services.mysql.enable) { mysql-database = {
mysqlPort = config.services.mysql.port;
mysqlSocket = "/run/mysqld/mysqld.sock";
} // lib.optionalAttrs cfg.enableAuthentication {
mysqlUsername = "root";
};
}
// lib.optionalAttrs (config.services.postgresql.enable) { postgresql-database = {
} // lib.optionalAttrs (cfg.enableAuthentication) {
postgresqlUsername = "postgres";
};
}
// lib.optionalAttrs (config.services.tomcat.enable) { tomcat-webapplication = {
tomcatPort = 8080;
}; }
// lib.optionalAttrs (config.services.mongodb.enable) { mongo-database = {}; }
// lib.optionalAttrs (config.services.influxdb.enable) {
influx-database = {
influxdbUsername = config.services.influxdb.user;
influxdbDataDir = "${config.services.influxdb.dataDir}/data";
influxdbMetaDir = "${config.services.influxdb.dataDir}/meta";
};
}
// lib.optionalAttrs (config.services.svnserve.enable) { subversion-repository = {
svnBaseDir = config.services.svnserve.svnBaseDir;
}; }) cfg.extraContainerProperties;
system.activationScripts.dysnomia = ''
mkdir -p /etc/systemd-mutable/system
if [ ! -f /etc/systemd-mutable/system/dysnomia.target ]
then
( echo "[Unit]"
echo "Description=Services that are activated and deactivated by Dysnomia"
echo "After=final.target"
) > /etc/systemd-mutable/system/dysnomia.target
fi
'';
};
}


@ -0,0 +1,140 @@
{ config, options, lib, pkgs, stdenv, ... }:
let
cfg = config.services.pleroma;
in {
options = {
services.pleroma = with lib; {
enable = mkEnableOption "pleroma";
package = mkOption {
type = types.package;
default = pkgs.pleroma-otp;
description = "Pleroma package to use.";
};
user = mkOption {
type = types.str;
default = "pleroma";
description = "User account under which pleroma runs.";
};
group = mkOption {
type = types.str;
default = "pleroma";
description = "Group account under which pleroma runs.";
};
stateDir = mkOption {
type = types.str;
default = "/var/lib/pleroma";
readOnly = true;
description = "Directory where the pleroma service will save the uploads and static files.";
};
configs = mkOption {
type = with types; listOf str;
description = ''
Pleroma public configuration.
This list gets appended from left to
right into /etc/pleroma/config.exs. Elixir evaluates its
configuration imperatively, meaning you can override a
setting by appending a new str to this NixOS option list.
<emphasis>DO NOT STORE ANY PLEROMA SECRET
HERE</emphasis>, use
<link linkend="opt-services.pleroma.secretConfigFile">services.pleroma.secretConfigFile</link>
instead.
This setting is going to be stored in a file that is part of
the Nix store. The Nix store being world-readable, it's not
the right place to store any secret.

Have a look at the Pleroma section in the NixOS manual for more
information.
'';
};
secretConfigFile = mkOption {
type = types.str;
default = "/var/lib/pleroma/secrets.exs";
description = ''
Path to the file containing your secret pleroma configuration.
<emphasis>DO NOT POINT THIS OPTION TO THE NIX
STORE</emphasis>, the store being world-readable, it'll
compromise all your secrets.
'';
};
};
};
config = lib.mkIf cfg.enable {
users = {
users."${cfg.user}" = {
description = "Pleroma user";
home = cfg.stateDir;
extraGroups = [ cfg.group ];
};
groups."${cfg.group}" = {};
};
environment.systemPackages = [ cfg.package ];
environment.etc."/pleroma/config.exs".text = ''
${lib.concatMapStrings (x: "${x}") cfg.configs}
# The lau/tzdata library is trying to download the latest
# timezone database in the OTP priv directory by default.
# This directory being in the store, it's read-only.
# Setting that up to a more appropriate location.
config :tzdata, :data_dir, "/var/lib/pleroma/elixir_tzdata_data"
import_config "${cfg.secretConfigFile}"
'';
systemd.services.pleroma = {
description = "Pleroma social network";
after = [ "network-online.target" "postgresql.service" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
serviceConfig = {
User = cfg.user;
Group = cfg.group;
Type = "exec";
WorkingDirectory = "~";
StateDirectory = "pleroma pleroma/static pleroma/uploads";
StateDirectoryMode = "700";
# Checking the conf file is there then running the database
# migration before each service start, just in case there are
# some pending ones.
#
# It's sub-optimal as we'll always run this, even if pleroma
# has not been updated. But the no-op process is pretty fast.
# Better be safe than sorry migration-wise.
ExecStartPre =
let preScript = pkgs.writers.writeBashBin "pleromaStartPre"
"${cfg.package}/bin/pleroma_ctl migrate";
in "${preScript}/bin/pleromaStartPre";
ExecStart = "${cfg.package}/bin/pleroma start";
ExecStop = "${cfg.package}/bin/pleroma stop";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
# Systemd sandboxing directives.
# Taken from the upstream contrib systemd service at
# pleroma/installation/pleroma.service
PrivateTmp = true;
ProtectHome = true;
ProtectSystem = "full";
PrivateDevices = false;
NoNewPrivileges = true;
CapabilityBoundingSet = "~CAP_SYS_ADMIN";
};
};
};
meta.maintainers = with lib.maintainers; [ ninjatrappeur ];
meta.doc = ./pleroma.xml;
}
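
Since the module above concatenates cfg.configs from left to right into /etc/pleroma/config.exs, and Elixir evaluates that file imperatively, a later list entry overrides an earlier one. A minimal sketch of that override behaviour, with made-up settings:

{
  services.pleroma = {
    enable = true;
    configs = [
      ''
        import Config
        config :pleroma, :instance, registrations_open: true
      ''
      # Appended after the entry above; when Elixir evaluates the
      # resulting config.exs, this later setting wins.
      ''
        config :pleroma, :instance, registrations_open: false
      ''
    ];
  };
}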


@ -0,0 +1,132 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-pleroma">
<title>Pleroma</title>
<para><link xlink:href="https://pleroma.social/">Pleroma</link> is a lightweight activity pub server.</para>
<section xml:id="module-services-pleroma-getting-started">
<title>Quick Start</title>
<para>To get started quickly, you can use this sample NixOS configuration and adapt it to your use case.</para>
<para><programlisting>
{
security.acme = {
email = "root@tld";
acceptTerms = true;
certs = {
"social.tld.com" = {
webroot = "/var/www/social.tld.com";
email = "root@tld";
group = "nginx";
};
};
};
services = {
pleroma = {
enable = true;
secretConfigFile = "/var/lib/pleroma/secrets.exs";
configs = [
''
import Config
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "social.tld.com", scheme: "https", port: 443],
http: [ip: {127, 0, 0, 1}, port: 4000]
config :pleroma, :instance,
name: "NixOS test pleroma server",
email: "pleroma@social.tld.com",
notify_email: "pleroma@social.tld.com",
limit: 5000,
registrations_open: true
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
#base_url: "https://cache.pleroma.social"
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "pleroma",
password: "${test-db-passwd}",
database: "pleroma",
hostname: "localhost",
pool_size: 10,
prepare: :named,
parameters: [
plan_cache_mode: "force_custom_plan"
]
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
config :pleroma, configurable_from_database: false
''
];
};
postgresql = {
enable = true;
package = pkgs.postgresql_12;
};
nginx = {
enable = true;
addSSL = true;
sslCertificate = "/var/lib/acme/social.tld.com/fullchain.pem";
sslCertificateKey = "/var/lib/acme/social.tld.com/key.pem";
root = "/var/www/social.tld.com";
# ACME endpoint
locations."/.well-known/acme-challenge" = {
root = "/var/www/social.tld.com/";
};
virtualHosts."social.tld.com" = {
addSSL = true;
locations."/" = {
proxyPass = "http://127.0.0.1:4000";
extraConfig = ''
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
if ($request_method = OPTIONS) {
return 204;
}
add_header X-XSS-Protection "1; mode=block";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
add_header X-Download-Options noopen;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
client_max_body_size 16m;
'';
};
};
};
};
};
</programlisting></para>
<para>Note that you'll need to seed your database and upload your pleroma secrets to the path pointed to by <literal>config.pleroma.secretConfigFile</literal>. You can find more information about how to do that in the <link linkend="module-services-pleroma-generate-config">next</link> section.</para>
</section>
<section xml:id="module-services-pleroma-generate-config">
<title>Generating the Pleroma Config and Seed the Database</title>
<para>Before using this service, you'll need to generate your
server configuration and its associated database seed. The
<literal>pleroma_ctl</literal> CLI utility can help you with that. You
can start with <literal>pleroma_ctl instance gen --output config.exs
--output-psql setup.psql</literal>; it will ask you some questions
and generate both your config file and the initial database
migration.</para>
<para>For more details about this configuration format, please have a look at the <link xlink:href="https://docs-develop.pleroma.social/backend/configuration/cheatsheet/">upstream documentation</link>.</para>
<para>To seed your database, you can use the <literal>setup.psql</literal> file you just generated by running
<programlisting>
sudo -u postgres psql -f setup.psql
</programlisting></para>
<para>As for the pleroma service configuration you just generated, you'll need to split it into two parts: the "public" part, which does not contain any secrets and can thus be safely stored in the Nix store, and its "private" counterpart containing secrets (database password, endpoint secret key, salts, etc.).</para>
<para>The public part will live in your NixOS machine configuration in the <link linkend="opt-services.pleroma.configs">services.pleroma.configs</link> option. However, it's up to you to upload the secret pleroma configuration to the path pointed by <link linkend="opt-services.pleroma.secretConfigFile">services.pleroma.secretConfigFile</link>. You can do that manually or rely on a third party tool such as <link xlink:href="https://github.com/DBCDK/morph">Morph</link> or <link xlink:href="https://github.com/NixOS/nixops">NixOps</link>.</para>
</section>
</chapter>


@ -77,6 +77,27 @@ in import ./make-test-python.nix ({ lib, ... }: {
after = [ "acme-a.example.test.service" "nginx-config-reload.service" ]; after = [ "acme-a.example.test.service" "nginx-config-reload.service" ];
}; };
# Test that account creation is collated into one service
specialisation.account-creation.configuration = { nodes, pkgs, lib, ... }: let
email = "newhostmaster@example.test";
caDomain = nodes.acme.config.test-support.acme.caDomain;
# Exit 99 to make it easier to track if this is the reason a renew failed
testScript = ''
test -e accounts/${caDomain}/${email}/account.json || exit 99
'';
in {
security.acme.email = lib.mkForce email;
systemd.services."b.example.test".preStart = testScript;
systemd.services."c.example.test".preStart = testScript;
services.nginx.virtualHosts."b.example.test" = (vhostBase pkgs) // {
enableACME = true;
};
services.nginx.virtualHosts."c.example.test" = (vhostBase pkgs) // {
enableACME = true;
};
};
# Cert config changes will not cause the nginx configuration to change. # Cert config changes will not cause the nginx configuration to change.
# This tests that the reload service is correctly triggered. # This tests that the reload service is correctly triggered.
# It also tests that postRun is exec'd as root # It also tests that postRun is exec'd as root
@ -289,7 +310,7 @@ in import ./make-test-python.nix ({ lib, ... }: {
       acme.start()
       webserver.start()

-      acme.wait_for_unit("default.target")
+      acme.wait_for_unit("network-online.target")
       acme.wait_for_unit("pebble.service")

       client.succeed("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
@ -314,6 +335,15 @@ in import ./make-test-python.nix ({ lib, ... }: {
           check_issuer(webserver, "a.example.test", "pebble")
           check_connection(client, "a.example.test")

+      with subtest("Runs 1 cert for account creation before others"):
+          switch_to(webserver, "account-creation")
+          webserver.wait_for_unit("acme-finished-a.example.test.target")
+          check_connection(client, "a.example.test")
+
+          webserver.wait_for_unit("acme-finished-b.example.test.target")
+          webserver.wait_for_unit("acme-finished-c.example.test.target")
+          check_connection(client, "b.example.test")
+          check_connection(client, "c.example.test")
+
       with subtest("Can reload web server when cert configuration changes"):
           switch_to(webserver, "cert-change")
           webserver.wait_for_unit("acme-finished-a.example.test.target")

View file

@ -303,6 +303,7 @@ in
   php = handleTest ./php {};
   pinnwand = handleTest ./pinnwand.nix {};
   plasma5 = handleTest ./plasma5.nix {};
+  pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix {};
   plotinus = handleTest ./plotinus.nix {};
   podman = handleTestOn ["x86_64-linux"] ./podman.nix {};
   postfix = handleTest ./postfix.nix {};

nixos/tests/pleroma.nix (new file, 265 lines)

@ -0,0 +1,265 @@
/*
Pleroma E2E VM test.
Abstract:
=========
Using pleroma, postgresql, a local CA cert, a nginx reverse proxy
and a toot-based client, we're going to:
1. Provision a pleroma service from scratch (pleroma config + postgres db).
2. Create a "jamy" admin user.
3. Send a toot from this user.
4. Send an upload from this user.
5. Check the toot is part of the server's public timeline
Notes:
- We need a fully functional TLS setup without having any access to
the internet. We do that by issuing a self-signed cert, adding this
self-cert to the hosts' PKI trust store and finally spoofing the
hostnames using /etc/hosts.
- For this NixOS test, we *had* to store some DB-related and
pleroma-related secrets to the store. Keep in mind the store is
world-readable, it's the worst place possible to store *any*
secret. **DO NOT DO THIS IN A REAL WORLD DEPLOYMENT**.
*/
import ./make-test-python.nix ({ pkgs, ... }:
let
send-toot = pkgs.writeScriptBin "send-toot" ''
set -eux
# toot is using the requests library internally. This library
# sadly embeds its own certificate store instead of relying on the
# system one, so we override this rather bad default behaviour.
export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
export TOOT_LOGIN_CLI_PASSWORD="jamy-password"
toot login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test"
echo "Login OK"
# Send a toot then verify it's part of the public timeline
echo "y" | toot post "hello world Jamy here"
echo "Send toot OK"
echo "y" | toot timeline | grep -c "hello world Jamy here"
echo "Get toot from timeline OK"
# Test file upload
echo "y" | toot upload ${db-seed} | grep -c "https://pleroma.nixos.test/media"
echo "File upload OK"
echo "====================================================="
echo "= SUCCESS ="
echo "= ="
echo "= We were able to sent a toot + a upload and ="
echo "= retrieve both of them in the public timeline. ="
echo "====================================================="
'';
provision-db = pkgs.writeScriptBin "provision-db" ''
set -eux
sudo -u postgres psql -f ${db-seed}
'';
test-db-passwd = "SccZOvTGM//BMrpoQj68JJkjDkMGb4pHv2cECWiI+XhVe3uGJTLI0vFV/gDlZ5jJ";
/* For this NixOS test, we *had* to store this secret to the store.
Keep in mind the store is world-readable, it's the worst place
possible to store *any* secret. **DO NOT DO THIS IN A REAL WORLD
DEPLOYMENT**.*/
db-seed = pkgs.writeText "provision.psql" ''
CREATE USER pleroma WITH ENCRYPTED PASSWORD '${test-db-passwd}';
CREATE DATABASE pleroma OWNER pleroma;
\c pleroma;
--Extensions made by ecto.migrate that need superuser access
CREATE EXTENSION IF NOT EXISTS citext;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
'';
pleroma-conf = ''
import Config
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "pleroma.nixos.test", scheme: "https", port: 443],
http: [ip: {127, 0, 0, 1}, port: 4000]
config :pleroma, :instance,
name: "NixOS test pleroma server",
email: "pleroma@nixos.test",
notify_email: "pleroma@nixos.test",
limit: 5000,
registrations_open: true
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
#base_url: "https://cache.pleroma.social"
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "pleroma",
password: "${test-db-passwd}",
database: "pleroma",
hostname: "localhost",
pool_size: 10,
prepare: :named,
parameters: [
plan_cache_mode: "force_custom_plan"
]
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/pleroma/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
config :pleroma, configurable_from_database: false
'';
/* For this NixOS test, we *had* to store this secret to the store.
Keep in mind the store is world-readable, it's the worst place
possible to store *any* secret. **DO NOT DO THIS IN A REAL WORLD
DEPLOYMENT**.
In a real-world deployment, you'd handle this either by:
- manually uploading your pleroma secrets to /var/lib/pleroma/secrets.exs
- using a deployment tool such as morph or NixOps to deploy your secrets.
*/
pleroma-conf-secret = pkgs.writeText "secrets.exs" ''
import Config
config :joken, default_signer: "PS69/wMW7X6FIQPABt9lwvlZvgrJIncfiAMrK9J5mjVus/7/NJJi1DsDA1OghBE5"
config :pleroma, Pleroma.Web.Endpoint,
secret_key_base: "NvfmU7lYaQrmmxt4NACm0AaAfN9t6WxsrX0NCB4awkGHvr1S7jyshlEmrjaPFhhq",
signing_salt: "3L41+BuJ"
config :web_push_encryption, :vapid_details,
subject: "mailto:pleroma@nixos.test",
public_key: "BKjfNX9-UqAcncaNqERQtF7n9pKrB0-MO-juv6U5E5XQr_Tg5D-f8AlRjduAguDpyAngeDzG8MdrTejMSL4VF30",
private_key: "k7o9onKMQrgMjMb6l4fsxSaXO0BTNAer5MVSje3q60k"
'';
/* For this NixOS test, we *had* to store this secret in the store.
   Keep in mind the store is world-readable; it is the worst possible
   place to store *any* secret. **DO NOT DO THIS IN A REAL WORLD
   DEPLOYMENT**.
   In a real-world deployment, you'd handle this either by:
   - manually uploading your pleroma secrets to /var/lib/pleroma/secrets.exs
   - using a deployment tool such as morph or NixOps to deploy your secrets.
*/
provision-secrets = pkgs.writeScriptBin "provision-secrets" ''
set -eux
cp "${pleroma-conf-secret}" "/var/lib/pleroma/secrets.exs"
chown pleroma:pleroma /var/lib/pleroma/secrets.exs
'';
/* For this NixOS test, we *had* to store this secret in the store.
   Keep in mind the store is world-readable; it is the worst possible
   place to store *any* secret. **DO NOT DO THIS IN A REAL WORLD
   DEPLOYMENT**.
*/
provision-user = pkgs.writeScriptBin "provision-user" ''
set -eux
# Waiting for pleroma to be up.
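# Poll the public instance endpoint until the API answers with HTTP 200, giving up after 5 minutes.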
timeout 5m bash -c 'while [[ "$(curl -s -o /dev/null -w '%{http_code}' https://pleroma.nixos.test/api/v1/instance)" != "200" ]]; do sleep 2; done'
pleroma_ctl user new jamy jamy@nixos.test --password 'jamy-password' --moderator --admin -y
'';
tls-cert = pkgs.runCommandNoCC "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=pleroma.nixos.test'
mkdir -p $out
cp key.pem cert.pem $out
'';
/* Toot prevents users from feeding login_cli a password
   non-interactively. While that makes sense most of the time, it
   prevents us from logging in during this non-interactive test. This
   patch introduces a TOOT_LOGIN_CLI_PASSWORD env variable allowing us
   to provide a password to toot login_cli.
   If https://github.com/ihabunek/toot/pull/180 gets merged at some
   point, feel free to remove this patch. */
custom-toot = pkgs.toot.overrideAttrs(old:{
patches = [ (pkgs.fetchpatch {
url = "https://github.com/NinjaTrappeur/toot/commit/b4a4c30f41c0cb7e336714c2c4af9bc9bfa0c9f2.patch";
sha256 = "sha256-0xxNwjR/fStLjjUUhwzCCfrghRVts+fc+fvVJqVcaFg=";
}) ];
});
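# Map each node's primary IP address to its spoofed FQDN so that the
# test hostnames resolve inside the sandboxed test network.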
hosts = nodes: ''
${nodes.pleroma.config.networking.primaryIPAddress} pleroma.nixos.test
${nodes.client.config.networking.primaryIPAddress} client.nixos.test
'';
in {
name = "pleroma";
nodes = {
client = { nodes, pkgs, config, ... }: {
security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
networking.extraHosts = hosts nodes;
environment.systemPackages = with pkgs; [
custom-toot
send-toot
];
};
pleroma = { nodes, pkgs, config, ... }: {
security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
networking.extraHosts = hosts nodes;
networking.firewall.enable = false;
environment.systemPackages = with pkgs; [
provision-db
provision-secrets
provision-user
];
services = {
pleroma = {
enable = true;
configs = [
pleroma-conf
];
};
postgresql = {
enable = true;
package = pkgs.postgresql_12;
};
nginx = {
enable = true;
virtualHosts."pleroma.nixos.test" = {
addSSL = true;
sslCertificate = "${tls-cert}/cert.pem";
sslCertificateKey = "${tls-cert}/key.pem";
locations."/" = {
proxyPass = "http://127.0.0.1:4000";
extraConfig = ''
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
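# Answer CORS preflight requests directly instead of proxying them to pleroma.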
if ($request_method = OPTIONS) {
return 204;
}
add_header X-XSS-Protection "1; mode=block";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
add_header X-Download-Options noopen;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
client_max_body_size 16m;
'';
};
};
};
};
};
};
testScript = { nodes, ... }: ''
pleroma.wait_for_unit("postgresql.service")
pleroma.succeed("provision-db")
pleroma.succeed("provision-secrets")
pleroma.systemctl("restart pleroma.service")
pleroma.wait_for_unit("pleroma.service")
pleroma.succeed("provision-user")
client.succeed("send-toot")
'';
})

View file

@@ -19,13 +19,13 @@ assert withOpenCL -> ocl-icd != null;
 mkDerivation rec {
   pname = "mandelbulber";
-  version = "2.23";
+  version = "2.24";
   src = fetchFromGitHub {
     owner = "buddhi1980";
     repo = "mandelbulber2";
     rev = version;
-    sha256 = "08izphj7jyk3wsq3qbzaf2fplys80vr39wliqc1i4c5cr90nrq1l";
+    sha256 = "sha256-JgpYGzD2FsqcCWnOKBiVCxUKqLfT4S++uUBZekhGWmA=";
   };
   nativeBuildInputs = [

View file

@@ -2,7 +2,7 @@
 let
   pname = "Sylk";
-  version = "2.9.4";
+  version = "3.0.0";
 in
 appimageTools.wrapType2 rec {
@@ -10,7 +10,7 @@ appimageTools.wrapType2 rec {
   src = fetchurl {
     url = "http://download.ag-projects.com/Sylk/Sylk-${version}-x86_64.AppImage";
-    hash = "sha256-LnJ8Pd+AHIrHrYpIx+rxnFKZ1uh2viDHC5TaU1BL62s=";
+    hash = "sha256-A+SFkGDg+I1lPJ56XPaRfBUktyjSttcsZSqgXvw9sDs=";
   };
   profile = ''

View file

@@ -1,9 +1,6 @@
-{ stdenv, fetchurl, openssl }:
+{ lib, stdenv, fetchurl, openssl }:
-with stdenv;
-with lib;
-mkDerivation rec {
+stdenv.mkDerivation rec {
   pname = "wraith";
   version = "1.4.7";
   src = fetchurl {
@@ -25,7 +22,7 @@ mkDerivation rec {
     ln -s wraith $out/bin/hub
   '';
-  meta = {
+  meta = with lib; {
     description = "An IRC channel management bot written purely in C/C++";
     longDescription = ''
       Wraith is an IRC channel management bot written purely in C/C++. It has

View file

@@ -45,7 +45,7 @@ let
   };
   releaseRev = v: "V${v}";
   fetched = import ../../../../build-support/coq/meta-fetch/default.nix
-    { inherit stdenv fetchzip; }
+    { inherit lib stdenv fetchzip; }
     { inherit release releaseRev; location = { owner = "coq"; repo = "coq";}; }
     args.version;
   version = fetched.version;

View file

@@ -53,7 +53,7 @@ stdenv.mkDerivation {
   meta = {
     description = "Tools for software verification and analysis";
     homepage = "https://saw.galois.com";
-    license = lib.licenses.unfreeRedistributable;
+    license = lib.licenses.bsd3;
     platforms = lib.platforms.linux;
     maintainers = [ lib.maintainers.thoughtpolice ];
   };

View file

@@ -1,4 +1,4 @@
-{stdenv, fetchurl, makeWrapper, flex, bison,
+{lib, stdenv, fetchurl, makeWrapper, flex, bison,
 asciidoc, docbook_xml_dtd_45, docbook_xsl,
 libxml2, libxslt,
 python3, rcs, cvs, git,

View file

@@ -1,4 +1,4 @@
-{ stdenv, fetchurl, makeWrapper, python27Packages, git
+{ lib, stdenv, fetchurl, makeWrapper, python27Packages, git
 , docbook_xml_dtd_412, docbook_xsl, asciidoc, xmlto, pypy
 , breezy ? null, cvs ? null, darcs ? null, fossil ? null
 , mercurial ? null, monotone ? null, rcs ? null

View file

@@ -35,7 +35,7 @@ let
     "extraInstallFlags" "setCOQBIN" "mlPlugin"
     "dropAttrs" "dropDerivationAttrs" "keepAttrs" ] ++ dropAttrs) keepAttrs;
   fetch = import ../coq/meta-fetch/default.nix
-    { inherit stdenv fetchzip; } ({
+    { inherit lib stdenv fetchzip; } ({
       inherit release releaseRev;
       location = { inherit domain owner repo; };
     } // optionalAttrs (args?fetcher) {inherit fetcher;});

View file

@@ -1,5 +1,6 @@
-{ stdenv, fetchzip }@args:
-let lib = import ../extra-lib.nix {inherit (args.stdenv) lib;}; in
+{ lib, stdenv, fetchzip }@args:
+let lib' = lib; in
+let lib = import ../extra-lib.nix {lib = lib';}; in
 with builtins; with lib;
 let
   default-fetcher = {domain ? "github.com", owner ? "", repo, rev, name ? "source", sha256 ? null, ...}@args:

View file

@@ -1,14 +1,14 @@
 { lib, fetchzip }:
 let
-  version = "2.221";
+  version = "2.225";
 in
 fetchzip {
   name = "JetBrainsMono-${version}";
   url = "https://github.com/JetBrains/JetBrainsMono/releases/download/v${version}/JetBrainsMono-${version}.zip";
-  sha256 = "1in3znnj0i0yfwj93ncxi3s1cp9lhgwnv2r14br47rr7vik4zjr6";
+  sha256 = "1k8xmjaingz50626hd73hqbp196kg3zndiy0aqb88z5cw9nd0fva";
   postFetch = ''
     mkdir -p $out/share/fonts
View file

@@ -1,10 +1,11 @@
-{ stdenv, fetchurl, curl, tzdata, autoPatchelfHook, fixDarwinDylibNames, glibc
+{ lib, stdenv, fetchurl, curl, tzdata, autoPatchelfHook, fixDarwinDylibNames, glibc
 , version, hashes }:
-with stdenv;
 let
+  inherit (stdenv) hostPlatform;
   OS = if hostPlatform.isDarwin then "osx" else hostPlatform.parsed.kernel.name;
   MODEL = toString hostPlatform.parsed.cpu.bits;
-in mkDerivation {
+in stdenv.mkDerivation {
   pname = "dmd-bootstrap";
   inherit version;

View file

@@ -1,10 +1,11 @@
-{ stdenv, fetchurl, curl, tzdata, autoPatchelfHook, fixDarwinDylibNames, libxml2
+{ lib, stdenv, fetchurl, curl, tzdata, autoPatchelfHook, fixDarwinDylibNames, libxml2
 , version, hashes }:
-with stdenv;
 let
-  OS = if hostPlatform.isDarwin then "osx" else hostPlatform.parsed.kernel.name;
+  inherit (stdenv) hostPlatform;
+  OS = if stdenv.hostPlatform.isDarwin then "osx" else hostPlatform.parsed.kernel.name;
   ARCH = toString hostPlatform.parsed.cpu.name;
-in mkDerivation {
+in stdenv.mkDerivation {
   pname = "ldc-bootstrap";
   inherit version;

View file

@@ -1,4 +1,4 @@
-{ stdenv, fetchFromGitHub, cmake
+{ lib, stdenv, fetchFromGitHub, cmake
 , boost, python3, eigen
 , icestorm, trellis
 , llvmPackages
@@ -12,7 +12,7 @@
 let
   boostPython = boost.override { python = python3; enablePython = true; };
 in
-with stdenv; mkDerivation rec {
+stdenv.mkDerivation rec {
   pname = "nextpnr";
   version = "2021.01.02";

View file

@@ -6,7 +6,7 @@ rustPlatform.buildRustPackage rec {
   # commit chosen by using the latest build from http://bin.zetz.it/
   src = fetchFromGitHub {
-    owner = "aep";
+    owner = "zetzit";
     repo = "zz";
     rev = "0b5c52674e9adf795fbfb051d4dceef3126e669f";
     sha256 = "0bb77ll1g5i6a04ybpgx6lqsb74xs4v4nyqm9j4j6x24407h8l89";
@@ -16,13 +16,23 @@ rustPlatform.buildRustPackage rec {
   cargoSha256 = "1lf4k3n89w2797c1yrj1dp97y8a8d5hnixr1nwa2qcq1sxmm5rcg";
+  postPatch = ''
+    # remove search path entry which would reference /build
+    sed -i '/env!("CARGO_MANIFEST_DIR")/d' src/lib.rs
+  '';
   postInstall = ''
-    wrapProgram $out/bin/zz --prefix PATH ":" "${lib.getBin z3}/bin"
+    mkdir -p "$out/share/zz"
+    cp -r modules "$out/share/zz/"
+    wrapProgram $out/bin/zz \
+      --prefix PATH ":" "${lib.getBin z3}/bin" \
+      --suffix ZZ_MODULE_PATHS ":" "$out/share/zz/modules"
   '';
   meta = with lib; {
     description = "🍺🐙 ZetZ a zymbolic verifier and tranzpiler to bare metal C";
-    homepage = "https://github.com/aep/zz";
+    homepage = "https://github.com/zetzit/zz";
     license = licenses.mit;
     maintainers = [ maintainers.marsam ];
   };

View file

@@ -327,6 +327,37 @@ in package-set { inherit pkgs lib callPackage; } self // {
   # packages. You should set this to true if you have benchmarks defined
   # in your local packages that you want to be able to run with cabal benchmark
   doBenchmark ? false
+  # An optional function that can modify the generic builder arguments
+  # for the fake package that shellFor uses to construct its environment.
+  #
+  # Example:
+  #   let
+  #     # elided...
+  #     haskellPkgs = pkgs.haskell.packages.ghc884.override (hpArgs: {
+  #       overrides = pkgs.lib.composeExtensions (hpArgs.overrides or (_: _: { })) (
+  #         _hfinal: hprev: {
+  #           mkDerivation = args: hprev.mkDerivation ({
+  #             doCheck = false;
+  #             doBenchmark = false;
+  #             doHoogle = true;
+  #             doHaddock = true;
+  #             enableLibraryProfiling = false;
+  #             enableExecutableProfiling = false;
+  #           } // args);
+  #         }
+  #       );
+  #     });
+  #   in
+  #   hpkgs.shellFor {
+  #     packages = p: [ p.foo ];
+  #     genericBuilderArgsModifier = args: args // { doCheck = true; doBenchmark = true };
+  #   }
+  #
+  # This will disable tests and benchmarks for everything in "haskellPkgs"
+  # (which will invalidate the binary cache), and then re-enable them
+  # for the "shellFor" environment (ensuring that any test/benchmark
+  # dependencies for "foo" will be available within the nix-shell).
+, genericBuilderArgsModifier ? (args: args)
 , ...
 } @ args:
 let
@@ -443,7 +474,7 @@ in package-set { inherit pkgs lib callPackage; } self // {
   # This is a derivation created with `haskellPackages.mkDerivation`.
   #
   # pkgWithCombinedDeps :: HaskellDerivation
-  pkgWithCombinedDeps = self.mkDerivation genericBuilderArgs;
+  pkgWithCombinedDeps = self.mkDerivation (genericBuilderArgsModifier genericBuilderArgs);
   # The derivation returned from `envFunc` for `pkgWithCombinedDeps`.
   #
@@ -457,7 +488,7 @@ in package-set { inherit pkgs lib callPackage; } self // {
   # pkgWithCombinedDepsDevDrv :: Derivation
   pkgWithCombinedDepsDevDrv = pkgWithCombinedDeps.envFunc { inherit withHoogle; };
-  mkDerivationArgs = builtins.removeAttrs args [ "packages" "withHoogle" "doBenchmark" ];
+  mkDerivationArgs = builtins.removeAttrs args [ "genericBuilderArgsModifier" "packages" "withHoogle" "doBenchmark" ];
 in pkgWithCombinedDepsDevDrv.overrideAttrs (old: mkDerivationArgs // {
   nativeBuildInputs = old.nativeBuildInputs ++ mkDerivationArgs.nativeBuildInputs or [];

View file

@@ -1,6 +1,6 @@
-{stdenv, fetchurl, libusb-compat-0_1}:
+{lib, stdenv, fetchurl, libusb-compat-0_1}:
-with stdenv; mkDerivation rec {
+stdenv.mkDerivation rec {
   name = "libftdi-0.20";
   src = fetchurl {
@@ -14,7 +14,7 @@ with stdenv; mkDerivation rec {
   # Hack to avoid TMPDIR in RPATHs.
   preFixup = ''rm -rf "$(pwd)" '';
-  configureFlags = lib.optional (!isDarwin) "--with-async-mode";
+  configureFlags = lib.optional (!stdenv.isDarwin) "--with-async-mode";
   # allow async mode. from ubuntu. see:
   # https://bazaar.launchpad.net/~ubuntu-branches/ubuntu/trusty/libftdi/trusty/view/head:/debian/patches/04_async_mode.diff

View file

@@ -8,12 +8,12 @@
 stdenv.mkDerivation rec {
   pname = "mapnik";
-  version = "3.0.23";
+  version = "3.1.0";
   src = fetchzip {
     # this one contains all git submodules and is cheaper than fetchgit
     url = "https://github.com/mapnik/mapnik/releases/download/v${version}/mapnik-v${version}.tar.bz2";
-    sha256 = "1754m8y7fyk0dyf7cglhavsk66g5kdqhnmgicib1jkcgjbl69f15";
+    sha256 = "sha256-qqPqN4vs3ZsqKgnx21yQhX8OzHca/0O+3mvQ/vnC5EY=";
   };
   # a distinct dev output makes python-mapnik fail

View file

@@ -14,7 +14,8 @@ let
       };
       nativeBuildInputs = [ cmake ];
-      buildInputs = [ fmt ];
+      # spdlog <1.3 uses a bundled version of fmt
+      propagatedBuildInputs = lib.optional (lib.versionAtLeast version "1.3") fmt;
       cmakeFlags = [
         "-DSPDLOG_BUILD_SHARED=${if stdenv.hostPlatform.isStatic then "OFF" else "ON"}"
@@ -25,7 +26,9 @@ let
         "-DSPDLOG_FMT_EXTERNAL=ON"
       ];
-      outputs = [ "out" "doc" ];
+      outputs = [ "out" "doc" ]
+        # spdlog <1.4 is header only, no need to split libraries and headers
+        ++ lib.optional (lib.versionAtLeast version "1.4") "dev";
       postInstall = ''
         mkdir -p $out/share/doc/spdlog

View file

@@ -96,6 +96,7 @@
 , "graphql-cli"
 , "grunt-cli"
 , "makam"
+, "meshcommander"
 , "gqlint"
 , "gtop"
 , "gulp"

File diff suppressed because it is too large

View file

@@ -4,7 +4,7 @@
 }:
 with lib;
 let fetched = import ../../../build-support/coq/meta-fetch/default.nix
-  {inherit stdenv fetchzip; } ({
+  {inherit lib stdenv fetchzip; } ({
     release."1.12.0".sha256 = "1agisdnaq9wrw3r73xz14yrq3wx742i6j8i5icjagqk0ypmly2is";
     release."1.11.4".sha256 = "1m0jk9swcs3jcrw5yyw5343v8mgax238cjb03s8gc4wipw1fn9f5";
     releaseRev = v: "v${v}";

View file

@@ -2,13 +2,13 @@
 buildGoPackage rec {
   pname = "tfsec";
-  version = "0.36.10";
+  version = "0.37.1";
   src = fetchFromGitHub {
     owner = "tfsec";
     repo = pname;
     rev = "v${version}";
-    sha256 = "11kv13d4cw515r79azfha1ksmvsha1rvg0jak9nvz9ggivyn0s7a";
+    sha256 = "sha256-ZYJqnyIFwyPODFqXAZp1ep0Ofl/JH2F07gqPx4WZ7mo=";
   };
   goPackagePath = "github.com/tfsec/tfsec";

View file

@@ -2,11 +2,11 @@
 stdenv.mkDerivation rec {
   pname = "clojure-lsp";
-  version = "2021.01.20-01.39.32";
+  version = "2021.01.26-22.35.27";
   src = fetchurl {
     url = "https://github.com/clojure-lsp/clojure-lsp/releases/download/${version}/${pname}.jar";
-    sha256 = "sha256-DqvAIM5YHtcUNZHoH+fcZym6EaPX5a/vgphTFfTO6bU=";
+    sha256 = "sha256-kYxOrallox/LnAdZ4wTWZDlzt5GR0/s6nlG6CO0/pRw=";
   };
   dontUnpack = true;

View file

@@ -1,10 +1,8 @@
-{ stdenv, mkDerivation, fetchFromGitHub, makeDesktopItem, makeWrapper
+{ lib, stdenv, mkDerivation, fetchFromGitHub, makeDesktopItem, makeWrapper
 , python, pkg-config, SDL2, SDL2_ttf, alsaLib, which, qtbase, libXinerama
 , libpcap, CoreAudioKit, ForceFeedback
 , installShellFiles }:
-with stdenv;
 let
   majorVersion = "0";
   minorVersion = "226";

View file

@@ -189,6 +189,23 @@ let
       };
     };
+    elmtooling.elm-ls-vscode = buildVscodeMarketplaceExtension {
+      mktplcRef = {
+        name = "elm-ls-vscode";
+        publisher = "Elmtooling";
+        version = "2.0.1";
+        sha256 = "06x5ld2r1hzns2s052mvhmfiaawjzcn0jf5lkfprhmrkxnmfdd43";
+      };
+      meta = with lib; {
+        changelog = "https://marketplace.visualstudio.com/items/Elmtooling.elm-ls-vscode/changelog";
+        description = "Elm language server";
+        downloadPage = "https://marketplace.visualstudio.com/items?itemName=Elmtooling.elm-ls-vscode";
+        homepage = "https://github.com/elm-tooling/elm-language-client-vscode";
+        license = licenses.mit;
+        maintainers = with maintainers; [ mcwitt ];
+      };
+    };
+
    esbenp.prettier-vscode = buildVscodeMarketplaceExtension {
      meta = with lib; {
        changelog = "https://marketplace.visualstudio.com/items/esbenp.prettier-vscode/changelog";

View file

@@ -10,15 +10,14 @@
 buildGoModule rec {
   pname = "oci-seccomp-bpf-hook";
-  version = "1.2.0";
+  version = "1.2.1";
   src = fetchFromGitHub {
     owner = "containers";
     repo = "oci-seccomp-bpf-hook";
     rev = "v${version}";
-    sha256 = "143x4daixzhhhpli1l14r7dr7dn3q42w8dddr16jzhhwighsirqw";
+    sha256 = "0zbrpv6j4gd4l36zl2dljazdm85qlqwchf0xvmnaywcj8c8b49xw";
   };
   vendorSha256 = null;
-  doCheck = false;
   outputs = [ "out" "man" ];
   nativeBuildInputs = [
@@ -31,6 +30,10 @@ buildGoModule rec {
     libseccomp
   ];
+  checkPhase = ''
+    go test -v ./...
+  '';
   buildPhase = ''
     make
   '';

View file

@@ -26,11 +26,11 @@ in stdenv.mkDerivation rec {
   pname = "postfix";
-  version = "3.5.8";
+  version = "3.5.9";
   src = fetchurl {
     url = "ftp://ftp.cs.uu.nl/mirror/postfix/postfix-release/official/${pname}-${version}.tar.gz";
-    sha256 = "0vs50z5p5xcrdbbkb0dnbx1sk5fx8d2z97sw2p2iip1yrwl2cn12";
+    sha256 = "0avn00drmk9c9mjynfvcmir72ss9s3mckdhjm3mmnhas2sixbkji";
   };
   nativeBuildInputs = [ makeWrapper m4 ];

View file

@@ -3,12 +3,12 @@
 }:
 stdenv.mkDerivation rec {
-  version = "6.0.6";
+  version = "6.0.10";
   pname = "redis";
   src = fetchurl {
     url = "http://download.redis.io/releases/${pname}-${version}.tar.gz";
-    sha256 = "151x6qicmrmlxkmiwi2vdq8p50d52b9gglp8csag6pmgcfqlkb8j";
+    sha256 = "1gc529nfh8frk4pynyjlnmzvwa0j9r5cmqwyd7537sywz6abifvr";
   };
   # Cross-compiling fixes

View file

@@ -0,0 +1,70 @@
{ stdenv
, autoPatchelfHook
, fetchurl
, file
, makeWrapper
, ncurses
, nixosTests
, openssl
, unzip
, zlib
}:
stdenv.mkDerivation {
pname = "pleroma-otp";
version = "2.2.2";
# To find the latest stable binary release link, have a look at the
# CI pipeline for the latest commit of the stable branch:
# https://git.pleroma.social/pleroma/pleroma/-/tree/stable
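# Upstream publishes one binary artifact per platform; pick the one matching the host system.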
src = {
aarch64-linux = fetchurl {
url = "https://git.pleroma.social/pleroma/pleroma/-/jobs/175288/artifacts/download";
sha256 = "107kp5zqwq1lixk1cwkx4v7zpm0h248xzlm152aj36ghb43j2snw";
};
x86_64-linux = fetchurl {
url = "https://git.pleroma.social/pleroma/pleroma/-/jobs/175284/artifacts/download";
sha256 = "1c6l04gga9iigm249ywwcrjg6wzy8iiid652mws3j9dnl71w2sim";
};
}."${stdenv.hostPlatform.system}";
nativeBuildInputs = [ unzip ];
buildInputs = [
autoPatchelfHook
file
makeWrapper
ncurses
openssl
zlib
];
# mkDerivation fails to detect that $src is a zip archive because the
# file name lacks a .zip extension.
# Unpack the archive explicitly instead.
unpackCmd = "unzip $curSrc";
installPhase = ''
mkdir $out
cp -r * $out'';
# Pleroma uses the project's root path (here, the store path) as its
# TMPDIR.
# Point the temporary directory at the real /tmp instead.
postFixup = ''
wrapProgram $out/bin/pleroma \
--set-default RELEASE_TMP "/tmp"
wrapProgram $out/bin/pleroma_ctl \
--set-default RELEASE_TMP "/tmp"'';
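# Expose the pleroma NixOS test so it can be run against this package.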
passthru.tests = {
pleroma = nixosTests.pleroma;
};
meta = {
description = "ActivityPub microblogging server";
homepage = "https://git.pleroma.social/pleroma/pleroma";
license = stdenv.lib.licenses.agpl3;
maintainers = with stdenv.lib.maintainers; [ ninjatrappeur ];
platforms = [ "x86_64-linux" "aarch64-linux" ];
};
}

View file

@@ -24,12 +24,12 @@ let
   ctlpath = lib.makeBinPath [ bash gnused gnugrep coreutils util-linux procps ];
 in stdenv.mkDerivation rec {
-  version = "20.03";
+  version = "20.12";
   pname = "ejabberd";
   src = fetchurl {
     url = "https://www.process-one.net/downloads/downloads-action.php?file=/${version}/${pname}-${version}.tgz";
-    sha256 = "0i013l9cygmgainfid298n6llhs3mblfklry3jw2a6irvhffym0s";
+    sha256 = "sha256-nZxdYXRyv4UejPLHNT/p6CrvW22Koo7rZSi96KRjqFQ=";
   };
   nativeBuildInputs = [ fakegit ];
@@ -76,7 +76,7 @@ in stdenv.mkDerivation rec {
     outputHashMode = "recursive";
     outputHashAlgo = "sha256";
-    outputHash = "0xwgi9hy6y0m8mwznl6px98kdmkcxg98k62zgqbaqd4paks5zwqa";
+    outputHash = "sha256-0/hBgA+9rsDOBcvbROSpc5Xnw4JkYpuLCl2V+lJnieY=";
   };
   configureFlags =
@@ -115,7 +115,7 @@ in stdenv.mkDerivation rec {
     license = licenses.gpl2;
     homepage = "https://www.ejabberd.im";
     platforms = platforms.linux;
-    maintainers = with maintainers; [ sander abbradar ajs124 ];
+    maintainers = with maintainers; [ sander abbradar ];
     broken = withElixir;
   };
 }

View file

@@ -2,11 +2,11 @@
 stdenv.mkDerivation rec {
   pname = "abcMIDI";
-  version = "2021.01.21";
+  version = "2021.01.25";
   src = fetchzip {
     url = "https://ifdo.ca/~seymour/runabc/${pname}-${version}.zip";
-    sha256 = "184d59pc60dax60j3nzcsr5kflaygbjpbrwg6r4bky0q0sg17z5i";
+    sha256 = "1c2jx03ssm9yyv6rgr5wfv88ivfgdgq3889yaghjyvllm3nv9380";
   };
   # There is also a file called "makefile" which seems to be preferred by the standard build phase

View file

@@ -1,4 +1,4 @@
-{ stdenv, runCommand, fetchurl
+{ lib, stdenv, runCommand, fetchurl
 , fetchpatch
 , ensureNewerSourcesHook
 , cmake, pkg-config
@@ -36,7 +36,6 @@
 # We must have one crypto library
 assert cryptopp != null || (nss != null && nspr != null);
-with stdenv; with lib;
 let
   shouldUsePkg = pkg: if pkg != null && pkg.meta.available then pkg else null;
@@ -76,7 +75,7 @@ let
     none = [ ];
   };
-  getMeta = description: {
+  getMeta = description: with lib; {
     homepage = "https://ceph.com/";
     inherit description;
     license = with licenses; [ lgpl21 gpl2 bsd3 mit publicDomain ];
@@ -148,11 +147,11 @@ in rec {
     boost ceph-python-env libxml2 optYasm optLibatomic_ops optLibs3
     malloc zlib openldap lttng-ust babeltrace gperf gtest cunit
     snappy rocksdb lz4 oathToolkit leveldb libnl libcap_ng rdkafka
-  ] ++ optionals stdenv.isLinux [
+  ] ++ lib.optionals stdenv.isLinux [
     linuxHeaders util-linux libuuid udev keyutils optLibaio optLibxfs optZfs
     # ceph 14
     rdma-core rabbitmq-c
-  ] ++ optionals hasRadosgw [
+  ] ++ lib.optionals hasRadosgw [
     optFcgi optExpat optCurl optFuse optLibedit
   ];

View file

@@ -1,9 +1,7 @@
-{ stdenv, bzip2, zlib, autoconf, automake, cmake, gnumake, help2man , texinfo, libtool , cppzmq , libarchive, avro-cpp, boost, jansson, zeromq, openssl, pam, libiodbc, kerberos, gcc, libcxx, which, catch2 }:
+{ lib, stdenv, bzip2, zlib, autoconf, automake, cmake, gnumake, help2man , texinfo, libtool , cppzmq , libarchive, avro-cpp, boost, jansson, zeromq, openssl, pam, libiodbc, kerberos, gcc, libcxx, which, catch2 }:
 # Common attributes of irods packages
-with stdenv;
 {
   nativeBuildInputs = [ autoconf automake cmake gnumake help2man texinfo which gcc ];
   buildInputs = [ bzip2 zlib libtool cppzmq libarchive avro-cpp jansson zeromq openssl pam libiodbc kerberos boost libcxx catch2 ];
@@ -35,7 +33,7 @@ with stdenv;
     "
   '';
-  meta = {
+  meta = with lib; {
     description = "Integrated Rule-Oriented Data System (iRODS)";
     longDescription = ''
       The Integrated Rule-Oriented Data System (iRODS) is open source data management

View file

@@ -1,13 +1,11 @@
-{ stdenv, fetchFromGitHub, bzip2, zlib, autoconf, automake, cmake, gnumake, help2man , texinfo, libtool , cppzmq , libarchive, avro-cpp_llvm, boost, jansson, zeromq, openssl , pam, libiodbc, kerberos, gcc, libcxx, which, catch2 }:
+{ lib, stdenv, fetchFromGitHub, bzip2, zlib, autoconf, automake, cmake, gnumake, help2man , texinfo, libtool , cppzmq , libarchive, avro-cpp_llvm, boost, jansson, zeromq, openssl , pam, libiodbc, kerberos, gcc, libcxx, which, catch2 }:
-with stdenv;
 let
   avro-cpp=avro-cpp_llvm;
 in
 let
   common = import ./common.nix {
-    inherit stdenv bzip2 zlib autoconf automake cmake gnumake
+    inherit lib stdenv bzip2 zlib autoconf automake cmake gnumake
             help2man texinfo libtool cppzmq libarchive jansson
             zeromq openssl pam libiodbc kerberos gcc libcxx
             boost avro-cpp which catch2;
@@ -83,7 +81,7 @@ in rec {
     '';
     cmakeFlags = common.cmakeFlags ++ [
-      "-DCMAKE_INSTALL_PREFIX=${out}"
+      "-DCMAKE_INSTALL_PREFIX=${stdenv.out}"
       "-DIRODS_DIR=${irods}/lib/irods/cmake"
      "-DCMAKE_EXE_LINKER_FLAGS=-Wl,-rpath,${irods}/lib"
      "-DCMAKE_MODULE_LINKER_FLAGS=-Wl,-rpath,${irods}/lib"

View file

@@ -1,28 +0,0 @@
{lib, stdenv, fetchurl, apacheAnt, jdk, axis2, dbus_java }:
stdenv.mkDerivation {
name = "DisnixWebService-0.10";
src = fetchurl {
url = "https://github.com/svanderburg/DisnixWebService/releases/download/DisnixWebService-0.10/DisnixWebService-0.10.tar.gz";
sha256 = "0m451msd127ay09yb8rbflg68szm8s4hh65j99f7s3mz375vc114";
};
buildInputs = [ apacheAnt jdk ];
PREFIX = "\${env.out}";
AXIS2_LIB = "${axis2}/lib";
AXIS2_WEBAPP = "${axis2}/webapps/axis2";
DBUS_JAVA_LIB = "${dbus_java}/share/java";
prePatch = ''
sed -i -e "s|#JAVA_HOME=|JAVA_HOME=${jdk}|" \
-e "s|#AXIS2_LIB=|AXIS2_LIB=${axis2}/lib|" \
scripts/disnix-soap-client
'';
buildPhase = "ant";
installPhase = "ant install";
meta = {
description = "A SOAP interface and client for Disnix";
license = lib.licenses.mit;
maintainers = [ lib.maintainers.sander ];
platforms = lib.platforms.linux;
};
}

View file

@@ -1,20 +0,0 @@
{ lib, stdenv, fetchurl, pkg-config, glib, libxml2, libxslt, getopt, gettext, nixUnstable, dysnomia, libintl, libiconv, help2man, doclifter, docbook5, dblatex, doxygen, libnixxml, autoreconfHook }:
stdenv.mkDerivation {
name = "disnix-0.10";
src = fetchurl {
url = "https://github.com/svanderburg/disnix/releases/download/disnix-0.10/disnix-0.10.tar.gz";
sha256 = "0mciqbc2h60nc0i6pd36w0m2yr96v97ybrzrqzh5f67ac1f0gqwg";
};
nativeBuildInputs = [ pkg-config ];
buildInputs = [ glib libxml2 libxslt getopt nixUnstable libintl libiconv dysnomia ];
meta = {
description = "A Nix-based distributed service deployment tool";
license = lib.licenses.lgpl21Plus;
maintainers = with lib.maintainers; [ sander tomberek ];
platforms = lib.platforms.unix;
};
}

View file

@@ -1,20 +0,0 @@
{ lib, stdenv, fetchurl, dysnomia, disnix, socat, pkg-config, getopt }:
stdenv.mkDerivation {
name = "disnixos-0.9";
src = fetchurl {
url = "https://github.com/svanderburg/disnixos/releases/download/disnixos-0.9/disnixos-0.9.tar.gz";
sha256 = "0vllm5a8d9dvz5cjiq1mmkc4r4vnljabq42ng0ml85sjn0w7xvm7";
};
nativeBuildInputs = [ pkg-config ];
buildInputs = [ socat dysnomia disnix getopt ];
meta = {
description = "Provides complementary NixOS infrastructure deployment to Disnix";
license = lib.licenses.lgpl21Plus;
maintainers = [ lib.maintainers.sander ];
platforms = lib.platforms.linux;
};
}

View file

@@ -1,27 +0,0 @@
{ lib, stdenv, fetchFromGitHub, autoconf, automake, libtool , pkg-config, glib, libxml2, libxslt, getopt, libiconv, gettext, nix, disnix, libnixxml }:
stdenv.mkDerivation rec {
version="2020-07-04";
name = "dydisnix-${version}";
src = fetchFromGitHub {
owner = "svanderburg";
repo = "dydisnix";
rev = "e99091f1c2329d562097e35faedee80622d387f0";
sha256 = "sha256-XKab2hNGtWDkIEMxE1vMvqQBTP9BvHTabBVfzpH57h0=";
};
nativeBuildInputs = [ pkg-config autoconf automake libtool ];
buildInputs = [ glib libxml2 libxslt getopt nix disnix libiconv gettext libnixxml ];
preConfigure = ''
./bootstrap
'';
meta = {
description = "A toolset enabling self-adaptive redeployment on top of Disnix";
longDescription = "Dynamic Disnix is a (very experimental!) prototype extension framework for Disnix supporting dynamic (re)deployment of service-oriented systems.";
license = lib.licenses.lgpl21Plus;
maintainers = [ lib.maintainers.tomberek ];
platforms = lib.platforms.unix;
};
}

View file

@@ -1,71 +0,0 @@
{ lib, stdenv, fetchurl, netcat
, systemd ? null, ejabberd ? null, mysql ? null, postgresql ? null, subversion ? null, mongodb ? null, mongodb-tools ? null, influxdb ? null, supervisor ? null, docker ? null
, enableApacheWebApplication ? false
, enableAxis2WebService ? false
, enableEjabberdDump ? false
, enableMySQLDatabase ? false
, enablePostgreSQLDatabase ? false
, enableSubversionRepository ? false
, enableTomcatWebApplication ? false
, enableMongoDatabase ? false
, enableInfluxDatabase ? false
, enableSupervisordProgram ? false
, enableDockerContainer ? true
, enableLegacy ? false
, catalinaBaseDir ? "/var/tomcat"
, jobTemplate ? "systemd"
, getopt
}:
assert enableMySQLDatabase -> mysql != null;
assert enablePostgreSQLDatabase -> postgresql != null;
assert enableSubversionRepository -> subversion != null;
assert enableEjabberdDump -> ejabberd != null;
assert enableMongoDatabase -> (mongodb != null && mongodb-tools != null);
assert enableInfluxDatabase -> influxdb != null;
assert enableSupervisordProgram -> supervisor != null;
assert enableDockerContainer -> docker != null;
stdenv.mkDerivation {
name = "dysnomia-0.10";
src = fetchurl {
url = "https://github.com/svanderburg/dysnomia/releases/download/dysnomia-0.10/dysnomia-0.10.tar.gz";
sha256 = "19zg4nhn0f9v4i7c9hhan1i4xv3ljfpl2d0s84ph8byiscvhyrna";
};
preConfigure = if enableEjabberdDump then "export PATH=$PATH:${ejabberd}/sbin" else "";
configureFlags = [
(if enableApacheWebApplication then "--with-apache" else "--without-apache")
(if enableAxis2WebService then "--with-axis2" else "--without-axis2")
(if enableEjabberdDump then "--with-ejabberd" else "--without-ejabberd")
(if enableMySQLDatabase then "--with-mysql" else "--without-mysql")
(if enablePostgreSQLDatabase then "--with-postgresql" else "--without-postgresql")
(if enableSubversionRepository then "--with-subversion" else "--without-subversion")
(if enableTomcatWebApplication then "--with-tomcat=${catalinaBaseDir}" else "--without-tomcat")
(if enableMongoDatabase then "--with-mongodb" else "--without-mongodb")
(if enableInfluxDatabase then "--with-influxdb" else "--without-influxdb")
(if enableSupervisordProgram then "--with-supervisord" else "--without-supervisord")
(if enableDockerContainer then "--with-docker" else "--without-docker")
"--with-job-template=${jobTemplate}"
] ++ lib.optional enableLegacy "--enable-legacy";
buildInputs = [ getopt netcat ]
++ lib.optional stdenv.isLinux systemd
++ lib.optional enableEjabberdDump ejabberd
++ lib.optional enableMySQLDatabase mysql.out
++ lib.optional enablePostgreSQLDatabase postgresql
++ lib.optional enableSubversionRepository subversion
++ lib.optional enableMongoDatabase mongodb
++ lib.optional enableMongoDatabase mongodb-tools
++ lib.optional enableInfluxDatabase influxdb
++ lib.optional enableSupervisordProgram supervisor
++ lib.optional enableDockerContainer docker;
meta = {
description = "Automated deployment of mutable components and services for Disnix";
license = lib.licenses.mit;
maintainers = [ lib.maintainers.sander ];
platforms = lib.platforms.unix;
};
}

View file

@@ -2,17 +2,17 @@
 buildGoModule rec {
   pname = "gotop";
-  version = "4.0.1";
+  version = "4.1.0";
   src = fetchFromGitHub {
     owner = "xxxserxxx";
     repo = pname;
     rev = "v${version}";
-    sha256 = "10qfzmq1wdgpvv319khzicalix1x4fqava0wry3bzz84k5c9dabs";
+    sha256 = "09cs97fjjxcjxzsl2kh8j607cs5zy2hnrh1pb21pggzhg7dzsz0w";
   };
   runVend = true;
-  vendorSha256 = "09vdhdgj74ifdhl6rmxddkvk7ls26jn8gswzcxf9389zkjzi7822";
+  vendorSha256 = "1mbjl7b49naaqkr2j658j17z9ryf5g3x6q34gvmrm7n9y082ggnz";
   preCheck = ''
     export HOME=$(mktemp -d)
@@ -21,7 +21,8 @@ buildGoModule rec {
   meta = with lib; {
     description = "A terminal based graphical activity monitor inspired by gtop and vtop";
     homepage = "https://github.com/xxxserxxx/gotop";
-    license = licenses.agpl3;
+    changelog = "https://github.com/xxxserxxx/gotop/blob/v${version}/CHANGELOG.md";
+    license = licenses.mit;
     maintainers = [ maintainers.magnetophon ];
     platforms = platforms.unix;
   };

View file

@@ -2,7 +2,7 @@
 , texlive
 , zlib, libiconv, libpng, libX11
 , freetype, gd, libXaw, icu, ghostscript, libXpm, libXmu, libXext
-, perl, perlPackages, python2Packages, pkg-config
+, perl, perlPackages, python3Packages, pkg-config
 , poppler, libpaper, graphite2, zziplib, harfbuzz, potrace, gmp, mpfr
 , brotli, cairo, pixman, xorg, clisp, biber, woff2, xxHash
 , makeWrapper, shortenPerlShebang
@@ -321,13 +321,13 @@ latexindent = perlPackages.buildPerlPackage rec {
 };
-pygmentex = python2Packages.buildPythonApplication rec {
+pygmentex = python3Packages.buildPythonApplication rec {
   pname = "pygmentex";
   inherit (src) version;
   src = lib.head (builtins.filter (p: p.tlType == "run") texlive.pygmentex.pkgs);
-  propagatedBuildInputs = with python2Packages; [ pygments chardet ];
+  propagatedBuildInputs = with python3Packages; [ pygments chardet ];
   dontBuild = true;

View file

@@ -29,7 +29,7 @@ let
     [ "de-macro" "pythontex" "dviasm" "texliveonfly" ];
   pkgNeedsRuby = pkg: pkg.tlType == "run" && pkg.pname == "match-parens";
   extraInputs =
-    lib.optional (lib.any pkgNeedsPython splitBin.wrong) python
+    lib.optional (lib.any pkgNeedsPython splitBin.wrong) python3
     ++ lib.optional (lib.any pkgNeedsRuby splitBin.wrong) ruby;
 };

View file

@@ -4,7 +4,7 @@
  */
 { stdenv, lib, fetchurl, runCommand, writeText, buildEnv
 , callPackage, ghostscriptX, harfbuzz, poppler_min
-, makeWrapper, python, ruby, perl
+, makeWrapper, python3, ruby, perl
 , useFixedHashes ? true
 , recurseIntoAttrs
 }:
@@ -25,7 +25,7 @@ let
   # function for creating a working environment from a set of TL packages
   combine = import ./combine.nix {
     inherit bin combinePkgs buildEnv lib makeWrapper writeText
-      stdenv python ruby perl;
+      stdenv python3 ruby perl;
     ghostscript = ghostscriptX; # could be without X, probably, but we use X above
   };

View file

@@ -123,6 +123,9 @@ mapAliases ({
   dbus_glib = dbus-glib; # added 2018-02-25
   dbus_libs = dbus; # added 2018-04-25
   diffuse = throw "diffuse has been removed from nixpkgs, as it's unmaintained"; # added 2019-12-10
+  disnix = throw "disnix has been removed."; # added 2021-01-27
+  disnixos = throw "disnixos has been removed."; # added 2021-01-27
+  DisnixWebService = throw "DisnixWebService has been removed."; # added 2021-01-27
   dbus_tools = dbus.out; # added 2018-04-25
   deadbeef-mpris2-plugin = deadbeefPlugins.mpris2; # added 2018-02-23
   deadpixi-sam = deadpixi-sam-unstable;
@@ -139,6 +142,8 @@ mapAliases ({
   docker_compose = docker-compose; # 2018-11-10
   draftsight = throw "draftsight has been removed, no longer available as freeware"; # added 2020-08-14
   dvb_apps = throw "dvb_apps has been removed."; # added 2020-11-03
+  dydisnix = throw "dydisnix has been removed."; # added 2021-01-27
+  dysnomia = throw "dysnomia has been removed."; # added 2021-01-27
   dwarf_fortress = dwarf-fortress; # added 2016-01-23
   elasticmq = throw "elasticmq has been removed in favour of elasticmq-server-bin"; # added 2021-01-17
   emacsPackagesGen = emacsPackagesFor; # added 2018-08-18

View file

@@ -7056,6 +7056,8 @@ in
   tautulli = python3Packages.callPackage ../servers/tautulli { };
+  pleroma-otp = callPackage ../servers/pleroma-otp { };
+
   ploticus = callPackage ../tools/graphics/ploticus {
     libpng = libpng12;
   };
@@ -28884,18 +28886,6 @@ in
   solfege = python3Packages.callPackage ../misc/solfege { };
-  disnix = callPackage ../tools/package-management/disnix { };
-
-  dysnomia = callPackage ../tools/package-management/disnix/dysnomia (config.disnix or {
-    inherit (pythonPackages) supervisor;
-  });
-
-  dydisnix = callPackage ../tools/package-management/disnix/dydisnix { };
-
-  disnixos = callPackage ../tools/package-management/disnix/disnixos { };
-
-  DisnixWebService = callPackage ../tools/package-management/disnix/DisnixWebService { };
-
   lkproof = callPackage ../tools/typesetting/tex/lkproof { };
   lice = callPackage ../tools/misc/lice {};