Merge remote-tracking branch 'NixOS/master' into feature/futter-linux-desktop

This commit is contained in:
hacker1024 2023-04-25 12:51:56 +10:00
commit c4cbf526e9
2073 changed files with 71320 additions and 38375 deletions

5
.github/CODEOWNERS vendored
View file

@ -90,6 +90,9 @@
# NixOS integration test driver
/nixos/lib/test-driver @tfc
# NixOS QEMU virtualisation
/nixos/virtualisation/qemu-vm.nix @raitobezarius
# Systemd
/nixos/modules/system/boot/systemd.nix @NixOS/systemd
/nixos/modules/system/boot/systemd @NixOS/systemd
@ -139,7 +142,7 @@
# C compilers
/pkgs/development/compilers/gcc @matthewbauer
/pkgs/development/compilers/llvm @matthewbauer
/pkgs/development/compilers/llvm @matthewbauer @RaitoBezarius
# Compatibility stuff
/pkgs/top-level/unix-tools.nix @matthewbauer

View file

@ -43,6 +43,7 @@ Below is a short excerpt of some points in there:
* Not start with the package name.
* More generally, it should not refer to the package name.
* Not end with a period (or any punctuation for that matter).
* Aim to inform while avoiding subjective language.
* `meta.license` must be set and fit the upstream license.
* If there is no upstream license, `meta.license` should default to `lib.licenses.unfree`.
* If in doubt, try to contact the upstream developers for clarification.
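A hedged illustration of a `meta` block that follows these points (the package, description text, and license are invented for the example):
```nix
meta = {
  description = "Tool for doing X with Y";  # no package name, no trailing period, no subjective language
  license = lib.licenses.mit;               # matches the (hypothetical) upstream license
};
```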

1
doc/.gitignore vendored
View file

@ -8,3 +8,4 @@ manual-full.xml
out
result
result-*
media

View file

@ -9,4 +9,5 @@
<xi:include href="special/makesetuphook.section.xml" />
<xi:include href="special/mkshell.section.xml" />
<xi:include href="special/darwin-builder.section.xml" />
<xi:include href="special/vm-tools.section.xml" />
</chapter>

View file

@ -1,6 +1,6 @@
# buildFHSUserEnv {#sec-fhs-environments}
# buildFHSEnv {#sec-fhs-environments}
`buildFHSUserEnv` provides a way to build and run FHS-compatible lightweight sandboxes. It creates an isolated root with bound `/nix/store`, so its footprint in terms of disk space needed is quite small. This allows one to run software which is hard or unfeasible to patch for NixOS -- 3rd-party source trees with FHS assumptions, games distributed as tarballs, software with integrity checking and/or external self-updated binaries. It uses Linux namespaces feature to create temporary lightweight environments which are destroyed after all child processes exit, without root user rights requirement. Accepted arguments are:
`buildFHSEnv` provides a way to build and run FHS-compatible lightweight sandboxes. It creates an isolated root with a bound `/nix/store`, so its footprint in terms of disk space needed is quite small. This allows one to run software which is hard or unfeasible to patch for NixOS -- 3rd-party source trees with FHS assumptions, games distributed as tarballs, software with integrity checking and/or external self-updated binaries. It uses the Linux namespaces feature to create temporary lightweight environments, which are destroyed after all child processes exit, without requiring root privileges. Accepted arguments are:
- `name`
Environment name.
@ -26,7 +26,7 @@ One can create a simple environment using a `shell.nix` like that:
```nix
{ pkgs ? import <nixpkgs> {} }:
(pkgs.buildFHSUserEnv {
(pkgs.buildFHSEnv {
name = "simple-x11-env";
targetPkgs = pkgs: (with pkgs;
[ udev

View file

@ -0,0 +1,148 @@
# vmTools {#sec-vm-tools}
A set of VM-related utilities that help in building some packages in more advanced scenarios.
## `vmTools.createEmptyImage` {#vm-tools-createEmptyImage}
A bash script fragment that produces a disk image at `destination`.
### Attributes
* `size`. The disk size, in MiB.
* `fullName`. Name that will be written to `${destination}/nix-support/full-name`.
* `destination` (optional, default `$out`). Where to write the image files.
## `vmTools.runInLinuxVM` {#vm-tools-runInLinuxVM}
Run a derivation in a Linux virtual machine (using Qemu/KVM).
By default, there is no disk image; the root filesystem is a `tmpfs`, and the Nix store is shared with the host (via the [9P protocol](https://wiki.qemu.org/Documentation/9p#9p_Protocol)).
Thus, any pure Nix derivation should run unmodified.
If the build fails and Nix is run with the `-K/--keep-failed` option, a script `run-vm` will be left behind in the temporary build directory that allows you to boot into the VM and debug it interactively.
### Attributes
* `preVM` (optional). Shell command to be evaluated *before* the VM is started (i.e., on the host).
* `memSize` (optional, default `512`). The memory size of the VM in MiB.
* `diskImage` (optional). A file system image to be attached to `/dev/sda`.
Note that currently we expect the image to contain a filesystem, not a full disk image with a partition table etc.
### Examples
Build the derivation hello inside a VM:
```nix
{ pkgs }: with pkgs; with vmTools;
runInLinuxVM hello
```
Build inside a VM with extra memory:
```nix
{ pkgs }: with pkgs; with vmTools;
runInLinuxVM (hello.overrideAttrs (_: { memSize = 1024; }))
```
Use VM with a disk image (implicitly sets `diskImage`, see [`vmTools.createEmptyImage`](#vm-tools-createEmptyImage)):
```nix
{ pkgs }: with pkgs; with vmTools;
runInLinuxVM (hello.overrideAttrs (_: {
preVM = createEmptyImage {
size = 1024;
fullName = "vm-image";
};
}))
```
## `vmTools.extractFs` {#vm-tools-extractFs}
Takes a file, such as an ISO, and extracts its contents into the store.
### Attributes
* `file`. Path to the file to be extracted.
Note that currently we expect the image to contain a filesystem, not a full disk image with a partition table etc.
* `fs` (optional). Filesystem of the contents of the file.
### Examples
Extract the contents of an ISO file:
```nix
{ pkgs }: with pkgs; with vmTools;
extractFs { file = ./image.iso; }
```
## `vmTools.extractMTDfs` {#vm-tools-extractMTDfs}
Like [](#vm-tools-extractFs), but it makes use of a [Memory Technology Device (MTD)](https://en.wikipedia.org/wiki/Memory_Technology_Device).
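### Examples
A minimal sketch, assuming `extractMTDfs` accepts the same `file` and `fs` attributes as `extractFs` (the image path and filesystem type below are only illustrative):
```nix
{ pkgs }: with pkgs; with vmTools;
extractMTDfs { file = ./image.jffs2; fs = "jffs2"; }
```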
## `vmTools.runInLinuxImage` {#vm-tools-runInLinuxImage}
Like [](#vm-tools-runInLinuxVM), but instead of using `stdenv` from the Nix store, run the build using the tools provided by `/bin`, `/usr/bin`, etc. from the specified filesystem image, which typically is a filesystem containing a [FHS](https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard)-based Linux distribution.
## `vmTools.makeImageTestScript` {#vm-tools-makeImageTestScript}
Generate a script that can be used to run an interactive session in the given image.
### Examples
Create a script for running a Fedora 27 VM:
```nix
{ pkgs }: with pkgs; with vmTools;
makeImageTestScript diskImages.fedora27x86_64
```
Create a script for running an Ubuntu 20.04 VM:
```nix
{ pkgs }: with pkgs; with vmTools;
makeImageTestScript diskImages.ubuntu2004x86_64
```
## `vmTools.diskImageFuns` {#vm-tools-diskImageFuns}
A set of functions that build a predefined set of minimal Linux distribution images.
### Images
* Fedora
* `fedora26x86_64`
* `fedora27x86_64`
* CentOS
* `centos6i386`
* `centos6x86_64`
* `centos7x86_64`
* Ubuntu
* `ubuntu1404i386`
* `ubuntu1404x86_64`
* `ubuntu1604i386`
* `ubuntu1604x86_64`
* `ubuntu1804i386`
* `ubuntu1804x86_64`
* `ubuntu2004i386`
* `ubuntu2004x86_64`
* `ubuntu2204i386`
* `ubuntu2204x86_64`
* Debian
* `debian10i386`
* `debian10x86_64`
* `debian11i386`
* `debian11x86_64`
### Attributes
* `size` (optional, defaults to `4096`). The size of the image, in MiB.
* `extraPackages` (optional). A list of names of additional packages from the distribution that should be included in the image.
### Examples
8GiB image containing Firefox in addition to the default packages:
```nix
{ pkgs }: with pkgs; with vmTools;
diskImageFuns.ubuntu2004x86_64 { extraPackages = [ "firefox" ]; size = 8192; }
```
## `vmTools.diskImageExtraFuns` {#vm-tools-diskImageExtraFuns}
Shorthand for `vmTools.diskImageFuns.<attr> { extraPackages = ... }`.
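For example, a sketch assuming the shorthand takes the `extraPackages` list directly:
```nix
{ pkgs }: with pkgs; with vmTools;
diskImageExtraFuns.ubuntu2004x86_64 [ "firefox" ]
```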
## `vmTools.diskImages` {#vm-tools-diskImages}
Shorthand for `vmTools.diskImageFuns.<attr> { }`.
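For example, a resulting image can be attached to a VM build via the `diskImage` attribute described under [`vmTools.runInLinuxVM`](#vm-tools-runInLinuxVM) (a minimal sketch):
```nix
{ pkgs }: with pkgs; with vmTools;
runInLinuxVM (hello.overrideAttrs (_: {
  diskImage = diskImages.ubuntu2004x86_64;
}))
```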

View file

@ -1,6 +1,6 @@
{ pkgs, nixpkgs ? { }, libsets }:
let
revision = pkgs.lib.trivial.revisionWithDefault (nixpkgs.revision or "master");
revision = pkgs.lib.trivial.revisionWithDefault (nixpkgs.rev or "master");
libDefPos = prefix: set:
builtins.concatMap

View file

@ -37,7 +37,7 @@ The recommended way of defining a derivation for a Coq library, is to use the `c
* `buildInputs` (optional), is a list of libraries and dependencies that are required to build and run the current derivation, in addition to the default one `[ coq ]`,
* `extraBuildInputs` (optional, deprecated), an additional list of derivations to add to `buildInputs`,
* `overrideBuildInputs` (optional) replaces the default list of derivations to which `buildInputs` and `extraBuildInputs` add extra elements,
* `propagatedBuildInputs` (optional) is passed as is to `mkDerivation`, we recommend to use this for Coq libraries and Coq plugin dependencies, as this makes sure the paths of the compiled libraries and plugins will always be added to the build environements of subsequent derivation, which is necessary for Coq packages to work correctly,
* `propagatedBuildInputs` (optional) is passed as is to `mkDerivation`; we recommend using this for Coq libraries and Coq plugin dependencies, as this makes sure the paths of the compiled libraries and plugins will always be added to the build environments of subsequent derivations, which is necessary for Coq packages to work correctly,
* `mlPlugin` (optional, defaults to `false`). Some extensions (plugins) might require OCaml and sometimes other OCaml packages. Standard dependencies can be added by setting the current option to `true`. For a finer grain control, the `coq.ocamlPackages` attribute can be used in `nativeBuildInputs`, `buildInputs`, and `propagatedBuildInputs` to depend on the same package set Coq was built against.
* `useDuneifVersion` (optional, defaults to `(x: false)`) uses Dune to build the package if the provided predicate evaluates to true on the version, e.g. `useDuneifVersion = versions.isGe "1.1"` will use Dune if the version of the package is greater than or equal to `"1.1"`,
* `useDune` (optional, defaults to `false`) uses Dune to build the package if set to true, the presence of this attribute overrides the behavior of the previous one.

View file

@ -20,6 +20,7 @@ In the following is an example expression using `buildGoModule`, the following a
To obtain the actual hash, set `vendorHash = lib.fakeSha256;` and run the build ([more details here](#sec-source-hashes)).
- `proxyVendor`: Fetches (`go mod download`) and proxies the vendor directory. This is useful if your code depends on C code and `go mod tidy` does not include the needed sources to build, or if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorHash` checksums.
- `modPostBuild`: Shell commands to run after the build of the go-modules executes `go mod vendor`, and before calculating fixed output derivation's `vendorHash` (or `vendorSha256`). Note that if you change this attribute, you need to update `vendorHash` (or `vendorSha256`) attribute.
```nix
pet = buildGoModule rec {
@ -114,7 +115,16 @@ done
## Attributes used by the builders {#ssec-go-common-attributes}
Both `buildGoModule` and `buildGoPackage` can be tweaked to behave slightly differently, if the following attributes are used:
Many attributes [controlling the build phase](#variables-controlling-the-build-phase) are respected by both `buildGoModule` and `buildGoPackage`. Note that `buildGoModule` also reads the following attributes when building the `vendor/` go-modules fixed-output derivation:
- [`sourceRoot`](#var-stdenv-sourceRoot)
- [`prePatch`](#var-stdenv-prePatch)
- [`patches`](#var-stdenv-patches)
- [`patchFlags`](#var-stdenv-patchFlags)
- [`postPatch`](#var-stdenv-postPatch)
- [`preBuild`](#var-stdenv-preBuild)
In addition to the above attributes, and the many more variables respected also by `stdenv.mkDerivation`, both `buildGoModule` and `buildGoPackage` respect Go-specific attributes that tweak them to behave slightly differently:
### `ldflags` {#var-go-ldflags}
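As a non-authoritative sketch of a common pattern (the package name, version, and the `main.version` variable below are purely illustrative), `ldflags` can be used to strip debug information and inject the version at link time:
```nix
example = buildGoModule rec {
  pname = "example";
  version = "1.0.0";
  # src and vendorHash omitted for brevity
  # -X assumes the program declares a `main.version` variable
  ldflags = [ "-s" "-w" "-X main.version=${version}" ];
};
```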

View file

@ -108,7 +108,7 @@ haskell.compiler.ghcjs ghcjs-8.10.7
Each of those compiler versions has a corresponding attribute set built using
it. However, the non-standard package sets are not tested regularly and, as a
result, contain fewer working packages. The corresponding package set for GHC
9.4.4 is `haskell.packages.ghc944`. In fact `haskellPackages` is just an alias
9.4.5 is `haskell.packages.ghc945`. In fact `haskellPackages` is just an alias
for `haskell.packages.ghc927`:
```console

View file

@ -129,16 +129,21 @@ Let's present the luarocks way first and the manual one in a second time.
### Packaging a library on luarocks {#packaging-a-library-on-luarocks}
[Luarocks.org](https://luarocks.org/) is the main repository of lua packages.
The site proposes two types of packages, the rockspec and the src.rock
The site offers two types of packages: the `rockspec` and the `src.rock`
(equivalent of a [rockspec](https://github.com/luarocks/luarocks/wiki/Rockspec-format) but with the source).
These packages can have different build types, such as `cmake`, `builtin`, etc.
Luarocks-based packages are generated in pkgs/development/lua-modules/generated-packages.nix from
the whitelist maintainers/scripts/luarocks-packages.csv and updated by running maintainers/scripts/update-luarocks-packages.
Luarocks-based packages are generated in [pkgs/development/lua-modules/generated-packages.nix](https://github.com/NixOS/nixpkgs/tree/master/pkgs/development/lua-modules/generated-packages.nix) from
the whitelist maintainers/scripts/luarocks-packages.csv and updated by running
the script
[maintainers/scripts/update-luarocks-packages](https://github.com/NixOS/nixpkgs/tree/master/maintainers/scripts/update-luarocks-packages):
```sh
./maintainers/scripts/update-luarocks-packages update
```
[luarocks2nix](https://github.com/nix-community/luarocks) is a tool capable of generating nix derivations from both rockspec and src.rock (and favors the src.rock).
The automation only goes so far though and some packages need to be customized.
These customizations go in `pkgs/development/lua-modules/overrides.nix`.
These customizations go in [pkgs/development/lua-modules/overrides.nix](https://github.com/NixOS/nixpkgs/tree/master/pkgs/development/lua-modules/overrides.nix).
For instance, if the rockspec defines `external_dependencies`, these need to be manually added to `overrides.nix`.
You can try converting luarocks packages to nix packages with the command `nix-shell -p luarocks-nix` and then `luarocks nix PKG_NAME`.
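For example, using `lua-cjson` (one of the packages in the whitelist above) as an illustration:
```sh
nix-shell -p luarocks-nix
# then, inside the spawned shell:
luarocks nix lua-cjson
```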

View file

@ -212,7 +212,7 @@ Note: this is not possible anymore for Neovim.
## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names). Plugins are listed in alphabetical order in `vim-plugin-names` using the format `[github username]/[repository]@[gitref]`. For example https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
After running `./update.py`, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
@ -226,7 +226,7 @@ deoplete-fish = super.deoplete-fish.overrideAttrs(old: {
Sometimes plugins require an override that must be changed when the plugin is updated. This can cause issues when Vim plugins are auto-updated but the associated override isn't updated. For these plugins, the override should be written so that it specifies all information required to install the plugin, and running `./update.py` doesn't change the derivation for the plugin. Manually updating the override is required to update these types of plugins. An example of such a plugin is `LanguageClient-neovim`.
To add a new plugin, run `./update.py --add "[owner]/[name]"`. **NOTE**: This script automatically commits to your git repository. Be sure to check out a fresh branch before running.
To add a new plugin, run `./update.py add "[owner]/[name]"`. **NOTE**: This script automatically commits to your git repository. Be sure to check out a fresh branch before running.
Finally, there are some plugins that are also packaged in nodePackages because they have Javascript-related build steps, such as running webpack. Those plugins are not listed in `vim-plugin-names` or managed by `update.py` at all, and are included separately in `overrides.nix`. Currently, all these plugins are related to the `coc.nvim` ecosystem of the Language Server Protocol integration with Vim/Neovim.

View file

@ -86,6 +86,23 @@ meta.platforms = lib.platforms.linux;
The attribute set `lib.platforms` defines [various common lists](https://github.com/NixOS/nixpkgs/blob/master/lib/systems/doubles.nix) of platform types.
### `badPlatforms` {#var-meta-badPlatforms}
The list of Nix [platform types](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/lib/meta.nix#L75-L81) on which the package is known not to be buildable.
Hydra will never create prebuilt binaries for these platform types, even if they are in [`meta.platforms`](#var-meta-platforms).
In general it is preferable to set `meta.platforms = lib.platforms.all` and then exclude any platforms on which the package is known not to build.
For example, a package which requires dynamic linking and cannot be linked statically could use this:
```nix
meta.platforms = lib.platforms.all;
meta.badPlatforms = [ lib.systems.inspect.patterns.isStatic ];
```
The [`lib.meta.availableOn`](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/lib/meta.nix#L95-L106) function can be used to test whether or not a package is available (i.e. buildable) on a given platform.
Some packages use this to automatically detect the maximum set of features with which they can be built.
For example, `systemd` [requires dynamic linking](https://github.com/systemd/systemd/issues/20600#issuecomment-912338965), and [has a `meta.badPlatforms` setting](https://github.com/NixOS/nixpkgs/blob/b03ac42b0734da3e7be9bf8d94433a5195734b19/pkgs/os-specific/linux/systemd/default.nix#L752) similar to the one above.
Packages which can be built with or without `systemd` support will use `lib.meta.availableOn` to detect whether or not `systemd` is available on the [`hostPlatform`](#ssec-cross-platform-parameters) for which they are being built; if it is not available (e.g. due to a statically-linked host platform like `pkgsStatic`) this support will be disabled by default.
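A minimal sketch of that pattern (the package arguments and the `withSystemd` flag here are hypothetical, not taken from an actual derivation):
```nix
{ lib, stdenv, systemd
, withSystemd ? lib.meta.availableOn stdenv.hostPlatform systemd
}:

stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  # ... enable or disable systemd integration based on withSystemd
}
```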
### `tests` {#var-meta-tests}
::: {.warning}
@ -173,7 +190,7 @@ To be effective, it must be presented directly to an evaluation process that han
### `hydraPlatforms` {#var-meta-hydraPlatforms}
The list of Nix platform types for which the Hydra instance at `hydra.nixos.org` will build the package. (Hydra is the Nix-based continuous build system.) It defaults to the value of `meta.platforms`. Thus, the only reason to set `meta.hydraPlatforms` is if you want `hydra.nixos.org` to build the package on a subset of `meta.platforms`, or not at all, e.g.
The list of Nix platform types for which the [Hydra](https://github.com/nixos/hydra) [instance at `hydra.nixos.org`](https://nixos.org/hydra) will build the package. (Hydra is the Nix-based continuous build system.) It defaults to the value of `meta.platforms`. Thus, the only reason to set `meta.hydraPlatforms` is if you want `hydra.nixos.org` to build the package on a subset of `meta.platforms`, or not at all, e.g.
```nix
meta.platforms = lib.platforms.linux;
@ -182,7 +199,26 @@ meta.hydraPlatforms = [];
### `broken` {#var-meta-broken}
If set to `true`, the package is marked as "broken", meaning that it wont show up in `nix-env -qa`, and cannot be built or installed. Such packages should be removed from Nixpkgs eventually unless they are fixed.
If set to `true`, the package is marked as "broken", meaning that it wont show up in [search.nixos.org](https://search.nixos.org/packages), and cannot be built or installed unless the environment variable [`NIXPKGS_ALLOW_BROKEN`](#opt-allowBroken) is set.
Such unconditionally-broken packages should be removed from Nixpkgs eventually unless they are fixed.
The value of this attribute can depend on a package's arguments, including `stdenv`.
This means that `broken` can be used to express constraints, for example:
- Does not cross compile
```nix
meta.broken = !(stdenv.buildPlatform.canExecute stdenv.hostPlatform)
```
- Broken if all of a certain set of its dependencies are broken
```nix
meta.broken = lib.all (p: p.meta.broken) [ glibc musl ]
```
This makes `broken` strictly more powerful than `meta.badPlatforms`.
However `meta.availableOn` currently examines only `meta.platforms` and `meta.badPlatforms`, so `meta.broken` does not influence the default values for optional dependencies.
## Licenses {#sec-meta-license}

View file

@ -16,7 +16,8 @@ stdenv.mkDerivation {
}
```
(`stdenv` needs to be in scope, so if you write this in a separate Nix expression from `pkgs/all-packages.nix`, you need to pass it as a function argument.) Specifying a `name` and a `src` is the absolute minimum Nix requires. For convenience, you can also use `pname` and `version` attributes and `mkDerivation` will automatically set `name` to `"${pname}-${version}"` by default. Since [RFC 0035](https://github.com/NixOS/rfcs/pull/35), this is preferred for packages in Nixpkgs, as it allows us to reuse the version easily:
(`stdenv` needs to be in scope, so if you write this in a separate Nix expression from `pkgs/all-packages.nix`, you need to pass it as a function argument.) Specifying a `name` and a `src` is the absolute minimum Nix requires. For convenience, you can also use `pname` and `version` attributes and `mkDerivation` will automatically set `name` to `"${pname}-${version}"` by default.
**Since [RFC 0035](https://github.com/NixOS/rfcs/pull/35), this is preferred for packages in Nixpkgs**, as it allows us to reuse the version easily:
```nix
stdenv.mkDerivation rec {
@ -33,7 +34,8 @@ Many packages have dependencies that are not provided in the standard environmen
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
pname = "libfoo";
version = "1.2.3";
...
buildInputs = [libbar perl ncurses];
}
@ -45,7 +47,8 @@ Often it is necessary to override or modify some aspect of the build. To make th
```nix
stdenv.mkDerivation {
name = "fnord-4.5";
pname = "fnord";
version = "4.5";
...
buildPhase = ''
gcc foo.c -o foo
@ -65,7 +68,8 @@ While the standard environment provides a generic builder, you can still supply
```nix
stdenv.mkDerivation {
name = "libfoo-1.2.3";
pname = "libfoo";
version = "1.2.3";
...
builder = ./builder.sh;
}

View file

@ -9,7 +9,7 @@ let
in
rec {
inherit (builtins) attrNames listToAttrs hasAttr isAttrs getAttr;
inherit (builtins) attrNames listToAttrs hasAttr isAttrs getAttr removeAttrs;
/* Return an attribute from nested attribute sets.

View file

@ -426,4 +426,81 @@ ${expr "" v}
abort "generators.toDhall: cannot convert a null to Dhall"
else
builtins.toJSON v;
/*
Translate a simple Nix expression to a Lua representation with occasional
Lua-inlines that can be constructed by the mkLuaInline function.
Configuration:
* multiline - by default is true, which results in an indented block-like view.
* indent - initial indent.
Attention:
Regardless of the multiline parameter, there is no trailing newline.
Example:
generators.toLua {}
{
cmd = [ "typescript-language-server" "--stdio" ];
settings.workspace.library = mkLuaInline ''vim.api.nvim_get_runtime_file("", true)'';
}
->
{
["cmd"] = {
"typescript-language-server",
"--stdio"
},
["settings"] = {
["workspace"] = {
["library"] = (vim.api.nvim_get_runtime_file("", true))
}
}
}
Type:
toLua :: AttrSet -> Any -> String
*/
toLua = {
/* If this option is true, the output is indented with newlines for attribute sets and lists */
multiline ? true,
/* Initial indentation level */
indent ? ""
}@args: v:
with builtins;
let
innerIndent = "${indent} ";
introSpace = if multiline then "\n${innerIndent}" else " ";
outroSpace = if multiline then "\n${indent}" else " ";
innerArgs = args // { indent = innerIndent; };
concatItems = concatStringsSep ",${introSpace}";
isLuaInline = { _type ? null, ... }: _type == "lua-inline";
in
if v == null then
"nil"
else if isInt v || isFloat v || isString v || isBool v then
builtins.toJSON v
else if isList v then
(if v == [ ] then "{}" else
"{${introSpace}${concatItems (map (value: "${toLua innerArgs value}") v)}${outroSpace}}")
else if isAttrs v then
(
if isLuaInline v then
"(${v.expr})"
else if v == { } then
"{}"
else
"{${introSpace}${concatItems (
lib.attrsets.mapAttrsToList (key: value: "[${builtins.toJSON key}] = ${toLua innerArgs value}") v
)}${outroSpace}}"
)
else
abort "generators.toLua: type ${typeOf v} is unsupported";
/*
Mark string as Lua expression to be inlined when processed by toLua.
Type:
mkLuaInline :: String -> AttrSet
*/
mkLuaInline = expr: { _type = "lua-inline"; inherit expr; };
}

View file

@ -185,6 +185,7 @@ rec {
pulseSupport = false;
smbdSupport = false;
seccompSupport = false;
enableDocs = false;
hostCpuTargets = [ "${final.qemuArch}-linux-user" ];
};
wine = (pkgs.winePackagesFor "wine${toString final.parsed.cpu.bits}").minimal;

View file

@ -9,6 +9,14 @@ let abis = lib.mapAttrs (_: abi: builtins.removeAttrs abi [ "assertions" ]) abis
rec {
# these patterns are to be matched against {host,build,target}Platform.parsed
patterns = rec {
# The patterns below are lists in sum-of-products form.
#
# Each attribute is a list of product conditions; non-list values are treated
# as a singleton list. If *any* product condition in the list matches then
# the predicate matches. Each product condition is tested by
# `lib.attrsets.matchAttrs`, which requires a match on *all* attributes of
# the product.
isi686 = { cpu = cpuTypes.i686; };
isx86_32 = { cpu = { family = "x86"; bits = 32; }; };
isx86_64 = { cpu = { family = "x86"; bits = 64; }; };

View file

@ -915,6 +915,72 @@ runTests {
};
testToLuaEmptyAttrSet = {
expr = generators.toLua {} {};
expected = ''{}'';
};
testToLuaEmptyList = {
expr = generators.toLua {} [];
expected = ''{}'';
};
testToLuaListOfVariousTypes = {
expr = generators.toLua {} [ null 43 3.14159 true ];
expected = ''
{
nil,
43,
3.14159,
true
}'';
};
testToLuaString = {
expr = generators.toLua {} ''double-quote (") and single quotes (')'';
expected = ''"double-quote (\") and single quotes (')"'';
};
testToLuaAttrsetWithLuaInline = {
expr = generators.toLua {} { x = generators.mkLuaInline ''"abc" .. "def"''; };
expected = ''
{
["x"] = ("abc" .. "def")
}'';
};
testToLuaAttrsetWithSpaceInKey = {
expr = generators.toLua {} { "some space and double-quote (\")" = 42; };
expected = ''
{
["some space and double-quote (\")"] = 42
}'';
};
testToLuaWithoutMultiline = {
expr = generators.toLua { multiline = false; } [ 41 43 ];
expected = ''{ 41, 43 }'';
};
testToLuaBasicExample = {
expr = generators.toLua {} {
cmd = [ "typescript-language-server" "--stdio" ];
settings.workspace.library = generators.mkLuaInline ''vim.api.nvim_get_runtime_file("", true)'';
};
expected = ''
{
["cmd"] = {
"typescript-language-server",
"--stdio"
},
["settings"] = {
["workspace"] = {
["library"] = (vim.api.nvim_get_runtime_file("", true))
}
}
}'';
};
# CLI
testToGNUCommandLine = {

View file

@ -101,6 +101,13 @@
github = "0xd61";
githubId = 8351869;
};
_0xMRTT = {
email = "0xMRTT@proton.me";
name = "0xMRTT";
github = "0xMRTT";
githubId = 105598867;
matrix = "@0xmrtt:envs.net";
};
_1000101 = {
email = "b1000101@pm.me";
github = "1000101";
@ -1054,6 +1061,16 @@
githubId = 1342360;
name = "Andrew Morgan";
};
anpin = {
email = "pavel@anpin.fyi";
github = "anpin";
githubId = 6060545;
matrix = "@anpin:matrix.org";
name = "Pavel Anpin";
keys = [{
fingerprint = "06E8 4FF6 0CCF 7AFD 5101 76C9 0FBC D3EE 6310 7407";
}];
};
anpryl = {
email = "anpryl@gmail.com";
github = "anpryl";
@ -1124,6 +1141,16 @@
githubId = 73002165;
name = "apfelkuchen6";
};
aplund = {
email = "austin.lund@gmail.com";
matrix = "@aplund:matrix.org";
github = "aplund";
githubId = 1369436;
name = "Austin Lund";
keys = [{
fingerprint = "7083 E268 4BFD 845F 2B84 9E74 B695 8918 ED23 32CE";
}];
};
applePrincess = {
email = "appleprincess@appleprincess.io";
github = "applePrincess";
@ -3287,9 +3314,12 @@
}];
};
cyntheticfox = {
email = "houstdav000@gmail.com";
email = "cyntheticfox@gh0st.sh";
github = "cyntheticfox";
githubId = 17628961;
keys = [{
fingerprint = "73C1 C5DF 51E7 BB92 85E9 A262 5960 278C E235 F821";
}];
matrix = "@houstdav000:gh0st.ems.host";
name = "Cynthia Fox";
};
@ -3508,10 +3538,16 @@
};
davidcromp = {
email = "davidcrompton1192@gmail.com";
github = "DavidCromp";
github = "CyborgPotato";
githubId = 10701143;
name = "David Crompton";
};
david-hamelin = {
email = "david.hamelin@outlook.fr";
github = "HamelinDavid";
githubId = 118536343;
name = "David Hamelin";
};
davidrusu = {
email = "davidrusu.me@gmail.com";
github = "davidrusu";
@ -4149,6 +4185,16 @@
githubId = 1931963;
name = "David Sferruzza";
};
dsuetin = {
name = "Danil Suetin";
email = "suetin085@gmail.com";
matrix = "@dani0854:matrix.org";
github = "dani0854";
githubId = 32674935;
keys = [{
fingerprint = "6CC2 D713 6703 0D86 CA29 C71F 23B5 AA6F A374 F2FE";
}];
};
dsymbol = {
name = "dsymbol";
github = "dsymbol";
@ -5475,6 +5521,11 @@
githubId = 2430469;
name = "Gavin Rogers";
};
gaykitty = {
github = "gaykitty";
githubId = 126119280;
name = "Kitty Pride";
};
gazally = {
email = "gazally@runbox.com";
github = "gazally";
@ -5554,6 +5605,12 @@
fingerprint = "D0CF 440A A703 E0F9 73CB A078 82BB 70D5 41AE 2DB4";
}];
};
geri1701 = {
email = "geri@sdf.org";
github = "geri1701";
githubId = 67984144;
name = "Gerhard Schwanzer";
};
gerschtli = {
email = "tobias.happ@gmx.de";
github = "Gerschtli";
@ -6080,6 +6137,12 @@
githubId = 2405974;
name = "Sébastian Méric de Bellefon";
};
hellwolf = {
email = "zhicheng.miao@gmail.com";
github = "hellwolf";
githubId = 186660;
name = "Miao, ZhiCheng";
};
henkery = {
email = "jim@reupload.nl";
github = "henkery";
@ -6282,6 +6345,12 @@
githubId = 53281855;
name = "hqurve";
};
hraban = {
email = "hraban@0brg.net";
github = "hraban";
githubId = 137852;
name = "Hraban Luyat";
};
hrdinka = {
email = "c.nix@hrdinka.at";
github = "hrdinka";
@ -6652,6 +6721,12 @@
githubId = 54999;
name = "Ariel Nunez";
};
ionutnechita = {
email = "ionut_n2001@yahoo.com";
github = "ionutnechita";
githubId = 9405900;
name = "Ionut Nechita";
};
iopq = {
email = "iop_jr@yahoo.com";
github = "iopq";
@ -6719,6 +6794,12 @@
fingerprint = "6BD3 7248 30BD 941E 9180 C1A3 3A33 FA4C 82ED 674F";
}];
};
ivanmoreau = {
email = "Iván Molina Rebolledo";
github = "ivanmoreau";
githubId = 10843250;
name = "ivan@ivmoreau.com";
};
ivan-timokhin = {
email = "nixpkgs@ivan.timokhin.name";
name = "Ivan Timokhin";
@ -6892,6 +6973,12 @@
githubId = 6874204;
name = "Jason Carr";
};
jasonodoom = {
email = "jasonodoom@riseup.net";
github = "jasonodoom";
githubId = 6789916;
name = "Jason Odoom";
};
javaguirre = {
email = "contacto@javaguirre.net";
github = "javaguirre";
@ -8108,6 +8195,12 @@
githubId = 843652;
name = "Kim Burgess";
};
kindrowboat = {
email = "hello@kindrobot.ca";
github = "kindrowboat";
githubId = 777773;
name = "Stef Dunlap";
};
kini = {
email = "keshav.kini@gmail.com";
github = "kini";
@ -8139,6 +8232,11 @@
githubId = 12160;
name = "Kirill Radzikhovskyy";
};
kiskae = {
github = "Kiskae";
githubId = 546681;
name = "Jeroen van Leusen";
};
kisonecat = {
email = "kisonecat@gmail.com";
github = "kisonecat";
@ -8357,7 +8455,7 @@
};
kristian-brucaj = {
email = "kbrucaj@gmail.com";
github = "Kristian-Brucaj";
github = "Flameslice";
githubId = 8893110;
name = "Kristian Brucaj";
};
@ -8771,6 +8869,12 @@
githubId = 3696783;
name = "Leroy Hopson";
};
lillycham = {
email = "lillycat332@gmail.com";
github = "lillycat332";
githubId = 54189319;
name = "Lilly Cham";
};
lilyball = {
email = "lily@sb.org";
github = "lilyball";
@ -9185,6 +9289,13 @@
githubId = 2057309;
name = "Sergey Sofeychuk";
};
lx = {
email = "alex@adnab.me";
github = "Alexis211";
githubId = 101484;
matrix = "@lx:deuxfleurs.fr";
name = "Alex Auvolat";
};
lxea = {
email = "nix@amk.ie";
github = "lxea";
@ -9517,7 +9628,7 @@
mateodd25 = {
email = "mateodd@icloud.com";
github = "mateodd25";
githubId = 854770;
githubId = 7878181;
name = "Mateo Diaz";
};
math-42 = {
@ -10528,6 +10639,12 @@
githubId = 133448;
name = "Mikołaj Siedlarek";
};
mslingsby = {
email = "morten.slingsby@eviny.no";
github = "MortenSlingsby";
githubId = 111859550;
name = "Morten Slingsby";
};
msm = {
email = "msm@tailcall.net";
github = "msm-code";
@ -10818,6 +10935,12 @@
githubId = 137805;
name = "Alexander Tsvyashchenko";
};
ne9z = {
email = "yuchen@apvc.uk";
github = "ne9z";
githubId = 77314501;
name = "Maurice Zhou";
};
nebulka = {
email = "arapun@proton.me";
github = "nebulka1";
@ -11207,6 +11330,12 @@
githubId = 3521180;
name = "Tom Sydney Kerckhove";
};
NotAShelf = {
name = "NotAShelf";
email = "itsashelf@gmail.com";
github = "NotAShelf";
githubId = 62766066;
};
notbandali = {
name = "Amin Bandali";
email = "bandali@gnu.org";
@ -11398,6 +11527,15 @@
fingerprint = "939E F8A5 CED8 7F50 5BB5 B2D0 24BC 2738 5F70 234F";
}];
};
oddlama = {
email = "oddlama@oddlama.org";
github = "oddlama";
githubId = 31919558;
name = "oddlama";
keys = [{
fingerprint = "680A A614 E988 DE3E 84E0 DEFA 503F 6C06 8410 4B0A";
}];
};
odi = {
email = "oliver.dunkl@gmail.com";
github = "odi";
@ -11474,6 +11612,12 @@
githubId = 1538622;
name = "Michael Reilly";
};
onedragon = {
name = "YiLong Liu";
email = "18922251299@163.com";
github = "jackyliu16";
githubId = 50787361;
};
onixie = {
email = "onixie@gmail.com";
github = "onixie";
@ -11676,6 +11820,12 @@
githubId = 11016164;
name = "Fedor Pakhomov";
};
pallix = {
email = "pierre.allix.work@gmail.com";
github = "pallix";
githubId = 676838;
name = "Pierre Allix";
};
paluh = {
email = "paluho@gmail.com";
github = "paluh";
@ -11812,6 +11962,16 @@
githubId = 26949935;
name = "Pierce Bartine";
};
pbek = {
email = "patrizio@bekerle.com";
matrix = "@patrizio:bekerle.com";
github = "pbek";
githubId = 1798101;
name = "Patrizio Bekerle";
keys = [{
fingerprint = "E005 48D5 D6AC 812C AAD2 AFFA 9C42 B05E 5913 60DC";
}];
};
pblkt = {
email = "pebblekite@gmail.com";
github = "pblkt";
@ -11854,6 +12014,12 @@
githubId = 920910;
name = "peelz";
};
pelme = {
email = "andreas@pelme.se";
github = "pelme";
githubId = 20529;
name = "Andreas Pelme";
};
penalty1083 = {
email = "penalty1083@outlook.com";
github = "penalty1083";
@ -12557,6 +12723,12 @@
githubId = 4579165;
name = "Danny Bautista";
};
pyxels = {
email = "pyxels.dev@gmail.com";
github = "Pyxels";
githubId = 39232833;
name = "Jonas";
};
q3k = {
email = "q3k@q3k.org";
github = "q3k";
@ -12573,6 +12745,12 @@
fingerprint = "3586 3350 BFEA C101 DB1A 4AF0 1F81 112D 62A9 ADCE";
}];
};
qjoly = {
email = "github@thoughtless.eu";
github = "qjoly";
githubId = 82603435;
name = "Quentin JOLY";
};
qknight = {
email = "js@lastlog.de";
github = "qknight";
@ -12604,6 +12782,15 @@
githubId = 1024891;
name = "Jens Nolte";
};
quentin = {
email = "quentin@mit.edu";
github = "quentinmit";
githubId = 115761;
name = "Quentin Smith";
keys = [{
fingerprint = "1C71 A066 5400 AACD 142E B1A0 04EE 05A8 FCEF B697";
}];
};
quentini = {
email = "quentini@airmail.cc";
github = "QuentinI";
@ -12698,6 +12885,13 @@
githubId = 14829269;
name = "Ram Kromberg";
};
rampoina = {
email = "rampoina@protonmail.com";
matrix = "@rampoina:matrix.org";
github = "Rampoina";
githubId = 5653911;
name = "Rampoina";
};
ranfdev = {
email = "ranfdev@gmail.com";
name = "Lorenzo Miglietta";
@ -12968,6 +13162,12 @@
github = "ribose-jeffreylau";
githubId = 2649467;
};
ricarch97 = {
email = "ricardo.steijn97@gmail.com";
github = "RicArch97";
githubId = 61013287;
name = "Ricardo Steijn";
};
richardipsum = {
email = "richardipsum@fastmail.co.uk";
github = "richardipsum";
@ -14239,7 +14439,7 @@
name = "Smitty van Bodegom";
email = "me@smitop.com";
matrix = "@smitop:kde.org";
github = "Smittyvb";
github = "syvb";
githubId = 10530973;
};
sna = {
@ -14722,6 +14922,12 @@
githubId = 187109;
name = "Bjarki Ágúst Guðmundsson";
};
surfaceflinger = {
email = "nat@nekopon.pl";
github = "surfaceflinger";
githubId = 44725111;
name = "nat";
};
suryasr007 = {
email = "94suryateja@gmail.com";
github = "suryasr007";
@ -17199,7 +17405,7 @@
zseri = {
name = "zseri";
email = "zseri.devel@ytrizja.de";
github = "zseri";
github = "fogti";
githubId = 1618343;
keys = [{
fingerprint = "7AFB C595 0D3A 77BD B00F 947B 229E 63AE 5644 A96D";

View file

@ -112,6 +112,8 @@ The short version is this:
* We only do the merge if the [\`mergeable\`](https://hydra.nixos.org/job/nixpkgs/haskell-updates/mergeable) job is succeeding on hydra.
* If a [\`maintained\`](https://hydra.nixos.org/job/nixpkgs/haskell-updates/maintained) package is still broken at the time of merge, we will only merge if the maintainer has been pinged 7 days in advance. (If you care about a Haskell package, become a maintainer!)
More information about Haskell packages in nixpkgs can be found [in the nixpkgs manual](https://nixos.org/manual/nixpkgs/unstable/#haskell).
---
This is the follow-up to #${curr_haskell_updates_pr_num}. Come to [#haskell:nixos.org](https://matrix.to/#/#haskell:nixos.org) if you have any questions.

View file

@ -58,6 +58,7 @@ sed -r \
-e '/ distribution-nixpkgs /d' \
-e '/ jailbreak-cabal /d' \
-e '/ language-nix /d' \
-e '/ hackage-db /d' \
-e '/ cabal-install /d' \
-e '/ lsp /d' \
-e '/ lsp-types /d' \

View file

@ -15,8 +15,29 @@
# password-command: pass hackage.haskell.org (this can be any command, but not an arbitrary shell expression. Like cabal we only read the first output line and ignore the rest.)
# Those fields are specified under `upload` on the `cabal` man page.
if test -z "$CABAL_DIR"; then
dirs=(
"$HOME/.cabal"
"${XDG_CONFIG_HOME:-$HOME/.config}/cabal"
)
missing=true
for dir in "${dirs[@]}"; do
if test -d "$dir"; then
export CABAL_DIR="$dir"
missing=false
break
fi
done
if $missing; then
echo "Could not find the cabal configuration directory in any of: ${dirs[@]}" >&2
exit 101
fi
fi
package_list="$(nix-build -A haskell.package-list)/nixos-hackage-packages.csv"
username=$(grep "^username:" ~/.cabal/config | sed "s/^username: //")
password_command=$(grep "^password-command:" ~/.cabal/config | sed "s/^password-command: //")
username=$(grep "^username:" "$CABAL_DIR/config" | sed "s/^username: //")
password_command=$(grep "^password-command:" "$CABAL_DIR/config" | sed "s/^password-command: //")
curl -u "$username:$($password_command | head -n1)" --digest -H "Content-type: text/csv" -T "$package_list" http://hackage.haskell.org/distro/NixOS/packages.csv
echo

View file

@ -40,6 +40,7 @@ lrexlib-pcre,,,,,,vyp
lrexlib-posix,,,,,,
lua-cjson,,,,,,
lua-cmsgpack,,,,,,
lua-curl,,,,,,
lua-iconv,,,,,,
lua-lsp,,,,,,
lua-messagepack,,,,,,


View file

@ -1,4 +1,7 @@
# Used by pkgs/applications/editors/vim/plugins/update.py and pkgs/applications/editors/kakoune/plugins/update.py
# python library used to update plugins:
# - pkgs/applications/editors/vim/plugins/update.py
# - pkgs/applications/editors/kakoune/plugins/update.py
# - maintainers/scripts/update-luarocks-packages
# format:
# $ nix run nixpkgs.python3Packages.black -c black update.py
@ -315,10 +318,10 @@ def run_nix_expr(expr):
with CleanEnvironment():
cmd = ["nix", "eval", "--extra-experimental-features",
"nix-command", "--impure", "--json", "--expr", expr]
log.debug("Running command %s", cmd)
log.debug("Running command %s", " ".join(cmd))
out = subprocess.check_output(cmd)
data = json.loads(out)
return data
data = json.loads(out)
return data
class Editor:
@ -344,12 +347,39 @@ class Editor:
self.cache_file = cache_file or f"{name}-plugin-cache.json"
self.nixpkgs_repo = None
def add(self, args):
'''CSV spec'''
log.debug("called the 'add' command")
fetch_config = FetchConfig(args.proc, args.github_token)
editor = self
for plugin_line in args.add_plugins:
log.debug("using plugin_line", plugin_line)
pdesc = PluginDesc.load_from_string(fetch_config, plugin_line)
log.debug("loaded as pdesc", pdesc)
append = [ pdesc ]
editor.rewrite_input(fetch_config, args.input_file, editor.deprecated, append=append)
plugin, _ = prefetch_plugin(pdesc)
autocommit = not args.no_commit
if autocommit:
commit(
editor.nixpkgs_repo,
"{drv_name}: init at {version}".format(
drv_name=editor.get_drv_name(plugin.normalized_name),
version=plugin.version
),
[args.outfile, args.input_file],
)
# Expects arguments generated by 'update' subparser
def update(self, args ):
'''CSV spec'''
print("the update member function should be overriden in subclasses")
def get_current_plugins(self) -> List[Plugin]:
"""To fill the cache"""
data = run_nix_expr(self.get_plugins)
plugins = []
for name, attr in data.items():
print("get_current_plugins: name %s" % name)
p = Plugin(name, attr["rev"], attr["submodules"], attr["sha256"])
plugins.append(p)
return plugins
@ -358,7 +388,7 @@ class Editor:
'''CSV spec'''
return load_plugins_from_csv(config, plugin_file)
def generate_nix(self, plugins, outfile: str):
def generate_nix(self, _plugins, _outfile: str):
'''Returns nothing for now, writes directly to outfile'''
raise NotImplementedError()
@ -395,34 +425,28 @@ class Editor:
return rewrite_input(*args, **kwargs)
def create_parser(self):
parser = argparse.ArgumentParser(
common = argparse.ArgumentParser(
add_help=False,
description=(f"""
Updates nix derivations for {self.name} plugins.\n
By default from {self.default_in} to {self.default_out}"""
)
)
parser.add_argument(
"--add",
dest="add_plugins",
default=[],
action="append",
help=f"Plugin to add to {self.attr_path} from Github in the form owner/repo",
)
parser.add_argument(
common.add_argument(
"--input-names",
"-i",
dest="input_file",
default=self.default_in,
help="A list of plugins in the form owner/repo",
)
parser.add_argument(
common.add_argument(
"--out",
"-o",
dest="outfile",
default=self.default_out,
help="Filename to save generated nix code",
)
parser.add_argument(
common.add_argument(
"--proc",
"-p",
dest="proc",
@ -430,7 +454,7 @@ class Editor:
default=30,
help="Number of concurrent processes to spawn. Setting --github-token allows higher values.",
)
parser.add_argument(
common.add_argument(
"--github-token",
"-t",
type=str,
@ -438,16 +462,61 @@ class Editor:
help="""Allows to set --proc to higher values.
Uses GITHUB_API_TOKEN environment variables as the default value.""",
)
parser.add_argument(
common.add_argument(
"--no-commit", "-n", action="store_true", default=False,
help="Whether to autocommit changes"
)
parser.add_argument(
common.add_argument(
"--debug", "-d", choices=LOG_LEVELS.keys(),
default=logging.getLevelName(logging.WARN),
help="Adjust log level"
)
return parser
main = argparse.ArgumentParser(
parents=[common],
description=(f"""
Updates nix derivations for {self.name} plugins.\n
By default from {self.default_in} to {self.default_out}"""
)
)
subparsers = main.add_subparsers(dest="command", required=False)
padd = subparsers.add_parser(
"add", parents=[],
description="Add new plugin",
add_help=False,
)
padd.set_defaults(func=self.add)
padd.add_argument(
"add_plugins",
default=None,
nargs="+",
help=f"Plugin to add to {self.attr_path} from Github in the form owner/repo",
)
pupdate = subparsers.add_parser(
"update",
description="Update all or a subset of existing plugins",
add_help=False,
)
pupdate.set_defaults(func=self.update)
return main
def run(self,):
'''
Convenience function
'''
parser = self.create_parser()
args = parser.parse_args()
command = args.command or "update"
log.setLevel(LOG_LEVELS[args.debug])
log.info("Chose to run command: %s", command)
if not args.no_commit:
self.nixpkgs_repo = git.Repo(self.root, search_parent_directories=True)
getattr(self, command)(args)
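# Usage sketch (the 'update' subcommand is the default when none is given):
#   ./update.py add "owner/repo"   # adds the plugin and commits the change
#   ./update.py update             # regenerates the plugin expressions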
@ -661,7 +730,6 @@ def commit(repo: git.Repo, message: str, files: List[Path]) -> None:
def update_plugins(editor: Editor, args):
"""The main entry function of this module. All input arguments are grouped in the `Editor`."""
log.setLevel(LOG_LEVELS[args.debug])
log.info("Start updating plugins")
fetch_config = FetchConfig(args.proc, args.github_token)
update = editor.get_update(args.input_file, args.outfile, fetch_config)
@ -684,18 +752,3 @@ def update_plugins(editor: Editor, args):
[args.outfile, args.input_file, editor.deprecated],
)
for plugin_line in args.add_plugins:
pdesc = PluginDesc.load_from_string(fetch_config, plugin_line)
append = [ pdesc ]
editor.rewrite_input(fetch_config, args.input_file, editor.deprecated, append=append)
update()
plugin, _ = prefetch_plugin(pdesc, )
if autocommit:
commit(
editor.nixpkgs_repo,
"{drv_name}: init at {version}".format(
drv_name=editor.get_drv_name(plugin.normalized_name),
version=plugin.version
),
[args.outfile, args.input_file],
)

View file

@ -26,7 +26,8 @@ log = logging.getLogger()
log.addHandler(logging.StreamHandler())
ROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))).parent.parent # type: ignore
from pluginupdate import Editor, update_plugins, FetchConfig, CleanEnvironment
import pluginupdate
from pluginupdate import update_plugins, FetchConfig, CleanEnvironment
PKG_LIST="maintainers/scripts/luarocks-packages.csv"
TMP_FILE="$(mktemp)"
@ -70,7 +71,7 @@ class LuaPlugin:
return self.name.replace(".", "-")
# rename Editor to LangUpdate/ EcosystemUpdater
class LuaEditor(Editor):
class LuaEditor(pluginupdate.Editor):
def get_current_plugins(self):
return []
@ -87,6 +88,9 @@ class LuaEditor(Editor):
luaPackages.append(plugin)
return luaPackages
def update(self, args):
update_plugins(self, args)
def generate_nix(
self,
results: List[Tuple[LuaPlugin, str]],
@ -203,11 +207,7 @@ def main():
default_out = ROOT.joinpath(GENERATED_NIXFILE)
)
parser = editor.create_parser()
args = parser.parse_args()
update_plugins(editor, args)
editor.run()
if __name__ == "__main__":

View file

@ -431,6 +431,7 @@ with lib.maintainers; {
lukego
nagy
uthar
hraban
];
githubTeams = [
"lisp"

View file

@ -94,6 +94,6 @@ environment.systemPackages = [ pkgs.appimage-run ];
Then instead of running the AppImage "as-is", run `appimage-run foo.appimage`.
To make other pre-built executables work on NixOS, you need to package them
with Nix and special helpers like `autoPatchelfHook` or `buildFHSUserEnv`. See
with Nix and special helpers like `autoPatchelfHook` or `buildFHSEnv`. See
the [Nixpkgs manual](https://nixos.org/nixpkgs/manual) for details. This
is complex and often doing a source build is easier.

View file

@ -147,7 +147,7 @@ In addition to numerous new and upgraded packages, this release has the followin
- [rstudio-server](https://www.rstudio.com/products/rstudio/#rstudio-server), a browser-based version of the RStudio IDE for the R programming language. Available as [services.rstudio-server](#opt-services.rstudio-server.enable).
- [rtsp-simple-server](https://github.com/aler9/rtsp-simple-server), ready-to-use RTSP / RTMP / HLS server and proxy that allows to read, publish and proxy video and audio streams. Available as [services.rtsp-simple-server](#opt-services.rtsp-simple-server.enable).
- [mediamtx](https://github.com/aler9/mediamtx), ready-to-use RTSP / RTMP / HLS server and proxy that allows to read, publish and proxy video and audio streams. Available as [services.mediamtx](#opt-services.mediamtx.enable).
- [Snipe-IT](https://snipeitapp.com), a free open source IT asset/license management system. Available as [services.snipe-it](#opt-services.snipe-it.enable).

View file

@ -40,6 +40,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [cups-pdf-to-pdf](https://github.com/alexivkin/CUPS-PDF-to-PDF), a pdf-generating cups backend based on [cups-pdf](https://www.cups-pdf.de/). Available as [services.printing.cups-pdf](#opt-services.printing.cups-pdf.enable).
- [clash-verge](https://github.com/zzzgydi/clash-verge), A Clash GUI based on tauri. Available as [programs.clash-verge](#opt-programs.clash-verge.enable).
- [Cloudlog](https://www.magicbug.co.uk/cloudlog/), a web-based Amateur Radio logging application. Available as [services.cloudlog](#opt-services.cloudlog.enable).
- [fzf](https://github.com/junegunn/fzf), a command-line fuzzy finder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
@ -61,6 +63,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [opensearch](https://opensearch.org), a search server alternative to Elasticsearch. Available as [services.opensearch](options.html#opt-services.opensearch.enable).
- [monica](https://www.monicahq.com), an open source personal CRM. Available as [services.monica](options.html#opt-services.monica.enable).
- [authelia](https://www.authelia.com/), is an open-source authentication and authorization server. Available under [services.authelia](options.html#opt-services.authelia.enable).
- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).
@ -71,6 +75,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable).
- [esphome](https://esphome.io), a dashboard to configure ESP8266/ESP32 devices for use with Home Automation systems. Available as [services.esphome](#opt-services.esphome.enable).
- [networkd-dispatcher](https://gitlab.com/craftyguy/networkd-dispatcher), a dispatcher service for systemd-networkd connection status changes. Available as [services.networkd-dispatcher](#opt-services.networkd-dispatcher.enable).
- [mmsd](https://gitlab.com/kop316/mmsd), a lower level daemon that transmits and receives MMSes. Available as [services.mmsd](#opt-services.mmsd.enable).
@ -85,8 +91,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
- [PufferPanel](https://pufferpanel.com), game server management panel designed to be easy to use. Available as [services.pufferpanel](#opt-services.pufferpanel.enable).
- [jellyseerr](https://github.com/Fallenbagel/jellyseerr), a web-based requests manager for Jellyfin, forked from Overseerr. Available as [services.jellyseerr](#opt-services.jellyseerr.enable).
- [stargazer](https://sr.ht/~zethra/stargazer/), a fast and easy to use Gemini server. Available as [services.stargazer](#opt-services.stargazer.enable).
- [photoprism](https://photoprism.app/), an AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
- [peroxide](https://github.com/ljanyst/peroxide), a fork of the official [ProtonMail bridge](https://github.com/ProtonMail/proton-bridge) that aims to be similar to [Hydroxide](https://github.com/emersion/hydroxide). Available as [services.peroxide](#opt-services.peroxide.enable).
@ -99,6 +109,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [trurl](https://github.com/curl/trurl), a command line tool for URL parsing and manipulation.
- [wgautomesh](https://git.deuxfleurs.fr/Deuxfleurs/wgautomesh), a simple utility to help connect wireguard nodes together in a full mesh topology. Available as [services.wgautomesh](options.html#opt-services.wgautomesh.enable).
- [woodpecker-agents](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-agents](#opt-services.woodpecker-agents.agents._name_.enable).
- [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable).
@ -109,6 +121,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [hardware.ipu6](#opt-hardware.ipu6.enable) adds support for ipu6 based webcams on intel tiger lake and alder lake.
- [ivpn](https://www.ivpn.net/), a secure, private VPN with fast WireGuard connections. Available as [services.ivpn](#opt-services.ivpn.enable).
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -123,6 +137,10 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `ssh` client tool now disables the `~C` escape sequence by default. This can be re-enabled by setting `EnableEscapeCommandline yes`
- The `ssh` module does not read `/etc/ssh/ssh_known_hosts2` anymore since this location is [deprecated since 2001](https://marc.info/?l=openssh-unix-dev&m=100508718416162&w=2).
- The openssh module does not read `~/.ssh/authorized_keys2` anymore since this location is [deprecated since 2001](https://marc.info/?l=openssh-unix-dev&m=100508718416162&w=2).
- `podman` now uses the `netavark` network stack. Users will need to delete all of their local containers, images, volumes, etc, by running `podman system reset --force` once before upgrading their systems.
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
@ -131,7 +149,9 @@ In addition to numerous new and upgraded packages, this release has the followin
- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.
- The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update.
- The [services.kubo.settings](#opt-services.kubo.settings) option is now no longer stateful. If you changed any of the options in [services.kubo.settings](#opt-services.kubo.settings) in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update.
- The Kubo HTTP API will no longer listen on localhost and will instead only listen on a Unix domain socket by default. Read the [services.kubo.settings.Addresses.API](#opt-services.kubo.settings.Addresses.API) option description for more information.
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`
@ -211,6 +231,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- To enable the HTTP3 (QUIC) protocol for a nginx virtual host, set the `quic` attribute on it to true, e.g. `services.nginx.virtualHosts.<name>.quic = true;`.
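For example (the hostname is illustrative):
```nix
# Sketch: enable HTTP/3 (QUIC) for a single virtual host.
services.nginx.virtualHosts."example.org".quic = true;
```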
- The default Asterisk package was changed to v20 from v19. Asterisk versions 16 and 19 have been dropped due to being EOL. You may need to update /var/lib/asterisk to match the template files in `${asterisk-20}/var/lib/asterisk`.
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
- The `services.pipewire.config` options have been removed, as they have basically never worked correctly. All behavior defined by the default configuration can be overridden with drop-in files as necessary - see [below](#sec-release-23.05-migration-pipewire) for details.
@ -274,6 +296,8 @@ In addition to numerous new and upgraded packages, this release has the followin
replacement. It stores backups as volume dump files and thus better integrates
into contemporary backup solutions.
- `services.maddy` now allows configuring users and their credentials using `services.maddy.ensureCredentials`.
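A short sketch based on the module's own example (the secret path is a placeholder):
```nix
services.maddy.ensureCredentials = {
  # Passwords are read from files so that secrets stay out of the Nix store.
  "user1@localhost".passwordFile = /secrets/user1-localhost;
};
```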
- The `dnsmasq` service now takes configuration via the
`services.dnsmasq.settings` attribute set. The option
`services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches
@ -332,6 +356,8 @@ In addition to numerous new and upgraded packages, this release has the followin
[headscale's example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
can be directly written as attribute-set in Nix within this option.
- `services.kubo` now unmounts `ipfsMountDir` and `ipnsMountDir` even if it is killed unexpectedly when `autoMount` is enabled.
- `nixos/lib/make-disk-image.nix` can now mutate EFI variables, run user-provided EFI firmware or variable templates. This is now extensively documented in the NixOS manual.
- `services.grafana` listens only on localhost by default again. This was accidentally changed to upstream's default of `0.0.0.0` during the freeform settings conversion.
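To deliberately expose Grafana on all interfaces again, something like the following should work, assuming the freeform `settings.server.http_addr` key:
```nix
# Sketch: listen on all interfaces instead of only localhost.
services.grafana.settings.server.http_addr = "0.0.0.0";
```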
@ -388,6 +414,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `unifi-poller` package and corresponding NixOS module have been renamed to `unpoller` to match upstream.
- The `rtsp-simple-server` package and corresponding NixOS module have been renamed to `mediamtx` to match upstream.
- The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, you can set this setting to `server`, otherwise if you wish to use an exit node you can set this setting to `client`. The strict RPF warning has been removed as the RPF will be loosened automatically based on the value of this setting.
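For example:
```nix
# Sketch: this machine only uses exit nodes / subnet routes provided by others.
services.tailscale.useRoutingFeatures = "client";
```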
- `openjdk` from version 11 and above is no longer built with `openjfx` (i.e. JavaFX) support by default. You can re-enable it by overriding, e.g. `openjdk11.override { enableJavaFX = true; };`.
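A sketch of doing this through an overlay; the overlay wrapping is illustrative, the override itself is taken from the note above:
```nix
nixpkgs.overlays = [
  (final: prev: {
    # Re-enable JavaFX support for OpenJDK 11.
    openjdk11 = prev.openjdk11.override { enableJavaFX = true; };
  })
];
```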
@ -405,12 +433,16 @@ In addition to numerous new and upgraded packages, this release has the followin
- `k3s` can now be configured with an EnvironmentFile for its systemd service, allowing secrets to be provided without ending up in the Nix Store.
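A hedged sketch, assuming the option is exposed as `services.k3s.environmentFile` and the path is a placeholder:
```nix
# Sketch: keep the cluster token and other secrets out of the Nix store.
services.k3s.environmentFile = "/run/secrets/k3s.env";
```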
- `gitea` module options have been changed to be RFC042 conforming (i.e. some options were moved to be located under `services.gitea.settings`)
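For example, options that previously lived at the top level now sit under the freeform settings, as the gitea module diff further down shows (the domain is illustrative):
```nix
services.gitea.settings.server = {
  DOMAIN = "git.example.org"; # formerly services.gitea.domain
  HTTP_PORT = 3000;           # formerly services.gitea.httpPort
};
```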
- `boot.initrd.luks.devices.<name>` has a new `tryEmptyPassphrase` option; this is useful for OEMs who need to install an encrypted disk with a passphrase that will be set later.
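A minimal sketch (the device name is illustrative):
```nix
# Sketch: try unlocking with an empty passphrase before prompting.
boot.initrd.luks.devices."cryptroot".tryEmptyPassphrase = true;
```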
- Lisp gained a [manual section](https://nixos.org/manual/nixpkgs/stable/#lisp), documenting a new and backwards incompatible interface. The previous interface will be removed in a future release.
- The `bind` module now allows the per-zone `allow-query` setting to be configured (previously it was hard-coded to `any`; it still defaults to `any` to retain compatibility).
- `make-disk-image` handles `contents` arguments that are directories better, fixing a bug where it used to put them in a subdirectory of the intended `target`.
## Detailed migration information {#sec-release-23.05-migration}
### Pipewire configuration overrides {#sec-release-23.05-migration-pipewire}

View file

@ -402,11 +402,16 @@ let format' = format; in let
done
else
mkdir -p $root/$(dirname $target)
if ! [ -e $root/$target ]; then
rsync $rsync_flags $source $root/$target
else
if [ -e $root/$target ]; then
echo "duplicate entry $target -> $source"
exit 1
elif [ -d $source ]; then
# Append a slash to the end of source to get rsync to copy the
# directory _to_ the target instead of _inside_ the target.
# (See `man rsync`'s note on a trailing slash.)
rsync $rsync_flags $source/ $root/$target
else
rsync $rsync_flags $source $root/$target
fi
fi
done

View file

@ -428,6 +428,8 @@ let
uidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) cfg.users) "uid";
gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.groups) "gid";
sdInitrdUidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) config.boot.initrd.systemd.users) "uid";
sdInitrdGidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) config.boot.initrd.systemd.groups) "gid";
spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
inherit (cfg) mutableUsers;
@ -534,6 +536,54 @@ in {
WARNING: enabling this can lock you out of your system. Enable this only if you know what you are doing.
'';
};
# systemd initrd
boot.initrd.systemd.users = mkOption {
visible = false;
description = ''
Users to include in initrd.
'';
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options.uid = mkOption {
visible = false;
type = types.int;
description = ''
ID of the user in initrd.
'';
defaultText = literalExpression "config.users.users.\${name}.uid";
default = cfg.users.${name}.uid;
};
options.group = mkOption {
visible = false;
type = types.singleLineStr;
description = ''
Group the user belongs to in initrd.
'';
defaultText = literalExpression "config.users.users.\${name}.group";
default = cfg.users.${name}.group;
};
}));
};
boot.initrd.systemd.groups = mkOption {
visible = false;
description = ''
Groups to include in initrd.
'';
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options.gid = mkOption {
visible = false;
type = types.int;
description = ''
ID of the group in initrd.
'';
defaultText = literalExpression "config.users.groups.\${name}.gid";
default = cfg.groups.${name}.gid;
};
}));
};
};
@ -639,10 +689,52 @@ in {
"/etc/profiles/per-user/$USER"
];
# systemd initrd
boot.initrd.systemd = lib.mkIf config.boot.initrd.systemd.enable {
contents = {
"/etc/passwd".text = ''
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: { uid, group }: let
g = config.boot.initrd.systemd.groups.${group};
in "${n}:x:${toString uid}:${toString g.gid}::/var/empty:") config.boot.initrd.systemd.users)}
'';
"/etc/group".text = ''
${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: { gid }: "${n}:x:${toString gid}:") config.boot.initrd.systemd.groups)}
'';
};
users = {
root = {};
nobody = {};
};
groups = {
root = {};
nogroup = {};
systemd-journal = {};
tty = {};
dialout = {};
kmem = {};
input = {};
video = {};
render = {};
sgx = {};
audio = {};
video = {};
lp = {};
disk = {};
cdrom = {};
tape = {};
kvm = {};
};
};
assertions = [
{ assertion = !cfg.enforceIdUniqueness || (uidsAreUnique && gidsAreUnique);
message = "UIDs and GIDs must be unique!";
}
{ assertion = !cfg.enforceIdUniqueness || (sdInitrdUidsAreUnique && sdInitrdGidsAreUnique);
message = "systemd initrd UIDs and GIDs must be unique!";
}
{ # If mutableUsers is false, to prevent users creating a
# configuration that locks them out of the system, ensure that
# there is at least one "privileged" account that has a

View file

@ -10,10 +10,7 @@ let
check = x: (lib.types.package.check x) && (attrByPath ["meta" "isIbusEngine"] false x);
};
impanel =
if cfg.panel != null
then "--panel=${cfg.panel}"
else "";
impanel = optionalString (cfg.panel != null) "--panel=${cfg.panel}";
ibusAutostart = pkgs.writeTextFile {
name = "autostart-ibus-daemon";

View file

@ -22,8 +22,8 @@ let
(option: ''
menuentry '${defaults.name} ${
# Name appended to menuentry defaults to params if no specific name given.
option.name or (if option ? params then "(${option.params})" else "")
}' ${if option ? class then " --class ${option.class}" else ""} {
option.name or (optionalString (option ? params) "(${option.params})")
}' ${optionalString (option ? class) " --class ${option.class}"} {
linux ${defaults.image} \''${isoboot} ${defaults.params} ${
option.params or ""
}

View file

@ -200,7 +200,7 @@ sub pciCheck {
}
# In case this is a virtio scsi device, we need to explicitly make this available.
if ($vendor eq "0x1af4" && $device eq "0x1004") {
if ($vendor eq "0x1af4" && ($device eq "0x1004" || $device eq "0x1048") ) {
push @initrdAvailableKernelModules, "virtio_scsi";
}

View file

@ -149,6 +149,7 @@
./programs/cdemu.nix
./programs/cfs-zen-tweaks.nix
./programs/chromium.nix
./programs/clash-verge.nix
./programs/cnping.nix
./programs/command-not-found/command-not-found.nix
./programs/criu.nix
@ -514,6 +515,7 @@
./services/hardware/usbrelayd.nix
./services/hardware/vdr.nix
./services/hardware/keyd.nix
./services/home-automation/esphome.nix
./services/home-automation/evcc.nix
./services/home-automation/home-assistant.nix
./services/home-automation/zigbee2mqtt.nix
@ -668,6 +670,7 @@
./services/misc/polaris.nix
./services/misc/portunus.nix
./services/misc/prowlarr.nix
./services/misc/pufferpanel.nix
./services/misc/pykms.nix
./services/misc/radarr.nix
./services/misc/readarr.nix
@ -882,6 +885,7 @@
./services/networking/iscsi/initiator.nix
./services/networking/iscsi/root-initiator.nix
./services/networking/iscsi/target.nix
./services/networking/ivpn.nix
./services/networking/iwd.nix
./services/networking/jibri/default.nix
./services/networking/jicofo.nix
@ -1040,6 +1044,7 @@
./services/networking/wg-netmanager.nix
./services/networking/webhook.nix
./services/networking/wg-quick.nix
./services/networking/wgautomesh.nix
./services/networking/wireguard.nix
./services/networking/wpa_supplicant.nix
./services/networking/wstunnel.nix
@ -1130,7 +1135,7 @@
./services/video/epgstation/default.nix
./services/video/mirakurun.nix
./services/video/replay-sorcery.nix
./services/video/rtsp-simple-server.nix
./services/video/mediamtx.nix
./services/video/unifi-video.nix
./services/video/v4l2-relayd.nix
./services/wayland/cage.nix
@ -1165,7 +1170,6 @@
./services/web-apps/hledger-web.nix
./services/web-apps/icingaweb2/icingaweb2.nix
./services/web-apps/icingaweb2/module-monitoring.nix
./services/web-apps/ihatemoney
./services/web-apps/invidious.nix
./services/web-apps/invoiceplane.nix
./services/web-apps/isso.nix
@ -1181,6 +1185,7 @@
./services/web-apps/mattermost.nix
./services/web-apps/mediawiki.nix
./services/web-apps/miniflux.nix
./services/web-apps/monica.nix
./services/web-apps/moodle.nix
./services/web-apps/netbox.nix
./services/web-apps/nextcloud.nix
@ -1238,6 +1243,7 @@
./services/web-servers/nginx/gitweb.nix
./services/web-servers/phpfpm/default.nix
./services/web-servers/pomerium.nix
./services/web-servers/stargazer.nix
./services/web-servers/tomcat.nix
./services/web-servers/traefik.nix
./services/web-servers/trafficserver/default.nix

View file

@ -0,0 +1,41 @@
{ config, lib, pkgs, ... }:
{
options.programs.clash-verge = {
enable = lib.mkEnableOption (lib.mdDoc ''
Clash Verge.
'');
autoStart = lib.mkEnableOption (lib.mdDoc ''
Clash Verge Auto Launch.
'');
tunMode = lib.mkEnableOption (lib.mdDoc ''
Clash Verge Tun Mode.
'');
};
config =
let
cfg = config.programs.clash-verge;
in
lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.clash-verge
(lib.mkIf cfg.autoStart (pkgs.makeAutostartItem {
name = "clash-verge";
package = pkgs.clash-verge;
}))
];
security.wrappers.clash-verge = lib.mkIf cfg.tunMode {
owner = "root";
group = "root";
capabilities = "cap_net_bind_service,cap_net_admin=+ep";
source = "${lib.getExe pkgs.clash-verge}";
};
};
meta.maintainers = with lib.maintainers; [ zendo ];
}

View file

@ -201,6 +201,7 @@ in
nativeMessagingHosts = mapAttrs (_: v: mkEnableOption (mdDoc v)) {
browserpass = "Browserpass support";
bukubrow = "Bukubrow support";
euwebid = "Web eID support";
ff2mpv = "ff2mpv support";
fxCast = "fx_cast support";
gsconnect = "GSConnect support";
@ -217,6 +218,8 @@ in
extraPrefs = cfg.autoConfig;
extraNativeMessagingHosts = with pkgs; optionals nmh.ff2mpv [
ff2mpv
] ++ optionals nmh.euwebid [
web-eid-app
] ++ optionals nmh.gsconnect [
gnomeExtensions.gsconnect
] ++ optionals nmh.jabref [
@ -230,6 +233,7 @@ in
nixpkgs.config.firefox = {
enableBrowserpass = nmh.browserpass;
enableBukubrow = nmh.bukubrow;
enableEUWebID = nmh.euwebid;
enableTridactylNative = nmh.tridactyl;
enableUgetIntegrator = nmh.ugetIntegrator;
enableFXCastBridge = nmh.fxCast;

View file

@ -11,7 +11,7 @@ let
${concatStringsSep "\n"
(mapAttrsToList (command: action: "${command} ${action}") cfg.commands)
}
${if cfg.clearDefaultCommands then "#stop" else ""}
${optionalString cfg.clearDefaultCommands "#stop"}
#line-edit
${concatStringsSep "\n"

View file

@ -4,12 +4,8 @@ with lib;
let
cfg = config.programs.neovim;
runtime' = filter (f: f.enable) (attrValues cfg.runtime);
runtime = pkgs.linkFarm "neovim-runtime" (map (x: { name = "etc/${x.target}"; path = x.source; }) runtime');
in {
in
{
options.programs.neovim = {
enable = mkOption {
type = types.bool;
@ -70,7 +66,7 @@ in {
configure = mkOption {
type = types.attrs;
default = {};
default = { };
example = literalExpression ''
{
customRC = '''
@ -105,7 +101,7 @@ in {
};
runtime = mkOption {
default = {};
default = { };
example = literalExpression ''
{ "ftplugin/c.vim".text = "setlocal omnifunc=v:lua.vim.lsp.omnifunc"; }
'';
@ -115,14 +111,15 @@ in {
type = with types; attrsOf (submodule (
{ name, config, ... }:
{ options = {
{
options = {
enable = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether this /etc file should be generated. This
option allows specific /etc files to be disabled.
Whether this runtime directory should be generated. This
option allows specific runtime files to be disabled.
'';
};
@ -147,14 +144,9 @@ in {
};
config = {
target = mkDefault name;
source = mkIf (config.text != null) (
let name' = "neovim-runtime" + baseNameOf name;
in mkDefault (pkgs.writeText name' config.text));
};
}));
config.target = mkDefault name;
}
));
};
};
@ -165,14 +157,17 @@ in {
];
environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "nvim");
programs.neovim.finalPackage = pkgs.wrapNeovim cfg.package {
inherit (cfg) viAlias vimAlias withPython3 withNodeJs withRuby;
configure = cfg.configure // {
environment.etc = listToAttrs (attrValues (mapAttrs
(name: value: {
name = "xdg/nvim/${name}";
value = value // {
target = "xdg/nvim/${value.target}";
};
})
cfg.runtime));
customRC = (cfg.configure.customRC or "") + ''
set runtimepath^=${runtime}/etc
'';
};
programs.neovim.finalPackage = pkgs.wrapNeovim cfg.package {
inherit (cfg) viAlias vimAlias withPython3 withNodeJs withRuby configure;
};
};
}

View file

@ -50,7 +50,7 @@ in
config = lib.mkIf cfg.enable {
services.greetd = {
enable = lib.mkDefault true;
settings.default_session.command = lib.mkDefault "${lib.getExe pkgs.cage} -s -- ${lib.getExe cfg.package}";
settings.default_session.command = lib.mkDefault "${pkgs.dbus}/bin/dbus-run-session ${lib.getExe pkgs.cage} -s -- ${lib.getExe cfg.package}";
};
environment.etc = {

View file

@ -26,7 +26,7 @@ let
+ (if h.publicKey != null then h.publicKey else readFile h.publicKeyFile)
)) + "\n";
knownHostsFiles = [ "/etc/ssh/ssh_known_hosts" "/etc/ssh/ssh_known_hosts2" ]
knownHostsFiles = [ "/etc/ssh/ssh_known_hosts" ]
++ map pkgs.copyPathToStore cfg.knownHostsFiles;
in
@ -232,9 +232,8 @@ in
description = lib.mdDoc ''
Files containing SSH host keys to set as global known hosts.
`/etc/ssh/ssh_known_hosts` (which is
generated by {option}`programs.ssh.knownHosts`) and
`/etc/ssh/ssh_known_hosts2` are always
included.
generated by {option}`programs.ssh.knownHosts`) is
always included.
'';
example = literalExpression ''
[

View file

@ -1,7 +1,7 @@
{ config, pkgs, lib, ... }:
let
inherit (lib) mkOption mkIf types;
inherit (lib) mkOption mkIf types optionalString;
cfg = config.programs.tmux;
@ -17,17 +17,17 @@ let
set -g base-index ${toString cfg.baseIndex}
setw -g pane-base-index ${toString cfg.baseIndex}
${if cfg.newSession then "new-session" else ""}
${optionalString cfg.newSession "new-session"}
${if cfg.reverseSplit then ''
${optionalString cfg.reverseSplit ''
bind v split-window -h
bind s split-window -v
'' else ""}
''}
set -g status-keys ${cfg.keyMode}
set -g mode-keys ${cfg.keyMode}
${if cfg.keyMode == "vi" && cfg.customPaneNavigationAndResize then ''
${optionalString (cfg.keyMode == "vi" && cfg.customPaneNavigationAndResize) ''
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
@ -37,15 +37,15 @@ let
bind -r J resize-pane -D ${toString cfg.resizeAmount}
bind -r K resize-pane -U ${toString cfg.resizeAmount}
bind -r L resize-pane -R ${toString cfg.resizeAmount}
'' else ""}
''}
${if (cfg.shortcut != defaultShortcut) then ''
${optionalString (cfg.shortcut != defaultShortcut) ''
# rebind main key: C-${cfg.shortcut}
unbind C-${defaultShortcut}
set -g prefix C-${cfg.shortcut}
bind ${cfg.shortcut} send-prefix
bind C-${cfg.shortcut} last-window
'' else ""}
''}
setw -g aggressive-resize ${boolToStr cfg.aggressiveResize}
setw -g clock-mode-style ${if cfg.clock24 then "24" else "12"}
@ -160,7 +160,10 @@ in {
default = defaultTerminal;
example = "screen-256color";
type = types.str;
description = lib.mdDoc "Set the $TERM variable.";
description = lib.mdDoc ''
Set the $TERM variable. Use tmux-direct if italics or 24bit true color
support is needed.
'';
};
secureSocket = mkOption {

View file

@ -236,6 +236,9 @@ in
setopt ${concatStringsSep " " cfg.setOptions}
''}
# Alternative method of determining short and full hostname.
HOST=${config.networking.fqdnOrHostName}
# Setup command line history.
# Don't export these, otherwise other shells (bash) will try to use same HISTFILE.
SAVEHIST=${toString cfg.histSize}

View file

@ -58,6 +58,7 @@ with lib;
(mkRemovedOptionModule [ "services" "fourStoreEndpoint" ] "The fourStoreEndpoint module has been removed")
(mkRemovedOptionModule [ "services" "fprot" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "frab" ] "The frab module has been removed")
(mkRemovedOptionModule [ "services" "ihatemoney" ] "The ihatemoney module has been removed for lack of downstream maintainer")
(mkRemovedOptionModule [ "services" "kippo" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "mailpile" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "marathon" ] "The corresponding package was removed from nixpkgs.")
@ -106,6 +107,7 @@ with lib;
(mkRemovedOptionModule [ "services" "openfire" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "riak" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "cryptpad" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "rtsp-simple-server" ] "Package has been completely rebranded by upstream as mediamtx, and thus the service and the package were renamed in NixOS as well.")
(mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx" ] "The fcitx module has been removed. Plesae use fcitx5 instead")

View file

@ -487,7 +487,7 @@ let
};
email = mkOption {
type = types.str;
type = types.nullOr types.str;
inherit (defaultAndText "email" null) default defaultText;
description = lib.mdDoc ''
Email address for account creation and correspondence from the CA.
@ -555,7 +555,7 @@ let
};
credentialsFile = mkOption {
type = types.path;
type = types.nullOr types.path;
inherit (defaultAndText "credentialsFile" null) default defaultText;
description = lib.mdDoc ''
Path to an EnvironmentFile for the cert's service containing any required and
@ -781,11 +781,11 @@ in {
# FIXME Most of these custom warnings and filters for security.acme.certs.* are required
# because using mkRemovedOptionModule/mkChangedOptionModule with attrsets isn't possible.
warnings = filter (w: w != "") (mapAttrsToList (cert: data: if data.extraDomains != "_mkMergedOptionModule" then ''
warnings = filter (w: w != "") (mapAttrsToList (cert: data: optionalString (data.extraDomains != "_mkMergedOptionModule") ''
The option definition `security.acme.certs.${cert}.extraDomains` has changed
to `security.acme.certs.${cert}.extraDomainNames` and is now a list of strings.
Setting a custom webroot for extra domains is not possible, instead use separate certs.
'' else "") cfg.certs);
'') cfg.certs);
assertions = let
certs = attrValues cfg.certs;

View file

@ -275,9 +275,9 @@ in {
warnings =
# https://github.com/badaix/snapcast/blob/98ac8b2fb7305084376607b59173ce4097c620d8/server/streamreader/stream_manager.cpp#L85
filter (w: w != "") (mapAttrsToList (k: v: if v.type == "spotify" then ''
filter (w: w != "") (mapAttrsToList (k: v: optionalString (v.type == "spotify") ''
services.snapserver.streams.${k}.type = "spotify" is deprecated, use services.snapserver.streams.${k}.type = "librespot" instead.
'' else "") cfg.streams);
'') cfg.streams);
systemd.services.snapserver = {
after = [ "network.target" ];

View file

@ -72,5 +72,8 @@ in
cfg.configurations;
systemd.packages = [ pkgs.borgmatic ];
# Workaround: https://github.com/NixOS/nixpkgs/issues/81138
systemd.timers.borgmatic.wantedBy = [ "timers.target" ];
};
}

View file

@ -20,7 +20,7 @@ let
'';
backupDatabaseScript = db: ''
dest="${cfg.location}/${db}.gz"
if ${mariadb}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
if ${mariadb}/bin/mysqldump ${optionalString cfg.singleTransaction "--single-transaction"} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
mv $dest.tmp $dest
echo "Backed up to $dest"
else

View file

@ -300,7 +300,7 @@ in
filesFromTmpFile = "/run/restic-backups-${name}/includes";
backupPaths =
if (backup.dynamicFilesFrom == null)
then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
then optionalString (backup.paths != null) (concatStringsSep " " backup.paths)
else "--files-from ${filesFromTmpFile}";
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
(resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))

View file

@ -196,9 +196,9 @@ in
--gcmode ${cfg.gcmode} \
--port ${toString cfg.port} \
--maxpeers ${toString cfg.maxpeers} \
${if cfg.http.enable then ''--http --http.addr ${cfg.http.address} --http.port ${toString cfg.http.port}'' else ""} \
${optionalString cfg.http.enable ''--http --http.addr ${cfg.http.address} --http.port ${toString cfg.http.port}''} \
${optionalString (cfg.http.apis != null) ''--http.api ${lib.concatStringsSep "," cfg.http.apis}''} \
${if cfg.websocket.enable then ''--ws --ws.addr ${cfg.websocket.address} --ws.port ${toString cfg.websocket.port}'' else ""} \
${optionalString cfg.websocket.enable ''--ws --ws.addr ${cfg.websocket.address} --ws.port ${toString cfg.websocket.port}''} \
${optionalString (cfg.websocket.apis != null) ''--ws.api ${lib.concatStringsSep "," cfg.websocket.apis}''} \
${optionalString cfg.metrics.enable ''--metrics --metrics.addr ${cfg.metrics.address} --metrics.port ${toString cfg.metrics.port}''} \
--authrpc.addr ${cfg.authrpc.address} --authrpc.port ${toString cfg.authrpc.port} --authrpc.vhosts ${lib.concatStringsSep "," cfg.authrpc.vhosts} \

View file

@ -6,7 +6,7 @@ let
cfg = config.services.boinc;
allowRemoteGuiRpcFlag = optionalString cfg.allowRemoteGuiRpc "--allow_remote_gui_rpc";
fhsEnv = pkgs.buildFHSUserEnv {
fhsEnv = pkgs.buildFHSEnv {
name = "boinc-fhs-env";
targetPkgs = pkgs': [ cfg.package ] ++ cfg.extraEnvPackages;
runScript = "/bin/boinc_client";

View file

@ -242,7 +242,7 @@ in {
jobdir="${jenkinsCfg.home}/$jenkinsjobname"
rm -rf "$jobdir"
done
'' + (if cfg.accessUser != "" then reloadScript else "");
'' + (optionalString (cfg.accessUser != "") reloadScript);
serviceConfig = {
Type = "oneshot";
User = jenkinsCfg.user;

View file

@ -50,6 +50,6 @@ in {
};
};
environment.systemPackages = [ cfg.package ];
environment.systemPackages = [ cfg.package pkgs.direnv ];
};
}

View file

@ -4,7 +4,7 @@ with lib;
let
cfg = config.services.minetest-server;
flag = val: name: if val != null then "--${name} ${toString val} " else "";
flag = val: name: optionalString (val != null) "--${name} ${toString val} ";
flags = [
(flag cfg.gameId "gameid")
(flag cfg.world "world")

View file

@ -16,16 +16,6 @@ let
'';
# networkd link files are used early by udev to set up interfaces early.
# This must be done in stage 1 to avoid race conditions between udev and
# network daemons.
# TODO move this into the initrd-network module when it exists
initrdLinkUnits = pkgs.runCommand "initrd-link-units" {} ''
mkdir -p $out
ln -s ${udev}/lib/systemd/network/*.link $out/
${lib.concatMapStringsSep "\n" (file: "ln -s ${file} $out/") (lib.mapAttrsToList (n: v: "${v.unit}/${n}") (lib.filterAttrs (n: _: hasSuffix ".link" n) config.systemd.network.units))}
'';
extraUdevRules = pkgs.writeTextFile {
name = "extra-udev-rules";
text = cfg.extraRules;
@ -398,7 +388,6 @@ in
systemd = config.boot.initrd.systemd.package;
binPackages = config.boot.initrd.services.udev.binPackages ++ [ config.boot.initrd.systemd.contents."/bin".source ];
};
"/etc/systemd/network".source = initrdLinkUnits;
};
# Insert initrd rules
boot.initrd.services.udev.packages = [

View file

@ -0,0 +1,136 @@
{ config, lib, pkgs, ... }:
let
inherit (lib)
literalExpression
maintainers
mkEnableOption
mkIf
mkOption
mdDoc
types
;
cfg = config.services.esphome;
stateDir = "/var/lib/esphome";
esphomeParams =
if cfg.enableUnixSocket
then "--socket /run/esphome/esphome.sock"
else "--address ${cfg.address} --port ${toString cfg.port}";
in
{
meta.maintainers = with maintainers; [ oddlama ];
options.services.esphome = {
enable = mkEnableOption (mdDoc "esphome");
package = mkOption {
type = types.package;
default = pkgs.esphome;
defaultText = literalExpression "pkgs.esphome";
description = mdDoc "The package to use for the esphome command.";
};
enableUnixSocket = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc "Listen on a unix socket `/run/esphome/esphome.sock` instead of the TCP port.";
};
address = mkOption {
type = types.str;
default = "localhost";
description = mdDoc "esphome address";
};
port = mkOption {
type = types.port;
default = 6052;
description = mdDoc "esphome port";
};
openFirewall = mkOption {
default = false;
type = types.bool;
description = mdDoc "Whether to open the firewall for the specified port.";
};
allowedDevices = mkOption {
default = ["char-ttyS" "char-ttyUSB"];
example = ["/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0"];
description = lib.mdDoc ''
A list of device nodes to which {command}`esphome` has access.
Refer to DeviceAllow in systemd.resource-control(5) for more information.
Beware that if a device is referred to by an absolute path instead of a device category,
it will only allow devices that already are plugged in when the service is started.
'';
type = types.listOf types.str;
};
};
config = mkIf cfg.enable {
networking.firewall.allowedTCPPorts = mkIf (cfg.openFirewall && !cfg.enableUnixSocket) [cfg.port];
systemd.services.esphome = {
description = "ESPHome dashboard";
after = ["network.target"];
wantedBy = ["multi-user.target"];
path = [cfg.package];
# platformio fails to determine the home directory when using DynamicUser
environment.PLATFORMIO_CORE_DIR = "${stateDir}/.platformio";
serviceConfig = {
ExecStart = "${cfg.package}/bin/esphome dashboard ${esphomeParams} ${stateDir}";
DynamicUser = true;
User = "esphome";
Group = "esphome";
WorkingDirectory = stateDir;
StateDirectory = "esphome";
StateDirectoryMode = "0750";
Restart = "on-failure";
RuntimeDirectory = mkIf cfg.enableUnixSocket "esphome";
RuntimeDirectoryMode = "0750";
# Hardening
CapabilityBoundingSet = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
DevicePolicy = "closed";
DeviceAllow = map (d: "${d} rw") cfg.allowedDevices;
SupplementaryGroups = ["dialout"];
#NoNewPrivileges = true; # Implied by DynamicUser
PrivateUsers = true;
#PrivateTmp = true; # Implied by DynamicUser
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProcSubset = "pid";
ProtectSystem = "strict";
#RemoveIPC = true; # Implied by DynamicUser
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_NETLINK"
"AF_UNIX"
];
RestrictNamespaces = false; # Required by platformio for chroot
RestrictRealtime = true;
#RestrictSUIDSGID = true; # Implied by DynamicUser
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"@mount" # Required by platformio for chroot
];
UMask = "0077";
};
};
};
}

View file

@ -83,9 +83,8 @@ let
};
mailOption =
if foldr (n: a: a || (n.mail or false) != false) false (attrValues cfg.settings)
then "--mail=${pkgs.mailutils}/bin/mail"
else "";
optionalString (foldr (n: a: a || (n.mail or false) != false) false (attrValues cfg.settings))
"--mail=${pkgs.mailutils}/bin/mail";
in
{
imports = [

View file

@ -7,7 +7,7 @@ let
cfg = config.services.syslogd;
syslogConf = pkgs.writeText "syslog.conf" ''
${if (cfg.tty != "") then "kern.warning;*.err;authpriv.none /dev/${cfg.tty}" else ""}
${optionalString (cfg.tty != "") "kern.warning;*.err;authpriv.none /dev/${cfg.tty}"}
${cfg.defaultConfig}
${cfg.extraConfig}
'';

View file

@ -228,8 +228,8 @@ in {
default = [];
description = lib.mdDoc ''
List of IMAP accounts which get automatically created. Note that for
a complete setup, user credentials for these accounts are required too
and can be created using the command `maddyctl creds`.
a complete setup, user credentials for these accounts are required
and can be created using the `ensureCredentials` option.
This option does not delete accounts which are not (anymore) listed.
'';
example = [
@ -238,6 +238,33 @@ in {
];
};
ensureCredentials = mkOption {
default = {};
description = lib.mdDoc ''
List of user accounts which get automatically created if they don't
exist yet. Note that for a complete setup, corresponding mail boxes
have to get created using the `ensureAccounts` option.
This option does not delete accounts which are not (anymore) listed.
'';
example = {
"user1@localhost".passwordFile = /secrets/user1-localhost;
"user2@localhost".passwordFile = /secrets/user2-localhost;
};
type = types.attrsOf (types.submodule {
options = {
passwordFile = mkOption {
type = types.path;
example = "/path/to/file";
default = null;
description = lib.mdDoc ''
Specifies the path to a file containing the
clear text password for the user.
'';
};
};
});
};
};
};
@ -265,6 +292,13 @@ in {
fi
'') cfg.ensureAccounts}
''}
${optionalString (cfg.ensureCredentials != {}) ''
${concatStringsSep "\n" (mapAttrsToList (name: cfg: ''
if ! ${pkgs.maddy}/bin/maddyctl creds list | grep "${name}"; then
${pkgs.maddy}/bin/maddyctl creds create --password $(cat ${escapeShellArg cfg.passwordFile}) ${name}
fi
'') cfg.ensureCredentials)}
''}
'';
serviceConfig = {
Type = "oneshot";

View file

@ -234,7 +234,7 @@ let
headerChecks = concatStringsSep "\n" (map (x: "${x.pattern} ${x.action}") cfg.headerChecks) + cfg.extraHeaderChecks;
aliases = let separator = if cfg.aliasMapType == "hash" then ":" else ""; in
aliases = let separator = optionalString (cfg.aliasMapType == "hash") ":"; in
optionalString (cfg.postmasterAlias != "") ''
postmaster${separator} ${cfg.postmasterAlias}
''

View file

@ -7,7 +7,7 @@ let
fpm = config.services.phpfpm.pools.roundcube;
localDB = cfg.database.host == "localhost";
user = cfg.database.username;
phpWithPspell = pkgs.php80.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
phpWithPspell = pkgs.php81.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
in
{
options.services.roundcube = {
@ -70,7 +70,12 @@ in
};
passwordFile = mkOption {
type = types.str;
description = lib.mdDoc "Password file for the postgresql connection. Must be readable by user `nginx`. Ignored if `database.host` is set to `localhost`, as peer authentication will be used.";
description = lib.mdDoc ''
Password file for the postgresql connection.
Must be formatted according to the PostgreSQL .pgpass standard (see https://www.postgresql.org/docs/current/libpq-pgpass.html),
but must contain only one line, no comments, and be readable by user `nginx`.
Ignored if `database.host` is set to `localhost`, as peer authentication will be used.
'';
};
dbname = mkOption {
type = types.str;
@ -123,7 +128,13 @@ in
environment.etc."roundcube/config.inc.php".text = ''
<?php
${lib.optionalString (!localDB) "$password = file_get_contents('${cfg.database.passwordFile}');"}
${lib.optionalString (!localDB) ''
$password = file('${cfg.database.passwordFile}')[0];
$password = preg_split('~\\\\.(*SKIP)(*FAIL)|\:~s', $password);
$password = end($password);
$password = str_replace("\\:", ":", $password);
$password = str_replace("\\\\", "\\", $password);
''}
$config = array();
$config['db_dsnw'] = 'pgsql://${cfg.database.username}${lib.optionalString (!localDB) ":' . $password . '"}@${if localDB then "unix(/run/postgresql)" else cfg.database.host}/${cfg.database.dbname}';
@ -223,6 +234,7 @@ in
path = [ config.services.postgresql.package ];
})
{
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
script = let
psql = "${lib.optionalString (!localDB) "PGPASSFILE=${cfg.database.passwordFile}"} ${pkgs.postgresql}/bin/psql ${lib.optionalString (!localDB) "-h ${cfg.database.host} -U ${cfg.database.username} "} ${cfg.database.dbname}";

View file

@ -10,7 +10,7 @@ let
Connection = ${cfg.device.connection}
SynchronizeTime = ${if cfg.device.synchronizeTime then "yes" else "no"}
LogFormat = ${cfg.log.format}
${if (cfg.device.pin != null) then "PIN = ${cfg.device.pin}" else ""}
${optionalString (cfg.device.pin != null) "PIN = ${cfg.device.pin}"}
${cfg.extraConfig.gammu}
@ -33,10 +33,10 @@ let
${optionalString (cfg.backend.service == "sql" && cfg.backend.sql.driver == "native_pgsql") (
with cfg.backend; ''
Driver = ${sql.driver}
${if (sql.database!= null) then "Database = ${sql.database}" else ""}
${if (sql.host != null) then "Host = ${sql.host}" else ""}
${if (sql.user != null) then "User = ${sql.user}" else ""}
${if (sql.password != null) then "Password = ${sql.password}" else ""}
${optionalString (sql.database!= null) "Database = ${sql.database}"}
${optionalString (sql.host != null) "Host = ${sql.host}"}
${optionalString (sql.user != null) "User = ${sql.user}"}
${optionalString (sql.password != null) "Password = ${sql.password}"}
'')}
${cfg.extraConfig.smsd}

View file

@ -26,9 +26,18 @@ in
imports = [
(mkRenamedOptionModule [ "services" "gitea" "cookieSecure" ] [ "services" "gitea" "settings" "session" "COOKIE_SECURE" ])
(mkRenamedOptionModule [ "services" "gitea" "disableRegistration" ] [ "services" "gitea" "settings" "service" "DISABLE_REGISTRATION" ])
(mkRenamedOptionModule [ "services" "gitea" "domain" ] [ "services" "gitea" "settings" "server" "DOMAIN" ])
(mkRenamedOptionModule [ "services" "gitea" "httpAddress" ] [ "services" "gitea" "settings" "server" "HTTP_ADDR" ])
(mkRenamedOptionModule [ "services" "gitea" "httpPort" ] [ "services" "gitea" "settings" "server" "HTTP_PORT" ])
(mkRenamedOptionModule [ "services" "gitea" "log" "level" ] [ "services" "gitea" "settings" "log" "LEVEL" ])
(mkRenamedOptionModule [ "services" "gitea" "log" "rootPath" ] [ "services" "gitea" "settings" "log" "ROOT_PATH" ])
(mkRenamedOptionModule [ "services" "gitea" "rootUrl" ] [ "services" "gitea" "settings" "server" "ROOT_URL" ])
(mkRenamedOptionModule [ "services" "gitea" "ssh" "clonePort" ] [ "services" "gitea" "settings" "server" "SSH_PORT" ])
(mkRenamedOptionModule [ "services" "gitea" "staticRootPath" ] [ "services" "gitea" "settings" "server" "STATIC_ROOT_PATH" ])
(mkChangedOptionModule [ "services" "gitea" "enableUnixSocket" ] [ "services" "gitea" "settings" "server" "PROTOCOL" ] (
config: if config.services.gitea.enableUnixSocket then "http+unix" else "http"
))
(mkRemovedOptionModule [ "services" "gitea" "ssh" "enable" ] "services.gitea.ssh.enable has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted")
];
@ -57,7 +66,14 @@ in
stateDir = mkOption {
default = "/var/lib/gitea";
type = types.str;
description = lib.mdDoc "gitea data directory.";
description = lib.mdDoc "Gitea data directory.";
};
customDir = mkOption {
default = "${cfg.stateDir}/custom";
defaultText = literalExpression ''"''${config.${opt.stateDir}}/custom"'';
type = types.str;
description = lib.mdDoc "Gitea custom directory. Used for config, custom templates and other options.";
};
user = mkOption {
@ -66,6 +82,12 @@ in
description = lib.mdDoc "User account under which gitea runs.";
};
group = mkOption {
type = types.str;
default = "gitea";
description = lib.mdDoc "Group under which gitea runs.";
};
database = {
type = mkOption {
type = types.enum [ "sqlite3" "mysql" "postgres" ];
@ -216,44 +238,6 @@ in
description = lib.mdDoc "Path to the git repositories.";
};
domain = mkOption {
type = types.str;
default = "localhost";
description = lib.mdDoc "Domain name of your server.";
};
rootUrl = mkOption {
type = types.str;
default = "http://localhost:3000/";
description = lib.mdDoc "Full public URL of gitea server.";
};
httpAddress = mkOption {
type = types.str;
default = "0.0.0.0";
description = lib.mdDoc "HTTP listen address.";
};
httpPort = mkOption {
type = types.port;
default = 3000;
description = lib.mdDoc "HTTP listen port.";
};
enableUnixSocket = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc "Configure Gitea to listen on a unix socket instead of the default TCP port.";
};
staticRootPath = mkOption {
type = types.either types.str types.path;
default = cfg.package.data;
defaultText = literalExpression "package.data";
example = "/var/lib/gitea/data";
description = lib.mdDoc "Upper level of template and static files path.";
};
mailerPasswordFile = mkOption {
type = types.nullOr types.str;
default = null;
@ -285,7 +269,7 @@ in
};
}
'';
type = with types; submodule {
type = types.submodule {
freeformType = format.type;
options = {
log = {
@ -303,6 +287,46 @@ in
};
server = {
PROTOCOL = mkOption {
type = types.enum [ "http" "https" "fcgi" "http+unix" "fcgi+unix" ];
default = "http";
description = lib.mdDoc ''Listen protocol. `+unix` means "over unix", not "in addition to."'';
};
HTTP_ADDR = mkOption {
type = types.either types.str types.path;
default = if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0";
defaultText = literalExpression ''if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0"'';
description = lib.mdDoc "Listen address. Must be a path when using a unix socket.";
};
HTTP_PORT = mkOption {
type = types.port;
default = 3000;
description = lib.mdDoc "Listen port. Ignored when using a unix socket.";
};
DOMAIN = mkOption {
type = types.str;
default = "localhost";
description = lib.mdDoc "Domain name of your server.";
};
ROOT_URL = mkOption {
type = types.str;
default = "http://${cfg.settings.server.DOMAIN}:${toString cfg.settings.server.HTTP_PORT}/";
defaultText = literalExpression ''"http://''${config.services.gitea.settings.server.DOMAIN}:''${toString config.services.gitea.settings.server.HTTP_PORT}/"'';
description = lib.mdDoc "Full public URL of gitea server.";
};
STATIC_ROOT_PATH = mkOption {
type = types.either types.str types.path;
default = cfg.package.data;
defaultText = literalExpression "config.${opt.package}.data";
example = "/var/lib/gitea/data";
description = lib.mdDoc "Upper level of template and static files path.";
};
DISABLE_SSH = mkOption {
type = types.bool;
default = false;
@ -359,7 +383,7 @@ in
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.database.createDatabase -> cfg.database.user == cfg.user;
{ assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
message = "services.gitea.database.user must match services.gitea.user if the database is to be automatically provisioned";
}
];
@ -389,26 +413,10 @@ in
ROOT = cfg.repositoryRoot;
};
server = mkMerge [
{
DOMAIN = cfg.domain;
STATIC_ROOT_PATH = toString cfg.staticRootPath;
LFS_JWT_SECRET = "#lfsjwtsecret#";
ROOT_URL = cfg.rootUrl;
}
(mkIf cfg.enableUnixSocket {
PROTOCOL = "http+unix";
HTTP_ADDR = "/run/gitea/gitea.sock";
})
(mkIf (!cfg.enableUnixSocket) {
HTTP_ADDR = cfg.httpAddress;
HTTP_PORT = cfg.httpPort;
})
(mkIf cfg.lfs.enable {
LFS_START_SERVER = true;
})
];
server = mkIf cfg.lfs.enable {
LFS_START_SERVER = true;
LFS_JWT_SECRET = "#lfsjwtsecret#";
};
session = {
COOKIE_NAME = lib.mkDefault "session";
@ -428,7 +436,7 @@ in
JWT_SECRET = "#oauth2jwtsecret#";
};
lfs = mkIf (cfg.lfs.enable) {
lfs = mkIf cfg.lfs.enable {
PATH = cfg.lfs.contentDir;
};
};
@ -457,33 +465,35 @@ in
};
systemd.tmpfiles.rules = [
"d '${cfg.dump.backupDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.dump.backupDir}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.dump.backupDir}' - ${cfg.user} gitea - -"
"d '${cfg.lfs.contentDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.lfs.contentDir}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.lfs.contentDir}' - ${cfg.user} gitea - -"
"d '${cfg.repositoryRoot}' 0750 ${cfg.user} gitea - -"
"z '${cfg.repositoryRoot}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.repositoryRoot}' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/data' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/data' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"Z '${cfg.stateDir}' - ${cfg.user} gitea - -"
"d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
"Z '${cfg.dump.backupDir}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
"Z '${cfg.repositoryRoot}' - ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
"Z '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
# If we have a folder or symlink with gitea locales, remove it
# And symlink the current gitea locales in place
"L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
] ++ lib.optionals cfg.lfs.enable [
"d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
"z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
"Z '${cfg.lfs.contentDir}' - ${cfg.user} ${cfg.group} - -"
];
systemd.services.gitea = {
@ -500,47 +510,52 @@ in
# lfs_jwt_secret.
# We have to consider this to stay compatible with older installations.
preStart = let
runConfig = "${cfg.stateDir}/custom/conf/app.ini";
secretKey = "${cfg.stateDir}/custom/conf/secret_key";
oauth2JwtSecret = "${cfg.stateDir}/custom/conf/oauth2_jwt_secret";
oldLfsJwtSecret = "${cfg.stateDir}/custom/conf/jwt_secret"; # old file for LFS_JWT_SECRET
lfsJwtSecret = "${cfg.stateDir}/custom/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
internalToken = "${cfg.stateDir}/custom/conf/internal_token";
runConfig = "${cfg.customDir}/conf/app.ini";
secretKey = "${cfg.customDir}/conf/secret_key";
oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
internalToken = "${cfg.customDir}/conf/internal_token";
replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
in ''
# copy custom configuration and generate a random secret key if needed
# copy custom configuration and generate random secrets if needed
${optionalString (!cfg.useWizard) ''
function gitea_setup {
cp -f ${configFile} ${runConfig}
cp -f '${configFile}' '${runConfig}'
if [ ! -s ${secretKey} ]; then
${exe} generate secret SECRET_KEY > ${secretKey}
if [ ! -s '${secretKey}' ]; then
${exe} generate secret SECRET_KEY > '${secretKey}'
fi
# Migrate LFS_JWT_SECRET filename
if [[ -s ${oldLfsJwtSecret} && ! -s ${lfsJwtSecret} ]]; then
mv ${oldLfsJwtSecret} ${lfsJwtSecret}
if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
fi
if [ ! -s ${oauth2JwtSecret} ]; then
${exe} generate secret JWT_SECRET > ${oauth2JwtSecret}
if [ ! -s '${oauth2JwtSecret}' ]; then
${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
fi
if [ ! -s ${lfsJwtSecret} ]; then
${exe} generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
${lib.optionalString cfg.lfs.enable ''
if [ ! -s '${lfsJwtSecret}' ]; then
${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
fi
''}
if [ ! -s ${internalToken} ]; then
${exe} generate secret INTERNAL_TOKEN > ${internalToken}
if [ ! -s '${internalToken}' ]; then
${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
fi
chmod u+w '${runConfig}'
${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
${lib.optionalString cfg.lfs.enable ''
${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
''}
${lib.optionalString (cfg.mailerPasswordFile != null) ''
${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
''}
@ -565,7 +580,7 @@ in
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = "gitea";
Group = cfg.group;
WorkingDirectory = cfg.stateDir;
ExecStart = "${exe} web --pid /run/gitea/gitea.pid";
Restart = "always";
@ -573,7 +588,7 @@ in
RuntimeDirectory = "gitea";
RuntimeDirectoryMode = "0755";
# Access write directories
ReadWritePaths = [ cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
ReadWritePaths = [ cfg.customDir cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
UMask = "0027";
# Capabilities
CapabilityBoundingSet = "";
@ -606,6 +621,7 @@ in
USER = cfg.user;
HOME = cfg.stateDir;
GITEA_WORK_DIR = cfg.stateDir;
GITEA_CUSTOM = cfg.customDir;
};
};
@ -614,12 +630,14 @@ in
description = "Gitea Service";
home = cfg.stateDir;
useDefaultShell = true;
group = "gitea";
group = cfg.group;
isSystemUser = true;
};
};
users.groups.gitea = {};
users.groups = mkIf (cfg.group == "gitea") {
gitea = {};
};
warnings =
optional (cfg.database.password != "") "config.services.gitea.database.password will be stored as plaintext in the Nix store. Use database.passwordFile instead." ++

View file

@ -1215,7 +1215,7 @@ in {
enableDelete = true; # This must be true, otherwise GitLab won't manage it correctly
extraConfig = {
auth.token = {
realm = "http${if cfg.https == true then "s" else ""}://${cfg.host}/jwt/auth";
realm = "http${optionalString (cfg.https == true) "s"}://${cfg.host}/jwt/auth";
service = cfg.registry.serviceName;
issuer = cfg.registry.issuer;
rootcertbundle = cfg.registry.certFile;

View file

@ -3,7 +3,7 @@ with lib;
let
cfg = config.services.mbpfan;
verbose = if cfg.verbose then "v" else "";
verbose = optionalString cfg.verbose "v";
settingsFormat = pkgs.formats.ini {};
settingsFile = settingsFormat.generate "mbpfan.ini" cfg.settings;

View file

@ -0,0 +1,176 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.pufferpanel;
in
{
options.services.pufferpanel = {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
Whether to enable PufferPanel game management server.
Note that [PufferPanel templates] and binaries downloaded by PufferPanel
expect an [FHS environment]. It is possible to set the {option}`package` option
to use a PufferPanel wrapper with an FHS environment. For example, to use
`Download Game from Steam` and `Download Java` template operations:
```Nix
{ lib, pkgs, ... }: {
services.pufferpanel = {
enable = true;
extraPackages = with pkgs; [ bash curl gawk gnutar gzip ];
package = pkgs.buildFHSUserEnv {
name = "pufferpanel-fhs";
runScript = lib.getExe pkgs.pufferpanel;
targetPkgs = pkgs': with pkgs'; [ icu openssl zlib ];
};
};
}
```
[PufferPanel templates]: https://github.com/PufferPanel/templates
[FHS environment]: https://wikipedia.org/wiki/Filesystem_Hierarchy_Standard
'';
};
package = lib.mkPackageOptionMD pkgs "pufferpanel" { };
extraGroups = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "podman" ];
description = lib.mdDoc ''
Additional groups for the systemd service.
'';
};
extraPackages = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [ ];
example = lib.literalExpression "[ pkgs.jre ]";
description = lib.mdDoc ''
Packages to add to the PATH environment variable. Both the {file}`bin`
and {file}`sbin` subdirectories of each package are added.
'';
};
environment = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = { };
example = lib.literalExpression ''
{
PUFFER_WEB_HOST = ":8080";
PUFFER_DAEMON_SFTP_HOST = ":5657";
PUFFER_DAEMON_CONSOLE_BUFFER = "1000";
PUFFER_DAEMON_CONSOLE_FORWARD = "true";
PUFFER_PANEL_REGISTRATIONENABLED = "false";
}
'';
description = lib.mdDoc ''
Environment variables to set for the service. Secrets should be
specified using {option}`environmentFile`.
Refer to the [PufferPanel source code][] for the list of available
configuration options. Variable name is an upper-cased configuration
entry name with underscores instead of dots, prefixed with `PUFFER_`.
For example, `panel.settings.companyName` entry can be set using
{env}`PUFFER_PANEL_SETTINGS_COMPANYNAME`.
When running with panel enabled (configured with `PUFFER_PANEL_ENABLE`
environment variable), it is recommended to disable registration using the
`PUFFER_PANEL_REGISTRATIONENABLED` environment variable (registration is
enabled by default). To create the initial administrator user, run
{command}`pufferpanel --workDir /var/lib/pufferpanel user add --admin`.
Some options override corresponding settings set via web interface (e.g.
`PUFFER_PANEL_REGISTRATIONENABLED`). Those options can be temporarily
toggled or set in settings but do not persist between restarts.
[PufferPanel source code]: https://github.com/PufferPanel/PufferPanel/blob/master/config/entries.go
'';
};
environmentFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = lib.mdDoc ''
File to load environment variables from. Loaded variables override
values set in {option}`environment`.
'';
};
};
config = lib.mkIf cfg.enable {
systemd.services.pufferpanel = {
description = "PufferPanel game management server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = cfg.extraPackages;
environment = cfg.environment;
# Note that we export environment variables for service directories if the
# value is not set. An empty environment variable is considered to be set.
# E.g.
# export PUFFER_LOGS=${PUFFER_LOGS-$LOGS_DIRECTORY}
# would set PUFFER_LOGS to $LOGS_DIRECTORY if PUFFER_LOGS environment
# variable is not defined.
script = ''
${lib.concatLines (lib.mapAttrsToList (name: value: ''
export ${name}="''${${name}-${value}}"
'') {
PUFFER_LOGS = "$LOGS_DIRECTORY";
PUFFER_DAEMON_DATA_CACHE = "$CACHE_DIRECTORY";
PUFFER_DAEMON_DATA_SERVERS = "$STATE_DIRECTORY/servers";
PUFFER_DAEMON_DATA_BINARIES = "$STATE_DIRECTORY/binaries";
})}
exec ${lib.getExe cfg.package} run --workDir "$STATE_DIRECTORY"
'';
serviceConfig = {
Type = "simple";
Restart = "always";
UMask = "0077";
SupplementaryGroups = cfg.extraGroups;
StateDirectory = "pufferpanel";
StateDirectoryMode = "0700";
CacheDirectory = "pufferpanel";
CacheDirectoryMode = "0700";
LogsDirectory = "pufferpanel";
LogsDirectoryMode = "0700";
EnvironmentFile = cfg.environmentFile;
# Command "pufferpanel shutdown --pid $MAINPID" sends SIGTERM (code 15)
# to the main process and waits for termination. This is essentially
# KillMode=mixed we are using here. See
# https://freedesktop.org/software/systemd/man/systemd.kill.html#KillMode=
KillMode = "mixed";
DynamicUser = true;
ProtectHome = true;
ProtectProc = "invisible";
ProtectClock = true;
ProtectHostname = true;
ProtectControlGroups = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
PrivateUsers = true;
PrivateDevices = true;
RestrictRealtime = true;
RestrictNamespaces = [ "user" "mnt" ]; # allow buildFHSUserEnv
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
LockPersonality = true;
DeviceAllow = [ "" ];
DevicePolicy = "closed";
CapabilityBoundingSet = [ "" ];
};
};
};
meta.maintainers = [ lib.maintainers.tie ];
}

View file

@ -283,13 +283,13 @@ in
services.redmine.settings = {
production = {
scm_subversion_command = if cfg.components.subversion then "${pkgs.subversion}/bin/svn" else "";
scm_mercurial_command = if cfg.components.mercurial then "${pkgs.mercurial}/bin/hg" else "";
scm_git_command = if cfg.components.git then "${pkgs.git}/bin/git" else "";
scm_cvs_command = if cfg.components.cvs then "${pkgs.cvs}/bin/cvs" else "";
scm_bazaar_command = if cfg.components.breezy then "${pkgs.breezy}/bin/bzr" else "";
imagemagick_convert_command = if cfg.components.imagemagick then "${pkgs.imagemagick}/bin/convert" else "";
gs_command = if cfg.components.ghostscript then "${pkgs.ghostscript}/bin/gs" else "";
scm_subversion_command = optionalString cfg.components.subversion "${pkgs.subversion}/bin/svn";
scm_mercurial_command = optionalString cfg.components.mercurial "${pkgs.mercurial}/bin/hg";
scm_git_command = optionalString cfg.components.git "${pkgs.git}/bin/git";
scm_cvs_command = optionalString cfg.components.cvs "${pkgs.cvs}/bin/cvs";
scm_bazaar_command = optionalString cfg.components.breezy "${pkgs.breezy}/bin/bzr";
imagemagick_convert_command = optionalString cfg.components.imagemagick "${pkgs.imagemagick}/bin/convert";
gs_command = optionalString cfg.components.ghostscript "${pkgs.ghostscript}/bin/gs";
minimagick_font_path = "${cfg.components.minimagick_font_path}";
};
};

View file

@ -20,7 +20,7 @@ let
${optionalString (cfg.hostsAllowReg != []) "hosts_allow_reg = ${concatStringsSep "," cfg.hostsAllowReg}"}
${optionalString (cfg.hostsAllowSip != []) "hosts_allow_sip = ${concatStringsSep "," cfg.hostsAllowSip}"}
${optionalString (cfg.hostsDenySip != []) "hosts_deny_sip = ${concatStringsSep "," cfg.hostsDenySip}"}
${if (cfg.passwordFile != "") then "proxy_auth_pwfile = ${cfg.passwordFile}" else ""}
${optionalString (cfg.passwordFile != "") "proxy_auth_pwfile = ${cfg.passwordFile}"}
${cfg.extraConfig}
'';

View file

@ -140,7 +140,7 @@ in
# We can't use Environment=HOSTNAME=%H, as it doesn't include the domain part.
export HOSTNAME=$(< /proc/sys/kernel/hostname)
exec ${cfg.package}/bin/agent -config.expand-env -config.file ${configFile}
exec ${lib.getExe cfg.package} -config.expand-env -config.file ${configFile}
'';
serviceConfig = {
Restart = "always";

View file

@ -58,10 +58,10 @@ in
};
};
serviceOpts = let
collectSettingsArgs = if (cfg.collectdBinary.enable) then ''
collectSettingsArgs = optionalString (cfg.collectdBinary.enable) ''
--collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
--collectd.security-level ${cfg.collectdBinary.securityLevel} \
'' else "";
'';
in {
serviceConfig = {
ExecStart = ''

View file

@ -4,12 +4,12 @@ with lib;
let
cfg = config.services.prometheus.exporters.smartctl;
args = concatStrings [
"--web.listen-address=\"${cfg.listenAddress}:${toString cfg.port}\" "
"--smartctl.path=\"${pkgs.smartmontools}/bin/smartctl\" "
"--smartctl.interval=\"${cfg.maxInterval}\" "
"${concatMapStringsSep " " (device: "--smartctl.device=${device}") cfg.devices}"
] ++ cfg.extraFlags;
args = lib.escapeShellArgs ([
"--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
"--smartctl.path=${pkgs.smartmontools}/bin/smartctl"
"--smartctl.interval=${cfg.maxInterval}"
] ++ map (device: "--smartctl.device=${device}") cfg.devices
++ cfg.extraFlags);
in {
port = 9633;
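Replacing the manually quoted `concatStrings` call with `lib.escapeShellArgs` hands quoting to the library: each list element becomes one properly escaped shell word and the results are joined with spaces. A sketch with made-up values:

```nix
let
  lib = (import <nixpkgs> { }).lib;
in
# Each list element becomes exactly one shell word; values containing spaces
# or quotes are escaped instead of silently splitting into several arguments.
lib.escapeShellArgs [
  "--web.listen-address=:9633"
  "--smartctl.device=/dev/disk/by-id/ata-Example Disk"  # hypothetical device name
]
```

The previous version embedded `\"` characters by hand, and a device path containing whitespace would have broken into several flags.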

View file

@ -22,6 +22,18 @@ let
configFile = settingsFormat.generate "kubo-config.json" customizedConfig;
# Create a fake repo containing only the file "api".
# $IPFS_PATH will point to this directory instead of the real one.
# For some reason the Kubo CLI tools insist on reading the
# config file when it exists. But the Kubo daemon sets the file
# permissions such that only the ipfs user is allowed to read
# this file. This prevents normal users from talking to the daemon.
# To work around this terrible design, create a fake repo with no
# config file, only an api file and everything should work as expected.
fakeKuboRepo = pkgs.writeTextDir "api" ''
/unix/run/ipfs.sock
'';
kuboFlags = utils.escapeSystemdExecArgs (
optional cfg.autoMount "--mount" ++
optional cfg.enableGC "--enable-gc" ++
@ -38,6 +50,22 @@ let
splitMulitaddr = addrRaw: lib.tail (lib.splitString "/" addrRaw);
multiaddrsToListenStreams = addrIn:
let
addrs = if builtins.typeOf addrIn == "list"
then addrIn else [ addrIn ];
unfilteredResult = map multiaddrToListenStream addrs;
in
builtins.filter (addr: addr != null) unfilteredResult;
multiaddrsToListenDatagrams = addrIn:
let
addrs = if builtins.typeOf addrIn == "list"
then addrIn else [ addrIn ];
unfilteredResult = map multiaddrToListenDatagram addrs;
in
builtins.filter (addr: addr != null) unfilteredResult;
multiaddrToListenStream = addrRaw:
let
addr = splitMulitaddr addrRaw;
@ -154,13 +182,18 @@ in
options = {
Addresses.API = mkOption {
type = types.str;
default = "/ip4/127.0.0.1/tcp/5001";
description = lib.mdDoc "Where Kubo exposes its API to";
type = types.oneOf [ types.str (types.listOf types.str) ];
default = [ ];
description = lib.mdDoc ''
Multiaddr or array of multiaddrs describing the address to serve the local HTTP API on.
In addition to the multiaddrs listed here, the daemon will also listen on a Unix domain socket.
To allow the ipfs CLI tools to communicate with the daemon over that socket,
add your user to the correct group, e.g. `users.users.alice.extraGroups = [ config.services.kubo.group ];`
'';
};
Addresses.Gateway = mkOption {
type = types.str;
type = types.oneOf [ types.str (types.listOf types.str) ];
default = "/ip4/127.0.0.1/tcp/8080";
description = lib.mdDoc "Where the IPFS Gateway can be reached";
};
@ -248,7 +281,7 @@ in
];
environment.systemPackages = [ cfg.package ];
environment.variables.IPFS_PATH = cfg.dataDir;
environment.variables.IPFS_PATH = fakeKuboRepo;
# https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
@ -319,6 +352,10 @@ in
# change when the changes are applied. Whyyyyyy.....
ipfs --offline config replace -
'';
postStop = mkIf cfg.autoMount ''
# After an unclean shutdown the fuse mounts at cfg.ipnsMountDir and cfg.ipfsMountDir are locked
umount --quiet '${cfg.ipnsMountDir}' '${cfg.ipfsMountDir}' || true
'';
serviceConfig = {
ExecStart = [ "" "${cfg.package}/bin/ipfs daemon ${kuboFlags}" ];
User = cfg.user;
@ -334,27 +371,23 @@ in
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream =
let
fromCfg = multiaddrToListenStream cfg.settings.Addresses.Gateway;
in
[ "" ] ++ lib.optional (fromCfg != null) fromCfg;
[ "" ] ++ (multiaddrsToListenStreams cfg.settings.Addresses.Gateway);
ListenDatagram =
let
fromCfg = multiaddrToListenDatagram cfg.settings.Addresses.Gateway;
in
[ "" ] ++ lib.optional (fromCfg != null) fromCfg;
[ "" ] ++ (multiaddrsToListenDatagrams cfg.settings.Addresses.Gateway);
};
};
systemd.sockets.ipfs-api = {
wantedBy = [ "sockets.target" ];
# We also include "%t/ipfs.sock" because there is no way to put the "%t"
# in the multiaddr.
socketConfig.ListenStream =
let
fromCfg = multiaddrToListenStream cfg.settings.Addresses.API;
in
[ "" "%t/ipfs.sock" ] ++ lib.optional (fromCfg != null) fromCfg;
socketConfig = {
# We also include "%t/ipfs.sock" because there is no way to put the "%t"
# in the multiaddr.
ListenStream =
[ "" "%t/ipfs.sock" ] ++ (multiaddrsToListenStreams cfg.settings.Addresses.API);
SocketMode = "0660";
SocketUser = cfg.user;
SocketGroup = cfg.group;
};
};
};
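With `Addresses.API` now taking either a single multiaddr or a list, and the daemon always listening on a Unix socket as well, a configuration built on this module might look like the sketch below; the addresses and the user name are only examples:

```nix
{ config, ... }:
{
  services.kubo = {
    enable = true;
    settings.Addresses = {
      # Both options now accept a single multiaddr or a list of them.
      API = [ "/ip4/127.0.0.1/tcp/5001" ];
      Gateway = "/ip4/127.0.0.1/tcp/8080";
    };
  };

  # Let a local user reach the daemon over the Unix socket provided by the
  # ipfs-api socket unit.
  users.users.alice.extraGroups = [ config.services.kubo.group ];
}
```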

View file

@ -1,13 +1,13 @@
{ config, lib, ...}:
let
inherit (lib) concatStringsSep mkOption types;
inherit (lib) concatStringsSep mkOption types optionalString;
in {
mkCellServDB = cellName: db: ''
>${cellName}
'' + (concatStringsSep "\n" (map (dbm: if (dbm.ip != "" && dbm.dnsname != "") then dbm.ip + " #" + dbm.dnsname else "")
'' + (concatStringsSep "\n" (map (dbm: optionalString (dbm.ip != "" && dbm.dnsname != "") "${dbm.ip} #${dbm.dnsname}")
db))
+ "\n";

View file

@ -199,7 +199,7 @@ in
(filterAttrs (n: _: hasPrefix "consul.d/" n) config.environment.etc);
serviceConfig = {
ExecStart = "@${cfg.package}/bin/consul consul agent -config-dir /etc/consul.d"
ExecStart = "@${lib.getExe cfg.package} consul agent -config-dir /etc/consul.d"
+ concatMapStrings (n: " -config-file ${n}") configFiles;
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
PermissionsStartOnly = true;
@ -207,10 +207,10 @@ in
Restart = "on-failure";
TimeoutStartSec = "infinity";
} // (optionalAttrs (cfg.leaveOnStop) {
ExecStop = "${cfg.package}/bin/consul leave";
ExecStop = "${lib.getExe cfg.package} leave";
});
path = with pkgs; [ iproute2 gnugrep gawk consul ];
path = with pkgs; [ iproute2 gawk cfg.package ];
preStart = let
family = if cfg.forceAddrFamily == "ipv6" then
"-6"
@ -269,7 +269,7 @@ in
serviceConfig = {
ExecStart = ''
${cfg.alerts.package}/bin/consul-alerts start \
${lib.getExe cfg.alerts.package} start \
--alert-addr=${cfg.alerts.listenAddr} \
--consul-addr=${cfg.alerts.consulAddr} \
${optionalString cfg.alerts.watchChecks "--watch-checks"} \

View file

@ -185,6 +185,10 @@ in
assertion = cfg.loginAll -> cfg.target == null;
message = "iSCSI target name is set while login on all portals is enabled.";
}
{
assertion = !config.boot.initrd.systemd.enable;
message = "systemd stage 1 does not support iscsi yet.";
}
];
};
}

View file

@ -0,0 +1,51 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.ivpn;
in
with lib;
{
options.services.ivpn = {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
This option enables iVPN daemon.
This sets {option}`networking.firewall.checkReversePath` to "loose", which might be undesirable for security.
'';
};
};
config = mkIf cfg.enable {
boot.kernelModules = [ "tun" ];
environment.systemPackages = with pkgs; [ ivpn ivpn-service ];
# iVPN writes to /etc/iproute2/rt_tables
networking.iproute2.enable = true;
networking.firewall.checkReversePath = "loose";
systemd.services.ivpn-service = {
description = "iVPN daemon";
wantedBy = [ "multi-user.target" ];
wants = [ "network.target" ];
after = [
"network-online.target"
"NetworkManager.service"
"systemd-resolved.service"
];
path = [
# Needed for mount
"/run/wrappers"
];
startLimitBurst = 5;
startLimitIntervalSec = 20;
serviceConfig = {
ExecStart = "${pkgs.ivpn-service}/bin/ivpn-service --logging";
Restart = "always";
RestartSec = 1;
};
};
};
meta.maintainers = with maintainers; [ ataraxiasjel ];
}
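Enabling the new module is a one-liner; note the firewall side effect called out in the option description. A minimal sketch:

```nix
{
  # Pulls in the ivpn and ivpn-service packages, loads the "tun" kernel module
  # and relaxes networking.firewall.checkReversePath to "loose".
  services.ivpn.enable = true;
}
```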

View file

@ -17,7 +17,7 @@ let
ttl ${toString proxy.ttl}
${render proxy.rules (ruleNetworkName: rule: ''
rule ${prefer rule.network ruleNetworkName} {
${rule.method}${if rule.method == "iface" then " ${rule.interface}" else ""}
${rule.method}${optionalString (rule.method == "iface") " ${rule.interface}"}
}'')}
}'')}
'');

View file

@ -41,9 +41,10 @@ in {
documentation = [ "https://netbird.io/docs/" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [
openresolv
];
serviceConfig = {
AmbientCapabilities = [ "CAP_NET_ADMIN" ];
DynamicUser = true;
Environment = [
"NB_CONFIG=/var/lib/netbird/config.json"
"NB_LOG_FILE=console"

View file

@ -86,7 +86,7 @@ in
redis.createInstance = mkOption {
type = types.nullOr types.str;
default = if versionAtLeast config.system.stateVersion "22.05" then "ntopng" else "";
default = optionalString (versionAtLeast config.system.stateVersion "22.05") "ntopng";
description = lib.mdDoc ''
Local Redis instance name. Set to `null` to disable
local Redis instance. Defaults to `""` for

View file

@ -9,7 +9,7 @@ let
in
{
options.services.peroxide = {
enable = mkEnableOption (lib.mdDoc "enable");
enable = mkEnableOption (lib.mdDoc "peroxide");
package = mkPackageOptionMD pkgs "peroxide" {
default = [ "peroxide" ];

View file

@ -339,14 +339,9 @@ in
};
preStart = ''
mkdir -m 0755 -p ${smokepingHome}/cache ${smokepingHome}/data
rm -f ${smokepingHome}/cropper
ln -s ${cfg.package}/htdocs/cropper ${smokepingHome}/cropper
rm -f ${smokepingHome}/css
ln -s ${cfg.package}/htdocs/css ${smokepingHome}/css
rm -f ${smokepingHome}/js
ln -s ${cfg.package}/htdocs/js ${smokepingHome}/js
rm -f ${smokepingHome}/smokeping.fcgi
ln -s ${cgiHome} ${smokepingHome}/smokeping.fcgi
ln -sf ${cfg.package}/htdocs/css ${smokepingHome}/css
ln -sf ${cfg.package}/htdocs/js ${smokepingHome}/js
ln -sf ${cgiHome} ${smokepingHome}/smokeping.fcgi
${cfg.package}/bin/smokeping --check --config=${configPath}
${cfg.package}/bin/smokeping --static --config=${configPath}
'';

View file

@ -169,11 +169,11 @@ in
else (concatStrings (map (i: "--interface=\"${i}\"")
interfaces))} \
-h "${hostKey}" \
${if !syslog then "--no-syslog" else ""} \
${optionalString (!syslog) "--no-syslog" } \
${if passwordAuthentication then "--password" else "--no-password" } \
${if publicKeyAuthentication then "--publickey" else "--no-publickey" } \
${if rootLogin then "--root-login" else "--no-root-login" } \
${if loginShell != null then "--login-shell=\"${loginShell}\"" else "" } \
${optionalString (loginShell != null) "--login-shell=\"${loginShell}\"" } \
${if srpKeyExchange then "--srp-keyexchange" else "--no-srp-keyexchange" } \
${if !tcpForwarding then "--no-tcpip-forward" else "--tcpip-forward"} \
${if x11Forwarding then "--x11-forward" else "--no-x11-forward" } \

View file

@ -474,10 +474,10 @@ in
mkdir -m 0755 -p "$(dirname '${k.path}')"
ssh-keygen \
-t "${k.type}" \
${if k ? bits then "-b ${toString k.bits}" else ""} \
${if k ? rounds then "-a ${toString k.rounds}" else ""} \
${if k ? comment then "-C '${k.comment}'" else ""} \
${if k ? openSSHFormat && k.openSSHFormat then "-o" else ""} \
${optionalString (k ? bits) "-b ${toString k.bits}"} \
${optionalString (k ? rounds) "-a ${toString k.rounds}"} \
${optionalString (k ? comment) "-C '${k.comment}'"} \
${optionalString (k ? openSSHFormat && k.openSSHFormat) "-o"} \
-f "${k.path}" \
-N ""
fi
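For context, the `k ? bits` and `k ? rounds` guards above correspond to optional attributes of `services.openssh.hostKeys` entries. A configuration exercising them might look like this sketch; the paths, rounds and key sizes are just examples:

```nix
{
  services.openssh.hostKeys = [
    {
      type = "ed25519";
      path = "/etc/ssh/ssh_host_ed25519_key";
      rounds = 100;                  # optional, rendered as -a 100
      comment = "example host key";  # optional, rendered as -C 'example host key'
    }
    {
      type = "rsa";
      bits = 4096;                   # optional, rendered as -b 4096
      path = "/etc/ssh/ssh_host_rsa_key";
    }
  ];
}
```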
@ -536,7 +536,7 @@ in
# https://github.com/NixOS/nixpkgs/pull/10155
# https://github.com/NixOS/nixpkgs/pull/41745
services.openssh.authorizedKeysFiles =
[ "%h/.ssh/authorized_keys" "%h/.ssh/authorized_keys2" "/etc/ssh/authorized_keys.d/%u" ];
[ "%h/.ssh/authorized_keys" "/etc/ssh/authorized_keys.d/%u" ];
services.openssh.extraConfig = mkOrder 0
''
@ -550,7 +550,7 @@ in
'') cfg.ports}
${concatMapStrings ({ port, addr, ... }: ''
ListenAddress ${addr}${if port != null then ":" + toString port else ""}
ListenAddress ${addr}${optionalString (port != null) (":" + toString port)}
'') cfg.listenAddresses}
${optionalString cfgc.setXAuthLocation ''

View file

@ -4,7 +4,7 @@ let
inherit (builtins) toFile;
inherit (lib) concatMapStringsSep concatStringsSep mapAttrsToList
mkIf mkEnableOption mkOption types literalExpression;
mkIf mkEnableOption mkOption types literalExpression optionalString;
cfg = config.services.strongswan;
@ -34,8 +34,8 @@ let
strongswanConf = {setup, connections, ca, secretsFile, managePlugins, enabledPlugins}: toFile "strongswan.conf" ''
charon {
${if managePlugins then "load_modular = no" else ""}
${if managePlugins then ("load = " + (concatStringsSep " " enabledPlugins)) else ""}
${optionalString managePlugins "load_modular = no"}
${optionalString managePlugins ("load = " + (concatStringsSep " " enabledPlugins))}
plugins {
stroke {
secrets_file = ${secretsFile}

View file

@ -154,8 +154,8 @@ in
environment.systemPackages = [ pkgs.stunnel ];
environment.etc."stunnel.cfg".text = ''
${ if cfg.user != null then "setuid = ${cfg.user}" else "" }
${ if cfg.group != null then "setgid = ${cfg.group}" else "" }
${ optionalString (cfg.user != null) "setuid = ${cfg.user}" }
${ optionalString (cfg.group != null) "setgid = ${cfg.group}" }
debug = ${cfg.logLevel}

View file

@ -0,0 +1,161 @@
{ lib, config, pkgs, ... }:
with lib;
let
cfg = config.services.wgautomesh;
settingsFormat = pkgs.formats.toml { };
configFile =
# Nulls have to be removed manually, as the TOML generator does not simply
# skip keys whose value is null
settingsFormat.generate "wgautomesh-config.toml"
(filterAttrs (k: v: v != null)
(mapAttrs
(k: v:
if k == "peers"
then map (e: filterAttrs (k: v: v != null) e) v
else v)
cfg.settings));
runtimeConfigFile =
if cfg.enableGossipEncryption
then "/run/wgautomesh/wgautomesh.toml"
else configFile;
in
{
options.services.wgautomesh = {
enable = mkEnableOption (mdDoc "the wgautomesh daemon");
logLevel = mkOption {
type = types.enum [ "trace" "debug" "info" "warn" "error" ];
default = "info";
description = mdDoc "wgautomesh log level.";
};
enableGossipEncryption = mkOption {
type = types.bool;
default = true;
description = mdDoc "Enable encryption of gossip traffic.";
};
gossipSecretFile = mkOption {
type = types.path;
description = mdDoc ''
File containing the shared secret key to use for gossip encryption.
Required if `enableGossipEncryption` is set.
'';
};
enablePersistence = mkOption {
type = types.bool;
default = true;
description = mdDoc "Enable persistence of Wireguard peer info between restarts.";
};
openFirewall = mkOption {
type = types.bool;
default = true;
description = mdDoc "Automatically open gossip port in firewall (recommended).";
};
settings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
interface = mkOption {
type = types.str;
description = mdDoc ''
Wireguard interface to manage (it is NOT created by wgautomesh; create it
with another NixOS option such as
`networking.wireguard.interfaces.wg0 = {...};`).
'';
example = "wg0";
};
gossip_port = mkOption {
type = types.port;
description = mdDoc ''
wgautomesh gossip port; this MUST be the same number on all nodes in
the wgautomesh network.
'';
default = 1666;
};
lan_discovery = mkOption {
type = types.bool;
default = true;
description = mdDoc "Enable discovery of peers on the same LAN using UDP broadcast.";
};
upnp_forward_external_port = mkOption {
type = types.nullOr types.port;
default = null;
description = mdDoc ''
Public port number to try to redirect to this machine's Wireguard
daemon using UPnP IGD.
'';
};
peers = mkOption {
type = types.listOf (types.submodule {
options = {
pubkey = mkOption {
type = types.str;
description = mdDoc "Wireguard public key of this peer.";
};
address = mkOption {
type = types.str;
description = mdDoc ''
Wireguard address of this peer (a single IP address; multiple
addresses or address ranges are not supported).
'';
example = "10.0.0.42";
};
endpoint = mkOption {
type = types.nullOr types.str;
description = mdDoc ''
Bootstrap endpoint for connecting to this Wireguard peer if no
other address is known or none are working.
'';
default = null;
example = "wgnode.mydomain.example:51820";
};
};
});
default = [ ];
description = mdDoc "wgautomesh peer list.";
};
};
};
default = { };
description = mdDoc "Configuration for wgautomesh.";
};
};
config = mkIf cfg.enable {
services.wgautomesh.settings = {
gossip_secret_file = mkIf cfg.enableGossipEncryption "$CREDENTIALS_DIRECTORY/gossip_secret";
persist_file = mkIf cfg.enablePersistence "/var/lib/wgautomesh/state";
};
systemd.services.wgautomesh = {
path = [ pkgs.wireguard-tools ];
environment = { RUST_LOG = "wgautomesh=${cfg.logLevel}"; };
description = "wgautomesh";
serviceConfig = {
Type = "simple";
ExecStart = "${getExe pkgs.wgautomesh} ${runtimeConfigFile}";
Restart = "always";
RestartSec = "30";
LoadCredential = mkIf cfg.enableGossipEncryption [ "gossip_secret:${cfg.gossipSecretFile}" ];
ExecStartPre = mkIf cfg.enableGossipEncryption [
''${pkgs.envsubst}/bin/envsubst \
-i ${configFile} \
-o ${runtimeConfigFile}''
];
DynamicUser = true;
StateDirectory = "wgautomesh";
StateDirectoryMode = "0700";
RuntimeDirectory = "wgautomesh";
AmbientCapabilities = "CAP_NET_ADMIN";
CapabilityBoundingSet = "CAP_NET_ADMIN";
};
wantedBy = [ "multi-user.target" ];
};
networking.firewall.allowedUDPPorts =
mkIf cfg.openFirewall [ cfg.settings.gossip_port ];
};
}
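Put together, a host using this module might carry a configuration like the sketch below. The keys, addresses and secret path are placeholders, and the WireGuard interface itself still has to be created elsewhere, e.g. via `networking.wireguard.interfaces`:

```nix
{
  services.wgautomesh = {
    enable = true;
    gossipSecretFile = "/run/keys/wgautomesh-gossip";  # placeholder path
    settings = {
      interface = "wg0";
      # gossip_port defaults to 1666 and must match on every node.
      peers = [
        {
          pubkey = "AAAA...";  # placeholder WireGuard public key
          address = "10.0.0.42";
          endpoint = "wgnode.mydomain.example:51820";
        }
        {
          pubkey = "BBBB...";  # placeholder WireGuard public key
          address = "10.0.0.43";
        }
      ];
    };
  };
}
```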

View file

@ -294,7 +294,7 @@ let
DynamicUser = true;
SupplementaryGroups = optional (serverCfg.useACMEHost != null) certConfig.group;
PrivateTmp = true;
AmbientCapabilities = optional (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
AmbientCapabilities = optionals (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
NoNewPrivileges = true;
RestrictNamespaces = "uts ipc pid user cgroup";
ProtectSystem = "strict";
@ -340,7 +340,7 @@ let
EnvironmentFile = optional (clientCfg.environmentFile != null) clientCfg.environmentFile;
DynamicUser = true;
PrivateTmp = true;
AmbientCapabilities = (optional (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]) ++ (optional ((clientCfg.dynamicToRemote.port or 1024) < 1024 || (any (x: x.local.port < 1024) clientCfg.localToRemote)) [ "CAP_NET_BIND_SERVICE" ]);
AmbientCapabilities = (optionals (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]) ++ (optionals ((clientCfg.dynamicToRemote.port or 1024) < 1024 || (any (x: x.local.port < 1024) clientCfg.localToRemote)) [ "CAP_NET_BIND_SERVICE" ]);
NoNewPrivileges = true;
RestrictNamespaces = "uts ipc pid user cgroup";
ProtectSystem = "strict";

View file

@ -27,7 +27,7 @@ let
${optionalString srv.unlisted "type = UNLISTED"}
${optionalString (srv.flags != "") "flags = ${srv.flags}"}
socket_type = ${if srv.protocol == "udp" then "dgram" else "stream"}
${if srv.port != 0 then "port = ${toString srv.port}" else ""}
${optionalString (srv.port != 0) "port = ${toString srv.port}"}
wait = ${if srv.protocol == "udp" then "yes" else "no"}
user = ${srv.user}
server = ${srv.server}

View file

@ -317,6 +317,7 @@ in
environment.etc.cups.source = "/var/lib/cups";
services.dbus.packages = [ cups.out ] ++ optional polkitEnabled cups-pk-helper;
services.udev.packages = cfg.drivers;
# Allow passwordless printer admin for members of wheel group
security.polkit.extraConfig = mkIf polkitEnabled ''

View file

@ -100,6 +100,7 @@ in {
after = [ "network.target" ];
serviceConfig = {
LimitNOFILE=65536;
ExecStart = "${pkgs.qdrant}/bin/qdrant --config-path ${configFile}";
DynamicUser = true;
Restart = "on-failure";

View file

@ -336,7 +336,7 @@ in
ProtectProc = "noaccess";
ProtectSystem = "strict";
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;

View file

@ -78,6 +78,13 @@ in
'';
};
bantime = mkOption {
default = null;
type = types.nullOr types.str;
example = "10m";
description = lib.mdDoc "Number of seconds that a host is banned.";
};
maxretry = mkOption {
default = 3;
type = types.ints.unsigned;
@ -202,6 +209,20 @@ in
'';
};
extraSettings = mkOption {
type = with types; attrsOf (oneOf [ bool ints.positive str ]);
default = {};
description = lib.mdDoc ''
Extra default configuration for all jails (i.e. `[DEFAULT]`). See
<https://github.com/fail2ban/fail2ban/blob/master/config/jail.conf> for an overview.
'';
example = literalExpression ''
{
findtime = "15m";
}
'';
};
jails = mkOption {
default = { };
example = literalExpression ''
@ -320,11 +341,18 @@ in
''}
# Miscellaneous options
ignoreip = 127.0.0.1/8 ${optionalString config.networking.enableIPv6 "::1"} ${concatStringsSep " " cfg.ignoreIP}
${optionalString (cfg.bantime != null) ''
bantime = ${cfg.bantime}
''}
maxretry = ${toString cfg.maxretry}
backend = systemd
# Actions
banaction = ${cfg.banaction}
banaction_allports = ${cfg.banaction-allports}
${optionalString (cfg.extraSettings != {}) ''
# Extra settings
${generators.toKeyValue {} cfg.extraSettings}
''}
'';
# Block SSH if there are too many failing connection attempts.
# Benefits from verbose sshd logging to observe failed login attempts,
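The new `bantime` and `extraSettings` options both end up in the `[DEFAULT]` section rendered above. A configuration using them might look like this sketch; the values are illustrative:

```nix
{
  services.fail2ban = {
    enable = true;
    bantime = "10m";                 # rendered as "bantime = 10m" under [DEFAULT]
    extraSettings.findtime = "15m";  # any key fail2ban accepts in [DEFAULT]
  };
}
```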

View file

@ -7,6 +7,18 @@ let
serverConfigFile = settingsFormat.generate "server.toml" (filterConfig cfg.serverSettings);
clientConfigFile = settingsFormat.generate "kanidm-config.toml" (filterConfig cfg.clientSettings);
unixConfigFile = settingsFormat.generate "kanidm-unixd.toml" (filterConfig cfg.unixSettings);
certPaths = builtins.map builtins.dirOf [ cfg.serverSettings.tls_chain cfg.serverSettings.tls_key ];
# Merge bind mount paths and remove paths where a prefix is already mounted.
# This makes sure that if e.g. the tls_chain is in the nix store and /nix/store is already in the mount
# paths, no new bind mount is added. Adding subpaths caused problems on ofborg.
hasPrefixInList = list: newPath: lib.any (path: lib.hasPrefix (builtins.toString path) (builtins.toString newPath)) list;
mergePaths = lib.foldl' (merged: newPath: let
# If the new path is a prefix to some existing path, we need to filter it out
filteredPaths = lib.filter (p: !lib.hasPrefix (builtins.toString newPath) (builtins.toString p)) merged;
# If a prefix of the new path is already in the list, do not add it
filteredNew = if hasPrefixInList filteredPaths newPath then [] else [ newPath ];
in filteredPaths ++ filteredNew) [];
defaultServiceConfig = {
BindReadOnlyPaths = [
@ -16,7 +28,7 @@ let
"-/etc/hosts"
"-/etc/localtime"
];
CapabilityBoundingSet = "";
CapabilityBoundingSet = [];
# ProtectClock= adds DeviceAllow=char-rtc r
DeviceAllow = "";
# Implies ProtectSystem=strict, which re-mounts all paths
@ -216,22 +228,28 @@ in
description = "kanidm identity management daemon";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = defaultServiceConfig // {
StateDirectory = "kanidm";
StateDirectoryMode = "0700";
ExecStart = "${pkgs.kanidm}/bin/kanidmd server -c ${serverConfigFile}";
User = "kanidm";
Group = "kanidm";
serviceConfig = lib.mkMerge [
# Merge paths and ignore existing prefixes needs to sidestep mkMerge
(defaultServiceConfig // {
BindReadOnlyPaths = mergePaths (defaultServiceConfig.BindReadOnlyPaths ++ certPaths);
})
{
StateDirectory = "kanidm";
StateDirectoryMode = "0700";
ExecStart = "${pkgs.kanidm}/bin/kanidmd server -c ${serverConfigFile}";
User = "kanidm";
Group = "kanidm";
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
# This would otherwise override the CAP_NET_BIND_SERVICE capability.
PrivateUsers = false;
# Port needs to be exposed to the host network
PrivateNetwork = false;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
TemporaryFileSystem = "/:ro";
};
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
# This would otherwise override the CAP_NET_BIND_SERVICE capability.
PrivateUsers = lib.mkForce false;
# Port needs to be exposed to the host network
PrivateNetwork = lib.mkForce false;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
TemporaryFileSystem = "/:ro";
}
];
environment.RUST_LOG = "info";
};
@ -240,34 +258,32 @@ in
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
restartTriggers = [ unixConfigFile clientConfigFile ];
serviceConfig = defaultServiceConfig // {
CacheDirectory = "kanidm-unixd";
CacheDirectoryMode = "0700";
RuntimeDirectory = "kanidm-unixd";
ExecStart = "${pkgs.kanidm}/bin/kanidm_unixd";
User = "kanidm-unixd";
Group = "kanidm-unixd";
serviceConfig = lib.mkMerge [
defaultServiceConfig
{
CacheDirectory = "kanidm-unixd";
CacheDirectoryMode = "0700";
RuntimeDirectory = "kanidm-unixd";
ExecStart = "${pkgs.kanidm}/bin/kanidm_unixd";
User = "kanidm-unixd";
Group = "kanidm-unixd";
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/hosts"
"-/etc/localtime"
"-/etc/kanidm"
"-/etc/static/kanidm"
"-/etc/ssl"
"-/etc/static/ssl"
];
BindPaths = [
# To create the socket
"/run/kanidm-unixd:/var/run/kanidm-unixd"
];
# Needs to connect to kanidmd
PrivateNetwork = false;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
TemporaryFileSystem = "/:ro";
};
BindReadOnlyPaths = [
"-/etc/kanidm"
"-/etc/static/kanidm"
"-/etc/ssl"
"-/etc/static/ssl"
];
BindPaths = [
# To create the socket
"/run/kanidm-unixd:/var/run/kanidm-unixd"
];
# Needs to connect to kanidmd
PrivateNetwork = lib.mkForce false;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
TemporaryFileSystem = "/:ro";
}
];
environment.RUST_LOG = "info";
};
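To make the bind-mount merging concrete, here is a small evaluation sketch of `mergePaths` with hypothetical inputs; the helper definitions from the top of this hunk are copied so the snippet stands alone:

```nix
let
  lib = (import <nixpkgs> { }).lib;
  hasPrefixInList = list: newPath:
    lib.any (path: lib.hasPrefix (builtins.toString path) (builtins.toString newPath)) list;
  mergePaths = lib.foldl'
    (merged: newPath:
      let
        filteredPaths = lib.filter (p: !lib.hasPrefix (builtins.toString newPath) (builtins.toString p)) merged;
        filteredNew = if hasPrefixInList filteredPaths newPath then [ ] else [ newPath ];
      in
      filteredPaths ++ filteredNew) [ ];
in
{
  # A certificate already under /nix/store adds no extra bind mount:
  a = mergePaths [ "/nix/store" "/nix/store/abc-chain.pem" ];  # [ "/nix/store" ]
  # A later, shorter prefix replaces the more specific path:
  b = mergePaths [ "/var/lib/kanidm" "/nix/store" "/nix" ];    # [ "/var/lib/kanidm" "/nix" ]
}
```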

View file

@ -72,15 +72,14 @@ let
} // (getProviderOptions cfg cfg.provider) // cfg.extraConfig;
mapConfig = key: attr:
if attr != null && attr != [] then (
optionalString (attr != null && attr != []) (
if isDerivation attr then mapConfig key (toString attr) else
if (builtins.typeOf attr) == "set" then concatStringsSep " "
(mapAttrsToList (name: value: mapConfig (key + "-" + name) value) attr) else
if (builtins.typeOf attr) == "list" then concatMapStringsSep " " (mapConfig key) attr else
if (builtins.typeOf attr) == "bool" then "--${key}=${boolToString attr}" else
if (builtins.typeOf attr) == "string" then "--${key}='${attr}'" else
"--${key}=${toString attr}")
else "";
"--${key}=${toString attr}");
configString = concatStringsSep " " (mapAttrsToList mapConfig allConfig);
in

View file

@ -72,7 +72,7 @@ in {
EnvironmentFile = cfg.credentialsFile;
ExecStart = ''
${cfg.package}/bin/cachix ${lib.optionalString cfg.verbose "--verbose"} ${lib.optionalString (cfg.host != null) "--host ${cfg.host}"} \
deploy agent ${cfg.name} ${if cfg.profile != null then cfg.profile else ""}
deploy agent ${cfg.name} ${optionalString (cfg.profile != null) cfg.profile}
'';
};
};

View file

@ -62,7 +62,13 @@ in
after = [ "network-online.target" ];
path = [ config.nix.package ];
wantedBy = [ "multi-user.target" ];
unitConfig = {
# allow the service to restart indefinitely
StartLimitIntervalSec = 0;
};
serviceConfig = {
# don't put too much stress on the machine when restarting
RestartSec = 1;
# we don't want to kill child processes, as those are deployments
KillMode = "process";
Restart = "on-failure";

View file

@ -14,13 +14,17 @@ let
serviceDirectories = cfg.packages;
};
inherit (lib) mkOption mkIf mkMerge types;
inherit (lib) mkOption mkEnableOption mkIf mkMerge types;
in
{
options = {
boot.initrd.systemd.dbus = {
enable = mkEnableOption (lib.mdDoc "dbus in stage 1") // { visible = false; };
};
services.dbus = {
enable = mkOption {
@ -111,6 +115,21 @@ in
];
}
(mkIf config.boot.initrd.systemd.dbus.enable {
boot.initrd.systemd = {
users.messagebus = { };
groups.messagebus = { };
contents."/etc/dbus-1".source = pkgs.makeDBusConf {
inherit (cfg) apparmor;
suidHelper = "/bin/false";
serviceDirectories = [ pkgs.dbus ];
};
packages = [ pkgs.dbus ];
storePaths = [ "${pkgs.dbus}/bin/dbus-daemon" ];
targets.sockets.wants = [ "dbus.socket" ];
};
})
(mkIf (cfg.implementation == "dbus") {
environment.systemPackages = [
pkgs.dbus

View file

@ -3,19 +3,19 @@
with lib;
let
cfg = config.services.rtsp-simple-server;
package = pkgs.rtsp-simple-server;
cfg = config.services.mediamtx;
package = pkgs.mediamtx;
format = pkgs.formats.yaml {};
in
{
options = {
services.rtsp-simple-server = {
enable = mkEnableOption (lib.mdDoc "RTSP Simple Server");
services.mediamtx = {
enable = mkEnableOption (lib.mdDoc "MediaMTX");
settings = mkOption {
description = lib.mdDoc ''
Settings for rtsp-simple-server.
Read more at <https://github.com/aler9/rtsp-simple-server/blob/main/rtsp-simple-server.yml>
Settings for MediaMTX.
Read more at <https://github.com/aler9/mediamtx/blob/main/mediamtx.yml>
'';
type = format.type;
@ -25,7 +25,7 @@ in
"stdout"
];
# we set this so when the user uses it, it just works (see LogsDirectory below). but it's not used by default.
logFile = "/var/log/rtsp-simple-server/rtsp-simple-server.log";
logFile = "/var/log/mediamtx/mediamtx.log";
};
example = {
@ -40,20 +40,20 @@ in
env = mkOption {
type = with types; attrsOf anything;
description = lib.mdDoc "Extra environment variables for RTSP Simple Server";
description = lib.mdDoc "Extra environment variables for MediaMTX";
default = {};
example = {
RTSP_CONFKEY = "mykey";
MTX_CONFKEY = "mykey";
};
};
};
};
config = mkIf (cfg.enable) {
# NOTE: rtsp-simple-server watches this file and automatically reloads if it changes
environment.etc."rtsp-simple-server.yaml".source = format.generate "rtsp-simple-server.yaml" cfg.settings;
# NOTE: mediamtx watches this file and automatically reloads if it changes
environment.etc."mediamtx.yaml".source = format.generate "mediamtx.yaml" cfg.settings;
systemd.services.rtsp-simple-server = {
systemd.services.mediamtx = {
environment = cfg.env;
after = [ "network.target" ];
@ -65,15 +65,15 @@ in
serviceConfig = {
DynamicUser = true;
User = "rtsp-simple-server";
Group = "rtsp-simple-server";
User = "mediamtx";
Group = "mediamtx";
LogsDirectory = "rtsp-simple-server";
LogsDirectory = "mediamtx";
# the user will likely want to stream cameras; adding the video group can't hurt
SupplementaryGroups = "video";
ExecStart = "${package}/bin/rtsp-simple-server /etc/rtsp-simple-server.yaml";
ExecStart = "${package}/bin/mediamtx /etc/mediamtx.yaml";
};
};
};
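After the rename, existing `services.rtsp-simple-server` configurations move to `services.mediamtx`, and `RTSP_*` environment variables become `MTX_*`. A minimal sketch; the `logLevel` key is assumed to follow the upstream `mediamtx.yml` schema:

```nix
{
  services.mediamtx = {
    enable = true;
    # Merged into /etc/mediamtx.yaml, which the daemon watches and reloads.
    settings.logLevel = "info";
    env.MTX_CONFKEY = "mykey";  # example value from the option documentation
  };
}
```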

View file

@ -1025,8 +1025,8 @@ in
services.postfix = lib.mkIf cfg.mail.incoming.enable {
enable = true;
sslCert = if cfg.sslCertificate != null then cfg.sslCertificate else "";
sslKey = if cfg.sslCertificateKey != null then cfg.sslCertificateKey else "";
sslCert = lib.optionalString (cfg.sslCertificate != null) cfg.sslCertificate;
sslKey = lib.optionalString (cfg.sslCertificateKey != null) cfg.sslCertificateKey;
origin = cfg.hostname;
relayDomains = [ cfg.hostname ];

Some files were not shown because too many files have changed in this diff Show more