Merge pull request #212684 from pennae/nixos-render-docs

nixos-render-docs: init, use for some manual rendering to docbook
pennae 2023-01-30 19:26:07 +01:00 committed by GitHub
commit 5b6dcece88
105 changed files with 1834 additions and 7611 deletions

View file

@ -27,7 +27,7 @@ If the build succeeds, the manual will be in `./result/share/doc/nixpkgs/manual.
As per [RFC 0072](https://github.com/NixOS/rfcs/pull/72), all new documentation content should be written in [CommonMark](https://commonmark.org/) Markdown dialect.
Additional syntax extensions are available, though not all extensions can be used in NixOS option documentation. The following extensions are currently used:
Additional syntax extensions are available, all of which can be used in NixOS option documentation. The following extensions are currently used:
- []{#ssec-contributing-markup-anchors}
Explicitly defined **anchors** on headings, to allow linking to sections. These should always be used, to ensure the anchors can be linked even when the heading text changes, and to prevent conflicts between [automatically assigned identifiers](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/auto_identifiers.md).
@ -38,6 +38,10 @@ Additional syntax extensions are available, though not all extensions can be use
## Syntax {#sec-contributing-markup}
```
::: {.note}
NixOS option documentation does not support headings in general.
:::
- []{#ssec-contributing-markup-anchors-inline}
**Inline anchors**, which allow linking to arbitrary places in the text (e.g. individual list items, sentences…).
@ -67,10 +71,6 @@ Additional syntax extensions are available, though not all extensions can be use
This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point), though the feature originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage) with slightly different syntax.
::: {.note}
Inline roles are available for option documentation.
:::
- []{#ssec-contributing-markup-admonitions}
**Admonitions**, set off from the text to bring attention to something.
@ -96,10 +96,6 @@ Additional syntax extensions are available, though not all extensions can be use
- [`tip`](https://tdg.docbook.org/tdg/5.0/tip.html)
- [`warning`](https://tdg.docbook.org/tdg/5.0/warning.html)
::: {.note}
Admonitions are available for option documentation.
:::
- []{#ssec-contributing-markup-definition-lists}
[**Definition lists**](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/definition_lists.md), for defining a group of terms:

View file

@ -68,12 +68,15 @@ let
sources = lib.sourceFilesBySuffices ./. [".xml"];
modulesDoc = builtins.toFile "modules.xml" ''
<section xmlns:xi="http://www.w3.org/2001/XInclude" id="modules">
${(lib.concatMapStrings (path: ''
<xi:include href="${path}" />
'') (lib.catAttrs "value" config.meta.doc))}
</section>
modulesDoc = runCommand "modules.xml" {
nativeBuildInputs = [ pkgs.nixos-render-docs ];
} ''
nixos-render-docs manual docbook \
--manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
"$out" \
--section \
--section-id modules \
--chapters ${lib.concatMapStrings (p: "${p.value} ") config.meta.doc}
'';
generatedSources = runCommand "generated-docbook" {} ''

View file

@ -23,7 +23,7 @@ file.
meta = {
maintainers = with lib.maintainers; [ ericsagnes ];
doc = ./default.xml;
doc = ./default.md;
buildDocsInSandbox = true;
};
}
@ -31,7 +31,9 @@ file.
- `maintainers` contains a list of the module maintainers.
- `doc` points to a valid DocBook file containing the module
- `doc` points to a valid [Nixpkgs-flavored CommonMark](
https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup
) file containing the module
documentation. Its contents are automatically added to
[](#ch-configuration). Changes to a module's documentation have to
be checked so that they do not break building the NixOS manual:
@ -40,26 +42,6 @@ file.
$ nix-build nixos/release.nix -A manual.x86_64-linux
```
This file should *not* usually be written by hand. Instead it is preferred
to write documentation in CommonMark and convert it to DocBook
using pandoc. The simplest documentation can be converted using just
```ShellSession
$ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart > doc.xml
```
More elaborate documentation may wish to add one or more of the pandoc
filters used to build the remainder of the manual, for example the GNOME
desktop uses
```ShellSession
$ pandoc gnome.md -t docbook --top-level-division=chapter \
--extract-media=media -f markdown+smart \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \
> gnome.xml
```
- `buildDocsInSandbox` indicates whether the option documentation for the
module can be built in a derivation sandbox. This option is currently only
honored for modules shipped by nixpkgs. User modules and modules taken from

View file

@ -28,7 +28,7 @@
meta = {
maintainers = with lib.maintainers; [ ericsagnes ];
doc = ./default.xml;
doc = ./default.md;
buildDocsInSandbox = true;
};
}
@ -42,35 +42,16 @@
</listitem>
<listitem>
<para>
<literal>doc</literal> points to a valid DocBook file containing
the module documentation. Its contents is automatically added to
<literal>doc</literal> points to a valid
<link xlink:href="https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup">Nixpkgs-flavored
CommonMark</link> file containing the module documentation. Its
contents are automatically added to
<xref linkend="ch-configuration" />. Changes to a module's
documentation have to be checked so that they do not break building
the NixOS manual:
</para>
<programlisting>
$ nix-build nixos/release.nix -A manual.x86_64-linux
</programlisting>
<para>
This file should <emphasis>not</emphasis> usually be written by
hand. Instead it is preferred to write documentation in
CommonMark and convert it to DocBook using pandoc. The
simplest documentation can be converted using just
</para>
<programlisting>
$ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart &gt; doc.xml
</programlisting>
<para>
More elaborate documentation may wish to add one or more of the
pandoc filters used to build the remainder of the manual, for
example the GNOME desktop uses
</para>
<programlisting>
$ pandoc gnome.md -t docbook --top-level-division=chapter \
--extract-media=media -f markdown+smart \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \
&gt; gnome.xml
</programlisting>
</listitem>
<listitem>

View file

@ -50,21 +50,3 @@ for mf in ${MD_FILES[*]}; do
done
popd
# now handle module chapters. we'll need extra checks to ensure that we don't process
# markdown files we're not interested in, so we'll require an x.nix file for every x.md
# that we'll convert to xml.
pushd "$DIR/../../modules"
mapfile -t MD_FILES < <(find . -type f -regex '.*\.md$')
for mf in ${MD_FILES[*]}; do
[ -f "${mf%.md}.nix" ] || continue
pandoc --top-level-division=chapter "$mf" "${pandoc_flags[@]}" -o "${mf%.md}.xml"
sed -i -e '1 i <!-- Do not edit this file directly, edit its companion .md instead\
and regenerate this file using nixos/doc/manual/md-to-db.sh -->' \
"${mf%.md}.xml"
done
popd

View file

@ -148,42 +148,19 @@ in rec {
'';
optionsDocBook = pkgs.runCommand "options-docbook.xml" {
MANPAGE_URLS = pkgs.path + "/doc/manpage-urls.json";
OTD_DOCUMENT_TYPE = documentType;
OTD_VARIABLE_LIST_ID = variablelistId;
OTD_OPTION_ID_PREFIX = optionIdPrefix;
OTD_REVISION = revision;
nativeBuildInputs = [
(let
# python3Minimal can't be overridden with packages on Darwin, due to a missing framework.
# Instead of modifying stdenv, we take the easy way out, since most people on Darwin will
# just be hacking on the Nixpkgs manual (which also uses make-options-doc).
python = if pkgs.stdenv.isDarwin then pkgs.python3 else pkgs.python3Minimal;
self = (python.override {
inherit self;
includeSiteCustomize = true;
});
in self.withPackages (p:
let
# TODO add our own small test suite when rendering is split out into a new tool
markdown-it-py = p.markdown-it-py.override {
disableTests = true;
};
mdit-py-plugins = p.mdit-py-plugins.override {
inherit markdown-it-py;
disableTests = true;
};
in [
markdown-it-py
mdit-py-plugins
]))
pkgs.nixos-render-docs
];
} ''
python ${./optionsToDocbook.py} \
nixos-render-docs options docbook \
--manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
--revision ${lib.escapeShellArg revision} \
--document-type ${lib.escapeShellArg documentType} \
--varlist-id ${lib.escapeShellArg variablelistId} \
--id-prefix ${lib.escapeShellArg optionIdPrefix} \
${lib.optionalString markdownByDefault "--markdown-by-default"} \
${optionsJSON}/share/doc/nixos/options.json \
> options.xml
options.xml
if grep /nixpkgs/nixos/modules options.xml; then
echo "The manual appears to depend on the location of Nixpkgs, which is bad"

View file

@ -1,343 +0,0 @@
import collections
import json
import os
import sys
from typing import Any, Dict, List
from collections.abc import MutableMapping, Sequence
import inspect
# for MD conversion
import markdown_it
import markdown_it.renderer
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
from mdit_py_plugins.container import container_plugin
from mdit_py_plugins.deflist import deflist_plugin
from mdit_py_plugins.myst_role import myst_role_plugin
from xml.sax.saxutils import escape, quoteattr
manpage_urls = json.load(open(os.getenv('MANPAGE_URLS')))
class Renderer(markdown_it.renderer.RendererProtocol):
__output__ = "docbook"
def __init__(self, parser=None):
self.rules = {
k: v
for k, v in inspect.getmembers(self, predicate=inspect.ismethod)
if not (k.startswith("render") or k.startswith("_"))
} | {
"container_{.note}_open": self._note_open,
"container_{.note}_close": self._note_close,
"container_{.important}_open": self._important_open,
"container_{.important}_close": self._important_close,
"container_{.warning}_open": self._warning_open,
"container_{.warning}_close": self._warning_close,
}
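# NOTE: self.rules maps markdown-it token types (e.g. "paragraph_open", "fence",
# "myst_role") to the bound rendering methods defined below; the container tokens
# are added explicitly because their type names ("container_{.note}_open", ...)
# are not valid Python method names.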
def render(self, tokens: Sequence[Token], options: OptionsDict, env: MutableMapping) -> str:
assert '-link-tag-stack' not in env
env['-link-tag-stack'] = []
assert '-deflist-stack' not in env
env['-deflist-stack'] = []
def do_one(i, token):
if token.type == "inline":
assert token.children is not None
return self.renderInline(token.children, options, env)
elif token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i, options, env)
else:
raise NotImplementedError("md token not supported yet", token)
return "".join(map(lambda arg: do_one(*arg), enumerate(tokens)))
def renderInline(self, tokens: Sequence[Token], options: OptionsDict, env: MutableMapping) -> str:
# HACK to support docbook links and xrefs. link handling is only necessary because the docbook
# manpage stylesheet converts - in urls to a mathematical minus, which may be somewhat incorrect.
for i, token in enumerate(tokens):
if token.type != 'link_open':
continue
token.tag = 'link'
# turn [](#foo) into xrefs
if token.attrs['href'][0:1] == '#' and tokens[i + 1].type == 'link_close':
token.tag = "xref"
# turn <x> into links without contents
if tokens[i + 1].type == 'text' and tokens[i + 1].content == token.attrs['href']:
tokens[i + 1].content = ''
def do_one(i, token):
if token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i, options, env)
else:
raise NotImplementedError("md node not supported yet", token)
return "".join(map(lambda arg: do_one(*arg), enumerate(tokens)))
def text(self, token, tokens, i, options, env):
return escape(token.content)
def paragraph_open(self, token, tokens, i, options, env):
return "<para>"
def paragraph_close(self, token, tokens, i, options, env):
return "</para>"
def hardbreak(self, token, tokens, i, options, env):
return "<literallayout>\n</literallayout>"
def softbreak(self, token, tokens, i, options, env):
# should check options.breaks() and emit hard break if so
return "\n"
def code_inline(self, token, tokens, i, options, env):
return f"<literal>{escape(token.content)}</literal>"
def code_block(self, token, tokens, i, options, env):
return f"<programlisting>{escape(token.content)}</programlisting>"
def link_open(self, token, tokens, i, options, env):
env['-link-tag-stack'].append(token.tag)
(attr, start) = ('linkend', 1) if token.attrs['href'][0] == '#' else ('xlink:href', 0)
return f"<{token.tag} {attr}={quoteattr(token.attrs['href'][start:])}>"
def link_close(self, token, tokens, i, options, env):
return f"</{env['-link-tag-stack'].pop()}>"
def list_item_open(self, token, tokens, i, options, env):
return "<listitem>"
def list_item_close(self, token, tokens, i, options, env):
return "</listitem>\n"
# HACK open and close para for docbook change size. remove soon.
def bullet_list_open(self, token, tokens, i, options, env):
return "<para><itemizedlist>\n"
def bullet_list_close(self, token, tokens, i, options, env):
return "\n</itemizedlist></para>"
def em_open(self, token, tokens, i, options, env):
return "<emphasis>"
def em_close(self, token, tokens, i, options, env):
return "</emphasis>"
def strong_open(self, token, tokens, i, options, env):
return "<emphasis role=\"strong\">"
def strong_close(self, token, tokens, i, options, env):
return "</emphasis>"
def fence(self, token, tokens, i, options, env):
info = f" language={quoteattr(token.info)}" if token.info != "" else ""
return f"<programlisting{info}>{escape(token.content)}</programlisting>"
def blockquote_open(self, token, tokens, i, options, env):
return "<para><blockquote>"
def blockquote_close(self, token, tokens, i, options, env):
return "</blockquote></para>"
def _note_open(self, token, tokens, i, options, env):
return "<para><note>"
def _note_close(self, token, tokens, i, options, env):
return "</note></para>"
def _important_open(self, token, tokens, i, options, env):
return "<para><important>"
def _important_close(self, token, tokens, i, options, env):
return "</important></para>"
def _warning_open(self, token, tokens, i, options, env):
return "<para><warning>"
def _warning_close(self, token, tokens, i, options, env):
return "</warning></para>"
# markdown-it emits tokens based on the html syntax tree, but docbook is
# slightly different. html has <dl>{<dt/>{<dd/>}}</dl>,
# docbook has <variablelist>{<varlistentry><term/><listitem/></varlistentry>}</variablelist>.
# we have to reject multiple definitions for the same term for the time being.
def dl_open(self, token, tokens, i, options, env):
env['-deflist-stack'].append({})
return "<para><variablelist>"
def dl_close(self, token, tokens, i, options, env):
env['-deflist-stack'].pop()
return "</variablelist></para>"
def dt_open(self, token, tokens, i, options, env):
env['-deflist-stack'][-1]['has-dd'] = False
return "<varlistentry><term>"
def dt_close(self, token, tokens, i, options, env):
return "</term>"
def dd_open(self, token, tokens, i, options, env):
if env['-deflist-stack'][-1]['has-dd']:
raise Exception("multiple definitions per term not supported")
env['-deflist-stack'][-1]['has-dd'] = True
return "<listitem>"
def dd_close(self, token, tokens, i, options, env):
return "</listitem></varlistentry>"
def myst_role(self, token, tokens, i, options, env):
if token.meta['name'] == 'command':
return f"<command>{escape(token.content)}</command>"
if token.meta['name'] == 'file':
return f"<filename>{escape(token.content)}</filename>"
if token.meta['name'] == 'var':
return f"<varname>{escape(token.content)}</varname>"
if token.meta['name'] == 'env':
return f"<envar>{escape(token.content)}</envar>"
if token.meta['name'] == 'option':
return f"<option>{escape(token.content)}</option>"
if token.meta['name'] == 'manpage':
[page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ]
section = section[:-1]
man = f"{page}({section})"
title = f"<refentrytitle>{escape(page)}</refentrytitle>"
vol = f"<manvolnum>{escape(section)}</manvolnum>"
ref = f"<citerefentry>{title}{vol}</citerefentry>"
if man in manpage_urls:
return f"<link xlink:href={quoteattr(manpage_urls[man])}>{ref}</link>"
else:
return ref
raise NotImplementedError("md node not supported yet", token)
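# Illustration of the manpage branch above: {manpage}`nix.conf(5)` is split into
# page "nix.conf" and section "5"; the resulting citerefentry is additionally
# wrapped in a <link> when doc/manpage-urls.json contains an entry for "nix.conf(5)".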
md = (
markdown_it.MarkdownIt(renderer_cls=Renderer)
# TODO maybe fork the plugin and have only a single rule for all?
.use(container_plugin, name="{.note}")
.use(container_plugin, name="{.important}")
.use(container_plugin, name="{.warning}")
.use(deflist_plugin)
.use(myst_role_plugin)
)
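# Illustrative sketch, not part of the original script: with markdown-it-py and
# mdit-py-plugins installed, the block-level token types the Renderer has to
# handle can be inspected without rendering, e.g.:
#
#   for tok in md.parse("See {manpage}`nix.conf(5)`.\n\n::: {.note}\nA note.\n:::\n"):
#       print(tok.type)  # paragraph_open, inline, paragraph_close,
#                        # container_{.note}_open, paragraph_open, ...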
# converts in-place!
def convertMD(options: Dict[str, Any]) -> str:
def optionIs(option: Dict[str, Any], key: str, typ: str) -> bool:
if key not in option: return False
if type(option[key]) != dict: return False
if '_type' not in option[key]: return False
return option[key]['_type'] == typ
def convertCode(name: str, option: Dict[str, Any], key: str):
if optionIs(option, key, 'literalMD'):
option[key] = md.render(f"*{key.capitalize()}:*\n{option[key]['text']}")
elif optionIs(option, key, 'literalExpression'):
code = option[key]['text']
# for multi-line code blocks we only have to count ` runs at the beginning
# of a line, but this is much easier.
multiline = '\n' in code
longest, current = (0, 0)
for c in code:
current = current + 1 if c == '`' else 0
longest = max(current, longest)
# inline literals need a space to separate ticks from content, code blocks
# need newlines. inline literals need one extra tick, code blocks need three.
ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ')
code = f"{ticks}{sep}{code}{sep}{ticks}"
option[key] = md.render(f"*{key.capitalize()}:*\n{code}")
elif optionIs(option, key, 'literalDocBook'):
option[key] = f"<para><emphasis>{key.capitalize()}:</emphasis> {option[key]['text']}</para>"
elif key in option:
raise Exception(f"{name} {key} has unrecognized type", option[key])
for (name, option) in options.items():
try:
if optionIs(option, 'description', 'mdDoc'):
option['description'] = md.render(option['description']['text'])
elif markdownByDefault:
option['description'] = md.render(option['description'])
else:
option['description'] = ("<nixos:option-description><para>" +
option['description'] +
"</para></nixos:option-description>")
convertCode(name, option, 'example')
convertCode(name, option, 'default')
if 'relatedPackages' in option:
option['relatedPackages'] = md.render(option['relatedPackages'])
except Exception as e:
raise Exception(f"Failed to render option {name}") from e
return options
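# Illustration of the fence-sizing rule in convertCode above: a literalExpression
# whose text contains a run of N backticks is wrapped in N+1 backticks when inline
# (N+3, with newline separators, when multi-line), so the content can never close
# the fence early. E.g. the text ``x`` (longest run: 2) becomes
#   ``` ``x`` ```
# before being passed to md.render().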
id_translate_table = {
ord('*'): ord('_'),
ord('<'): ord('_'),
ord(' '): ord('_'),
ord('>'): ord('_'),
ord('['): ord('_'),
ord(']'): ord('_'),
ord(':'): ord('_'),
ord('"'): ord('_'),
}
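# Illustration: the table above replaces characters that are not valid in XML ids,
# e.g. (the prefix, typically "opt-", is prepended further below)
#   'boot.kernel.sysctl."net.ipv4.ip_forward"'.translate(id_translate_table)
#   == 'boot.kernel.sysctl._net.ipv4.ip_forward_'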
def need_env(n):
if n not in os.environ:
raise RuntimeError("required environment variable not set", n)
return os.environ[n]
OTD_REVISION = need_env('OTD_REVISION')
OTD_DOCUMENT_TYPE = need_env('OTD_DOCUMENT_TYPE')
OTD_VARIABLE_LIST_ID = need_env('OTD_VARIABLE_LIST_ID')
OTD_OPTION_ID_PREFIX = need_env('OTD_OPTION_ID_PREFIX')
def print_decl_def(header, locs):
print(f"""<para><emphasis>{header}:</emphasis></para>""")
print(f"""<simplelist>""")
for loc in locs:
# locations can be either plain strings (specific to nixpkgs), or attrsets
# { name = "foo/bar.nix"; url = "https://github.com/....."; }
if isinstance(loc, str):
# Hyperlink the filename either to the NixOS github
# repository (if it's a module and we have a revision number),
# or to the local filesystem.
if not loc.startswith('/'):
if OTD_REVISION == 'local':
href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}"
else:
href = f"https://github.com/NixOS/nixpkgs/blob/{OTD_REVISION}/{loc}"
else:
href = f"file://{loc}"
# Print the filename and make it user-friendly by replacing the
# /nix/store/<hash> prefix by the default location of nixos
# sources.
if not loc.startswith('/'):
name = f"<nixpkgs/{loc}>"
elif 'nixops' in loc and '/nix/' in loc:
name = f"<nixops/{loc[loc.find('/nix/') + 5:]}>"
else:
name = loc
print(f"""<member><filename xlink:href={quoteattr(href)}>""")
print(escape(name))
print(f"""</filename></member>""")
else:
href = f" xlink:href={quoteattr(loc['url'])}" if 'url' in loc else ""
print(f"""<member><filename{href}>{escape(loc['name'])}</filename></member>""")
print(f"""</simplelist>""")
markdownByDefault = False
optOffset = 0
for arg in sys.argv[1:]:
if arg == "--markdown-by-default":
optOffset += 1
markdownByDefault = True
options = convertMD(json.load(open(sys.argv[1 + optOffset], 'r')))
keys = list(options.keys())
keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") else 2, p)
for p in options[opt]['loc'] ])
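# Illustration: the sort key above orders each option's location components so that
# "enable*" parts come before "package*" parts, which come before everything else;
# e.g. the locations ["services", "foo", "settings"], ["services", "foo", "enable"]
# and ["services", "foo", "package"] sort as enable, package, settings.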
print(f"""<?xml version="1.0" encoding="UTF-8"?>""")
if OTD_DOCUMENT_TYPE == 'appendix':
print("""<appendix xmlns="http://docbook.org/ns/docbook" xml:id="appendix-configuration-options">""")
print(""" <title>Configuration Options</title>""")
print(f"""<variablelist xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:nixos="tag:nixos.org"
xmlns="http://docbook.org/ns/docbook"
xml:id="{OTD_VARIABLE_LIST_ID}">""")
for name in keys:
opt = options[name]
id = OTD_OPTION_ID_PREFIX + name.translate(id_translate_table)
print(f"""<varlistentry>""")
# NOTE adding extra spaces here introduces spaces into xref link expansions
print(f"""<term xlink:href={quoteattr("#" + id)} xml:id={quoteattr(id)}>""", end='')
print(f"""<option>{escape(name)}</option>""", end='')
print(f"""</term>""")
print(f"""<listitem>""")
print(opt['description'])
if typ := opt.get('type'):
ro = " <emphasis>(read only)</emphasis>" if opt.get('readOnly', False) else ""
print(f"""<para><emphasis>Type:</emphasis> {escape(typ)}{ro}</para>""")
if default := opt.get('default'):
print(default)
if example := opt.get('example'):
print(example)
if related := opt.get('relatedPackages'):
print(f"""<para>""")
print(f""" <emphasis>Related packages:</emphasis>""")
print(f"""</para>""")
print(related)
if decl := opt.get('declarations'):
print_decl_def("Declared by", decl)
if defs := opt.get('definitions'):
print_decl_def("Defined by", defs)
print(f"""</listitem>""")
print(f"""</varlistentry>""")
print("""</variablelist>""")
if OTD_DOCUMENT_TYPE == 'appendix':
print("""</appendix>""")

View file

@ -66,7 +66,7 @@ in
meta = {
maintainers = with lib.maintainers; [ ericsagnes ];
doc = ./default.xml;
doc = ./default.md;
};
}

View file

@ -1,275 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-input-methods">
<title>Input Methods</title>
<para>
Input methods are an operating system component that allows any
data, such as keyboard strokes or mouse movements, to be received as
input. In this way users can enter characters and symbols not found
on their input devices. Using an input method is obligatory for any
language that has more graphemes than there are keys on the
keyboard.
</para>
<para>
The following input methods are available in NixOS:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
IBus: The intelligent input bus.
</para>
</listitem>
<listitem>
<para>
Fcitx: A customizable lightweight input method.
</para>
</listitem>
<listitem>
<para>
Nabi: A Korean input method based on XIM.
</para>
</listitem>
<listitem>
<para>
Uim: The universal input method, a library with an XIM bridge.
</para>
</listitem>
<listitem>
<para>
Hime: An extremely easy-to-use input method framework.
</para>
</listitem>
<listitem>
<para>
Kime: Korean IME
</para>
</listitem>
</itemizedlist>
<section xml:id="module-services-input-methods-ibus">
<title>IBus</title>
<para>
IBus is an Intelligent Input Bus. It provides a full-featured and
user-friendly input method user interface.
</para>
<para>
The following snippet can be used to configure IBus:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;ibus&quot;;
ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
};
</programlisting>
<para>
<literal>i18n.inputMethod.ibus.engines</literal> is optional and
can be used to add extra IBus engines.
</para>
<para>
Available extra IBus engines are:
</para>
<itemizedlist>
<listitem>
<para>
Anthy (<literal>ibus-engines.anthy</literal>): Anthy is a
system for Japanese input. It converts Hiragana text to
Kana Kanji mixed text.
</para>
</listitem>
<listitem>
<para>
Hangul (<literal>ibus-engines.hangul</literal>): Korean input
method.
</para>
</listitem>
<listitem>
<para>
m17n (<literal>ibus-engines.m17n</literal>): m17n is an input
method that uses input methods and corresponding icons in the
m17n database.
</para>
</listitem>
<listitem>
<para>
mozc (<literal>ibus-engines.mozc</literal>): A Japanese input
method from Google.
</para>
</listitem>
<listitem>
<para>
Table (<literal>ibus-engines.table</literal>): An input method
that loads tables of input methods.
</para>
</listitem>
<listitem>
<para>
table-others (<literal>ibus-engines.table-others</literal>):
Various table-based input methods. To use this, and any other
table-based input methods, it must appear in the list of
engines along with <literal>table</literal>. For example:
</para>
<programlisting>
ibus.engines = with pkgs.ibus-engines; [ table table-others ];
</programlisting>
</listitem>
</itemizedlist>
<para>
To use any input method, the package must be added in the
configuration, as shown above, and also (after running
<literal>nixos-rebuild</literal>) the input method must be added
from IBus preference dialog.
</para>
<section xml:id="module-services-input-methods-troubleshooting">
<title>Troubleshooting</title>
<para>
If IBus works in some applications but not others, a likely
cause is that IBus depends on a different version
of <literal>glib</literal> than the one the applications
depend on. This can be checked by running
<literal>nix-store -q --requisites &lt;path&gt; | grep glib</literal>,
where <literal>&lt;path&gt;</literal> is the path of either IBus
or an application in the Nix store. The <literal>glib</literal>
packages must match exactly. If they do not, uninstalling and
reinstalling the application is a likely fix.
</para>
</section>
</section>
<section xml:id="module-services-input-methods-fcitx">
<title>Fcitx</title>
<para>
Fcitx is an input method framework with extension support. It has
three built-in Input Method Engines: Pinyin, QuWei and Table-based
input methods.
</para>
<para>
The following snippet can be used to configure Fcitx:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;fcitx&quot;;
fcitx.engines = with pkgs.fcitx-engines; [ mozc hangul m17n ];
};
</programlisting>
<para>
<literal>i18n.inputMethod.fcitx.engines</literal> is optional and
can be used to add extra Fcitx engines.
</para>
<para>
Available extra Fcitx engines are:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Anthy (<literal>fcitx-engines.anthy</literal>): Anthy is a
system for Japanese input. It converts Hiragana text to
Kana Kanji mixed text.
</para>
</listitem>
<listitem>
<para>
Chewing (<literal>fcitx-engines.chewing</literal>): Chewing is
an intelligent Zhuyin input method. It is one of the most
popular input methods among Traditional Chinese Unix users.
</para>
</listitem>
<listitem>
<para>
Hangul (<literal>fcitx-engines.hangul</literal>): Korean input
method.
</para>
</listitem>
<listitem>
<para>
Unikey (<literal>fcitx-engines.unikey</literal>): Vietnamese
input method.
</para>
</listitem>
<listitem>
<para>
m17n (<literal>fcitx-engines.m17n</literal>): m17n is an input
method that uses input methods and corresponding icons in the
m17n database.
</para>
</listitem>
<listitem>
<para>
mozc (<literal>fcitx-engines.mozc</literal>): A Japanese input
method from Google.
</para>
</listitem>
<listitem>
<para>
table-others (<literal>fcitx-engines.table-others</literal>):
Various table-based input methods.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-input-methods-nabi">
<title>Nabi</title>
<para>
Nabi is an easy to use Korean X input method. It allows you to
enter phonetic Korean characters (hangul) and pictographic Korean
characters (hanja).
</para>
<para>
The following snippet can be used to configure Nabi:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;nabi&quot;;
};
</programlisting>
</section>
<section xml:id="module-services-input-methods-uim">
<title>Uim</title>
<para>
Uim (short for <quote>universal input method</quote>) is a
multilingual input method framework. Applications can use it
through so-called bridges.
</para>
<para>
The following snippet can be used to configure uim:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;uim&quot;;
};
</programlisting>
<para>
Note: The <xref linkend="opt-i18n.inputMethod.uim.toolbar" />
option can be used to choose uim toolbar.
</para>
</section>
<section xml:id="module-services-input-methods-hime">
<title>Hime</title>
<para>
Hime is an extremely easy-to-use input method framework. It is
lightweight, stable, powerful and supports many commonly used
input methods, including Cangjie, Zhuyin, Dayi, Rank, Shrimp,
Greek, Korean Pinyin, Latin Alphabet, etc…
</para>
<para>
The following snippet can be used to configure Hime:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;hime&quot;;
};
</programlisting>
</section>
<section xml:id="module-services-input-methods-kime">
<title>Kime</title>
<para>
Kime is a Korean IME. It is built in Rust and provides simple,
safe, fast Korean typing.
</para>
<para>
The following snippet can be used to configure Kime:
</para>
<programlisting>
i18n.inputMethod = {
enabled = &quot;kime&quot;;
};
</programlisting>
</section>
</chapter>

View file

@ -47,7 +47,7 @@ in
doc = mkOption {
type = docFile;
internal = true;
example = "./meta.chapter.xml";
example = "./meta.chapter.md";
description = lib.mdDoc ''
Documentation prologue for the set of options of each module. This
option should be defined at most once per module.

View file

@ -33,7 +33,7 @@ in
};
meta = {
doc = ./default.xml;
doc = ./default.md;
maintainers = with lib.maintainers; [ vidbina ];
};
}

View file

@ -1,70 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-programs-digitalbitbox">
<title>Digital Bitbox</title>
<para>
Digital Bitbox is a hardware wallet and second-factor authenticator.
</para>
<para>
The <literal>digitalbitbox</literal> programs module may be
installed by setting <literal>programs.digitalbitbox</literal> to
<literal>true</literal> in a manner similar to
</para>
<programlisting>
programs.digitalbitbox.enable = true;
</programlisting>
<para>
and bundles the <literal>digitalbitbox</literal> package (see
<xref linkend="sec-digitalbitbox-package" />), which contains the
<literal>dbb-app</literal> and <literal>dbb-cli</literal> binaries,
along with the hardware module (see
<xref linkend="sec-digitalbitbox-hardware-module" />) which sets up
the necessary udev rules to access the device.
</para>
<para>
Enabling the digitalbitbox module is pretty much the easiest way to
get a Digital Bitbox device working on your system.
</para>
<para>
For more information, see
<link xlink:href="https://digitalbitbox.com/start_linux">https://digitalbitbox.com/start_linux</link>.
</para>
<section xml:id="sec-digitalbitbox-package">
<title>Package</title>
<para>
The binaries, <literal>dbb-app</literal> (a GUI tool) and
<literal>dbb-cli</literal> (a CLI tool), are available through the
<literal>digitalbitbox</literal> package which could be installed
as follows:
</para>
<programlisting>
environment.systemPackages = [
pkgs.digitalbitbox
];
</programlisting>
</section>
<section xml:id="sec-digitalbitbox-hardware-module">
<title>Hardware</title>
<para>
The digitalbitbox hardware package enables the udev rules for
Digital Bitbox devices and may be installed as follows:
</para>
<programlisting>
hardware.digitalbitbox.enable = true;
</programlisting>
<para>
In order to alter the udev rules, one may provide different values
for the <literal>udevRule51</literal> and
<literal>udevRule52</literal> attributes by means of overriding as
follows:
</para>
<programlisting>
programs.digitalbitbox = {
enable = true;
package = pkgs.digitalbitbox.override {
udevRule51 = &quot;something else&quot;;
};
};
</programlisting>
</section>
</chapter>

View file

@ -8,7 +8,7 @@ in
{
meta = {
maintainers = pkgs.plotinus.meta.maintainers;
doc = ./plotinus.xml;
doc = ./plotinus.md;
};
###### interface

View file

@ -1,30 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-program-plotinus">
<title>Plotinus</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/programs/plotinus.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://github.com/p-e-w/plotinus">https://github.com/p-e-w/plotinus</link>
</para>
<para>
Plotinus is a searchable command palette in every modern GTK
application.
</para>
<para>
When in a GTK 3 application and Plotinus is enabled, you can press
<literal>Ctrl+Shift+P</literal> to open the command palette. The
command palette provides a searchable list of all menu items in
the application.
</para>
<para>
To enable Plotinus, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
programs.plotinus.enable = true;
</programlisting>
</chapter>

View file

@ -142,5 +142,5 @@ in
};
meta.doc = ./oh-my-zsh.xml;
meta.doc = ./oh-my-zsh.md;
}

View file

@ -1,154 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-programs-zsh-ohmyzsh">
<title>Oh my ZSH</title>
<para>
<link xlink:href="https://ohmyz.sh/"><literal>oh-my-zsh</literal></link>
is a framework to manage your
<link xlink:href="https://www.zsh.org/">ZSH</link> configuration
including completion scripts for several CLI tools or custom prompt
themes.
</para>
<section xml:id="module-programs-oh-my-zsh-usage">
<title>Basic usage</title>
<para>
The module uses the <literal>oh-my-zsh</literal> package with all
available features. The initial setup using Nix expressions is
fairly similar to the configuration format of
<literal>oh-my-zsh</literal>.
</para>
<programlisting>
{
programs.zsh.ohMyZsh = {
enable = true;
plugins = [ &quot;git&quot; &quot;python&quot; &quot;man&quot; ];
theme = &quot;agnoster&quot;;
};
}
</programlisting>
<para>
For a detailed explanation of these arguments please refer to the
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki"><literal>oh-my-zsh</literal>
docs</link>.
</para>
<para>
The expression generates the needed configuration and writes it
into your <literal>/etc/zshrc</literal>.
</para>
</section>
<section xml:id="module-programs-oh-my-zsh-additions">
<title>Custom additions</title>
<para>
Sometimes third-party or custom scripts such as a modified theme
may be needed. <literal>oh-my-zsh</literal> provides the
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki/Customization#overriding-internals"><literal>ZSH_CUSTOM</literal></link>
environment variable for this which points to a directory with
additional scripts.
</para>
<para>
The module can do this as well:
</para>
<programlisting>
{
programs.zsh.ohMyZsh.custom = &quot;~/path/to/custom/scripts&quot;;
}
</programlisting>
</section>
<section xml:id="module-programs-oh-my-zsh-environments">
<title>Custom environments</title>
<para>
There are several extensions for <literal>oh-my-zsh</literal>
packaged in <literal>nixpkgs</literal>. One of them is
<link xlink:href="https://github.com/spwhitt/nix-zsh-completions">nix-zsh-completions</link>
which bundles completion scripts and a plugin for
<literal>oh-my-zsh</literal>.
</para>
<para>
Rather than using a single mutable path for
<literal>ZSH_CUSTOM</literal>, it's also possible to generate this
path from a list of Nix packages:
</para>
<programlisting>
{ pkgs, ... }:
{
programs.zsh.ohMyZsh.customPkgs = [
pkgs.nix-zsh-completions
# and even more...
];
}
</programlisting>
<para>
Internally a single store path will be created using
<literal>buildEnv</literal>. Please refer to the docs of
<link xlink:href="https://nixos.org/nixpkgs/manual/#sec-building-environment"><literal>buildEnv</literal></link>
for further reference.
</para>
<para>
<emphasis>Please keep in mind that this is not compatible with
<literal>programs.zsh.ohMyZsh.custom</literal> as it requires an
immutable store path while <literal>custom</literal> shall remain
mutable! An evaluation failure will be thrown if both
<literal>custom</literal> and <literal>customPkgs</literal> are
set.</emphasis>
</para>
</section>
<section xml:id="module-programs-oh-my-zsh-packaging-customizations">
<title>Package your own customizations</title>
<para>
If third-party customizations (e.g. new themes) are supposed to be
added to <literal>oh-my-zsh</literal> there are several pitfalls
to keep in mind:
</para>
<itemizedlist>
<listitem>
<para>
To comply with the default structure of <literal>ZSH</literal>
the entire output needs to be written to
<literal>$out/share/zsh</literal>.
</para>
</listitem>
<listitem>
<para>
Completion scripts are supposed to be stored at
<literal>$out/share/zsh/site-functions</literal>. This
directory is part of the
<link xlink:href="http://zsh.sourceforge.net/Doc/Release/Functions.html"><literal>fpath</literal></link>
and the package should be compatible with pure
<literal>ZSH</literal> setups. The module will automatically
link the contents of <literal>site-functions</literal> to the
completions directory in the proper store path.
</para>
</listitem>
<listitem>
<para>
The <literal>plugins</literal> directory needs the structure
<literal>pluginname/pluginname.plugin.zsh</literal> as
structured in the
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins">upstream
repo.</link>
</para>
</listitem>
</itemizedlist>
<para>
A derivation for <literal>oh-my-zsh</literal> may look like this:
</para>
<programlisting>
{ stdenv, fetchFromGitHub }:
stdenv.mkDerivation rec {
name = &quot;exemplary-zsh-customization-${version}&quot;;
version = &quot;1.0.0&quot;;
src = fetchFromGitHub {
# path to the upstream repository
};
dontBuild = true;
installPhase = ''
mkdir -p $out/share/zsh/site-functions
cp {themes,plugins} $out/share/zsh
cp completions $out/share/zsh/site-functions
'';
}
</programlisting>
</section>
</chapter>

View file

@ -916,6 +916,6 @@ in {
meta = {
maintainers = lib.teams.acme.members;
doc = ./default.xml;
doc = ./default.md;
};
}

View file

@ -1,395 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-security-acme">
<title>SSL/TLS Certificates with ACME</title>
<para>
NixOS supports automatic domain validation &amp; certificate
retrieval and renewal using the ACME protocol. Any provider can be
used, but by default NixOS uses Let's Encrypt. The alternative ACME
client
<link xlink:href="https://go-acme.github.io/lego/">lego</link> is
used under the hood.
</para>
<para>
Automatic cert validation and configuration for Apache and Nginx
virtual hosts is included in NixOS, however if you would like to
generate a wildcard cert or you are not using a web server you will
have to configure DNS based validation.
</para>
<section xml:id="module-security-acme-prerequisites">
<title>Prerequisites</title>
<para>
To use the ACME module, you must accept the provider's terms of
service by setting
<xref linkend="opt-security.acme.acceptTerms" /> to
<literal>true</literal>. The Let's Encrypt ToS can be found
<link xlink:href="https://letsencrypt.org/repository/">here</link>.
</para>
<para>
You must also set an email address to be used when creating
accounts with Let's Encrypt. You can set this for all certs with
<xref linkend="opt-security.acme.defaults.email" /> and/or on a
per-cert basis with
<xref linkend="opt-security.acme.certs._name_.email" />. This
address is only used for registration and renewal reminders, and
cannot be used to administer the certificates in any way.
</para>
<para>
Alternatively, you can use a different ACME server by changing the
<xref linkend="opt-security.acme.defaults.server" /> option to a
provider of your choosing, or just change the server for one cert
with <xref linkend="opt-security.acme.certs._name_.server" />.
</para>
<para>
You will need an HTTP server or DNS server for verification. For
HTTP, the server must have a webroot defined that can serve
<filename>.well-known/acme-challenge</filename>. This directory
must be writeable by the user that will run the ACME client. For
DNS, you must set up credentials with your provider/server for use
with lego.
</para>
</section>
<section xml:id="module-security-acme-nginx">
<title>Using ACME certificates in Nginx</title>
<para>
NixOS supports fetching ACME certificates for you by setting
<literal>enableACME = true;</literal> in a virtualHost config. We
first create self-signed placeholder certificates in place of the
real ACME certs. The placeholder certs are overwritten when the
ACME certs arrive. For <literal>foo.example.com</literal> the
config would look like this:
</para>
<programlisting>
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
services.nginx = {
enable = true;
virtualHosts = {
&quot;foo.example.com&quot; = {
forceSSL = true;
enableACME = true;
# All serverAliases will be added as extra domain names on the certificate.
serverAliases = [ &quot;bar.example.com&quot; ];
locations.&quot;/&quot; = {
root = &quot;/var/www&quot;;
};
};
# We can also add a different vhost and reuse the same certificate
# but we have to append extraDomainNames manually beforehand:
# security.acme.certs.&quot;foo.example.com&quot;.extraDomainNames = [ &quot;baz.example.com&quot; ];
&quot;baz.example.com&quot; = {
forceSSL = true;
useACMEHost = &quot;foo.example.com&quot;;
locations.&quot;/&quot; = {
root = &quot;/var/www&quot;;
};
};
};
}
</programlisting>
</section>
<section xml:id="module-security-acme-httpd">
<title>Using ACME certificates in Apache/httpd</title>
<para>
Using ACME certificates with Apache virtual hosts is identical to
using them with Nginx. The attribute names are all the same, just
replace <quote>nginx</quote> with <quote>httpd</quote> where
appropriate.
</para>
</section>
<section xml:id="module-security-acme-configuring">
<title>Manual configuration of HTTP-01 validation</title>
<para>
First off you will need to set up a virtual host to serve the
challenges. This example uses a vhost called
<literal>certs.example.com</literal>, with the intent that you
will generate certs for all your vhosts and redirect everyone to
HTTPS.
</para>
<programlisting>
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
users.users.nginx.extraGroups = [ &quot;acme&quot; ];
services.nginx = {
enable = true;
virtualHosts = {
&quot;acmechallenge.example.com&quot; = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ &quot;*.example.com&quot; ];
locations.&quot;/.well-known/acme-challenge&quot; = {
root = &quot;/var/lib/acme/.challenges&quot;;
};
locations.&quot;/&quot; = {
return = &quot;301 https://$host$request_uri&quot;;
};
};
};
}
# Alternative config for Apache
users.users.wwwrun.extraGroups = [ &quot;acme&quot; ];
services.httpd = {
enable = true;
virtualHosts = {
&quot;acmechallenge.example.com&quot; = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ &quot;*.example.com&quot; ];
# /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
# By default, this is the case.
documentRoot = &quot;/var/lib/acme/.challenges&quot;;
extraConfig = ''
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
'';
};
};
}
</programlisting>
<para>
Now you need to configure ACME to generate a certificate.
</para>
<programlisting>
security.acme.certs.&quot;foo.example.com&quot; = {
webroot = &quot;/var/lib/acme/.challenges&quot;;
email = &quot;foo@example.com&quot;;
# Ensure that the web server you use can read the generated certs
# Take a look at the group option for the web server you choose.
group = &quot;nginx&quot;;
# Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything!
# Just make sure your DNS resolves them.
extraDomainNames = [ &quot;mail.example.com&quot; ];
};
</programlisting>
<para>
The private key <filename>key.pem</filename> and certificate
<filename>fullchain.pem</filename> will be put into
<filename>/var/lib/acme/foo.example.com</filename>.
</para>
<para>
Refer to <xref linkend="ch-options" /> for all available
configuration options for the
<link linkend="opt-security.acme.certs">security.acme</link>
module.
</para>
</section>
<section xml:id="module-security-acme-config-dns">
<title>Configuring ACME for DNS validation</title>
<para>
This is useful if you want to generate a wildcard certificate,
since ACME servers will only hand out wildcard certs over DNS
validation. There are a number of supported DNS providers and
servers you can utilise, see the
<link xlink:href="https://go-acme.github.io/lego/dns/">lego
docs</link> for provider/server specific configuration values. For
the sake of these docs, we will provide a fully self-hosted
example using bind.
</para>
<programlisting>
services.bind = {
enable = true;
extraConfig = ''
include &quot;/var/lib/secrets/dnskeys.conf&quot;;
'';
zones = [
rec {
name = &quot;example.com&quot;;
file = &quot;/var/db/bind/${name}&quot;;
master = true;
extraConfig = &quot;allow-update { key rfc2136key.example.com.; };&quot;;
}
];
}
# Now we can configure ACME
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
security.acme.certs.&quot;example.com&quot; = {
domain = &quot;*.example.com&quot;;
dnsProvider = &quot;rfc2136&quot;;
credentialsFile = &quot;/var/lib/secrets/certs.secret&quot;;
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
</programlisting>
<para>
The <filename>dnskeys.conf</filename> and
<filename>certs.secret</filename> must be kept secure and thus you
should not keep their contents in your Nix config. Instead,
generate them one time with a systemd service:
</para>
<programlisting>
systemd.services.dns-rfc2136-conf = {
requiredBy = [&quot;acme-example.com.service&quot; &quot;bind.service&quot;];
before = [&quot;acme-example.com.service&quot; &quot;bind.service&quot;];
unitConfig = {
ConditionPathExists = &quot;!/var/lib/secrets/dnskeys.conf&quot;;
};
serviceConfig = {
Type = &quot;oneshot&quot;;
UMask = 0077;
};
path = [ pkgs.bind ];
script = ''
mkdir -p /var/lib/secrets
chmod 755 /var/lib/secrets
tsig-keygen rfc2136key.example.com &gt; /var/lib/secrets/dnskeys.conf
chown named:root /var/lib/secrets/dnskeys.conf
chmod 400 /var/lib/secrets/dnskeys.conf
# extract secret value from the dnskeys.conf
while read x y; do if [ &quot;$x&quot; = &quot;secret&quot; ]; then secret=&quot;''${y:1:''${#y}-3}&quot;; fi; done &lt; /var/lib/secrets/dnskeys.conf
cat &gt; /var/lib/secrets/certs.secret &lt;&lt; EOF
RFC2136_NAMESERVER='127.0.0.1:53'
RFC2136_TSIG_ALGORITHM='hmac-sha256.'
RFC2136_TSIG_KEY='rfc2136key.example.com'
RFC2136_TSIG_SECRET='$secret'
EOF
chmod 400 /var/lib/secrets/certs.secret
'';
};
</programlisting>
<para>
Now you're all set to generate certs! You should monitor the first
invocation by running
<literal>systemctl start acme-example.com.service &amp; journalctl -fu acme-example.com.service</literal>
and watching its log output.
</para>
</section>
<section xml:id="module-security-acme-config-dns-with-vhosts">
<title>Using DNS validation with web server virtual hosts</title>
<para>
It is possible to use DNS-01 validation with all certificates,
including those automatically configured via the Nginx/Apache
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME"><literal>enableACME</literal></link>
option. This configuration pattern is fully supported and part of
the module's test suite for Nginx + Apache.
</para>
<para>
You must follow the guide above on configuring DNS-01 validation
first, however instead of setting the options for one certificate
(e.g.
<xref linkend="opt-security.acme.certs._name_.dnsProvider" />) you
will set them as defaults (e.g.
<xref linkend="opt-security.acme.defaults.dnsProvider" />).
</para>
<programlisting>
# Configure ACME appropriately
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
security.acme.defaults = {
dnsProvider = &quot;rfc2136&quot;;
credentialsFile = &quot;/var/lib/secrets/certs.secret&quot;;
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
# For each virtual host you would like to use DNS-01 validation with,
# set acmeRoot = null
services.nginx = {
enable = true;
virtualHosts = {
&quot;foo.example.com&quot; = {
enableACME = true;
acmeRoot = null;
};
};
}
</programlisting>
<para>
And that's it! Next time your configuration is rebuilt, or when
you add a new virtualHost, it will be DNS-01 validated.
</para>
</section>
<section xml:id="module-security-acme-root-owned">
<title>Using ACME with services demanding root owned
certificates</title>
<para>
Some services refuse to start if the configured certificate files
are not owned by root. PostgreSQL and OpenSMTPD are examples of
these. There is no way to change the user the ACME module uses (it
will always be <literal>acme</literal>), however you can use
systemd's <literal>LoadCredential</literal> feature to resolve
this elegantly. Below is an example configuration for OpenSMTPD,
but this pattern can be applied to any service.
</para>
<programlisting>
# Configure ACME however you like (DNS or HTTP validation), adding
# the following configuration for the relevant certificate.
# Note: You cannot use `systemctl reload` here as that would mean
# the LoadCredential configuration below would be skipped and
# the service would continue to use old certificates.
security.acme.certs.&quot;mail.example.com&quot;.postRun = ''
systemctl restart opensmtpd
'';
# Now you must augment OpenSMTPD's systemd service to load
# the certificate files.
systemd.services.opensmtpd.requires = [&quot;acme-finished-mail.example.com.target&quot;];
systemd.services.opensmtpd.serviceConfig.LoadCredential = let
certDir = config.security.acme.certs.&quot;mail.example.com&quot;.directory;
in [
&quot;cert.pem:${certDir}/cert.pem&quot;
&quot;key.pem:${certDir}/key.pem&quot;
];
# Finally, configure OpenSMTPD to use these certs.
services.opensmtpd = let
credsDir = &quot;/run/credentials/opensmtpd.service&quot;;
in {
enable = true;
setSendmail = false;
serverConfiguration = ''
pki mail.example.com cert &quot;${credsDir}/cert.pem&quot;
pki mail.example.com key &quot;${credsDir}/key.pem&quot;
listen on localhost tls pki mail.example.com
action act1 relay host smtp://127.0.0.1:10027
match for local action act1
'';
};
</programlisting>
</section>
<section xml:id="module-security-acme-regenerate">
<title>Regenerating certificates</title>
<para>
Should you need to regenerate a particular certificate in a hurry,
such as when a vulnerability is found in Let's Encrypt, there is
now a convenient mechanism for doing so. Running
<literal>systemctl clean --what=state acme-example.com.service</literal>
will remove all certificate files and the account data for the
given domain, allowing you to then
<literal>systemctl start acme-example.com.service</literal> to
generate fresh ones.
</para>
</section>
<section xml:id="module-security-acme-fix-jws">
<title>Fixing JWS Verification error</title>
<para>
It is possible that your account credentials file may become
corrupt and need to be regenerated. In this scenario lego will
produce the error <literal>JWS verification error</literal>. The
solution is to simply delete the associated accounts file and
re-run the affected service(s).
</para>
<programlisting>
# Find the accounts folder for the certificate
systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
export accountdir=&quot;$(!!)&quot;
# Move this folder to some place else
mv /var/lib/acme/.lego/$accountdir{,.bak}
# Recreate the folder using systemd-tmpfiles
systemd-tmpfiles --create
# Get a new account and reissue certificates
# Note: Do this for all certs that share the same account email address
systemctl start acme-example.com.service
</programlisting>
</section>
</chapter>

View file

@ -226,7 +226,7 @@ let
in {
meta.maintainers = with maintainers; [ dotlambda ];
meta.doc = ./borgbackup.xml;
meta.doc = ./borgbackup.md;
###### interface

View file

@ -1,215 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-borgbase">
<title>BorgBackup</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/backup/borgbackup.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://borgbackup.readthedocs.io/">https://borgbackup.readthedocs.io/</link>
</para>
<para>
<link xlink:href="https://www.borgbackup.org/">BorgBackup</link>
(short: Borg) is a deduplicating backup program. Optionally, it
supports compression and authenticated encryption.
</para>
<para>
The main goal of Borg is to provide an efficient and secure way to
backup data. The data deduplication technique used makes Borg
suitable for daily backups since only changes are stored. The
authenticated encryption technique makes it suitable for backups to
not fully trusted targets.
</para>
<section xml:id="module-services-backup-borgbackup-configuring">
<title>Configuring</title>
<para>
A complete list of options for the Borgbase module may be found
<link linkend="opt-services.borgbackup.jobs">here</link>.
</para>
</section>
<section xml:id="opt-services-backup-borgbackup-local-directory">
<title>Basic usage for a local backup</title>
<para>
A very basic configuration for backing up to a locally accessible
directory is:
</para>
<programlisting>
{
services.borgbackup.jobs = {
rootBackup = {
paths = &quot;/&quot;;
exclude = [ &quot;/nix&quot; &quot;/path/to/local/repo&quot; ];
repo = &quot;/path/to/local/repo&quot;;
doInit = true;
encryption = {
mode = &quot;repokey&quot;;
passphrase = &quot;secret&quot;;
};
compression = &quot;auto,lzma&quot;;
startAt = &quot;weekly&quot;;
};
};
}
</programlisting>
<warning>
<para>
If you do not want the passphrase to be stored in the
world-readable Nix store, use passCommand. You can find an example
below.
</para>
</warning>
</section>
<section xml:id="opt-services-backup-create-server">
<title>Create a borg backup server</title>
<para>
You should use a different SSH key for each repository you write
to, because the specified keys are restricted to running borg
serve and can only access this single repository. You will need
the contents of the generated <literal>.pub</literal> file.
</para>
<programlisting>
# sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
# cat /run/keys/id_ed25519_my_borg_repo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos
</programlisting>
<para>
Add the following snippet to your NixOS configuration:
</para>
<programlisting>
{
services.borgbackup.repos = {
my_borg_repo = {
authorizedKeys = [
&quot;ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos&quot;
] ;
path = &quot;/var/lib/my_borg_repo&quot; ;
};
};
}
</programlisting>
</section>
<section xml:id="opt-services-backup-borgbackup-remote-server">
<title>Backup to the borg repository server</title>
<para>
The following NixOS snippet creates an hourly backup to the
service (on the host nixos) as created in the section above. We
assume that you have stored a secret passphrase in the file
<filename>/run/keys/borgbackup_passphrase</filename>, which should
only be accessible by root.
</para>
<programlisting>
{
  services.borgbackup.jobs = {
    backupToLocalServer = {
      paths = [ &quot;/etc/nixos&quot; ];
      doInit = true;
      repo = &quot;borg@nixos:.&quot;;
      encryption = {
        mode = &quot;repokey-blake2&quot;;
        passCommand = &quot;cat /run/keys/borgbackup_passphrase&quot;;
      };
      environment = { BORG_RSH = &quot;ssh -i /run/keys/id_ed25519_my_borg_repo&quot;; };
      compression = &quot;auto,lzma&quot;;
      startAt = &quot;hourly&quot;;
    };
  };
}
</programlisting>
<para>
The following few commands (run as root) let you test your backup.
</para>
<programlisting>
&gt; nixos-rebuild switch
...restarting the following units: polkit.service
&gt; systemctl restart borgbackup-job-backupToLocalServer
&gt; sleep 10
&gt; systemctl restart borgbackup-job-backupToLocalServer
&gt; export BORG_PASSPHRASE=topSecret
&gt; borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:.
nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac]
nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]
</programlisting>
</section>
<section xml:id="opt-services-backup-borgbackup-borgbase">
<title>Backup to a hosting service</title>
<para>
Several companies offer
<link xlink:href="https://www.borgbackup.org/support/commercial.html">(paid)
hosting services</link> for Borg repositories.
</para>
<para>
To backup your home directory to borgbase you have to:
</para>
<itemizedlist>
<listitem>
<para>
Generate an SSH key without a password, to access the remote
server. E.g.
</para>
<programlisting>
sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase
</programlisting>
</listitem>
<listitem>
<para>
Create the repository on the server by following the
instructions for your hosting server.
</para>
</listitem>
<listitem>
<para>
Initialize the repository on the server, e.g.
</para>
<programlisting>
sudo borg init --encryption=repokey-blake2 \
-rsh &quot;ssh -i /run/keys/id_ed25519_borgbase&quot; \
zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo
</programlisting>
</listitem>
<listitem>
<para>
Add it to your NixOS configuration, e.g.
</para>
<programlisting>
{
  services.borgbackup.jobs = {
    my_Remote_Backup = {
      paths = [ &quot;/&quot; ];
      exclude = [ &quot;/nix&quot; &quot;'**/.cache'&quot; ];
      repo = &quot;zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo&quot;;
      encryption = {
        mode = &quot;repokey-blake2&quot;;
        passCommand = &quot;cat /run/keys/borgbackup_passphrase&quot;;
      };
      environment = { BORG_RSH = &quot;ssh -i /run/keys/id_ed25519_borgbase&quot;; };
      compression = &quot;auto,lzma&quot;;
      startAt = &quot;daily&quot;;
    };
  };
}
</programlisting>
</listitem>
</itemizedlist>
</section>
<section xml:id="opt-services-backup-borgbackup-vorta">
<title>Vorta backup client for the desktop</title>
<para>
Vorta is a backup client for macOS and Linux desktops. It
integrates the mighty BorgBackup with your desktop environment to
protect your data from disk failure, ransomware and theft.
</para>
<para>
It can be installed in NixOS e.g. by adding
<literal>pkgs.vorta</literal> to
<xref linkend="opt-environment.systemPackages" />.
</para>
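<para>
For example, a minimal <filename>configuration.nix</filename>
snippet (a sketch using only the package and option mentioned
above) might look like this:
</para>
<programlisting>
{
  environment.systemPackages = [ pkgs.vorta ];
}
</programlisting>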
<para>
Details about using Vorta can be found under
<link xlink:href="https://vorta.borgbase.com/usage">https://vorta.borgbase.com</link>
.
</para>
</section>
</chapter>

View file

@ -424,6 +424,6 @@ in
};
};
meta.doc = ./foundationdb.xml;
meta.doc = ./foundationdb.md;
meta.maintainers = with lib.maintainers; [ thoughtpolice ];
}

View file

@ -1,425 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-foundationdb">
<title>FoundationDB</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/databases/foundationdb.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://apple.github.io/foundationdb/">https://apple.github.io/foundationdb/</link>
</para>
<para>
<emphasis>Maintainer:</emphasis> Austin Seipp
</para>
<para>
<emphasis>Available version(s):</emphasis> 5.1.x, 5.2.x, 6.0.x
</para>
<para>
FoundationDB (or <quote>FDB</quote>) is an open source, distributed,
transactional key-value store.
</para>
<section xml:id="module-services-foundationdb-configuring">
<title>Configuring and basic setup</title>
<para>
To enable FoundationDB, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.foundationdb.enable = true;
services.foundationdb.package = pkgs.foundationdb52; # FoundationDB 5.2.x
</programlisting>
<para>
The <option>services.foundationdb.package</option> option is
required and must always be specified. Because FoundationDB's
network protocols and on-disk storage formats may change between
major versions, and upgrades must be handled explicitly by the
user, the NixOS module cannot choose a default version for you.
Note that minor, bugfix releases are always compatible.
</para>
<para>
After running <command>nixos-rebuild</command>, you can verify
whether FoundationDB is running by executing
<command>fdbcli</command> (which is added to
<option>environment.systemPackages</option>):
</para>
<programlisting>
$ sudo -u foundationdb fdbcli
Using cluster file `/etc/foundationdb/fdb.cluster'.
The database is available.
Welcome to the fdbcli. For help, type `help'.
fdb&gt; status
Using cluster file `/etc/foundationdb/fdb.cluster'.
Configuration:
Redundancy mode - single
Storage engine - memory
Coordinators - 1
Cluster:
FoundationDB processes - 1
Machines - 1
Memory availability - 5.4 GB per process on machine with least available
Fault Tolerance - 0 machines
Server time - 04/20/18 15:21:14
...
fdb&gt;
</programlisting>
<para>
You can also write programs using the available client libraries.
For example, the following Python program can be run in order to
grab the cluster status, as a quick example. (This example uses
<command>nix-shell</command> shebang support to automatically
supply the necessary Python modules).
</para>
<programlisting>
a@link&gt; cat fdb-status.py
#! /usr/bin/env nix-shell
#! nix-shell -i python -p python pythonPackages.foundationdb52

import fdb
import json

def main():
    fdb.api_version(520)
    db = fdb.open()

    @fdb.transactional
    def get_status(tr):
        return str(tr['\xff\xff/status/json'])

    obj = json.loads(get_status(db))
    print('FoundationDB available: %s' % obj['client']['database_status']['available'])

if __name__ == &quot;__main__&quot;:
    main()

a@link&gt; chmod +x fdb-status.py
a@link&gt; ./fdb-status.py
FoundationDB available: True
a@link&gt;
</programlisting>
<para>
FoundationDB is run under the <command>foundationdb</command> user
and group by default, but this may be changed in the NixOS
configuration. The systemd unit
<command>foundationdb.service</command> controls the
<command>fdbmonitor</command> process.
</para>
<para>
By default, the NixOS module for FoundationDB creates a single
SSD-storage based database for development and basic usage. This
storage engine is designed for SSDs and will perform poorly on
HDDs; however it can handle far more data than the alternative
<quote>memory</quote> engine and is a better default choice for
most deployments. (Note that you can change the storage backend
on-the-fly for a given FoundationDB cluster using
<command>fdbcli</command>.)
</para>
<para>
Furthermore, only 1 server process and 1 backup agent are started
in the default configuration. See below for more on scaling to
increase this.
</para>
<para>
FoundationDB stores all data for all server processes under
<filename>/var/lib/foundationdb</filename>. You can override this
using <option>services.foundationdb.dataDir</option>, e.g.
</para>
<programlisting>
services.foundationdb.dataDir = &quot;/data/fdb&quot;;
</programlisting>
<para>
Similarly, logs are stored under
<filename>/var/log/foundationdb</filename> by default, and there
is a corresponding <option>services.foundationdb.logDir</option>
as well.
</para>
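<para>
For example, to put the logs on a dedicated disk as well (the path
below is only a placeholder):
</para>
<programlisting>
services.foundationdb.logDir = &quot;/data/fdb-logs&quot;;
</programlisting>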
</section>
<section xml:id="module-services-foundationdb-scaling">
<title>Scaling processes and backup agents</title>
<para>
Scaling the number of server processes is quite easy; simply
specify <option>services.foundationdb.serverProcesses</option> to
be the number of FoundationDB worker processes that should be
started on the machine.
</para>
<para>
FoundationDB worker processes typically require 4GB of RAM
per-process at minimum for good performance, so this option is set
to 1 by default since the maximum amount of RAM is unknown. You're
advised to abide by this restriction, so pick a number of
processes so that each has 4GB or more.
</para>
<para>
A similar option exists in order to scale backup agent processes,
<option>services.foundationdb.backupProcesses</option>. Backup
agents are not as performance/RAM sensitive, so feel free to
experiment with the number of available backup processes.
</para>
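<para>
As a sketch, a machine with plenty of RAM might scale both options
like this (the numbers are only illustrative):
</para>
<programlisting>
services.foundationdb.serverProcesses = 4; # roughly 4GB RAM per worker process
services.foundationdb.backupProcesses = 2;
</programlisting>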
</section>
<section xml:id="module-services-foundationdb-clustering">
<title>Clustering</title>
<para>
FoundationDB on NixOS works similarly to other Linux systems, so
this section will be brief. Please refer to the full FoundationDB
documentation for more on clustering.
</para>
<para>
FoundationDB organizes clusters using a set of
<emphasis>coordinators</emphasis>, which are just
specially-designated worker processes. By default, every
installation of FoundationDB on NixOS will start as its own
individual cluster, with a single coordinator: the first worker
process on <command>localhost</command>.
</para>
<para>
Coordinators are specified globally using the
<command>/etc/foundationdb/fdb.cluster</command> file, which all
servers and client applications will use to find and join
coordinators. Note that this file <emphasis>can not</emphasis> be
managed by NixOS so easily: FoundationDB is designed so that it
will rewrite the file at runtime for all clients and nodes when
cluster coordinators change, with clients transparently handling
this without intervention. It is fundamentally a mutable file, and
you should not try to manage it in any way in NixOS.
</para>
<para>
When dealing with a cluster, there are two main things you want to
do:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Add a node to the cluster for storage/compute.
</para>
</listitem>
<listitem>
<para>
Promote an ordinary worker to a coordinator.
</para>
</listitem>
</itemizedlist>
<para>
A node must already be a member of the cluster in order to
properly be promoted to a coordinator, so you must always add it
first if you wish to promote it.
</para>
<para>
To add a machine to a FoundationDB cluster:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Choose one of the servers to start as the initial coordinator.
</para>
</listitem>
<listitem>
<para>
Copy the <command>/etc/foundationdb/fdb.cluster</command> file
from this server to all the other servers. Restart
FoundationDB on all of these other servers, so they join the
cluster.
</para>
</listitem>
<listitem>
<para>
All of these servers are now connected and working together in
the cluster, under the chosen coordinator.
</para>
</listitem>
</itemizedlist>
<para>
At this point, you can add as many nodes as you want by just
repeating the above steps. By default there will still be a single
coordinator: you can use <command>fdbcli</command> to change this
and add new coordinators.
</para>
<para>
As a convenience, FoundationDB can automatically assign
coordinators based on the redundancy mode you wish to achieve for
the cluster. Once all the nodes have been joined, simply set the
replication policy, and then issue the
<command>coordinators auto</command> command.
</para>
<para>
For example, assuming we have 3 nodes available, we can enable
double redundancy mode, then auto-select coordinators. For double
redundancy, 3 coordinators is ideal: therefore FoundationDB will
make <emphasis>every</emphasis> node a coordinator automatically:
</para>
<programlisting>
fdbcli&gt; configure double ssd
fdbcli&gt; coordinators auto
</programlisting>
<para>
This will transparently update all the servers within seconds, and
appropriately rewrite the <command>fdb.cluster</command> file, as
well as informing all client processes to do the same.
</para>
</section>
<section xml:id="module-services-foundationdb-connectivity">
<title>Client connectivity</title>
<para>
By default, all clients must use the current
<command>fdb.cluster</command> file to access a given FoundationDB
cluster. This file is located by default in
<command>/etc/foundationdb/fdb.cluster</command> on all machines
with the FoundationDB service enabled, so you may copy the active
one from your cluster to a new node in order to connect, if it is
not part of the cluster.
</para>
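<para>
For example, copying the active cluster file to a new client
machine could look roughly like this (the hostname is only a
placeholder):
</para>
<programlisting>
# scp existing-fdb-node:/etc/foundationdb/fdb.cluster /etc/foundationdb/fdb.cluster
</programlisting>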
</section>
<section xml:id="module-services-foundationdb-authorization">
<title>Client authorization and TLS</title>
<para>
By default, any user who can connect to a FoundationDB process
with the correct cluster configuration can access anything.
FoundationDB uses a pluggable design to transport security, and
out of the box it supports a LibreSSL-based plugin for TLS
support. This plugin not only does in-flight encryption, but also
performs client authorization based on the given endpoint's
certificate chain. For example, a FoundationDB server may be
configured to only accept client connections over TLS, where the
client TLS certificate is from organization <emphasis>Acme
Co</emphasis> in the <emphasis>Research and Development</emphasis>
unit.
</para>
<para>
Configuring TLS with FoundationDB is done using the
<option>services.foundationdb.tls</option> options in order to
control the peer verification string, as well as the certificate
and its private key.
</para>
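<para>
A rough sketch, assuming the module exposes certificate, key and
peer-verification sub-options under
<option>services.foundationdb.tls</option> (the exact option names
and file paths below are assumptions; check the options list for
the authoritative names):
</para>
<programlisting>
services.foundationdb.tls = {
  certificate = &quot;/etc/foundationdb/fdb.pem&quot;;            # assumed option: server certificate chain
  key = &quot;/etc/foundationdb/private/fdb-key.pem&quot;;        # assumed option: private key, readable by the foundationdb user
  allowedPeers = &quot;Check.Valid=1,Check.Unexpired=1&quot;;     # assumed option: peer verification string
};
</programlisting>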
<para>
Note that the certificate and its private key must be accessible
to the FoundationDB user account that the server runs under. These
files are also NOT managed by NixOS, as putting them into the
store may reveal private information.
</para>
<para>
After you have a key and certificate file in place, it is not
enough to simply set the NixOS module options; you must also
configure the <command>fdb.cluster</command> file to specify that
a given set of coordinators use TLS. This is as simple as adding
the suffix <command>:tls</command> to your cluster coordinator
configuration, after the port number. For example, assuming you
have a coordinator on localhost with the default configuration,
simply specifying:
</para>
<programlisting>
XXXXXX:XXXXXX@127.0.0.1:4500:tls
</programlisting>
<para>
will configure all clients and server processes to use TLS from
now on.
</para>
</section>
<section xml:id="module-services-foundationdb-disaster-recovery">
<title>Backups and Disaster Recovery</title>
<para>
The usual rules for doing FoundationDB backups apply on NixOS as
written in the FoundationDB manual. However, one important
difference is the security profile for NixOS: by default, the
<command>foundationdb</command> systemd unit uses <emphasis>Linux
namespaces</emphasis> to restrict write access to the system,
except for the log directory, data directory, and the
<command>/etc/foundationdb/</command> directory. This is enforced
by default and cannot be disabled.
</para>
<para>
However, a side effect of this is that the
<command>fdbbackup</command> command doesn't work properly for
local filesystem backups: FoundationDB uses a server process
alongside the database processes to perform backups and copy the
backups to the filesystem. As a result, this process is put under
the restricted namespaces above: the backup process can only write
to a limited number of paths.
</para>
<para>
In order to allow flexible backup locations on local disks, the
FoundationDB NixOS module supports a
<option>services.foundationdb.extraReadWritePaths</option> option.
This option takes a list of paths, and adds them to the systemd
unit, allowing the processes inside the service to write (and
read) the specified directories.
</para>
<para>
For example, to create backups in
<command>/opt/fdb-backups</command>, first set up the paths in the
module options:
</para>
<programlisting>
services.foundationdb.extraReadWritePaths = [ &quot;/opt/fdb-backups&quot; ];
</programlisting>
<para>
Restart the FoundationDB service, and it will now be able to write
to this directory. Note: this path <emphasis>must</emphasis> exist
before restarting the unit; otherwise, systemd will not include it
in the private FoundationDB namespace (and it will not add it
dynamically at runtime).
</para>
<para>
You can now perform a backup:
</para>
<programlisting>
$ sudo -u foundationdb fdbbackup start -t default -d file:///opt/fdb-backups
$ sudo -u foundationdb fdbbackup status -t default
</programlisting>
</section>
<section xml:id="module-services-foundationdb-limitations">
<title>Known limitations</title>
<para>
The FoundationDB setup for NixOS should currently be considered
beta. FoundationDB is not new software, but the NixOS compilation
and integration has only undergone fairly basic testing of all the
available functionality.
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
There is no way to specify individual parameters for
individual <command>fdbserver</command> processes. Currently,
all server processes inherit all the global
<command>fdbmonitor</command> settings.
</para>
</listitem>
<listitem>
<para>
Ruby bindings are not currently installed.
</para>
</listitem>
<listitem>
<para>
Go bindings are not currently installed.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-foundationdb-options">
<title>Options</title>
<para>
NixOS's FoundationDB module allows you to configure all of the
most relevant configuration options for
<command>fdbmonitor</command>, matching it quite closely. A
complete list of options for the FoundationDB module may be found
<link linkend="opt-services.foundationdb.enable">here</link>. You
should also read the FoundationDB documentation as well.
</para>
</section>
<section xml:id="module-services-foundationdb-full-docs">
<title>Full documentation</title>
<para>
FoundationDB is a complex piece of software, and requires careful
administration to properly use. Full documentation for
administration can be found here:
<link xlink:href="https://apple.github.io/foundationdb/">https://apple.github.io/foundationdb/</link>.
</para>
</section>
</chapter>

View file

@ -585,6 +585,6 @@ in
};
meta.doc = ./postgresql.xml;
meta.doc = ./postgresql.md;
meta.maintainers = with lib.maintainers; [ thoughtpolice danbst ];
}

View file

@ -1,250 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-postgresql">
<title>PostgreSQL</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/databases/postgresql.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="http://www.postgresql.org/docs/">http://www.postgresql.org/docs/</link>
</para>
<para>
PostgreSQL is an advanced, free relational database.
</para>
<section xml:id="module-services-postgres-configuring">
<title>Configuring</title>
<para>
To enable PostgreSQL, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_11;
</programlisting>
<para>
Note that you are required to specify the desired version of
PostgreSQL (e.g. <literal>pkgs.postgresql_11</literal>). Since
upgrading your PostgreSQL version requires a database dump and
reload (see below), NixOS cannot provide a default value for
<xref linkend="opt-services.postgresql.package" /> such as the
most recent release of PostgreSQL.
</para>
<para>
By default, PostgreSQL stores its databases in
<filename>/var/lib/postgresql/$psqlSchema</filename>. You can
override this using
<xref linkend="opt-services.postgresql.dataDir" />, e.g.
</para>
<programlisting>
services.postgresql.dataDir = &quot;/data/postgresql&quot;;
</programlisting>
</section>
<section xml:id="module-services-postgres-upgrading">
<title>Upgrading</title>
<note>
<para>
The steps below demonstrate how to upgrade from an older version
to <literal>pkgs.postgresql_13</literal>. These instructions are
also applicable to other versions.
</para>
</note>
<para>
Major PostgreSQL upgrades require downtime and a few imperative
steps. This is the case because each major version makes
incompatible changes to the databases' on-disk state. Because of
that, NixOS places the state into
<filename>/var/lib/postgresql/&lt;version&gt;</filename> where
each <literal>version</literal> can be obtained like this:
</para>
<programlisting>
$ nix-instantiate --eval -A postgresql_13.psqlSchema
&quot;13&quot;
</programlisting>
<para>
For an upgrade, a script like this can be used to simplify the
process:
</para>
<programlisting>
{ config, pkgs, ... }:
{
  environment.systemPackages = [
    (let
      # XXX specify the postgresql package you'd like to upgrade to.
      # Do not forget to list the extensions you need.
      newPostgres = pkgs.postgresql_13.withPackages (pp: [
        # pp.plv8
      ]);
    in pkgs.writeScriptBin &quot;upgrade-pg-cluster&quot; ''
      set -eux
      # XXX it's perhaps advisable to stop all services that depend on postgresql
      systemctl stop postgresql

      export NEWDATA=&quot;/var/lib/postgresql/${newPostgres.psqlSchema}&quot;
      export NEWBIN=&quot;${newPostgres}/bin&quot;

      export OLDDATA=&quot;${config.services.postgresql.dataDir}&quot;
      export OLDBIN=&quot;${config.services.postgresql.package}/bin&quot;

      install -d -m 0700 -o postgres -g postgres &quot;$NEWDATA&quot;
      cd &quot;$NEWDATA&quot;
      sudo -u postgres $NEWBIN/initdb -D &quot;$NEWDATA&quot;

      sudo -u postgres $NEWBIN/pg_upgrade \
        --old-datadir &quot;$OLDDATA&quot; --new-datadir &quot;$NEWDATA&quot; \
        --old-bindir $OLDBIN --new-bindir $NEWBIN \
        &quot;$@&quot;
    '')
  ];
}
</programlisting>
<para>
The upgrade process is:
</para>
<orderedlist numeration="arabic">
<listitem>
<para>
Rebuild the NixOS configuration with the snippet above added
to your <filename>configuration.nix</filename>. Alternatively,
add it to a separate file and reference that file in the
<literal>imports</literal> list.
</para>
</listitem>
<listitem>
<para>
Login as root (<literal>sudo su -</literal>)
</para>
</listitem>
<listitem>
<para>
Run <literal>upgrade-pg-cluster</literal>. It will stop the old
PostgreSQL, initialize the new one, and migrate the old data to
the new one. You may supply arguments like
<literal>--jobs 4</literal> and <literal>--link</literal> to
speed up the migration process. See
<link xlink:href="https://www.postgresql.org/docs/current/pgupgrade.html">https://www.postgresql.org/docs/current/pgupgrade.html</link>
for details.
</para>
</listitem>
<listitem>
<para>
Change the postgresql package in your NixOS configuration to the
one you were upgrading to via
<xref linkend="opt-services.postgresql.package" />. Rebuild
NixOS. This should start the new PostgreSQL using the upgraded
data directory, as well as all the services you stopped during
the upgrade.
</para>
</listitem>
<listitem>
<para>
After the upgrade it's advisable to analyze the new cluster.
</para>
<itemizedlist>
<listitem>
<para>
For PostgreSQL ≥ 14, use the <literal>vacuumdb</literal>
command printed by the upgrade script.
</para>
</listitem>
<listitem>
<para>
For PostgreSQL &lt; 14, run (as
<literal>su -l postgres</literal> in the
<xref linkend="opt-services.postgresql.dataDir" />, in
this example <filename>/var/lib/postgresql/13</filename>):
</para>
<programlisting>
$ ./analyze_new_cluster.sh
</programlisting>
</listitem>
</itemizedlist>
<warning>
<para>
The next step removes the old state-directory!
</para>
</warning>
<programlisting>
$ ./delete_old_cluster.sh
</programlisting>
</listitem>
</orderedlist>
</section>
<section xml:id="module-services-postgres-options">
<title>Options</title>
<para>
A complete list of options for the PostgreSQL module may be found
<link linkend="opt-services.postgresql.enable">here</link>.
</para>
</section>
<section xml:id="module-services-postgres-plugins">
<title>Plugins</title>
<para>
Plugins collection for each PostgreSQL version can be accessed
with <literal>.pkgs</literal>. For example, for
<literal>pkgs.postgresql_11</literal> package, its plugin
collection is accessed by
<literal>pkgs.postgresql_11.pkgs</literal>:
</para>
<programlisting>
$ nix repl '&lt;nixpkgs&gt;'
Loading '&lt;nixpkgs&gt;'...
Added 10574 variables.
nix-repl&gt; postgresql_11.pkgs.&lt;TAB&gt;&lt;TAB&gt;
postgresql_11.pkgs.cstore_fdw postgresql_11.pkgs.pg_repack
postgresql_11.pkgs.pg_auto_failover postgresql_11.pkgs.pg_safeupdate
postgresql_11.pkgs.pg_bigm postgresql_11.pkgs.pg_similarity
postgresql_11.pkgs.pg_cron postgresql_11.pkgs.pg_topn
postgresql_11.pkgs.pg_hll postgresql_11.pkgs.pgjwt
postgresql_11.pkgs.pg_partman postgresql_11.pkgs.pgroonga
...
</programlisting>
<para>
To add plugins via NixOS configuration, set
<literal>services.postgresql.extraPlugins</literal>:
</para>
<programlisting>
services.postgresql.package = pkgs.postgresql_11;
services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [
pg_repack
postgis
];
</programlisting>
<para>
You can build custom PostgreSQL-with-plugins (to be used outside
of NixOS) using function <literal>.withPackages</literal>. For
example, creating a custom PostgreSQL package in an overlay can
look like:
</para>
<programlisting>
self: super: {
  postgresql_custom = self.postgresql_11.withPackages (ps: [
    ps.pg_repack
    ps.postgis
  ]);
}
</programlisting>
<para>
Here's a recipe on how to override a particular plugin through an
overlay:
</para>
<programlisting>
self: super: {
  postgresql_11 = super.postgresql_11.override { this = self.postgresql_11; } // {
    pkgs = super.postgresql_11.pkgs // {
      pg_repack = super.postgresql_11.pkgs.pg_repack.overrideAttrs (_: {
        name = &quot;pg_repack-v20181024&quot;;
        src = self.fetchzip {
          url = &quot;https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz&quot;;
          sha256 = &quot;17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5&quot;;
        };
      });
    };
  };
}
</programlisting>
</section>
</chapter>

View file

@ -7,7 +7,7 @@ let
cfg = config.services.flatpak;
in {
meta = {
doc = ./flatpak.xml;
doc = ./flatpak.md;
maintainers = pkgs.flatpak.meta.maintainers;
};

View file

@ -1,59 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-flatpak">
<title>Flatpak</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/desktop/flatpak.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://github.com/flatpak/flatpak/wiki">https://github.com/flatpak/flatpak/wiki</link>
</para>
<para>
Flatpak is a system for building, distributing, and running
sandboxed desktop applications on Linux.
</para>
<para>
To enable Flatpak, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.flatpak.enable = true;
</programlisting>
<para>
For the sandboxed apps to work correctly, desktop integration
portals need to be installed. If you run GNOME, this will be handled
automatically for you; in other cases, you will need to add
something like the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
</programlisting>
<para>
Then, you will need to add a repository, for example,
<link xlink:href="https://github.com/flatpak/flatpak/wiki">Flathub</link>,
either using the following commands:
</para>
<programlisting>
$ flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
$ flatpak update
</programlisting>
<para>
or by opening the
<link xlink:href="https://flathub.org/repo/flathub.flatpakrepo">repository
file</link> in GNOME Software.
</para>
<para>
Finally, you can search and install programs:
</para>
<programlisting>
$ flatpak search bustle
$ flatpak install flathub org.freedesktop.Bustle
$ flatpak run org.freedesktop.Bustle
</programlisting>
<para>
Again, GNOME Software offers a graphical interface for these tasks.
</para>
</chapter>

View file

@ -11,7 +11,7 @@ let
in {
meta = {
maintainers = pkgs.blackfire.meta.maintainers;
doc = ./blackfire.xml;
doc = ./blackfire.md;
};
options = {

View file

@ -1,61 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-blackfire">
<title>Blackfire profiler</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/development/blackfire.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://blackfire.io/docs/introduction">https://blackfire.io/docs/introduction</link>
</para>
<para>
<link xlink:href="https://blackfire.io">Blackfire</link> is a
proprietary tool for profiling applications. There are several
languages supported by the product but currently only PHP support is
packaged in Nixpkgs. The back-end consists of a module that is
loaded into the language runtime (called <emphasis>probe</emphasis>)
and a service (<emphasis>agent</emphasis>) that the probe connects
to and that sends the profiles to the server.
</para>
<para>
To use it, you will need to enable the agent and the probe on your
server. The exact method will depend on the way you use PHP but here
is an example of NixOS configuration for PHP-FPM:
</para>
<programlisting>
let
  php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
    blackfire
  ]));
in {
  # Enable the probe extension for PHP-FPM.
  services.phpfpm = {
    phpPackage = php;
  };

  # Enable and configure the agent.
  services.blackfire-agent = {
    enable = true;
    settings = {
      # You will need to get credentials at https://blackfire.io/my/settings/credentials
      # You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent
      server-id = &quot;XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX&quot;;
      server-token = &quot;XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX&quot;;
    };
  };

  # Make the agent run on start-up.
  # (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138)
  # Alternately, you can start it manually with `systemctl start blackfire-agent`.
  systemd.services.blackfire-agent.wantedBy = [ &quot;phpfpm-foo.service&quot; ];
}
</programlisting>
<para>
On your developer machine, you will also want to install
<link xlink:href="https://blackfire.io/docs/up-and-running/installation#install-a-profiling-client">the
client</link> (see <literal>blackfire</literal> package) or the
browser extension to actually trigger the profiling.
</para>
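<para>
If that developer machine also runs NixOS, one way to do this is
simply (a sketch using the package name mentioned above):
</para>
<programlisting>
{
  environment.systemPackages = [ pkgs.blackfire ];
}
</programlisting>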
</chapter>

View file

@ -99,5 +99,5 @@ in
environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "${editorScript}/bin/emacseditor");
};
meta.doc = ./emacs.xml;
meta.doc = ./emacs.md;
}

View file

@ -1,490 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-emacs">
<title>Emacs</title>
<para>
<link xlink:href="https://www.gnu.org/software/emacs/">Emacs</link>
is an extensible, customizable, self-documenting real-time display
editor — and more. At its core is an interpreter for Emacs Lisp, a
dialect of the Lisp programming language with extensions to support
text editing.
</para>
<para>
Emacs runs within a graphical desktop environment using the X Window
System, but works equally well on a text terminal. Under macOS, a
<quote>Mac port</quote> edition is available, which uses Apple's
native GUI frameworks.
</para>
<para>
Nixpkgs provides a superior environment for running Emacs. It's
simple to create custom builds by overriding the default packages.
Chaotic collections of Emacs Lisp code and extensions can be brought
under control using declarative package management. NixOS even
provides a <command>systemd</command> user service for automatically
starting the Emacs daemon.
</para>
<section xml:id="module-services-emacs-installing">
<title>Installing Emacs</title>
<para>
Emacs can be installed in the normal way for Nix (see
<xref linkend="sec-package-management" />). In addition, a NixOS
<emphasis>service</emphasis> can be enabled.
</para>
<section xml:id="module-services-emacs-releases">
<title>The Different Releases of Emacs</title>
<para>
Nixpkgs defines several basic Emacs packages. The following are
attributes belonging to the <varname>pkgs</varname> set:
</para>
<variablelist spacing="compact">
<varlistentry>
<term>
<varname>emacs</varname>
</term>
<listitem>
<para>
The latest stable version of Emacs using the
<link xlink:href="http://www.gtk.org">GTK 2</link> widget
toolkit.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>emacs-nox</varname>
</term>
<listitem>
<para>
Emacs built without any dependency on X11 libraries.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>emacsMacport</varname>
</term>
<listitem>
<para>
Emacs with the <quote>Mac port</quote> patches, providing
a more native look and feel under macOS.
</para>
</listitem>
</varlistentry>
</variablelist>
<para>
If those aren't suitable, then the following imitation Emacs
editors are also available in Nixpkgs:
<link xlink:href="https://www.gnu.org/software/zile/">Zile</link>,
<link xlink:href="http://homepage.boetes.org/software/mg/">mg</link>,
<link xlink:href="http://yi-editor.github.io/">Yi</link>,
<link xlink:href="https://joe-editor.sourceforge.io/">jmacs</link>.
</para>
</section>
<section xml:id="module-services-emacs-adding-packages">
<title>Adding Packages to Emacs</title>
<para>
Emacs includes an entire ecosystem of functionality beyond text
editing, including a project planner, mail and news reader,
debugger interface, calendar, and more.
</para>
<para>
Most extensions are obtained with the Emacs packaging system
(<filename>package.el</filename>) from
<link xlink:href="https://elpa.gnu.org/">Emacs Lisp Package
Archive (ELPA)</link>,
<link xlink:href="https://melpa.org/">MELPA</link>,
<link xlink:href="https://stable.melpa.org/">MELPA
Stable</link>, and
<link xlink:href="http://orgmode.org/elpa.html">Org ELPA</link>.
Nixpkgs is regularly updated to mirror all these archives.
</para>
<para>
Under NixOS, you can continue to use
<literal>package-list-packages</literal> and
<literal>package-install</literal> to install packages. You can
also declare the set of Emacs packages you need using the
derivations from Nixpkgs. The rest of this section discusses
declarative installation of Emacs packages through nixpkgs.
</para>
<para>
The first step to declare the list of packages you want in your
Emacs installation is to create a dedicated derivation. This can
be done in a dedicated <filename>emacs.nix</filename> file such
as:
</para>
<para>
<anchor xml:id="ex-emacsNix" />
</para>
<programlisting language="nix">
/*
This is a nix expression to build Emacs and some Emacs packages I like
from source on any distribution where Nix is installed. This will install
all the dependencies from the nixpkgs repository and build the binary files
without interfering with the host distribution.

To build the project, type the following from the current directory:

  $ nix-build emacs.nix

To run the newly compiled executable:

  $ ./result/bin/emacs
*/

# The first non-comment line in this file indicates that
# the whole file represents a function.
{ pkgs ? import &lt;nixpkgs&gt; {} }:

let
  # The let expression below defines a myEmacs binding pointing to the
  # current stable version of Emacs. This binding is here to separate
  # the choice of the Emacs binary from the specification of the
  # required packages.
  myEmacs = pkgs.emacs;

  # This generates an emacsWithPackages function. It takes a single
  # argument: a function from a package set to a list of packages
  # (the packages that will be available in Emacs).
  emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages;
in
  # The rest of the file specifies the list of packages to install. In the
  # example, two packages (magit and zerodark-theme) are taken from
  # MELPA stable.
  emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [
    magit          # ; Integrate git &lt;C-x g&gt;
    zerodark-theme # ; Nicolas' theme
  ])

  # Two packages (undo-tree and zoom-frm) are taken from MELPA.
  ++ (with epkgs.melpaPackages; [
    undo-tree      # ; &lt;C-x u&gt; to show the undo tree
    zoom-frm       # ; increase/decrease font size for all buffers &lt;C-x C-+&gt;
  ])

  # Three packages are taken from GNU ELPA.
  ++ (with epkgs.elpaPackages; [
    auctex         # ; LaTeX mode
    beacon         # ; highlight my cursor when scrolling
    nameless       # ; hide current package name everywhere in elisp code
  ])

  # notmuch is taken from a nixpkgs derivation which contains an Emacs mode.
  ++ [
    pkgs.notmuch   # From main packages set
  ])
</programlisting>
<para>
The result of this configuration will be an
<command>emacs</command> command which launches Emacs with all
of your chosen packages in the <varname>load-path</varname>.
</para>
<para>
You can check that it works by executing this in a terminal:
</para>
<programlisting>
$ nix-build emacs.nix
$ ./result/bin/emacs -q
</programlisting>
<para>
and then typing <literal>M-x package-initialize</literal>. Check
that you can use all the packages you want in this Emacs
instance. For example, try switching to the zerodark theme
through
<literal>M-x load-theme &lt;RET&gt; zerodark &lt;RET&gt; y</literal>.
</para>
<tip>
<para>
A few popular extensions worth checking out are: auctex,
company, edit-server, flycheck, helm, iedit, magit,
multiple-cursors, projectile, and yasnippet.
</para>
</tip>
<para>
The list of available packages in the various ELPA repositories
can be seen with the following commands:
<anchor xml:id="module-services-emacs-querying-packages" />
</para>
<programlisting>
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.elpaPackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.melpaPackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.melpaStablePackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.orgPackages
</programlisting>
<para>
If you are on NixOS, you can install this particular Emacs for
all users by adding it to the list of system packages (see
<xref linkend="sec-declarative-package-mgmt" />). Simply modify
your file <filename>configuration.nix</filename> to make it
contain:
<anchor xml:id="module-services-emacs-configuration-nix" />
</para>
<programlisting>
{
environment.systemPackages = [
# [...]
(import /path/to/emacs.nix { inherit pkgs; })
];
}
</programlisting>
<para>
In this case, the next <command>nixos-rebuild switch</command>
will take care of adding your <command>emacs</command> to the
<varname>PATH</varname> environment variable (see
<xref linkend="sec-changing-config" />).
</para>
<para>
If you are not on NixOS or want to install this particular Emacs
only for yourself, you can do so by adding it to your
<filename>~/.config/nixpkgs/config.nix</filename> (see
<link xlink:href="https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
manual</link>):
<anchor xml:id="module-services-emacs-config-nix" />
</para>
<programlisting>
{
packageOverrides = super: let self = super.pkgs; in {
myemacs = import /path/to/emacs.nix { pkgs = self; };
};
}
</programlisting>
<para>
In this case, the next
<literal>nix-env -f '&lt;nixpkgs&gt;' -iA myemacs</literal> will
take care of adding your emacs to the <varname>PATH</varname>
environment variable.
</para>
</section>
<section xml:id="module-services-emacs-advanced">
<title>Advanced Emacs Configuration</title>
<para>
If you want, you can tweak the Emacs package itself from your
<filename>emacs.nix</filename>. For example, if you want to have
a GTK 3-based Emacs instead of the default GTK 2-based binary
and remove the automatically generated
<filename>emacs.desktop</filename> (useful if you only use
<command>emacsclient</command>), you can change your file
<filename>emacs.nix</filename> in this way:
</para>
<para>
<anchor xml:id="ex-emacsGtk3Nix" />
</para>
<programlisting>
{ pkgs ? import &lt;nixpkgs&gt; {} }:
let
  myEmacs = (pkgs.emacs.override {
    # Use gtk3 instead of the default gtk2
    withGTK3 = true;
    withGTK2 = false;
  }).overrideAttrs (attrs: {
    # I don't want emacs.desktop file because I only use
    # emacsclient.
    postInstall = (attrs.postInstall or &quot;&quot;) + ''
      rm $out/share/applications/emacs.desktop
    '';
  });
in [...]
</programlisting>
<para>
After building this file as shown in
<link linkend="ex-emacsNix">the example above</link>, you will
get a GTK 3-based Emacs binary pre-loaded with your favorite
packages.
</para>
</section>
</section>
<section xml:id="module-services-emacs-running">
<title>Running Emacs as a Service</title>
<para>
NixOS provides an optional <command>systemd</command> service
which launches
<link xlink:href="https://www.gnu.org/software/emacs/manual/html_node/emacs/Emacs-Server.html">Emacs
daemon</link> with the user's login session.
</para>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/editors/emacs.nix</filename>
</para>
<section xml:id="module-services-emacs-enabling">
<title>Enabling the Service</title>
<para>
To install and enable the <command>systemd</command> user
service for Emacs daemon, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.emacs.enable = true;
services.emacs.package = import /home/cassou/.emacs.d { pkgs = pkgs; };
</programlisting>
<para>
The <varname>services.emacs.package</varname> option allows a
custom derivation to be used, for example, one created by
<literal>emacsWithPackages</literal>.
</para>
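<para>
As a sketch, instead of importing a separate
<filename>emacs.nix</filename>, such a package could also be built
inline, reusing the functions shown in the example above (the
package choice is only illustrative):
</para>
<programlisting>
services.emacs.package =
  (pkgs.emacsPackagesFor pkgs.emacs).emacsWithPackages
    (epkgs: [ epkgs.melpaStablePackages.magit ]);
</programlisting>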
<para>
Ensure that the Emacs server is enabled for your user's Emacs
configuration, either by customizing the
<varname>server-mode</varname> variable, or by adding
<literal>(server-start)</literal> to
<filename>~/.emacs.d/init.el</filename>.
</para>
<para>
To start the daemon, execute the following:
</para>
<programlisting>
$ nixos-rebuild switch # to activate the new configuration.nix
$ systemctl --user daemon-reload # to force systemd reload
$ systemctl --user start emacs.service # to start the Emacs daemon
</programlisting>
<para>
The server should now be ready to serve Emacs clients.
</para>
</section>
<section xml:id="module-services-emacs-starting-client">
<title>Starting the client</title>
<para>
Ensure that the emacs server is enabled, either by customizing
the <varname>server-mode</varname> variable, or by adding
<literal>(server-start)</literal> to
<filename>~/.emacs</filename>.
</para>
<para>
To connect to the emacs daemon, run one of the following:
</para>
<programlisting>
emacsclient FILENAME
emacsclient --create-frame # opens a new frame (window)
emacsclient --create-frame --tty # opens a new frame on the current terminal
</programlisting>
</section>
<section xml:id="module-services-emacs-editor-variable">
<title>Configuring the <varname>EDITOR</varname> variable</title>
<para>
If <xref linkend="opt-services.emacs.defaultEditor" /> is
<literal>true</literal>, the <varname>EDITOR</varname> variable
will be set to a wrapper script which launches
<command>emacsclient</command>.
</para>
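<para>
Enabling this is a one-line setting (shown here as a sketch):
</para>
<programlisting>
services.emacs.defaultEditor = true;
</programlisting>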
<para>
Any setting of <varname>EDITOR</varname> in the shell config
files will override
<varname>services.emacs.defaultEditor</varname>. To make sure
<varname>EDITOR</varname> refers to the Emacs wrapper script,
remove any existing <varname>EDITOR</varname> assignment from
<filename>.profile</filename>, <filename>.bashrc</filename>,
<filename>.zshenv</filename> or any other shell config file.
</para>
<para>
If you have formed certain bad habits when editing files, these
can be corrected with a shell alias to the wrapper script:
</para>
<programlisting>
alias vi=$EDITOR
</programlisting>
</section>
<section xml:id="module-services-emacs-per-user">
<title>Per-User Enabling of the Service</title>
<para>
In general, <command>systemd</command> user services are
globally enabled by symlinks in
<filename>/etc/systemd/user</filename>. In the case where Emacs
daemon is not wanted for all users, it is possible to install
the service but not globally enable it:
</para>
<programlisting>
services.emacs.enable = false;
services.emacs.install = true;
</programlisting>
<para>
To enable the <command>systemd</command> user service for just
the currently logged in user, run:
</para>
<programlisting>
systemctl --user enable emacs
</programlisting>
<para>
This will add the symlink
<filename>~/.config/systemd/user/emacs.service</filename>.
</para>
</section>
</section>
<section xml:id="module-services-emacs-configuring">
<title>Configuring Emacs</title>
<para>
The Emacs init file should be changed to load the extension
packages at startup:
<anchor xml:id="module-services-emacs-package-initialisation" />
</para>
<programlisting>
(require 'package)
;; optional. makes unpure packages archives unavailable
(setq package-archives nil)
(setq package-enable-at-startup nil)
(package-initialize)
</programlisting>
<para>
After the declarative emacs package configuration has been tested,
previously downloaded packages can be cleaned up by removing
<filename>~/.emacs.d/elpa</filename> (do make a backup first, in
case you forgot a package).
</para>
<section xml:id="module-services-emacs-major-mode">
<title>A Major Mode for Nix Expressions</title>
<para>
Of interest may be <varname>melpaPackages.nix-mode</varname>,
which provides syntax highlighting for the Nix language. This is
particularly convenient if you regularly edit Nix files.
</para>
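<para>
It can be added to the <filename>emacs.nix</filename> shown
earlier, for example with another group like this (a sketch):
</para>
<programlisting>
++ (with epkgs.melpaPackages; [
  nix-mode   # ; Syntax highlighting for .nix files
])
</programlisting>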
</section>
<section xml:id="module-services-emacs-man-pages">
<title>Accessing man pages</title>
<para>
You can use <literal>woman</literal> to get completion of all
available man pages. For example, type
<literal>M-x woman &lt;RET&gt; nixos-rebuild &lt;RET&gt;.</literal>
</para>
</section>
<section xml:id="sec-emacs-docbook-xml">
<title>Editing DocBook 5 XML Documents</title>
<para>
Emacs includes
<link xlink:href="https://www.gnu.org/software/emacs/manual/html_node/nxml-mode/Introduction.html">nXML</link>,
a major-mode for validating and editing XML documents. When
editing DocBook 5.0 documents, such as
<link linkend="book-nixos-manual">this one</link>, nXML needs to
be configured with the relevant schema, which is not included.
</para>
<para>
To install the DocBook 5.0 schemas, either add
<varname>pkgs.docbook5</varname> to
<xref linkend="opt-environment.systemPackages" />
(<link linkend="sec-declarative-package-mgmt">NixOS</link>), or
run <literal>nix-env -f '&lt;nixpkgs&gt;' -iA docbook5</literal>
(<link linkend="sec-ad-hoc-packages">Nix</link>).
</para>
<para>
Then customize the variable
<varname>rng-schema-locating-files</varname> to include
<filename>~/.emacs.d/schemas.xml</filename> and put the
following text into that file:
<anchor xml:id="ex-emacs-docbook-xml" />
</para>
<programlisting language="xml">
&lt;?xml version=&quot;1.0&quot;?&gt;
&lt;!--
To let emacs find this file, evaluate:
(add-to-list 'rng-schema-locating-files &quot;~/.emacs.d/schemas.xml&quot;)
--&gt;
&lt;locatingRules xmlns=&quot;http://thaiopensource.com/ns/locating-rules/1.0&quot;&gt;
&lt;!--
Use this variation if pkgs.docbook5 is added to environment.systemPackages
--&gt;
&lt;namespace ns=&quot;http://docbook.org/ns/docbook&quot;
uri=&quot;/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc&quot;/&gt;
&lt;!--
Use this variation if installing schema with &quot;nix-env -iA pkgs.docbook5&quot;.
&lt;namespace ns=&quot;http://docbook.org/ns/docbook&quot;
uri=&quot;../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc&quot;/&gt;
--&gt;
&lt;/locatingRules&gt;
</programlisting>
</section>
</section>
</chapter>

View file

@ -8,7 +8,7 @@ in {
### docs
meta = {
doc = ./trezord.xml;
doc = ./trezord.md;
};
### interface

View file

@ -1,29 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="trezor">
<title>Trezor</title>
<para>
Trezor is an open-source cryptocurrency hardware wallet and security
token allowing secure storage of private keys.
</para>
<para>
It offers advanced features such as U2F two-factor authorization, SSH
login through
<link xlink:href="https://wiki.trezor.io/Apps:SSH_agent">Trezor SSH
agent</link>,
<link xlink:href="https://wiki.trezor.io/GPG">GPG</link> and a
<link xlink:href="https://wiki.trezor.io/Trezor_Password_Manager">password
manager</link>. For more information, guides and documentation, see
<link xlink:href="https://wiki.trezor.io">https://wiki.trezor.io</link>.
</para>
<para>
To enable Trezor support, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.trezord.enable = true;
</programlisting>
<para>
This will add all necessary udev rules and start Trezor Bridge.
</para>
</chapter>

View file

@ -642,7 +642,7 @@ in {
meta = {
maintainers = with lib.maintainers; [ lheckemann qyliss ma27 ];
doc = ./mailman.xml;
doc = ./mailman.md;
};
}

View file

@ -1,112 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mailman">
<title>Mailman</title>
<para>
<link xlink:href="https://www.list.org">Mailman</link> is free
software for managing electronic mail discussion and e-newsletter
lists. Mailman and its web interface can be configured using the
corresponding NixOS module. Note that this service is best used with
an existing, securely configured Postfix setup, as it does not
automatically configure this.
</para>
<section xml:id="module-services-mailman-basic-usage">
<title>Basic usage with Postfix</title>
<para>
For a basic configuration with Postfix as the MTA, the following
settings are suggested:
</para>
<programlisting>
{ config, ... }: {
  services.postfix = {
    enable = true;
    relayDomains = [&quot;hash:/var/lib/mailman/data/postfix_domains&quot;];
    sslCert = config.security.acme.certs.&quot;lists.example.org&quot;.directory + &quot;/full.pem&quot;;
    sslKey = config.security.acme.certs.&quot;lists.example.org&quot;.directory + &quot;/key.pem&quot;;
    config = {
      transport_maps = [&quot;hash:/var/lib/mailman/data/postfix_lmtp&quot;];
      local_recipient_maps = [&quot;hash:/var/lib/mailman/data/postfix_lmtp&quot;];
    };
  };
  services.mailman = {
    enable = true;
    serve.enable = true;
    hyperkitty.enable = true;
    webHosts = [&quot;lists.example.org&quot;];
    siteOwner = &quot;mailman@example.org&quot;;
  };
  services.nginx.virtualHosts.&quot;lists.example.org&quot;.enableACME = true;
  networking.firewall.allowedTCPPorts = [ 25 80 443 ];
}
</programlisting>
<para>
DNS records will also be required:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>AAAA</literal> and <literal>A</literal> records
pointing to the host in question, in order for browsers to be
able to discover the address of the web server;
</para>
</listitem>
<listitem>
<para>
An <literal>MX</literal> record pointing to a domain name at
which the host is reachable, in order for other mail servers
to be able to deliver emails to the mailing lists it hosts.
</para>
</listitem>
</itemizedlist>
<para>
After this has been done and appropriate DNS records have been set
up, the Postorius mailing list manager and the Hyperkitty archive
browser will be available at https://lists.example.org/. Note that
this setup is not sufficient to deliver emails to most email
providers nor to avoid spam: a number of additional measures for
authenticating incoming and outgoing mails, such as SPF, DMARC and
DKIM, are necessary, but outside the scope of the Mailman module.
</para>
</section>
<section xml:id="module-services-mailman-other-mtas">
<title>Using with other MTAs</title>
<para>
Mailman also supports other MTAs, though with a little bit more
configuration. For example, to use Mailman with Exim, you can use
the following settings:
</para>
<programlisting>
{ config, ... }: {
  services = {
    mailman = {
      enable = true;
      siteOwner = &quot;mailman@example.org&quot;;
      enablePostfix = false;
      settings.mta = {
        incoming = &quot;mailman.mta.exim4.LMTP&quot;;
        outgoing = &quot;mailman.mta.deliver.deliver&quot;;
        lmtp_host = &quot;localhost&quot;;
        lmtp_port = &quot;8024&quot;;
        smtp_host = &quot;localhost&quot;;
        smtp_port = &quot;25&quot;;
        configuration = &quot;python:mailman.config.exim4&quot;;
      };
    };
    exim = {
      enable = true;
      # You can configure Exim in a separate file to reduce configuration.nix clutter
      config = builtins.readFile ./exim.conf;
    };
  };
}
</programlisting>
<para>
The Exim config needs some special additions to work with Mailman.
Currently, NixOS can't manage the Exim config with such granularity.
Please refer to
<link xlink:href="https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html">Mailman
documentation</link> for more info on configuring Mailman for
working with Exim.
</para>
</section>
</chapter>

View file

@ -236,7 +236,7 @@ in
};
meta = {
doc = ./mjolnir.xml;
doc = ./mjolnir.md;
maintainers = with maintainers; [ jojosch ];
};
}

View file

@ -1,148 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mjolnir">
<title>Mjolnir (Matrix Moderation Tool)</title>
<para>
This chapter will show you how to set up your own, self-hosted
<link xlink:href="https://github.com/matrix-org/mjolnir">Mjolnir</link>
instance.
</para>
<para>
As an all-in-one moderation tool, it can protect your server from
malicious invites, spam messages, and whatever else you don't want.
In addition to server-level protection, Mjolnir is great for
communities wanting to protect their rooms without having to use
their personal accounts for moderation.
</para>
<para>
The bot by default includes support for bans, redactions, anti-spam,
server ACLs, room directory changes, room alias transfers, account
deactivation, room shutdown, and more.
</para>
<para>
See the
<link xlink:href="https://github.com/matrix-org/mjolnir#readme">README</link>
page and the
<link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md">Moderators
guide</link> for additional instructions on how to setup and use
Mjolnir.
</para>
<para>
For <link linkend="opt-services.mjolnir.settings">additional
settings</link> see
<link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml">the
default configuration</link>.
</para>
<section xml:id="module-services-mjolnir-setup">
<title>Mjolnir Setup</title>
<para>
First create a new Room which will be used as a management room
for Mjolnir. In this room, Mjolnir will log possible errors and
debugging information. You'll need to set this Room-ID in
<link linkend="opt-services.mjolnir.managementRoom">services.mjolnir.managementRoom</link>.
</para>
<para>
Next, create a new user for Mjolnir on your homeserver, if not
present already.
</para>
<para>
The Mjolnir Matrix user expects to be free of any rate limiting.
See
<link xlink:href="https://github.com/matrix-org/synapse/issues/6286">Synapse
#6286</link> for an example on how to achieve this.
</para>
<para>
If you want Mjolnir to be able to deactivate users, move room
aliases, shut down rooms, etc., you'll need to make the Mjolnir user
a Matrix server admin.
</para>
<para>
Now invite the Mjolnir user to the management room.
</para>
<para>
It is recommended to use
<link xlink:href="https://github.com/matrix-org/pantalaimon">Pantalaimon</link>,
so your management room can be encrypted. This also applies if you
are looking to moderate an encrypted room.
</para>
<para>
To enable the Pantalaimon E2E Proxy for mjolnir, enable
<link linkend="opt-services.mjolnir.pantalaimon.enable">services.mjolnir.pantalaimon</link>.
This will autoconfigure a new Pantalaimon instance, which will
connect to the homeserver set in
<link linkend="opt-services.mjolnir.homeserverUrl">services.mjolnir.homeserverUrl</link>
and Mjolnir itself will be configured to connect to the new
Pantalaimon instance.
</para>
<programlisting>
{
services.mjolnir = {
enable = true;
homeserverUrl = &quot;https://matrix.domain.tld&quot;;
pantalaimon = {
enable = true;
username = &quot;mjolnir&quot;;
passwordFile = &quot;/run/secrets/mjolnir-password&quot;;
};
protectedRooms = [
&quot;https://matrix.to/#/!xxx:domain.tld&quot;
];
managementRoom = &quot;!yyy:domain.tld&quot;;
};
}
</programlisting>
<section xml:id="module-services-mjolnir-setup-ems">
<title>Element Matrix Services (EMS)</title>
<para>
If you are using a managed
<link xlink:href="https://ems.element.io/"><quote>Element Matrix
Services (EMS)</quote></link> server, you will need to consent
to the terms and conditions. Upon startup, an error log entry
with a URL to the consent page will be generated.
</para>
</section>
</section>
<section xml:id="module-services-mjolnir-matrix-synapse-antispam">
<title>Synapse Antispam Module</title>
<para>
A Synapse module is also available to apply the same rulesets the
bot uses across an entire homeserver.
</para>
<para>
To use the Antispam Module, add
<literal>matrix-synapse-plugins.matrix-synapse-mjolnir-antispam</literal>
to the Synapse plugin list and enable the
<literal>mjolnir.Module</literal> module.
</para>
<programlisting>
{
services.matrix-synapse = {
plugins = with pkgs; [
matrix-synapse-plugins.matrix-synapse-mjolnir-antispam
];
extraConfig = ''
modules:
- module: mjolnir.Module
config:
# Prevent servers/users in the ban lists from inviting users on this
# server to rooms. Default true.
block_invites: true
# Flag messages sent by servers/users in the ban lists as spam. Currently
# this means that spammy messages will appear as empty to users. Default
# false.
block_messages: false
# Remove users from the user directory search by filtering matrix IDs and
# display names by the entries in the user ban list. Default false.
block_usernames: false
# The room IDs of the ban lists to honour. Unlike other parts of Mjolnir,
# this list cannot be room aliases or permalinks. This server is expected
# to already be joined to the room - Mjolnir will not automatically join
# these rooms.
ban_lists:
- &quot;!roomid:example.org&quot;
'';
};
}
</programlisting>
</section>
</chapter>

View file

@ -801,7 +801,7 @@ in {
meta = {
buildDocsInSandbox = false;
doc = ./synapse.xml;
doc = ./synapse.md;
maintainers = teams.matrix.members;
};

View file

@ -1,263 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-matrix">
<title>Matrix</title>
<para>
<link xlink:href="https://matrix.org/">Matrix</link> is an open
standard for interoperable, decentralised, real-time communication
over IP. It can be used to power Instant Messaging, VoIP/WebRTC
signalling, Internet of Things communication - or anywhere you need
a standard HTTP API for publishing and subscribing to data whilst
tracking the conversation history.
</para>
<para>
This chapter will show you how to set up your own, self-hosted
Matrix homeserver using the Synapse reference homeserver, and how to
serve your own copy of the Element web client. See the
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> overview page for links to Element Apps for
Android and iOS, desktop clients, as well as bridges to other
networks and other projects around Matrix.
</para>
<section xml:id="module-services-matrix-synapse">
<title>Synapse Homeserver</title>
<para>
<link xlink:href="https://github.com/matrix-org/synapse">Synapse</link>
is the reference homeserver implementation of Matrix from the core
development team at matrix.org. The following configuration
example will set up a synapse server for the
<literal>example.org</literal> domain, served from the host
<literal>myhostname.example.org</literal>. For more information,
please refer to the
<link xlink:href="https://matrix-org.github.io/synapse/latest/setup/installation.html">installation
instructions of Synapse</link> .
</para>
<programlisting>
{ pkgs, lib, config, ... }:
let
fqdn = &quot;${config.networking.hostName}.${config.networking.domain}&quot;;
clientConfig = {
&quot;m.homeserver&quot;.base_url = &quot;https://${fqdn}&quot;;
&quot;m.identity_server&quot; = {};
};
serverConfig.&quot;m.server&quot; = &quot;${config.services.matrix-synapse.settings.server_name}:443&quot;;
mkWellKnown = data: ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON data}';
'';
in {
networking.hostName = &quot;myhostname&quot;;
networking.domain = &quot;example.org&quot;;
networking.firewall.allowedTCPPorts = [ 80 443 ];
services.postgresql.enable = true;
services.postgresql.initialScript = pkgs.writeText &quot;synapse-init.sql&quot; ''
CREATE ROLE &quot;matrix-synapse&quot; WITH LOGIN PASSWORD 'synapse';
CREATE DATABASE &quot;matrix-synapse&quot; WITH OWNER &quot;matrix-synapse&quot;
TEMPLATE template0
LC_COLLATE = &quot;C&quot;
LC_CTYPE = &quot;C&quot;;
'';
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
virtualHosts = {
# If the A and AAAA DNS records on example.org do not point on the same host as the
# records for myhostname.example.org, you can easily move the /.well-known
# virtualHost section of the code to the host that is serving example.org, while
# the rest stays on myhostname.example.org with no other changes required.
# This pattern also allows to seamlessly move the homeserver from
# myhostname.example.org to myotherhost.example.org by only changing the
# /.well-known redirection target.
&quot;${config.networking.domain}&quot; = {
enableACME = true;
forceSSL = true;
# This section is not needed if the server_name of matrix-synapse is equal to
# the domain (i.e. example.org from @foo:example.org) and the federation port
# is 8448.
# Further reference can be found in the docs about delegation under
# https://matrix-org.github.io/synapse/latest/delegate.html
locations.&quot;= /.well-known/matrix/server&quot;.extraConfig = mkWellKnown serverConfig;
# This is usually needed for homeserver discovery (from e.g. other Matrix clients).
# Further reference can be found in the upstream docs at
# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
locations.&quot;= /.well-known/matrix/client&quot;.extraConfig = mkWellKnown clientConfig;
};
&quot;${fqdn}&quot; = {
enableACME = true;
forceSSL = true;
# It's also possible to do a redirect here or something else, this vhost is not
# needed for Matrix. It's recommended though to *not put* element
# here, see also the section about Element.
locations.&quot;/&quot;.extraConfig = ''
return 404;
'';
# Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
# *must not* be used here.
locations.&quot;/_matrix&quot;.proxyPass = &quot;http://[::1]:8008&quot;;
# Forward requests for e.g. SSO and password-resets.
locations.&quot;/_synapse/client&quot;.proxyPass = &quot;http://[::1]:8008&quot;;
};
};
};
services.matrix-synapse = {
enable = true;
settings.server_name = config.networking.domain;
settings.listeners = [
{ port = 8008;
bind_addresses = [ &quot;::1&quot; ];
type = &quot;http&quot;;
tls = false;
x_forwarded = true;
resources = [ {
names = [ &quot;client&quot; &quot;federation&quot; ];
compress = true;
} ];
}
];
};
}
</programlisting>
</section>
<section xml:id="module-services-matrix-register-users">
<title>Registering Matrix users</title>
<para>
If you want to run a server with public registration by anybody,
you can then enable
<literal>services.matrix-synapse.settings.enable_registration = true;</literal>.
      Otherwise, you can generate a registration secret with
<command>pwgen -s 64 1</command> and set it with
<xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />.
To create a new user or admin, run the following after you have
set the secret and have rebuilt NixOS:
</para>
<programlisting>
$ nix-shell -p matrix-synapse
$ register_new_matrix_user -k your-registration-shared-secret http://localhost:8008
New user localpart: your-username
Password:
Confirm password:
Make admin [no]:
Success!
</programlisting>
<para>
In the example, this would create a user with the Matrix
Identifier <literal>@your-username:example.org</literal>.
</para>
<warning>
<para>
When using
<xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />,
      the secret will end up in the world-readable store. Instead it's
recommended to deploy the secret in an additional file like
this:
</para>
<itemizedlist>
<listitem>
<para>
Create a file with the following contents:
</para>
<programlisting>
registration_shared_secret: your-very-secret-secret
</programlisting>
</listitem>
<listitem>
<para>
Deploy the file with a secret-manager such as
<link xlink:href="https://nixops.readthedocs.io/en/latest/overview.html#managing-keys"><option>deployment.keys</option></link>
from
<citerefentry><refentrytitle>nixops</refentrytitle><manvolnum>1</manvolnum></citerefentry>
or
<link xlink:href="https://github.com/Mic92/sops-nix/">sops-nix</link>
to e.g.
<filename>/run/secrets/matrix-shared-secret</filename> and
          ensure that it's readable by
<literal>matrix-synapse</literal>.
</para>
</listitem>
<listitem>
<para>
Include the file like this in your configuration:
</para>
<programlisting>
{
services.matrix-synapse.extraConfigFiles = [
&quot;/run/secrets/matrix-shared-secret&quot;
];
}
</programlisting>
</listitem>
</itemizedlist>
</warning>
<note>
<para>
      It's also possible to use alternative authentication mechanisms
such as
<link xlink:href="https://github.com/matrix-org/matrix-synapse-ldap3">LDAP
(via <literal>matrix-synapse-ldap3</literal>)</link> or
<link xlink:href="https://matrix-org.github.io/synapse/latest/openid.html">OpenID</link>.
</para>
</note>
</section>
<section xml:id="module-services-matrix-element-web">
<title>Element (formerly known as Riot) Web Client</title>
<para>
<link xlink:href="https://github.com/vector-im/riot-web/">Element
Web</link> is the reference web client for Matrix and developed by
the core team at matrix.org. Element was formerly known as
Riot.im, see the
<link xlink:href="https://element.io/blog/welcome-to-element/">Element
introductory blog post</link> for more information. The following
    snippet can optionally be added to the configuration above to complete the
    Synapse installation with a web client served at
<literal>https://element.myhostname.example.org</literal> and
<literal>https://element.example.org</literal>. Alternatively, you
can use the hosted copy at
<link xlink:href="https://app.element.io/">https://app.element.io/</link>,
or use other web clients or native client applications. Due to the
    <literal>/.well-known</literal> URLs set up above, many
clients should fill in the required connection details
automatically when you enter your Matrix Identifier. See
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> for a list of existing clients and their
supported featureset.
</para>
<programlisting>
{
services.nginx.virtualHosts.&quot;element.${fqdn}&quot; = {
enableACME = true;
forceSSL = true;
serverAliases = [
&quot;element.${config.networking.domain}&quot;
];
root = pkgs.element-web.override {
conf = {
default_server_config = clientConfig; # see `clientConfig` from the snippet above.
};
};
};
}
</programlisting>
<note>
<para>
The Element developers do not recommend running Element and your
Matrix homeserver on the same fully-qualified domain name for
security reasons. In the example, this means that you should not
reuse the <literal>myhostname.example.org</literal> virtualHost
to also serve Element, but instead serve it on a different
subdomain, like <literal>element.example.org</literal> in the
example. See the
<link xlink:href="https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes">Element
Important Security Notes</link> for more information on this
subject.
</para>
</note>
</section>
</chapter>

View file

@ -1504,6 +1504,6 @@ in {
};
meta.doc = ./gitlab.xml;
meta.doc = ./gitlab.md;
}

View file

@ -1,143 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-gitlab">
<title>GitLab</title>
<para>
GitLab is a feature-rich git hosting service.
</para>
<section xml:id="module-services-gitlab-prerequisites">
<title>Prerequisites</title>
<para>
      The <literal>gitlab</literal> service exposes only a Unix socket
at <literal>/run/gitlab/gitlab-workhorse.socket</literal>. You
need to configure a webserver to proxy HTTP requests to the
socket.
</para>
<para>
For instance, the following configuration could be used to use
      nginx as a frontend proxy:
</para>
<programlisting>
services.nginx = {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
virtualHosts.&quot;git.example.com&quot; = {
enableACME = true;
forceSSL = true;
locations.&quot;/&quot;.proxyPass = &quot;http://unix:/run/gitlab/gitlab-workhorse.socket&quot;;
};
};
</programlisting>
</section>
<section xml:id="module-services-gitlab-configuring">
<title>Configuring</title>
<para>
GitLab depends on both PostgreSQL and Redis and will automatically
enable both services. In the case of PostgreSQL, a database and a
role will be created.
</para>
<para>
The default state dir is <literal>/var/gitlab/state</literal>.
This is where all data like the repositories and uploads will be
stored.
</para>
<para>
A basic configuration with some custom settings could look like
this:
</para>
<programlisting>
services.gitlab = {
enable = true;
databasePasswordFile = &quot;/var/keys/gitlab/db_password&quot;;
initialRootPasswordFile = &quot;/var/keys/gitlab/root_password&quot;;
https = true;
host = &quot;git.example.com&quot;;
port = 443;
user = &quot;git&quot;;
group = &quot;git&quot;;
smtp = {
enable = true;
address = &quot;localhost&quot;;
port = 25;
};
secrets = {
dbFile = &quot;/var/keys/gitlab/db&quot;;
secretFile = &quot;/var/keys/gitlab/secret&quot;;
otpFile = &quot;/var/keys/gitlab/otp&quot;;
jwsFile = &quot;/var/keys/gitlab/jws&quot;;
};
extraConfig = {
gitlab = {
email_from = &quot;gitlab-no-reply@example.com&quot;;
email_display_name = &quot;Example GitLab&quot;;
email_reply_to = &quot;gitlab-no-reply@example.com&quot;;
default_projects_features = { builds = false; };
};
};
};
</programlisting>
<para>
      If you're setting up a new GitLab instance, generate new secrets.
      You can for instance use
<literal>tr -dc A-Za-z0-9 &lt; /dev/urandom | head -c 128 &gt; /var/keys/gitlab/db</literal>
to generate a new db secret. Make sure the files can be read by,
and only by, the user specified by
<link linkend="opt-services.gitlab.user">services.gitlab.user</link>.
      GitLab encrypts sensitive data stored in the database. If you're
      restoring an existing GitLab instance, you must specify the
      secrets from <literal>config/secrets.yml</literal> located
in your GitLab state folder.
</para>
<para>
When <literal>incoming_mail.enabled</literal> is set to
<literal>true</literal> in
<link linkend="opt-services.gitlab.extraConfig">extraConfig</link>
an additional service called <literal>gitlab-mailroom</literal> is
enabled for fetching incoming mail.
</para>
<para>
Refer to <xref linkend="ch-options" /> for all available
configuration options for the
<link linkend="opt-services.gitlab.enable">services.gitlab</link>
module.
</para>
</section>
<section xml:id="module-services-gitlab-maintenance">
<title>Maintenance</title>
<section xml:id="module-services-gitlab-maintenance-backups">
<title>Backups</title>
<para>
Backups can be configured with the options in
<link linkend="opt-services.gitlab.backup.keepTime">services.gitlab.backup</link>.
Use the
<link linkend="opt-services.gitlab.backup.startAt">services.gitlab.backup.startAt</link>
option to configure regular backups.
</para>
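      <para>
        For illustration, a nightly backup that is kept for two days could
        be configured roughly like this (the values are examples only):
      </para>
      <programlisting>
services.gitlab.backup = {
  startAt = &quot;03:00&quot;;
  keepTime = 48; # hours
};
</programlisting>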
<para>
To run a manual backup, start the
<literal>gitlab-backup</literal> service:
</para>
<programlisting>
$ systemctl start gitlab-backup.service
</programlisting>
</section>
<section xml:id="module-services-gitlab-maintenance-rake">
<title>Rake tasks</title>
<para>
You can run GitLabs rake tasks with
<literal>gitlab-rake</literal> which will be available on the
system when GitLab is enabled. You will have to run the command
as the user that you configured to run GitLab with.
</para>
<para>
A list of all available rake tasks can be obtained by running:
</para>
<programlisting>
$ sudo -u git -H gitlab-rake -T
</programlisting>
</section>
</section>
</chapter>

View file

@ -1390,6 +1390,6 @@ in
'')
];
meta.doc = ./default.xml;
meta.doc = ./default.md;
meta.maintainers = with maintainers; [ tomberek ];
}

View file

@ -1,113 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-sourcehut">
<title>Sourcehut</title>
<para>
<link xlink:href="https://sr.ht.com/">Sourcehut</link> is an
open-source, self-hostable software development platform. The server
setup can be automated using
<link linkend="opt-services.sourcehut.enable">services.sourcehut</link>.
</para>
<section xml:id="module-services-sourcehut-basic-usage">
<title>Basic usage</title>
<para>
Sourcehut is a Python and Go based set of applications. This NixOS
module also provides basic configuration integrating Sourcehut
into locally running <literal>services.nginx</literal>,
<literal>services.redis.servers.sourcehut</literal>,
<literal>services.postfix</literal> and
<literal>services.postgresql</literal> services.
</para>
<para>
A very basic configuration may look like this:
</para>
<programlisting>
{ config, pkgs, lib, ... }:
let
fqdn =
let
      join = hostName: domain: hostName + lib.optionalString (domain != null) &quot;.${domain}&quot;;
in join config.networking.hostName config.networking.domain;
in {
networking = {
hostName = &quot;srht&quot;;
domain = &quot;tld&quot;;
firewall.allowedTCPPorts = [ 22 80 443 ];
};
services.sourcehut = {
enable = true;
git.enable = true;
man.enable = true;
meta.enable = true;
nginx.enable = true;
postfix.enable = true;
postgresql.enable = true;
redis.enable = true;
settings = {
&quot;sr.ht&quot; = {
environment = &quot;production&quot;;
global-domain = fqdn;
origin = &quot;https://${fqdn}&quot;;
# Produce keys with srht-keygen from sourcehut.coresrht.
network-key = &quot;/run/keys/path/to/network-key&quot;;
service-key = &quot;/run/keys/path/to/service-key&quot;;
};
webhooks.private-key= &quot;/run/keys/path/to/webhook-key&quot;;
};
};
security.acme.certs.&quot;${fqdn}&quot;.extraDomainNames = [
&quot;meta.${fqdn}&quot;
&quot;man.${fqdn}&quot;
&quot;git.${fqdn}&quot;
];
services.nginx = {
enable = true;
# only recommendedProxySettings are strictly required, but the rest make sense as well.
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
# Settings to setup what certificates are used for which endpoint.
virtualHosts = {
&quot;${fqdn}&quot;.enableACME = true;
      &quot;meta.${fqdn}&quot;.useACMEHost = fqdn;
      &quot;man.${fqdn}&quot;.useACMEHost = fqdn;
      &quot;git.${fqdn}&quot;.useACMEHost = fqdn;
};
};
}
</programlisting>
<para>
The <literal>hostName</literal> option is used internally to
configure the nginx reverse-proxy. The <literal>settings</literal>
attribute set is used by the configuration generator and the
result is placed in <literal>/etc/sr.ht/config.ini</literal>.
</para>
</section>
<section xml:id="module-services-sourcehut-configuration">
<title>Configuration</title>
<para>
All configuration parameters are also stored in
<literal>/etc/sr.ht/config.ini</literal> which is generated by the
module and linked from the store to ensure that all values from
<literal>config.ini</literal> can be modified by the module.
</para>
</section>
<section xml:id="module-services-sourcehut-httpd">
<title>Using an alternative webserver as reverse-proxy (e.g.
<literal>httpd</literal>)</title>
<para>
By default, <literal>nginx</literal> is used as reverse-proxy for
<literal>sourcehut</literal>. However, its possible to use e.g.
<literal>httpd</literal> by explicitly disabling
<literal>nginx</literal> using
<xref linkend="opt-services.nginx.enable" /> and fixing the
<literal>settings</literal>.
</para>
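    <para>
      A minimal sketch of such a setup might look like the following; the
      <literal>httpd</literal> virtual host, the domain and the upstream
      port are placeholders and have to be adapted to the services you
      actually enable:
    </para>
    <programlisting>
{
  services.sourcehut = {
    enable = true;
    meta.enable = true;
    nginx.enable = false; # do not generate nginx virtual hosts
  };
  services.nginx.enable = false;
  services.httpd = {
    enable = true;
    adminAddr = &quot;webmaster@example.com&quot;;
    # Placeholder: proxy to the port the respective sourcehut service
    # listens on.
    virtualHosts.&quot;meta.example.com&quot;.locations.&quot;/&quot;.proxyPass =
      &quot;http://127.0.0.1:5000&quot;;
  };
}
</programlisting>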
</section>
</chapter>

View file

@ -566,5 +566,5 @@ in {
})
];
meta.doc = ./default.xml;
meta.doc = ./default.md;
}

View file

@ -1,130 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-taskserver">
<title>Taskserver</title>
<para>
Taskserver is the server component of
<link xlink:href="https://taskwarrior.org/">Taskwarrior</link>, a
free and open source todo list application.
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://taskwarrior.org/docs/#taskd">https://taskwarrior.org/docs/#taskd</link>
</para>
<section xml:id="module-services-taskserver-configuration">
<title>Configuration</title>
<para>
Taskserver does all of its authentication via TLS using client
certificates, so you either need to roll your own CA or purchase a
certificate from a known CA, which allows creation of client
certificates. These certificates are usually advertised as
<quote>server certificates</quote>.
</para>
<para>
So in order to make it easier to handle your own CA, there is a
helper tool called <command>nixos-taskserver</command> which
manages the custom CA along with Taskserver organisations, users
and groups.
</para>
<para>
While the client certificates in Taskserver only authenticate
whether a user is allowed to connect, every user has its own UUID
which identifies it as an entity.
</para>
<para>
With <command>nixos-taskserver</command> the client certificate is
created along with the UUID of the user, so it handles all of the
credentials needed in order to setup the Taskwarrior client to
work with a Taskserver.
</para>
</section>
<section xml:id="module-services-taskserver-nixos-taskserver-tool">
<title>The nixos-taskserver tool</title>
<para>
      Because Taskserver by default only provides scripts to set up users
imperatively, the <command>nixos-taskserver</command> tool is used
for addition and deletion of organisations along with users and
groups defined by
      <xref linkend="opt-services.taskserver.organisations" /> as well
      as for imperative setup.
</para>
<para>
The tool is designed to not interfere if the command is used to
manually set up some organisations, users or groups.
</para>
<para>
For example if you add a new organisation using
<command>nixos-taskserver org add foo</command>, the organisation
      will neither be modified nor deleted, no matter what you define in
      <option>services.taskserver.organisations</option>, even if you're
adding the same organisation in that option.
</para>
<para>
The tool is modelled to imitate the official
<command>taskd</command> command, documentation for each
subcommand can be shown by using the <option>--help</option>
switch.
</para>
</section>
<section xml:id="module-services-taskserver-declarative-ca-management">
<title>Declarative/automatic CA management</title>
<para>
Everything is done according to what you specify in the module
options, however in order to set up a Taskwarrior client for
synchronisation with a Taskserver instance, you have to transfer
the keys and certificates to the client machine.
</para>
<para>
This is done using
<command>nixos-taskserver user export $orgname $username</command>
      which prints a shell script fragment to stdout that can
either be used verbatim or adjusted to import the user on the
client machine.
</para>
<para>
      For example, let's say you have the following configuration:
</para>
<programlisting>
{
services.taskserver.enable = true;
services.taskserver.fqdn = &quot;server&quot;;
services.taskserver.listenHost = &quot;::&quot;;
services.taskserver.organisations.my-company.users = [ &quot;alice&quot; ];
}
</programlisting>
<para>
This creates an organisation called <literal>my-company</literal>
with the user <literal>alice</literal>.
</para>
<para>
Now in order to import the <literal>alice</literal> user to
another machine <literal>alicebox</literal>, all we need to do is
something like this:
</para>
<programlisting>
$ ssh server nixos-taskserver user export my-company alice | sh
</programlisting>
<para>
Of course, if no SSH daemon is available on the server you can
also copy &amp; paste it directly into a shell.
</para>
<para>
After this step the user should be set up and you can start
synchronising your tasks for the first time with
<command>task sync init</command> on <literal>alicebox</literal>.
</para>
<para>
Subsequent synchronisation requests merely require the command
<command>task sync</command> after that stage.
</para>
</section>
<section xml:id="module-services-taskserver-manual-ca-management">
<title>Manual CA management</title>
<para>
If you set any options within
      <link linkend="opt-services.taskserver.pki.manual.ca.cert">services.taskserver.pki.manual</link>.*,
      <command>nixos-taskserver</command> won't issue certificates, but
you can still use it for adding or removing user accounts.
</para>
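    <para>
      A manual PKI setup could be sketched like this; the paths are
      placeholders, and the <literal>server.cert</literal> and
      <literal>server.key</literal> option names are assumed to follow the
      same scheme as the CA certificate option linked above:
    </para>
    <programlisting>
{
  services.taskserver = {
    enable = true;
    fqdn = &quot;server&quot;;
    listenHost = &quot;::&quot;;
    pki.manual = {
      ca.cert = &quot;/etc/taskserver/ca.cert.pem&quot;;
      server.cert = &quot;/etc/taskserver/server.cert.pem&quot;;
      server.key = &quot;/etc/taskserver/server.key.pem&quot;;
    };
    organisations.my-company.users = [ &quot;alice&quot; ];
  };
}
</programlisting>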
</section>
</chapter>

View file

@ -59,5 +59,5 @@ in
};
};
meta.doc = ./weechat.xml;
meta.doc = ./weechat.md;
}

View file

@ -1,63 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-weechat">
<title>WeeChat</title>
<para>
<link xlink:href="https://weechat.org/">WeeChat</link> is a fast and
extensible IRC client.
</para>
<section xml:id="module-services-weechat-basic-usage">
<title>Basic Usage</title>
<para>
By default, the module creates a
<link xlink:href="https://www.freedesktop.org/wiki/Software/systemd/"><literal>systemd</literal></link>
unit which runs the chat client in a detached
<link xlink:href="https://www.gnu.org/software/screen/"><literal>screen</literal></link>
session.
</para>
<para>
This can be done by enabling the <literal>weechat</literal>
service:
</para>
<programlisting>
{ ... }:
{
services.weechat.enable = true;
}
</programlisting>
<para>
The service is managed by a dedicated user named
<literal>weechat</literal> in the state directory
<literal>/var/lib/weechat</literal>.
</para>
</section>
<section xml:id="module-services-weechat-reattach">
<title>Re-attaching to WeeChat</title>
<para>
WeeChat runs in a screen session owned by a dedicated user. To
      explicitly allow another user to attach to this session, the
<literal>screenrc</literal> needs to be tweaked by adding
<link xlink:href="https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser">multiuser</link>
support:
</para>
<programlisting>
{
programs.screen.screenrc = ''
multiuser on
acladd normal_user
'';
}
</programlisting>
<para>
Now, the session can be re-attached like this:
</para>
<programlisting>
screen -x weechat/weechat-screen
</programlisting>
<para>
<emphasis>The session name can be changed using
<link xlink:href="options.html#opt-services.weechat.sessionName">services.weechat.sessionName.</link></emphasis>
</para>
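    <para>
      For example, with a custom session name such as
      <literal>irc</literal>:
    </para>
    <programlisting>
{
  services.weechat.enable = true;
  services.weechat.sessionName = &quot;irc&quot;;
}
</programlisting>
    <para>
      The session can then be re-attached with
      <literal>screen -x weechat/irc</literal>.
    </para>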
</section>
</chapter>

View file

@ -539,6 +539,6 @@ in
};
};
meta.doc = ./parsedmarc.xml;
meta.doc = ./parsedmarc.md;
meta.maintainers = [ lib.maintainers.talyz ];
}

View file

@ -1,126 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-parsedmarc">
<title>parsedmarc</title>
<para>
<link xlink:href="https://domainaware.github.io/parsedmarc/">parsedmarc</link>
is a service which parses incoming
<link xlink:href="https://dmarc.org/">DMARC</link> reports and
stores or sends them to a downstream service for further analysis.
In combination with Elasticsearch, Grafana and the included Grafana
dashboard, it provides a handy overview of DMARC reports over time.
</para>
<section xml:id="module-services-parsedmarc-basic-usage">
<title>Basic usage</title>
<para>
A very minimal setup which reads incoming reports from an external
email address and saves them to a local Elasticsearch instance
looks like this:
</para>
<programlisting language="nix">
services.parsedmarc = {
enable = true;
settings.imap = {
host = &quot;imap.example.com&quot;;
user = &quot;alice@example.com&quot;;
password = &quot;/path/to/imap_password_file&quot;;
};
provision.geoIp = false; # Not recommended!
};
</programlisting>
<para>
Note that GeoIP provisioning is disabled in the example for
simplicity, but should be turned on for fully functional reports.
</para>
</section>
<section xml:id="module-services-parsedmarc-local-mail">
<title>Local mail</title>
<para>
Instead of watching an external inbox, a local inbox can be
      automatically provisioned. The recipient's name is by default set
to <literal>dmarc</literal>, but can be configured in
<link xlink:href="options.html#opt-services.parsedmarc.provision.localMail.recipientName">services.parsedmarc.provision.localMail.recipientName</link>.
You need to add an MX record pointing to the host. More
concretely: for the example to work, an MX record needs to be set
up for <literal>monitoring.example.com</literal> and the complete
      email address that should be configured in the domain's DMARC
policy is <literal>dmarc@monitoring.example.com</literal>.
</para>
<programlisting language="nix">
services.parsedmarc = {
enable = true;
provision = {
localMail = {
enable = true;
      hostname = &quot;monitoring.example.com&quot;;
};
geoIp = false; # Not recommended!
};
};
</programlisting>
</section>
<section xml:id="module-services-parsedmarc-grafana-geoip">
<title>Grafana and GeoIP</title>
<para>
      The reports can be visualized and summarized with parsedmarc's
official Grafana dashboard. For all views to work, and for the
data to be complete, GeoIP databases are also required. The
following example shows a basic deployment where the provisioned
Elasticsearch instance is automatically added as a Grafana
datasource, and the dashboard is added to Grafana as well.
</para>
<programlisting language="nix">
services.parsedmarc = {
enable = true;
provision = {
localMail = {
enable = true;
hostname = url;
};
grafana = {
datasource = true;
dashboard = true;
};
};
};
# Not required, but recommended for full functionality
services.geoipupdate = {
settings = {
AccountID = 000000;
LicenseKey = &quot;/path/to/license_key_file&quot;;
};
};
services.grafana = {
enable = true;
addr = &quot;0.0.0.0&quot;;
domain = url;
rootUrl = &quot;https://&quot; + url;
protocol = &quot;socket&quot;;
security = {
adminUser = &quot;admin&quot;;
adminPasswordFile = &quot;/path/to/admin_password_file&quot;;
secretKeyFile = &quot;/path/to/secret_key_file&quot;;
};
};
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
upstreams.grafana.servers.&quot;unix:/${config.services.grafana.socket}&quot; = {};
virtualHosts.${url} = {
root = config.services.grafana.staticRootPath;
enableACME = true;
forceSSL = true;
locations.&quot;/&quot;.tryFiles = &quot;$uri @grafana&quot;;
locations.&quot;@grafana&quot;.proxyPass = &quot;http://grafana&quot;;
};
};
users.users.nginx.extraGroups = [ &quot;grafana&quot; ];
</programlisting>
</section>
</chapter>

View file

@ -323,7 +323,7 @@ in
);
meta = {
doc = ./exporters.xml;
doc = ./exporters.md;
maintainers = [ maintainers.willibutz ];
};
}

View file

@ -1,245 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-prometheus-exporters">
<title>Prometheus exporters</title>
<para>
Prometheus exporters provide metrics for the
<link xlink:href="https://prometheus.io">prometheus monitoring
system</link>.
</para>
<section xml:id="module-services-prometheus-exporters-configuration">
<title>Configuration</title>
<para>
One of the most common exporters is the
<link xlink:href="https://github.com/prometheus/node_exporter">node
      exporter</link>, which provides hardware and OS metrics from the host
      it's running on. The exporter could be configured as follows:
</para>
<programlisting>
services.prometheus.exporters.node = {
enable = true;
port = 9100;
enabledCollectors = [
&quot;logind&quot;
&quot;systemd&quot;
];
disabledCollectors = [
&quot;textfile&quot;
];
openFirewall = true;
firewallFilter = &quot;-i br0 -p tcp -m tcp --dport 9100&quot;;
};
</programlisting>
<para>
It should now serve all metrics from the collectors that are
explicitly enabled and the ones that are
<link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled
      by default</link>, via HTTP under <literal>/metrics</literal>. In
this example the firewall should just allow incoming connections
      to the exporter's port on the bridge interface
<literal>br0</literal> (this would have to be configured
separately of course). For more information about configuration
see <literal>man configuration.nix</literal> or search through the
<link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available
options</link>.
</para>
<para>
Prometheus can now be configured to consume the metrics produced
by the exporter:
</para>
<programlisting>
services.prometheus = {
# ...
scrapeConfigs = [
{
job_name = &quot;node&quot;;
static_configs = [{
targets = [ &quot;localhost:${toString config.services.prometheus.exporters.node.port}&quot; ];
}];
}
];
# ...
}
</programlisting>
</section>
<section xml:id="module-services-prometheus-exporters-new-exporter">
<title>Adding a new exporter</title>
<para>
To add a new exporter, it has to be packaged first (see
<literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for
examples), then a module can be added. The postfix exporter is
used in this example:
</para>
<itemizedlist>
<listitem>
<para>
Some default options for all exporters are provided by
<literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>enable</literal>
</para>
</listitem>
<listitem>
<para>
<literal>port</literal>
</para>
</listitem>
<listitem>
<para>
<literal>listenAddress</literal>
</para>
</listitem>
<listitem>
<para>
<literal>extraFlags</literal>
</para>
</listitem>
<listitem>
<para>
<literal>openFirewall</literal>
</para>
</listitem>
<listitem>
<para>
<literal>firewallFilter</literal>
</para>
</listitem>
<listitem>
<para>
<literal>user</literal>
</para>
</listitem>
<listitem>
<para>
<literal>group</literal>
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
As there is already a package available, the module can now be
added. This is accomplished by adding a new file to the
<literal>nixos/modules/services/monitoring/prometheus/exporters/</literal>
directory, which will be called postfix.nix and contains all
exporter specific options and configuration:
</para>
<programlisting>
# nixpkgs/nixos/modules/services/prometheus/exporters/postfix.nix
{ config, lib, pkgs, options }:
with lib;
let
# for convenience we define cfg here
cfg = config.services.prometheus.exporters.postfix;
in
{
port = 9154; # The postfix exporter listens on this port by default
# `extraOpts` is an attribute set which contains additional options
# (and optional overrides for default options).
# Note that this attribute is optional.
extraOpts = {
telemetryPath = mkOption {
type = types.str;
default = &quot;/metrics&quot;;
description = ''
Path under which to expose metrics.
'';
};
logfilePath = mkOption {
type = types.path;
default = /var/log/postfix_exporter_input.log;
example = /var/log/mail.log;
description = ''
Path where Postfix writes log entries.
This file will be truncated by this exporter!
'';
};
showqPath = mkOption {
type = types.path;
default = /var/spool/postfix/public/showq;
example = /var/lib/postfix/queue/public/showq;
description = ''
Path at which Postfix places its showq socket.
'';
};
};
# `serviceOpts` is an attribute set which contains configuration
# for the exporter's systemd service. One of
# `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
# has to be specified here. This will be merged with the default
# service configuration.
# Note that by default 'DynamicUser' is 'true'.
serviceOpts = {
serviceConfig = {
DynamicUser = false;
ExecStart = ''
${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
--web.telemetry-path ${cfg.telemetryPath} \
${concatStringsSep &quot; \\\n &quot; cfg.extraFlags}
'';
};
};
}
</programlisting>
</listitem>
<listitem>
<para>
This should already be enough for the postfix exporter.
Additionally one could now add assertions and conditional
default values. This can be done in the
<quote>meta-module</quote> that combines all exporter
definitions and generates the submodules:
<literal>nixpkgs/nixos/modules/services/prometheus/exporters.nix</literal>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-prometheus-exporters-update-exporter-module">
<title>Updating an exporter module</title>
<para>
Should an exporter option change at some point, it is possible to
add information about the change to the exporter definition
similar to <literal>nixpkgs/nixos/modules/rename.nix</literal>:
</para>
<programlisting>
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.nginx;
in
{
port = 9113;
extraOpts = {
# additional module options
# ...
};
serviceOpts = {
# service configuration
# ...
};
imports = [
# 'services.prometheus.exporters.nginx.telemetryEndpoint' -&gt; 'services.prometheus.exporters.nginx.telemetryPath'
(mkRenamedOptionModule [ &quot;telemetryEndpoint&quot; ] [ &quot;telemetryPath&quot; ])
# removed option 'services.prometheus.exporters.nginx.insecure'
(mkRemovedOptionModule [ &quot;insecure&quot; ] ''
This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
'')
({ options.warnings = options.warnings; })
];
}
</programlisting>
</section>
</chapter>

View file

@ -95,5 +95,5 @@ in
users.groups.litestream = {};
};
meta.doc = ./default.xml;
meta.doc = ./default.md;
}

View file

@ -1,62 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-litestream">
<title>Litestream</title>
<para>
<link xlink:href="https://litestream.io/">Litestream</link> is a
standalone streaming replication tool for SQLite.
</para>
<section xml:id="module-services-litestream-configuration">
<title>Configuration</title>
<para>
      The Litestream service is managed by a dedicated user named
      <literal>litestream</literal>, which needs permission to access the
      database file. Here's an example config which gives the required
      permissions to access the
<link linkend="opt-services.grafana.settings.database.path">grafana
database</link>:
</para>
<programlisting>
{ pkgs, ... }:
{
users.users.litestream.extraGroups = [ &quot;grafana&quot; ];
systemd.services.grafana.serviceConfig.ExecStartPost = &quot;+&quot; + pkgs.writeShellScript &quot;grant-grafana-permissions&quot; ''
timeout=10
while [ ! -f /var/lib/grafana/data/grafana.db ];
do
if [ &quot;$timeout&quot; == 0 ]; then
echo &quot;ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db.&quot;
exit 1
fi
sleep 1
((timeout--))
done
find /var/lib/grafana -type d -exec chmod -v 775 {} \;
find /var/lib/grafana -type f -exec chmod -v 660 {} \;
'';
services.litestream = {
enable = true;
environmentFile = &quot;/run/secrets/litestream&quot;;
settings = {
dbs = [
{
path = &quot;/var/lib/grafana/data/grafana.db&quot;;
replicas = [{
url = &quot;s3://mybkt.litestream.io/grafana&quot;;
}];
}
];
};
};
}
</programlisting>
</section>
</chapter>

View file

@ -311,6 +311,6 @@ in
meta = {
maintainers = with lib.maintainers; [ pennae ];
doc = ./firefox-syncserver.xml;
doc = ./firefox-syncserver.md;
};
}

View file

@ -1,79 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-firefox-syncserver">
<title>Firefox Sync server</title>
<para>
A storage server for Firefox Sync that you can easily host yourself.
</para>
<section xml:id="module-services-firefox-syncserver-quickstart">
<title>Quickstart</title>
<para>
The absolute minimal configuration for the sync server looks like
this:
</para>
<programlisting language="nix">
services.mysql.package = pkgs.mariadb;
services.firefox-syncserver = {
enable = true;
secrets = builtins.toFile &quot;sync-secrets&quot; ''
SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
'';
singleNode = {
enable = true;
hostname = &quot;localhost&quot;;
url = &quot;http://localhost:5000&quot;;
};
};
</programlisting>
<para>
This will start a sync server that is only accessible locally.
      Once the service is running, you can navigate to
<literal>about:config</literal> in your Firefox profile and set
<literal>identity.sync.tokenserver.uri</literal> to
<literal>http://localhost:5000/1.0/sync/1.5</literal>. Your
browser will now use your local sync server for data storage.
</para>
<warning>
<para>
This configuration should never be used in production. It is not
encrypted and stores its secrets in a world-readable location.
</para>
</warning>
</section>
<section xml:id="module-services-firefox-syncserver-configuration">
<title>More detailed setup</title>
<para>
The <literal>firefox-syncserver</literal> service provides a
      number of options to make setting up a small deployment easier.
These are grouped under the <literal>singleNode</literal> element
of the option tree and allow simple configuration of the most
important parameters.
</para>
<para>
Single node setup is split into two kinds of options: those that
affect the sync server itself, and those that affect its
surroundings. Options that affect the sync server are
<literal>capacity</literal>, which configures how many accounts
may be active on this instance, and <literal>url</literal>, which
holds the URL under which the sync server can be accessed. The
<literal>url</literal> can be configured automatically when using
nginx.
</para>
<para>
Options that affect the surroundings of the sync server are
<literal>enableNginx</literal>, <literal>enableTLS</literal> and
      <literal>hostname</literal>. If <literal>enableNginx</literal> is
      set, the sync server module will automatically add an nginx virtual
host to the system using <literal>hostname</literal> as the domain
and set <literal>url</literal> accordingly. If
      <literal>enableTLS</literal> is set, the module will also enable
ACME certificates on the new virtual host and force all
connections to be made via TLS.
</para>
<para>
For actual deployment it is also recommended to store the
<literal>secrets</literal> file in a secure location.
</para>
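    <para>
      Putting these options together, a small single-node deployment
      behind nginx with TLS could look roughly like this (the hostname,
      the secrets path and the capacity are placeholders):
    </para>
    <programlisting language="nix">
services.mysql.package = pkgs.mariadb;
services.firefox-syncserver = {
  enable = true;
  secrets = &quot;/run/secrets/firefox-syncserver&quot;;
  singleNode = {
    enable = true;
    enableNginx = true;
    enableTLS = true;
    hostname = &quot;sync.example.com&quot;;
    capacity = 5;
  };
};
</programlisting>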
</section>
</chapter>

View file

@ -671,6 +671,6 @@ in
meta = {
maintainers = with lib.maintainers; [ pennae ];
doc = ./mosquitto.xml;
doc = ./mosquitto.md;
};
}

View file

@ -1,149 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mosquitto">
<title>Mosquitto</title>
<para>
    Mosquitto is an MQTT broker often used for IoT or home automation
data transport.
</para>
<section xml:id="module-services-mosquitto-quickstart">
<title>Quickstart</title>
<para>
A minimal configuration for Mosquitto is
</para>
<programlisting language="nix">
services.mosquitto = {
enable = true;
listeners = [ {
acl = [ &quot;pattern readwrite #&quot; ];
omitPasswordAuth = true;
settings.allow_anonymous = true;
} ];
};
</programlisting>
<para>
This will start a broker on port 1883, listening on all interfaces
of the machine, allowing read/write access to all topics to any
user without password requirements.
</para>
<para>
User authentication can be configured with the
<literal>users</literal> key of listeners. A config that gives
full read access to a user <literal>monitor</literal> and
restricted write access to a user <literal>service</literal> could
look like
</para>
<programlisting language="nix">
services.mosquitto = {
enable = true;
listeners = [ {
users = {
monitor = {
acl = [ &quot;read #&quot; ];
password = &quot;monitor&quot;;
};
service = {
acl = [ &quot;write service/#&quot; ];
password = &quot;service&quot;;
};
};
} ];
};
</programlisting>
<para>
TLS authentication is configured by setting TLS-related options of
the listener:
</para>
<programlisting language="nix">
services.mosquitto = {
enable = true;
listeners = [ {
port = 8883; # port change is not required, but helpful to avoid mistakes
# ...
settings = {
cafile = &quot;/path/to/mqtt.ca.pem&quot;;
certfile = &quot;/path/to/mqtt.pem&quot;;
keyfile = &quot;/path/to/mqtt.key&quot;;
};
  } ];
};
</programlisting>
</section>
<section xml:id="module-services-mosquitto-config">
<title>Configuration</title>
<para>
The Mosquitto configuration has four distinct types of settings:
the global settings of the daemon, listeners, plugins, and
bridges. Bridges and listeners are part of the global
configuration, plugins are part of listeners. Users of the broker
are configured as parts of listeners rather than globally,
allowing configurations in which a given user is only allowed to
      log in to the broker using specific listeners (e.g. to configure an
admin user with full access to all topics, but restricted to
localhost).
</para>
<para>
Almost all options of Mosquitto are available for configuration at
their appropriate levels, some as NixOS options written in camel
      case, the remainder under <literal>settings</literal> with their
exact names in the Mosquitto config file. The exceptions are
<literal>acl_file</literal> (which is always set according to the
<literal>acl</literal> attributes of a listener and its users) and
<literal>per_listener_settings</literal> (which is always set to
<literal>true</literal>).
</para>
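    <para>
      For illustration, a configuration mixing both styles could look like
      this; the option and setting names used here are examples rather
      than an exhaustive or authoritative list:
    </para>
    <programlisting language="nix">
services.mosquitto = {
  enable = true;
  # camel-case NixOS option
  logType = [ &quot;error&quot; &quot;warning&quot; ];
  # passed through verbatim to the global part of the Mosquitto config
  settings.max_keepalive = 300;
  listeners = [ {
    port = 1883;
    # passed through verbatim to this listener
    settings.max_connections = 512;
  } ];
};
</programlisting>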
<section xml:id="module-services-mosquitto-config-passwords">
<title>Password authentication</title>
<para>
Mosquitto can be run in two modes, with a password file or
without. Each listener has its own password file, and different
listeners may use different password files. Password file
generation can be disabled by setting
<literal>omitPasswordAuth = true</literal> for a listener; in
this case it is necessary to either set
<literal>settings.allow_anonymous = true</literal> to allow all
logins, or to configure other authentication methods like TLS
client certificates with
<literal>settings.use_identity_as_username = true</literal>.
</para>
<para>
The default is to generate a password file for each listener
from the users configured to that listener. Users with no
configured password will not be added to the password file and
thus will not be able to use the broker.
</para>
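      <para>
        For example, a listener that skips password authentication and
        instead authenticates clients by their TLS client certificates
        could be sketched as follows (certificate paths are placeholders):
      </para>
      <programlisting language="nix">
services.mosquitto = {
  enable = true;
  listeners = [ {
    port = 8883;
    omitPasswordAuth = true;
    settings = {
      cafile = &quot;/path/to/mqtt.ca.pem&quot;;
      certfile = &quot;/path/to/mqtt.pem&quot;;
      keyfile = &quot;/path/to/mqtt.key&quot;;
      require_certificate = true;
      use_identity_as_username = true;
    };
  } ];
};
</programlisting>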
</section>
<section xml:id="module-services-mosquitto-config-acl">
<title>ACL format</title>
<para>
Every listener has a Mosquitto <literal>acl_file</literal>
attached to it. This ACL is configured via two attributes of the
config:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
the <literal>acl</literal> attribute of the listener
configures pattern ACL entries and topic ACL entries for
anonymous users. Each entry must be prefixed with
<literal>pattern</literal> or <literal>topic</literal> to
distinguish between these two cases.
</para>
</listitem>
<listitem>
<para>
            the <literal>acl</literal> attribute of every user configured
            in the listener configures the ACL for that given
user. Only topic ACLs are supported by Mosquitto in this
setting, so no prefix is required or allowed.
</para>
</listitem>
</itemizedlist>
<para>
The default ACL for a listener is empty, disallowing all
accesses from all clients. To configure a completely open ACL,
set <literal>acl = [ &quot;pattern readwrite #&quot; ]</literal>
in the listener.
</para>
</section>
</section>
</chapter>

View file

@ -147,5 +147,5 @@ in {
};
meta.maintainers = with lib.maintainers; [ ninjatrappeur ];
meta.doc = ./pleroma.xml;
meta.doc = ./pleroma.md;
}

View file

@ -1,244 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-pleroma">
<title>Pleroma</title>
<para>
<link xlink:href="https://pleroma.social/">Pleroma</link> is a
    lightweight ActivityPub server.
</para>
<section xml:id="module-services-pleroma-generate-config">
<title>Generating the Pleroma config</title>
<para>
      The <literal>pleroma_ctl</literal> CLI utility will ask you
      some questions and generate an initial config file. This
      is an example of its usage:
</para>
<programlisting>
$ mkdir tmp-pleroma
$ cd tmp-pleroma
$ nix-shell -p pleroma-otp
$ pleroma_ctl instance gen --output config.exs --output-psql setup.psql
</programlisting>
<para>
The <literal>config.exs</literal> file can be further customized
following the instructions on the
<link xlink:href="https://docs-develop.pleroma.social/backend/configuration/cheatsheet/">upstream
      documentation</link>. Many refinements can also be applied after
the service is running.
</para>
</section>
<section xml:id="module-services-pleroma-initialize-db">
<title>Initializing the database</title>
<para>
      First, the PostgreSQL service must be enabled in the NixOS
configuration
</para>
<programlisting>
services.postgresql = {
enable = true;
package = pkgs.postgresql_13;
};
</programlisting>
<para>
and activated with the usual
</para>
<programlisting>
$ nixos-rebuild switch
</programlisting>
<para>
Then you can create and seed the database, using the
<literal>setup.psql</literal> file that you generated in the
previous section, by running
</para>
<programlisting>
$ sudo -u postgres psql -f setup.psql
</programlisting>
</section>
<section xml:id="module-services-pleroma-enable">
<title>Enabling the Pleroma service locally</title>
<para>
In this section we will enable the Pleroma service only locally,
      so its configuration can be improved incrementally.
</para>
<para>
This is an example of configuration, where
<xref linkend="opt-services.pleroma.configs" /> option contains
the content of the file <literal>config.exs</literal>, generated
<link linkend="module-services-pleroma-generate-config">in the
first section</link>, but with the secrets (database password,
endpoint secret key, salts, etc.) removed. Removing secrets is
important, because otherwise they will be stored publicly in the
Nix store.
</para>
<programlisting>
services.pleroma = {
enable = true;
secretConfigFile = &quot;/var/lib/pleroma/secrets.exs&quot;;
configs = [
''
import Config
config :pleroma, Pleroma.Web.Endpoint,
url: [host: &quot;pleroma.example.net&quot;, scheme: &quot;https&quot;, port: 443],
http: [ip: {127, 0, 0, 1}, port: 4000]
config :pleroma, :instance,
name: &quot;Test&quot;,
email: &quot;admin@example.net&quot;,
notify_email: &quot;admin@example.net&quot;,
limit: 5000,
registrations_open: true
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: &quot;pleroma&quot;,
database: &quot;pleroma&quot;,
hostname: &quot;localhost&quot;
# Configure web push notifications
config :web_push_encryption, :vapid_details,
subject: &quot;mailto:admin@example.net&quot;
# ... TO CONTINUE ...
''
];
};
</programlisting>
<para>
      Secrets must be moved into a file pointed to by
<xref linkend="opt-services.pleroma.secretConfigFile" />, in our
case <literal>/var/lib/pleroma/secrets.exs</literal>. This file
      can be created by copying the previously generated
<literal>config.exs</literal> file and then removing all the
settings, except the secrets. This is an example
</para>
<programlisting>
# Pleroma instance passwords
import Config
config :pleroma, Pleroma.Web.Endpoint,
secret_key_base: &quot;&lt;the secret generated by pleroma_ctl&gt;&quot;,
signing_salt: &quot;&lt;the secret generated by pleroma_ctl&gt;&quot;
config :pleroma, Pleroma.Repo,
password: &quot;&lt;the secret generated by pleroma_ctl&gt;&quot;
# Configure web push notifications
config :web_push_encryption, :vapid_details,
public_key: &quot;&lt;the secret generated by pleroma_ctl&gt;&quot;,
private_key: &quot;&lt;the secret generated by pleroma_ctl&gt;&quot;
# ... TO CONTINUE ...
</programlisting>
<para>
Note that the lines of the same configuration group are comma
separated (i.e. all the lines end with a comma, except the last
one), so when the lines with passwords are added or removed,
commas must be adjusted accordingly.
</para>
<para>
The service can be enabled with the usual
</para>
<programlisting>
$ nixos-rebuild switch
</programlisting>
<para>
      The service is accessible only locally on
      <literal>127.0.0.1:4000</literal>. It can be tested using a
port forwarding like this
</para>
<programlisting>
$ ssh -L 4000:localhost:4000 myuser@example.net
</programlisting>
<para>
and then accessing
<link xlink:href="http://localhost:4000">http://localhost:4000</link>
from a web browser.
</para>
</section>
<section xml:id="module-services-pleroma-admin-user">
<title>Creating the admin user</title>
<para>
After Pleroma service is running, all
<link xlink:href="https://docs-develop.pleroma.social/">Pleroma
administration utilities</link> can be used. In particular an
admin user can be created with
</para>
<programlisting>
$ pleroma_ctl user new &lt;nickname&gt; &lt;email&gt; --admin --moderator --password &lt;password&gt;
</programlisting>
</section>
<section xml:id="module-services-pleroma-nginx">
<title>Configuring Nginx</title>
<para>
In this configuration, Pleroma is listening only on the local port
      4000. Nginx can be configured as a reverse proxy, for forwarding
requests from public ports to the Pleroma service. This is an
example of configuration, using
<link xlink:href="https://letsencrypt.org/">Lets Encrypt</link>
for the TLS certificates
</para>
<programlisting>
security.acme = {
email = &quot;root@example.net&quot;;
acceptTerms = true;
};
services.nginx = {
enable = true;
addSSL = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = false;
# NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
# specific settings, and they will enter in conflict.
virtualHosts = {
&quot;pleroma.example.net&quot; = {
http2 = true;
enableACME = true;
forceSSL = true;
locations.&quot;/&quot; = {
proxyPass = &quot;http://127.0.0.1:4000&quot;;
extraConfig = ''
etag on;
gzip on;
add_header 'Access-Control-Allow-Origin' '*' always;
add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
if ($request_method = OPTIONS) {
return 204;
}
add_header X-XSS-Protection &quot;1; mode=block&quot;;
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
add_header X-Download-Options noopen;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection &quot;upgrade&quot;;
proxy_set_header Host $host;
client_max_body_size 16m;
# NOTE: increase if users need to upload very big files
'';
};
};
};
};
</programlisting>
</section>
</chapter>

View file

@ -905,5 +905,5 @@ in
};
meta.doc = ./prosody.xml;
meta.doc = ./prosody.md;
}

View file

@ -1,92 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-prosody">
<title>Prosody</title>
<para>
<link xlink:href="https://prosody.im/">Prosody</link> is an
open-source, modern XMPP server.
</para>
<section xml:id="module-services-prosody-basic-usage">
<title>Basic usage</title>
<para>
A common struggle for most XMPP newcomers is to find the right set
of XMPP Extensions (XEPs) to set up. Forget to activate a few of
those and your XMPP experience might turn into a nightmare!
</para>
<para>
The XMPP community tackles this problem by creating a meta-XEP
listing a decent set of XEPs you should implement. This meta-XEP
is issued every year, the 2020 edition being
<link xlink:href="https://xmpp.org/extensions/xep-0423.html">XEP-0423</link>.
</para>
<para>
The NixOS Prosody module will implement most of these recommended
XEPs out of the box. That being said, two components still require
some manual configuration: the
<link xlink:href="https://xmpp.org/extensions/xep-0045.html">Multi
User Chat (MUC)</link> and the
<link xlink:href="https://xmpp.org/extensions/xep-0363.html">HTTP
File Upload</link> ones. You'll need to create a DNS subdomain for
each of those. The current convention is to name your MUC endpoint
<literal>conference.example.org</literal> and your HTTP upload
domain <literal>upload.example.org</literal>.
</para>
<para>
A good configuration to start with, including a
<link xlink:href="https://xmpp.org/extensions/xep-0045.html">Multi
User Chat (MUC)</link> endpoint as well as a
<link xlink:href="https://xmpp.org/extensions/xep-0363.html">HTTP
File Upload</link> endpoint will look like this:
</para>
<programlisting>
services.prosody = {
enable = true;
admins = [ &quot;root@example.org&quot; ];
ssl.cert = &quot;/var/lib/acme/example.org/fullchain.pem&quot;;
ssl.key = &quot;/var/lib/acme/example.org/key.pem&quot;;
virtualHosts.&quot;example.org&quot; = {
enabled = true;
domain = &quot;example.org&quot;;
ssl.cert = &quot;/var/lib/acme/example.org/fullchain.pem&quot;;
ssl.key = &quot;/var/lib/acme/example.org/key.pem&quot;;
};
muc = [ {
domain = &quot;conference.example.org&quot;;
} ];
uploadHttp = {
domain = &quot;upload.example.org&quot;;
};
};
</programlisting>
</section>
<section xml:id="module-services-prosody-letsencrypt">
<title>Let's Encrypt Configuration</title>
<para>
As you can see in the code snippet from the
<link linkend="module-services-prosody-basic-usage">previous
section</link>, you'll need a single TLS certificate covering your
main endpoint, the MUC one as well as the HTTP Upload one. We can
generate such a certificate by leveraging the ACME
<link linkend="opt-security.acme.certs._name_.extraDomainNames">extraDomainNames</link>
module option.
</para>
<para>
Provided the setup detailed in the previous section, you'll need
the following ACME configuration to generate a TLS certificate for
the three endpoints:
</para>
<programlisting>
security.acme = {
email = &quot;root@example.org&quot;;
acceptTerms = true;
certs = {
&quot;example.org&quot; = {
webroot = &quot;/var/www/example.org&quot;;
email = &quot;root@example.org&quot;;
extraDomainNames = [ &quot;conference.example.org&quot; &quot;upload.example.org&quot; ];
};
};
};
</programlisting>
</section>
</chapter>

View file

@ -193,7 +193,7 @@ in {
environment.systemPackages = [ cfg.package ];
});
meta = {
doc = ./yggdrasil.xml;
doc = ./yggdrasil.md;
maintainers = with lib.maintainers; [ gazally ehmry ];
};
}

View file

@ -1,157 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-networking-yggdrasil">
<title>Yggdrasil</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/networking/yggdrasil/default.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://yggdrasil-network.github.io/">https://yggdrasil-network.github.io/</link>
</para>
<para>
Yggdrasil is an early-stage implementation of a fully end-to-end
encrypted, self-arranging IPv6 network.
</para>
<section xml:id="module-services-networking-yggdrasil-configuration">
<title>Configuration</title>
<section xml:id="module-services-networking-yggdrasil-configuration-simple">
<title>Simple ephemeral node</title>
<para>
An annotated example of a simple configuration:
</para>
<programlisting>
{
services.yggdrasil = {
enable = true;
persistentKeys = false;
# The NixOS module will generate new keys and a new IPv6 address each time
# it is started if persistentKeys is not enabled.
settings = {
Peers = [
# Yggdrasil will automatically connect and &quot;peer&quot; with other nodes it
# discovers via link-local multicast announcements. Unless this is the
# case (it probably isn't) a node needs peers within the existing
# network that it can tunnel to.
&quot;tcp://1.2.3.4:1024&quot;
&quot;tcp://1.2.3.5:1024&quot;
# Public peers can be found at
# https://github.com/yggdrasil-network/public-peers
];
};
};
}
</programlisting>
</section>
<section xml:id="module-services-networking-yggdrasil-configuration-prefix">
<title>Persistent node with prefix</title>
<para>
A node with a fixed address that announces a prefix:
</para>
<programlisting>
let
address = &quot;210:5217:69c0:9afc:1b95:b9f:8718:c3d2&quot;;
prefix = &quot;310:5217:69c0:9afc&quot;;
# taken from the output of &quot;yggdrasilctl getself&quot;.
in {
services.yggdrasil = {
enable = true;
persistentKeys = true; # Maintain a fixed public key and IPv6 address.
settings = {
Peers = [ &quot;tcp://1.2.3.4:1024&quot; &quot;tcp://1.2.3.5:1024&quot; ];
NodeInfo = {
# This information is visible to the network.
name = config.networking.hostName;
location = &quot;The North Pole&quot;;
};
};
};
boot.kernel.sysctl.&quot;net.ipv6.conf.all.forwarding&quot; = 1;
# Forward traffic under the prefix.
networking.interfaces.${eth0}.ipv6.addresses = [{
# Set a 300::/8 address on the local physical device.
address = prefix + &quot;::1&quot;;
prefixLength = 64;
}];
services.radvd = {
# Announce the 300::/8 prefix to eth0.
enable = true;
config = ''
interface eth0
{
AdvSendAdvert on;
prefix ${prefix}::/64 {
AdvOnLink on;
AdvAutonomous on;
};
route 200::/8 {};
};
'';
};
}
</programlisting>
</section>
<section xml:id="module-services-networking-yggdrasil-configuration-container">
<title>Yggdrasil attached Container</title>
<para>
A NixOS container attached to the Yggdrasil network via a node
running on the host:
</para>
<programlisting>
let
yggPrefix64 = &quot;310:5217:69c0:9afc&quot;;
# Again, taken from the output of &quot;yggdrasilctl getself&quot;.
in
{
boot.kernel.sysctl.&quot;net.ipv6.conf.all.forwarding&quot; = 1;
# Enable IPv6 forwarding.
networking = {
bridges.br0.interfaces = [ ];
# A bridge only to containers…
interfaces.br0 = {
# … configured with a prefix address.
ipv6.addresses = [{
address = &quot;${yggPrefix64}::1&quot;;
prefixLength = 64;
}];
};
};
containers.foo = {
autoStart = true;
privateNetwork = true;
hostBridge = &quot;br0&quot;;
# Attach the container to the bridge only.
config = { config, pkgs, ... }: {
networking.interfaces.eth0.ipv6 = {
addresses = [{
# Configure a prefix address.
address = &quot;${yggPrefix64}::2&quot;;
prefixLength = 64;
}];
routes = [{
# Configure the prefix route.
address = &quot;200::&quot;;
prefixLength = 7;
via = &quot;${yggPrefix64}::1&quot;;
}];
};
services.httpd.enable = true;
networking.firewall.allowedTCPPorts = [ 80 ];
};
};
}
</programlisting>
</section>
</section>
</chapter>

View file

@ -9,7 +9,7 @@ in
{
meta.maintainers = with maintainers; [ Br1ght0ne happysalada ];
meta.doc = ./meilisearch.xml;
meta.doc = ./meilisearch.md;
###### interface

View file

@ -1,87 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-meilisearch">
<title>Meilisearch</title>
<para>
Meilisearch is a lightweight, fast and powerful search engine. Think
Elasticsearch with a much smaller footprint.
</para>
<section xml:id="module-services-meilisearch-quickstart">
<title>Quickstart</title>
<para>
The minimal configuration to start Meilisearch is:
</para>
<programlisting language="nix">
services.meilisearch.enable = true;
</programlisting>
<para>
This will start the HTTP server included with Meilisearch on port
7700.
</para>
<para>
Test it with
<literal>curl -X GET 'http://localhost:7700/health'</literal>
</para>
</section>
<section xml:id="module-services-meilisearch-usage">
<title>Usage</title>
<para>
You first need to add documents to an index before you can search
for them.
</para>
<section xml:id="module-services-meilisearch-quickstart-add">
<title>Add documents to the <literal>movies</literal>
index</title>
<para>
<literal>curl -X POST 'http://127.0.0.1:7700/indexes/movies/documents' --data '[{&quot;id&quot;: &quot;123&quot;, &quot;title&quot;: &quot;Superman&quot;}, {&quot;id&quot;: 234, &quot;title&quot;: &quot;Batman&quot;}]'</literal>
</para>
</section>
<section xml:id="module-services-meilisearch-quickstart-search">
<title>Search documents in the <literal>movies</literal>
index</title>
<para>
<literal>curl 'http://127.0.0.1:7700/indexes/movies/search' --data '{ &quot;q&quot;: &quot;botman&quot; }'</literal>
(note that the typo is intentional; it demonstrates the
typo-tolerant search capabilities)
</para>
</section>
</section>
<section xml:id="module-services-meilisearch-defaults">
<title>Defaults</title>
<itemizedlist>
<listitem>
<para>
The default NixOS package doesn't come with the
<link xlink:href="https://docs.meilisearch.com/learn/getting_started/quick_start.html#search">dashboard</link>,
since the dashboard feature downloads some assets at compile
time.
</para>
</listitem>
<listitem>
<para>
Anonymized analytics sent to Meilisearch are disabled by
default.
</para>
</listitem>
<listitem>
<para>
The default deployment is in development mode. It doesn't require
a secret master key. All routes are unprotected and
accessible.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-meilisearch-missing">
<title>Missing</title>
<itemizedlist spacing="compact">
<listitem>
<para>
the snapshot feature is not yet configurable from the module;
it's just a matter of adding the relevant environment variables
(see the sketch after this list).
</para>
</listitem>
</itemizedlist>
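<para>
For illustration only, here is a rough sketch of wiring such
variables through the systemd unit. The variable names shown
(<literal>MEILI_SCHEDULE_SNAPSHOT</literal>,
<literal>MEILI_SNAPSHOT_DIR</literal>) are assumptions and should be
checked against the upstream Meilisearch documentation:
</para>
<programlisting language="nix">
# Sketch, not a module option: pass snapshot-related environment
# variables (names assumed, verify upstream) directly to the unit.
systemd.services.meilisearch.environment = {
  MEILI_SCHEDULE_SNAPSHOT = &quot;true&quot;;
  MEILI_SNAPSHOT_DIR = &quot;/var/lib/meilisearch/snapshots&quot;;
};
</programlisting>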
</section>
</chapter>

View file

@ -1082,5 +1082,5 @@ in {
};
meta.maintainers = with maintainers; [ mvs ];
meta.doc = ./akkoma.xml;
meta.doc = ./akkoma.md;
}

View file

@ -1,398 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-akkoma">
<title>Akkoma</title>
<para>
<link xlink:href="https://akkoma.dev/">Akkoma</link> is a
lightweight ActivityPub microblogging server forked from Pleroma.
</para>
<section xml:id="modules-services-akkoma-service-configuration">
<title>Service configuration</title>
<para>
The Elixir configuration file required by Akkoma is generated
automatically from
<link xlink:href="options.html#opt-services.akkoma.config"><option>services.akkoma.config</option></link>.
Secrets must be included from external files outside of the Nix
store by setting the configuration option to an attribute set
containing the attribute <option>_secret</option> - a string
pointing to the file containing the actual value of the option.
</para>
<para>
For the mandatory configuration settings these secrets will be
generated automatically if the referenced file does not exist
during startup, unless disabled through
<link xlink:href="options.html#opt-services.akkoma.initSecrets"><option>services.akkoma.initSecrets</option></link>.
</para>
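<para>
For illustration, a minimal sketch of this mechanism; the option
path and file location used here are placeholders, not required
values:
</para>
<programlisting language="nix">
# Illustrative sketch: read a secret from a file outside the Nix store.
services.akkoma.config.&quot;:pleroma&quot;.&quot;Pleroma.Web.Endpoint&quot;.secret_key_base = {
  _secret = &quot;/var/lib/secrets/akkoma/key-base&quot;;
};
</programlisting>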
<para>
The following configuration binds Akkoma to the Unix socket
<literal>/run/akkoma/socket</literal>, expecting to be run behind
an HTTP proxy on <literal>fediverse.example.com</literal>.
</para>
<programlisting language="nix">
services.akkoma.enable = true;
services.akkoma.config = {
&quot;:pleroma&quot; = {
&quot;:instance&quot; = {
name = &quot;My Akkoma instance&quot;;
description = &quot;More detailed description&quot;;
email = &quot;admin@example.com&quot;;
registration_open = false;
};
&quot;Pleroma.Web.Endpoint&quot; = {
url.host = &quot;fediverse.example.com&quot;;
};
};
};
</programlisting>
<para>
Please refer to the
<link xlink:href="https://docs.akkoma.dev/stable/configuration/cheatsheet/">configuration
cheat sheet</link> for additional configuration options.
</para>
</section>
<section xml:id="modules-services-akkoma-user-management">
<title>User management</title>
<para>
After the Akkoma service is running, the administration utility
can be used to
<link xlink:href="https://docs.akkoma.dev/stable/administration/CLI_tasks/user/">manage
users</link>. In particular an administrative user can be created
with
</para>
<programlisting>
$ pleroma_ctl user new &lt;nickname&gt; &lt;email&gt; --admin --moderator --password &lt;password&gt;
</programlisting>
</section>
<section xml:id="modules-services-akkoma-proxy-configuration">
<title>Proxy configuration</title>
<para>
Although it is possible to expose Akkoma directly, it is common
practice to operate it behind an HTTP reverse proxy such as nginx.
</para>
<programlisting language="nix">
services.akkoma.nginx = {
enableACME = true;
forceSSL = true;
};
services.nginx = {
enable = true;
clientMaxBodySize = &quot;16m&quot;;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
};
</programlisting>
<para>
Please refer to <xref linkend="module-security-acme" /> for
details on how to provision an SSL/TLS certificate.
</para>
<section xml:id="modules-services-akkoma-media-proxy">
<title>Media proxy</title>
<para>
Without the media proxy function, Akkoma does not store any
remote media like pictures or video locally, and clients have to
fetch them directly from the source server.
</para>
<programlisting language="nix">
# Enable nginx slice module distributed with Tengine
services.nginx.package = pkgs.tengine;
# Enable media proxy
services.akkoma.config.&quot;:pleroma&quot;.&quot;:media_proxy&quot; = {
enabled = true;
proxy_opts.redirect_on_failure = true;
};
# Adjust the persistent cache size as needed:
# Assuming an average object size of 128 KiB, around 1 MiB
# of memory is required for the key zone per GiB of cache.
# Ensure that the cache directory exists and is writable by nginx.
services.nginx.commonHttpConfig = ''
proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
levels= keys_zone=akkoma_media_cache:16m max_size=16g
inactive=1y use_temp_path=off;
'';
services.akkoma.nginx = {
locations.&quot;/proxy&quot; = {
proxyPass = &quot;http://unix:/run/akkoma/socket&quot;;
extraConfig = ''
proxy_cache akkoma_media_cache;
# Cache objects in slices of 1 MiB
slice 1m;
proxy_cache_key $host$uri$is_args$args$slice_range;
proxy_set_header Range $slice_range;
# Decouple proxy and upstream responses
proxy_buffering on;
proxy_cache_lock on;
proxy_ignore_client_abort on;
# Default cache times for various responses
proxy_cache_valid 200 1y;
proxy_cache_valid 206 301 304 1h;
# Allow serving of stale items
proxy_cache_use_stale error timeout invalid_header updating;
'';
};
};
</programlisting>
<section xml:id="modules-services-akkoma-prefetch-remote-media">
<title>Prefetch remote media</title>
<para>
The following example enables the
<literal>MediaProxyWarmingPolicy</literal> MRF policy which
automatically fetches all media associated with a post through
the media proxy, as soon as the post is received by the
instance.
</para>
<programlisting language="nix">
services.akkoma.config.&quot;:pleroma&quot;.&quot;:mrf&quot;.policies =
map (pkgs.formats.elixirConf { }).lib.mkRaw [
&quot;Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy&quot;
];
</programlisting>
</section>
<section xml:id="modules-services-akkoma-media-previews">
<title>Media previews</title>
<para>
Akkoma can generate previews for media.
</para>
<programlisting language="nix">
services.akkoma.config.&quot;:pleroma&quot;.&quot;:media_preview_proxy&quot; = {
enabled = true;
thumbnail_max_width = 1920;
thumbnail_max_height = 1080;
};
</programlisting>
</section>
</section>
</section>
<section xml:id="modules-services-akkoma-frontend-management">
<title>Frontend management</title>
<para>
Akkoma will be deployed with the <literal>pleroma-fe</literal> and
<literal>admin-fe</literal> frontends by default. These can be
modified by setting
<link xlink:href="options.html#opt-services.akkoma.frontends"><option>services.akkoma.frontends</option></link>.
</para>
<para>
The following example overrides the primary frontend's default
configuration using a custom derivation.
</para>
<programlisting language="nix">
services.akkoma.frontends.primary.package = pkgs.runCommand &quot;pleroma-fe&quot; {
config = builtins.toJSON {
expertLevel = 1;
collapseMessageWithSubject = false;
stopGifs = false;
replyVisibility = &quot;following&quot;;
webPushHideIfCW = true;
hideScopeNotice = true;
renderMisskeyMarkdown = false;
hideSiteFavicon = true;
postContentType = &quot;text/markdown&quot;;
showNavShortcuts = false;
};
nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
passAsFile = [ &quot;config&quot; ];
} ''
mkdir $out
lndir ${pkgs.akkoma-frontends.pleroma-fe} $out
rm $out/static/config.json
jq -s add ${pkgs.akkoma-frontends.pleroma-fe}/static/config.json ${config} \
&gt;$out/static/config.json
'';
</programlisting>
</section>
<section xml:id="modules-services-akkoma-federation-policies">
<title>Federation policies</title>
<para>
Akkoma comes with a number of modules to police federation with
other ActivityPub instances. The most valuable for typical users
is the
<link xlink:href="https://docs.akkoma.dev/stable/configuration/cheatsheet/#mrf_simple"><literal>:mrf_simple</literal></link>
module which allows limiting federation based on instance
hostnames.
</para>
<para>
This configuration snippet provides an example on how these can be
used. Choosing an adequate federation policy is not trivial and
entails finding a balance between connectivity to the rest of the
fediverse and providing a pleasant experience to the users of an
instance.
</para>
<programlisting language="nix">
services.akkoma.config.&quot;:pleroma&quot; = with (pkgs.formats.elixirConf { }).lib; {
&quot;:mrf&quot;.policies = map mkRaw [
&quot;Pleroma.Web.ActivityPub.MRF.SimplePolicy&quot;
];
&quot;:mrf_simple&quot; = {
# Tag all media as sensitive
media_nsfw = mkMap {
&quot;nsfw.weird.kinky&quot; = &quot;Untagged NSFW content&quot;;
};
# Reject all activities except deletes
reject = mkMap {
&quot;kiwifarms.cc&quot; = &quot;Persistent harassment of users, no moderation&quot;;
};
# Force posts to be visible by followers only
followers_only = mkMap {
&quot;beta.birdsite.live&quot; = &quot;Avoid polluting timelines with Twitter posts&quot;;
};
};
};
</programlisting>
</section>
<section xml:id="modules-services-akkoma-upload-filters">
<title>Upload filters</title>
<para>
This example strips GPS and location metadata from uploads,
deduplicates them and anonymises the file name.
</para>
<programlisting language="nix">
services.akkoma.config.&quot;:pleroma&quot;.&quot;Pleroma.Upload&quot;.filters =
map (pkgs.formats.elixirConf { }).lib.mkRaw [
&quot;Pleroma.Upload.Filter.Exiftool&quot;
&quot;Pleroma.Upload.Filter.Dedupe&quot;
&quot;Pleroma.Upload.Filter.AnonymizeFilename&quot;
];
</programlisting>
</section>
<section xml:id="modules-services-akkoma-migration-pleroma">
<title>Migration from Pleroma</title>
<para>
Pleroma instances can be migrated to Akkoma either by copying the
database and upload data or by pointing Akkoma to the existing
data. The necessary database migrations are run automatically
during startup of the service.
</para>
<para>
The configuration has to be copy-edited manually.
</para>
<para>
Depending on the size of the database, the initial migration may
take a long time and exceed the startup timeout of the system
manager. To work around this issue one may adjust the startup
timeout
<option>systemd.services.akkoma.serviceConfig.TimeoutStartSec</option>
or simply run the migrations manually:
</para>
<programlisting>
pleroma_ctl migrate
</programlisting>
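<para>
As a minimal sketch of the first approach (the one hour value is
arbitrary and may need tuning):
</para>
<programlisting language="nix">
# Sketch: give the Akkoma unit more time to finish long migrations on startup.
systemd.services.akkoma.serviceConfig.TimeoutStartSec = &quot;1h&quot;;
</programlisting>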
<section xml:id="modules-services-akkoma-migration-pleroma-copy">
<title>Copying data</title>
<para>
Copying the Pleroma data instead of reusing it in place may
permit easier reversion to Pleroma, but allows the two data sets
to diverge.
</para>
<para>
First disable Pleroma and then copy its database and upload
data:
</para>
<programlisting>
# Create a copy of the database
nix-shell -p postgresql --run 'createdb -T pleroma akkoma'
# Copy upload data
mkdir /var/lib/akkoma
cp -R --reflink=auto /var/lib/pleroma/uploads /var/lib/akkoma/
</programlisting>
<para>
After the data has been copied, enable the Akkoma service and
verify that the migration has been successful. If no longer
required, the original data may then be deleted:
</para>
<programlisting>
# Delete original database
nix-shell -p postgresql --run 'dropdb pleroma'
# Delete original Pleroma state
rm -r /var/lib/pleroma
</programlisting>
</section>
<section xml:id="modules-services-akkoma-migration-pleroma-reuse">
<title>Reusing data</title>
<para>
To reuse the Pleroma data in place, disable Pleroma and enable
Akkoma, pointing it to the Pleroma database and upload
directory.
</para>
<programlisting language="nix">
# Adjust these settings according to the database name and upload directory path used by Pleroma
services.akkoma.config.&quot;:pleroma&quot;.&quot;Pleroma.Repo&quot;.database = &quot;pleroma&quot;;
services.akkoma.config.&quot;:pleroma&quot;.&quot;:instance&quot;.upload_dir = &quot;/var/lib/pleroma/uploads&quot;;
</programlisting>
<para>
Please keep in mind that after the Akkoma service has been
started, any migrations applied by Akkoma have to be rolled back
before the database can be used again with Pleroma. This can be
achieved through <literal>pleroma_ctl ecto.rollback</literal>.
Refer to the
<link xlink:href="https://hexdocs.pm/ecto_sql/Mix.Tasks.Ecto.Rollback.html">Ecto
SQL documentation</link> for details.
</para>
</section>
</section>
<section xml:id="modules-services-akkoma-advanced-deployment">
<title>Advanced deployment options</title>
<section xml:id="modules-services-akkoma-confinement">
<title>Confinement</title>
<para>
The Akkoma systemd service may be confined to a chroot with
</para>
<programlisting language="nix">
systemd.services.akkoma.confinement.enable = true;
</programlisting>
<para>
Confinement of services is not generally supported in NixOS and
therefore disabled by default. Depending on the Akkoma
configuration, the default confinement settings may be
insufficient and lead to subtle errors at run time, requiring
adjustment:
</para>
<para>
Use
<link xlink:href="options.html#opt-systemd.services._name_.confinement.packages"><option>systemd.services.akkoma.confinement.packages</option></link>
to make packages available in the chroot.
</para>
<para>
<option>systemd.services.akkoma.serviceConfig.BindPaths</option>
and
<option>systemd.services.akkoma.serviceConfig.BindReadOnlyPaths</option>
permit access to outside paths through bind mounts. Refer to
<link xlink:href="https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths="><citerefentry><refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum></citerefentry></link>
for details.
</para>
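<para>
For illustration, a sketch that exposes an external directory
read-only inside the chroot; the path is a placeholder:
</para>
<programlisting language="nix">
# Sketch: make an external secrets directory visible to the confined service.
systemd.services.akkoma.serviceConfig.BindReadOnlyPaths = [ &quot;/var/lib/secrets/akkoma&quot; ];
</programlisting>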
</section>
<section xml:id="modules-services-akkoma-distributed-deployment">
<title>Distributed deployment</title>
<para>
Being an Elixir application, Akkoma can be deployed in a
distributed fashion.
</para>
<para>
This requires setting
<link xlink:href="options.html#opt-services.akkoma.dist.address"><option>services.akkoma.dist.address</option></link>
and
<link xlink:href="options.html#opt-services.akkoma.dist.cookie"><option>services.akkoma.dist.cookie</option></link>.
The specifics depend strongly on the deployment environment. For
more information please check the relevant
<link xlink:href="https://www.erlang.org/doc/reference_manual/distributed.html">Erlang
documentation</link>.
</para>
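<para>
As a rough sketch with placeholder values; whether the cookie is
given literally or through the <literal>_secret</literal>
convention described earlier should be checked against the option
description:
</para>
<programlisting language="nix">
# Sketch only; both values are placeholders.
services.akkoma.dist = {
  address = &quot;192.0.2.10&quot;;
  cookie._secret = &quot;/var/lib/secrets/akkoma/erlang-cookie&quot;; # assumed _secret form
};
</programlisting>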
</section>
</section>
</chapter>

View file

@ -1080,6 +1080,6 @@ in
];
};
meta.doc = ./discourse.xml;
meta.doc = ./discourse.md;
meta.maintainers = [ lib.maintainers.talyz ];
}

View file

@ -1,331 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-discourse">
<title>Discourse</title>
<para>
<link xlink:href="https://www.discourse.org/">Discourse</link> is a
modern and open source discussion platform.
</para>
<section xml:id="module-services-discourse-basic-usage">
<title>Basic usage</title>
<para>
A minimal configuration using Let's Encrypt for TLS certificates
looks like this:
</para>
<programlisting>
services.discourse = {
enable = true;
hostname = &quot;discourse.example.com&quot;;
admin = {
email = &quot;admin@example.com&quot;;
username = &quot;admin&quot;;
fullName = &quot;Administrator&quot;;
passwordFile = &quot;/path/to/password_file&quot;;
};
secretKeyBaseFile = &quot;/path/to/secret_key_base_file&quot;;
};
security.acme.email = &quot;me@example.com&quot;;
security.acme.acceptTerms = true;
</programlisting>
<para>
Provided a proper DNS setup, you'll be able to connect to the
instance at <literal>discourse.example.com</literal> and log in
using the credentials provided in
<literal>services.discourse.admin</literal>.
</para>
</section>
<section xml:id="module-services-discourse-tls">
<title>Using a regular TLS certificate</title>
<para>
To set up TLS using a regular certificate and key on file, use the
<xref linkend="opt-services.discourse.sslCertificate" /> and
<xref linkend="opt-services.discourse.sslCertificateKey" />
options:
</para>
<programlisting>
services.discourse = {
enable = true;
hostname = &quot;discourse.example.com&quot;;
sslCertificate = &quot;/path/to/ssl_certificate&quot;;
sslCertificateKey = &quot;/path/to/ssl_certificate_key&quot;;
admin = {
email = &quot;admin@example.com&quot;;
username = &quot;admin&quot;;
fullName = &quot;Administrator&quot;;
passwordFile = &quot;/path/to/password_file&quot;;
};
secretKeyBaseFile = &quot;/path/to/secret_key_base_file&quot;;
};
</programlisting>
</section>
<section xml:id="module-services-discourse-database">
<title>Database access</title>
<para>
Discourse uses PostgreSQL to store most of its data. A database
will automatically be enabled and a database and role created
unless <xref linkend="opt-services.discourse.database.host" /> is
changed from its default of <literal>null</literal> or
<xref linkend="opt-services.discourse.database.createLocally" />
is set to <literal>false</literal>.
</para>
<para>
External database access can also be configured by setting
<xref linkend="opt-services.discourse.database.host" />,
<xref linkend="opt-services.discourse.database.username" /> and
<xref linkend="opt-services.discourse.database.passwordFile" /> as
appropriate. Note that you need to manually create a database
called <literal>discourse</literal> (or the name you chose in
<xref linkend="opt-services.discourse.database.name" />) and allow
the configured database user full access to it.
</para>
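<para>
For illustration, a sketch of such an external database setup; the
host name and file path are placeholders:
</para>
<programlisting>
services.discourse.database = {
  host = &quot;database.example.com&quot;;
  username = &quot;discourse&quot;;
  passwordFile = &quot;/path/to/database_password_file&quot;;
};
</programlisting>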
</section>
<section xml:id="module-services-discourse-mail">
<title>Email</title>
<para>
In addition to the basic setup, you'll want to configure an SMTP
server Discourse can use to send user registration and password
reset emails, among others. You can also optionally let Discourse
receive email, which enables people to reply to threads and
conversations via email.
</para>
<para>
A basic setup which assumes you want to use your configured
<link linkend="opt-services.discourse.hostname">hostname</link> as
email domain can be done like this:
</para>
<programlisting>
services.discourse = {
enable = true;
hostname = &quot;discourse.example.com&quot;;
sslCertificate = &quot;/path/to/ssl_certificate&quot;;
sslCertificateKey = &quot;/path/to/ssl_certificate_key&quot;;
admin = {
email = &quot;admin@example.com&quot;;
username = &quot;admin&quot;;
fullName = &quot;Administrator&quot;;
passwordFile = &quot;/path/to/password_file&quot;;
};
mail.outgoing = {
serverAddress = &quot;smtp.emailprovider.com&quot;;
port = 587;
username = &quot;user@emailprovider.com&quot;;
passwordFile = &quot;/path/to/smtp_password_file&quot;;
};
mail.incoming.enable = true;
secretKeyBaseFile = &quot;/path/to/secret_key_base_file&quot;;
};
</programlisting>
<para>
This assumes you have set up an MX record for the address you've
set in
<link linkend="opt-services.discourse.hostname">hostname</link>
and requires proper SPF, DKIM and DMARC configuration to be done
for the domain you're sending from, in order for email to be
reliably delivered.
</para>
<para>
If you want to use a different domain for your outgoing email (for
example <literal>example.com</literal> instead of
<literal>discourse.example.com</literal>) you should set
<xref linkend="opt-services.discourse.mail.notificationEmailAddress" />
and
<xref linkend="opt-services.discourse.mail.contactEmailAddress" />
manually.
</para>
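<para>
For illustration, a sketch with placeholder addresses:
</para>
<programlisting>
services.discourse.mail = {
  notificationEmailAddress = &quot;notifications@example.com&quot;;
  contactEmailAddress = &quot;admin@example.com&quot;;
};
</programlisting>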
<note>
<para>
Setup of TLS for incoming email is currently only configured
automatically when a regular TLS certificate is used, i.e. when
<xref linkend="opt-services.discourse.sslCertificate" /> and
<xref linkend="opt-services.discourse.sslCertificateKey" /> are
set.
</para>
</note>
</section>
<section xml:id="module-services-discourse-settings">
<title>Additional settings</title>
<para>
Additional site settings and backend settings, for which no
explicit NixOS options are provided, can be set in
<xref linkend="opt-services.discourse.siteSettings" /> and
<xref linkend="opt-services.discourse.backendSettings" />
respectively.
</para>
<section xml:id="module-services-discourse-site-settings">
<title>Site settings</title>
<para>
<quote>Site settings</quote> are the settings that can be
changed through the Discourse UI. Their
<emphasis>default</emphasis> values can be set using
<xref linkend="opt-services.discourse.siteSettings" />.
</para>
<para>
Settings are expressed as a Nix attribute set which matches the
structure of the configuration in
<link xlink:href="https://github.com/discourse/discourse/blob/master/config/site_settings.yml">config/site_settings.yml</link>.
To find a settings path, you only need to care about the first
two levels; i.e. its category (e.g. <literal>login</literal>)
and name (e.g. <literal>invite_only</literal>).
</para>
<para>
Settings containing secret data should be set to an attribute
set containing the attribute <literal>_secret</literal> - a
string pointing to a file containing the value the option should
be set to. See the example.
</para>
</section>
<section xml:id="module-services-discourse-backend-settings">
<title>Backend settings</title>
<para>
Settings are expressed as a Nix attribute set which matches the
structure of the configuration in
<link xlink:href="https://github.com/discourse/discourse/blob/stable/config/discourse_defaults.conf">config/discourse.conf</link>.
Empty parameters can be defined by setting them to
<literal>null</literal>.
</para>
</section>
<section xml:id="module-services-discourse-settings-example">
<title>Example</title>
<para>
The following example sets the title and description of the
Discourse instance and enables GitHub login in the site
settings, and changes a few request limits in the backend
settings:
</para>
<programlisting>
services.discourse = {
enable = true;
hostname = &quot;discourse.example.com&quot;;
sslCertificate = &quot;/path/to/ssl_certificate&quot;;
sslCertificateKey = &quot;/path/to/ssl_certificate_key&quot;;
admin = {
email = &quot;admin@example.com&quot;;
username = &quot;admin&quot;;
fullName = &quot;Administrator&quot;;
passwordFile = &quot;/path/to/password_file&quot;;
};
mail.outgoing = {
serverAddress = &quot;smtp.emailprovider.com&quot;;
port = 587;
username = &quot;user@emailprovider.com&quot;;
passwordFile = &quot;/path/to/smtp_password_file&quot;;
};
mail.incoming.enable = true;
siteSettings = {
required = {
title = &quot;My Cats&quot;;
site_description = &quot;Discuss My Cats (and be nice plz)&quot;;
};
login = {
enable_github_logins = true;
github_client_id = &quot;a2f6dfe838cb3206ce20&quot;;
github_client_secret._secret = /run/keys/discourse_github_client_secret;
};
};
backendSettings = {
max_reqs_per_ip_per_minute = 300;
max_reqs_per_ip_per_10_seconds = 60;
max_asset_reqs_per_ip_per_10_seconds = 250;
max_reqs_per_ip_mode = &quot;warn+block&quot;;
};
secretKeyBaseFile = &quot;/path/to/secret_key_base_file&quot;;
};
</programlisting>
<para>
In the resulting site settings file, the
<literal>login.github_client_secret</literal> key will be set to
the contents of the
<filename>/run/keys/discourse_github_client_secret</filename>
file.
</para>
</section>
</section>
<section xml:id="module-services-discourse-plugins">
<title>Plugins</title>
<para>
You can install Discourse plugins using the
<xref linkend="opt-services.discourse.plugins" /> option.
Pre-packaged plugins are provided in
<literal>&lt;your_discourse_package_here&gt;.plugins</literal>. If
you want the full suite of plugins provided through
<literal>nixpkgs</literal>, you can also set the
<xref linkend="opt-services.discourse.package" /> option to
<literal>pkgs.discourseAllPlugins</literal>.
</para>
<para>
Plugins can be built with the
<literal>&lt;your_discourse_package_here&gt;.mkDiscoursePlugin</literal>
function. Normally, it should suffice to provide a
<literal>name</literal> and <literal>src</literal> attribute. If
the plugin has Ruby dependencies, however, they need to be
packaged in accordance with the
<link xlink:href="https://nixos.org/manual/nixpkgs/stable/#developing-with-ruby">Developing
with Ruby</link> section of the Nixpkgs manual and the appropriate
gem options set in <literal>bundlerEnvArgs</literal> (normally
<literal>gemdir</literal> is sufficient). A plugin's Ruby
dependencies are listed in its <filename>plugin.rb</filename> file
as function calls to <literal>gem</literal>. To construct the
corresponding <filename>Gemfile</filename> manually, run
<command>bundle init</command>, then add the
<literal>gem</literal> lines to it verbatim.
</para>
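<para>
For illustration, a sketch of packaging a plugin this way. The
plugin name, repository and hashes are placeholders, and
<literal>bundlerEnvArgs</literal> is only needed for plugins with
Ruby dependencies:
</para>
<programlisting>
services.discourse.plugins = [
  (config.services.discourse.package.mkDiscoursePlugin {
    name = &quot;discourse-example-plugin&quot;;
    src = pkgs.fetchFromGitHub {
      owner = &quot;discourse&quot;;
      repo = &quot;discourse-example-plugin&quot;;
      rev = &quot;...&quot;;
      sha256 = &quot;...&quot;;
    };
    # bundlerEnvArgs.gemdir = ./.;
  })
];
</programlisting>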
<para>
Much of the packaging can be done automatically by the
<filename>nixpkgs/pkgs/servers/web-apps/discourse/update.py</filename>
script - just add the plugin to the <literal>plugins</literal>
list in the <literal>update_plugins</literal> function and run the
script:
</para>
<programlisting language="bash">
./update.py update-plugins
</programlisting>
<para>
Some plugins provide
<link linkend="module-services-discourse-site-settings">site
settings</link>. Their defaults can be configured using
<xref linkend="opt-services.discourse.siteSettings" />, just like
regular site settings. To find the names of these settings, look
in the <literal>config/settings.yml</literal> file of the plugin
repo.
</para>
<para>
For example, to add the
<link xlink:href="https://github.com/discourse/discourse-spoiler-alert">discourse-spoiler-alert</link>
and
<link xlink:href="https://github.com/discourse/discourse-solved">discourse-solved</link>
plugins, and disable <literal>discourse-spoiler-alert</literal> by
default:
</para>
<programlisting>
services.discourse = {
enable = true;
hostname = &quot;discourse.example.com&quot;;
sslCertificate = &quot;/path/to/ssl_certificate&quot;;
sslCertificateKey = &quot;/path/to/ssl_certificate_key&quot;;
admin = {
email = &quot;admin@example.com&quot;;
username = &quot;admin&quot;;
fullName = &quot;Administrator&quot;;
passwordFile = &quot;/path/to/password_file&quot;;
};
mail.outgoing = {
serverAddress = &quot;smtp.emailprovider.com&quot;;
port = 587;
username = &quot;user@emailprovider.com&quot;;
passwordFile = &quot;/path/to/smtp_password_file&quot;;
};
mail.incoming.enable = true;
plugins = with config.services.discourse.package.plugins; [
discourse-spoiler-alert
discourse-solved
];
siteSettings = {
plugins = {
spoiler_enabled = false;
};
};
secretKeyBaseFile = &quot;/path/to/secret_key_base_file&quot;;
};
</programlisting>
</section>
</chapter>

View file

@ -167,6 +167,6 @@ in {
meta = {
maintainers = with maintainers; [ ma27 ];
doc = ./grocy.xml;
doc = ./grocy.md;
};
}

View file

@ -1,84 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-grocy">
<title>Grocy</title>
<para>
<link xlink:href="https://grocy.info/">Grocy</link> is a web-based
self-hosted groceries &amp; household management solution for your
home.
</para>
<section xml:id="module-services-grocy-basic-usage">
<title>Basic usage</title>
<para>
A very basic configuration may look like this:
</para>
<programlisting>
{ pkgs, ... }:
{
services.grocy = {
enable = true;
hostName = &quot;grocy.tld&quot;;
};
}
</programlisting>
<para>
This configures a simple vhost using
<link linkend="opt-services.nginx.enable">nginx</link> which
listens to <literal>grocy.tld</literal> with fully configured
ACME/LE (this can be disabled by setting
<link linkend="opt-services.grocy.nginx.enableSSL">services.grocy.nginx.enableSSL</link>
to <literal>false</literal>). After the initial setup the
credentials <literal>admin:admin</literal> can be used to login.
</para>
<para>
The application's state is persisted at
<literal>/var/lib/grocy/grocy.db</literal> in a
<literal>sqlite3</literal> database. The migration is applied when
requesting the <literal>/</literal>-route of the application.
</para>
</section>
<section xml:id="module-services-grocy-settings">
<title>Settings</title>
<para>
The configuration for <literal>grocy</literal> is located at
<literal>/etc/grocy/config.php</literal>. By default, the
following settings can be defined in the NixOS-configuration:
</para>
<programlisting>
{ pkgs, ... }:
{
services.grocy.settings = {
# The default currency in the system for invoices etc.
# Please note that exchange rates aren't taken into account, this
# is just the setting for what's shown in the frontend.
currency = &quot;EUR&quot;;
# The display language (and locale configuration) for grocy.
culture = &quot;de&quot;;
calendar = {
# Whether or not to show the week-numbers
# in the calendar.
showWeekNumber = true;
# Index of the first day to be shown in the calendar (0=Sunday, 1=Monday,
# 2=Tuesday and so on).
firstDayOfWeek = 2;
};
};
}
</programlisting>
<para>
If you want to alter the configuration file on your own, you can
do this manually with an expression like this:
</para>
<programlisting>
{ lib, ... }:
{
environment.etc.&quot;grocy/config.php&quot;.text = lib.mkAfter ''
// Arbitrary PHP code in grocy's configuration file
'';
}
</programlisting>
</section>
</chapter>

View file

@ -451,6 +451,6 @@ in
};
};
meta.doc = ./jitsi-meet.xml;
meta.doc = ./jitsi-meet.md;
meta.maintainers = lib.teams.jitsi.members;
}

View file

@ -1,55 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-jitsi-meet">
<title>Jitsi Meet</title>
<para>
With Jitsi Meet on NixOS you can quickly configure a complete,
private, self-hosted video conferencing solution.
</para>
<section xml:id="module-services-jitsi-basic-usage">
<title>Basic usage</title>
<para>
A minimal configuration using Let's Encrypt for TLS certificates
looks like this:
</para>
<programlisting>
{
services.jitsi-meet = {
enable = true;
hostName = &quot;jitsi.example.com&quot;;
};
services.jitsi-videobridge.openFirewall = true;
networking.firewall.allowedTCPPorts = [ 80 443 ];
security.acme.email = &quot;me@example.com&quot;;
security.acme.acceptTerms = true;
}
</programlisting>
</section>
<section xml:id="module-services-jitsi-configuration">
<title>Configuration</title>
<para>
Here is the minimal configuration with some additional settings:
</para>
<programlisting>
{
services.jitsi-meet = {
enable = true;
hostName = &quot;jitsi.example.com&quot;;
config = {
enableWelcomePage = false;
prejoinPageEnabled = true;
defaultLang = &quot;fi&quot;;
};
interfaceConfig = {
SHOW_JITSI_WATERMARK = false;
SHOW_WATERMARK_FOR_GUESTS = false;
};
};
services.jitsi-videobridge.openFirewall = true;
networking.firewall.allowedTCPPorts = [ 80 443 ];
security.acme.email = &quot;me@example.com&quot;;
security.acme.acceptTerms = true;
}
</programlisting>
</section>
</chapter>

View file

@ -674,6 +674,6 @@ in
mkIf createLocalMySQL (mkDefault dbPkg);
};
meta.doc = ./keycloak.xml;
meta.doc = ./keycloak.md;
meta.maintainers = [ maintainers.talyz ];
}

View file

@ -1,177 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-keycloak">
<title>Keycloak</title>
<para>
<link xlink:href="https://www.keycloak.org/">Keycloak</link> is an
open source identity and access management server with support for
<link xlink:href="https://openid.net/connect/">OpenID
Connect</link>, <link xlink:href="https://oauth.net/2/">OAuth
2.0</link> and
<link xlink:href="https://en.wikipedia.org/wiki/SAML_2.0">SAML
2.0</link>.
</para>
<section xml:id="module-services-keycloak-admin">
<title>Administration</title>
<para>
An administrative user with the username <literal>admin</literal>
is automatically created in the <literal>master</literal> realm.
Its initial password can be configured by setting
<xref linkend="opt-services.keycloak.initialAdminPassword" /> and
defaults to <literal>changeme</literal>. The password is not
stored safely and should be changed immediately in the admin
panel.
</para>
<para>
Refer to the
<link xlink:href="https://www.keycloak.org/docs/latest/server_admin/index.html">Keycloak
Server Administration Guide</link> for information on how to
administer your Keycloak instance.
</para>
</section>
<section xml:id="module-services-keycloak-database">
<title>Database access</title>
<para>
Keycloak can be used with either PostgreSQL, MariaDB or MySQL.
Which one is used can be configured in
<xref linkend="opt-services.keycloak.database.type" />. The
selected database will automatically be enabled and a database and
role created unless
<xref linkend="opt-services.keycloak.database.host" /> is changed
from its default of <literal>localhost</literal> or
<xref linkend="opt-services.keycloak.database.createLocally" /> is
set to <literal>false</literal>.
</para>
<para>
External database access can also be configured by setting
<xref linkend="opt-services.keycloak.database.host" />,
<xref linkend="opt-services.keycloak.database.name" />,
<xref linkend="opt-services.keycloak.database.username" />,
<xref linkend="opt-services.keycloak.database.useSSL" /> and
<xref linkend="opt-services.keycloak.database.caCert" /> as
appropriate. Note that you need to manually create the database
and allow the configured database user full access to it.
</para>
<para>
<xref linkend="opt-services.keycloak.database.passwordFile" />
must be set to the path to a file containing the password used to
log in to the database. If
<xref linkend="opt-services.keycloak.database.host" /> and
<xref linkend="opt-services.keycloak.database.createLocally" />
are kept at their defaults, the database role
<literal>keycloak</literal> with that password is provisioned on
the local database instance.
</para>
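<para>
For illustration, a sketch combining the options above; all values
are placeholders:
</para>
<programlisting>
services.keycloak.database = {
  host = &quot;database.example.com&quot;;
  name = &quot;keycloak&quot;;
  username = &quot;keycloak&quot;;
  useSSL = true;
  caCert = &quot;/path/to/database_ca_cert&quot;;
  passwordFile = &quot;/run/keys/db_password&quot;;
};
</programlisting>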
<warning>
<para>
The path should be provided as a string, not a Nix path, since
Nix paths are copied into the world-readable Nix store.
</para>
</warning>
</section>
<section xml:id="module-services-keycloak-hostname">
<title>Hostname</title>
<para>
The hostname is used to build the public URL used as base for all
frontend requests and must be configured through
<xref linkend="opt-services.keycloak.settings.hostname" />.
</para>
<note>
<para>
If you're migrating an old Wildfly-based Keycloak instance and
want to keep compatibility with your current clients, you'll
likely want to set
<xref linkend="opt-services.keycloak.settings.http-relative-path" />
to <literal>/auth</literal>. See the option description for more
details.
</para>
</note>
<para>
<xref linkend="opt-services.keycloak.settings.hostname-strict-backchannel" />
determines whether Keycloak should force all requests to go
through the frontend URL. By default, Keycloak allows backend
requests to instead use its local hostname or IP address and may
also advertise it to clients through its OpenID Connect Discovery
endpoint.
</para>
<para>
For more information on hostname configuration, see the
<link xlink:href="https://www.keycloak.org/server/hostname">Hostname
section of the Keycloak Server Installation and Configuration
Guide</link>.
</para>
</section>
<section xml:id="module-services-keycloak-tls">
<title>Setting up TLS/SSL</title>
<para>
By default, Keycloak won't accept unsecured HTTP connections
originating from outside its local network.
</para>
<para>
HTTPS support requires a TLS/SSL certificate and a private key,
both
<link xlink:href="https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail">PEM
formatted</link>. Their paths should be set through
<xref linkend="opt-services.keycloak.sslCertificate" /> and
<xref linkend="opt-services.keycloak.sslCertificateKey" />.
</para>
<warning>
<para>
The paths should be provided as strings, not as Nix paths,
since Nix paths are copied into the world-readable Nix store.
</para>
</warning>
</section>
<section xml:id="module-services-keycloak-themes">
<title>Themes</title>
<para>
You can package custom themes and make them visible to Keycloak
through <xref linkend="opt-services.keycloak.themes" />. See the
<link xlink:href="https://www.keycloak.org/docs/latest/server_development/#_themes">Themes
section of the Keycloak Server Development Guide</link> and the
description of the aforementioned NixOS option for more
information.
</para>
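<para>
For illustration, a minimal sketch; both the attribute name and
<literal>pkgs.my-keycloak-theme</literal> are hypothetical:
</para>
<programlisting>
services.keycloak.themes = {
  my-theme = pkgs.my-keycloak-theme;
};
</programlisting>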
</section>
<section xml:id="module-services-keycloak-settings">
<title>Configuration file settings</title>
<para>
Keycloak server configuration parameters can be set in
<xref linkend="opt-services.keycloak.settings" />. These
correspond directly to options in
<filename>conf/keycloak.conf</filename>. Some of the most
important parameters are documented as suboptions, the rest can be
found in the
<link xlink:href="https://www.keycloak.org/server/all-config">All
configuration section of the Keycloak Server Installation and
Configuration Guide</link>.
</para>
<para>
Options containing secret data should be set to an attribute set
containing the attribute <literal>_secret</literal> - a string
pointing to a file containing the value the option should be set
to. See the description of
<xref linkend="opt-services.keycloak.settings" /> for an example.
</para>
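<para>
For illustration, a sketch of that mechanism; the setting name and
file path are placeholders:
</para>
<programlisting>
services.keycloak.settings = {
  https-key-store-password._secret = &quot;/run/keys/store_password&quot;;
};
</programlisting>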
</section>
<section xml:id="module-services-keycloak-example-config">
<title>Example configuration</title>
<para>
A basic configuration with some custom settings could look like
this:
</para>
<programlisting>
services.keycloak = {
enable = true;
settings = {
hostname = &quot;keycloak.example.com&quot;;
hostname-strict-backchannel = true;
};
initialAdminPassword = &quot;e6Wcm0RrtegMEHl&quot;; # change on first login
sslCertificate = &quot;/run/keys/ssl_cert&quot;;
sslCertificateKey = &quot;/run/keys/ssl_key&quot;;
database.passwordFile = &quot;/run/keys/db_password&quot;;
};
</programlisting>
</section>
</chapter>

View file

@ -6,7 +6,7 @@ let
in
{
meta.maintainers = with maintainers; [ happysalada ];
meta.doc = ./lemmy.xml;
meta.doc = ./lemmy.md;
imports = [
(mkRemovedOptionModule [ "services" "lemmy" "jwtSecretPath" ] "As of v0.13.0, Lemmy auto-generates the JWT secret.")

View file

@ -1,53 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-lemmy">
<title>Lemmy</title>
<para>
Lemmy is a federated alternative to Reddit, written in Rust.
</para>
<section xml:id="module-services-lemmy-quickstart">
<title>Quickstart</title>
<para>
The minimal configuration to start Lemmy is:
</para>
<programlisting language="nix">
services.lemmy = {
enable = true;
settings = {
hostname = &quot;lemmy.union.rocks&quot;;
database.createLocally = true;
};
caddy.enable = true;
};
</programlisting>
<para>
This will start the backend on port 8536 and the frontend on port
1234. It will expose your instance with a Caddy reverse proxy at
the hostname you've provided. PostgreSQL will be initialized on
that same host automatically.
</para>
</section>
<section xml:id="module-services-lemmy-usage">
<title>Usage</title>
<para>
On first connection you will be asked to define an admin user.
</para>
</section>
<section xml:id="module-services-lemmy-missing">
<title>Missing</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Exposing with nginx is not implemented yet.
</para>
</listitem>
<listitem>
<para>
This has been tested using a local database with a Unix socket
connection. Using different database settings will likely
require modifications.
</para>
</listitem>
</itemizedlist>
</section>
</chapter>

View file

@ -325,7 +325,7 @@ in {
};
meta = {
doc = ./matomo.xml;
doc = ./matomo.md;
maintainers = with lib.maintainers; [ florianjacob ];
};
}

View file

@ -1,107 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-matomo">
<title>Matomo</title>
<para>
Matomo is a real-time web analytics application. This module
configures php-fpm as backend for Matomo, optionally configuring an
nginx vhost as well.
</para>
<para>
An automatic setup is not supported by Matomo, so you need to
configure Matomo itself in the browser-based Matomo setup.
</para>
<section xml:id="module-services-matomo-database-setup">
<title>Database Setup</title>
<para>
You also need to configure a MariaDB or MySQL database and a user
for Matomo yourself, and enter those credentials in your browser.
You can use passwordless database authentication via the
UNIX_SOCKET authentication plugin with the following SQL commands:
</para>
<programlisting>
# For MariaDB
INSTALL PLUGIN unix_socket SONAME 'auth_socket';
CREATE DATABASE matomo;
CREATE USER 'matomo'@'localhost' IDENTIFIED WITH unix_socket;
GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
# For MySQL
INSTALL PLUGIN auth_socket SONAME 'auth_socket.so';
CREATE DATABASE matomo;
CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket;
GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
</programlisting>
<para>
Then fill in <literal>matomo</literal> as database user and
database name, and leave the password field blank. This
authentication works by allowing only the
<literal>matomo</literal> unix user to authenticate as the
<literal>matomo</literal> database user (without needing a
password), but no other users. For more information on
passwordless login, see
<link xlink:href="https://mariadb.com/kb/en/mariadb/unix_socket-authentication-plugin/">https://mariadb.com/kb/en/mariadb/unix_socket-authentication-plugin/</link>.
</para>
<para>
Of course, you can use password based authentication as well, e.g.
when the database is not on the same host.
</para>
</section>
<section xml:id="module-services-matomo-archive-processing">
<title>Archive Processing</title>
<para>
This module comes with the systemd service
<literal>matomo-archive-processing.service</literal> and a timer
that automatically triggers archive processing every hour. This
means that you can safely
<link xlink:href="https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour">disable
browser triggers for Matomo archiving</link> at
<literal>Administration &gt; System &gt; General Settings</literal>.
</para>
<para>
With automatic archive processing, you can now also enable to
<link xlink:href="https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs">delete
old visitor logs</link> at
<literal>Administration &gt; System &gt; Privacy</literal>, but
make sure that you run
<literal>systemctl start matomo-archive-processing.service</literal>
at least once without errors if you have already collected data
before, so that the reports get archived before the source data
gets deleted.
</para>
</section>
<section xml:id="module-services-matomo-backups">
<title>Backup</title>
<para>
You only need to take backups of your MySQL database and the
<filename>/var/lib/matomo/config/config.ini.php</filename> file.
Use a user in the <literal>matomo</literal> group or root to
access the file. For more information, see
<link xlink:href="https://matomo.org/faq/how-to-install/faq_138/">https://matomo.org/faq/how-to-install/faq_138/</link>.
</para>
</section>
<section xml:id="module-services-matomo-issues">
<title>Issues</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Matomo will warn you that the JavaScript tracker is not
writable. This is because it's located in the read-only Nix
store. You can safely ignore this, unless you need a plugin
that needs JavaScript tracker access.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-matomo-other-web-servers">
<title>Using other Web Servers than nginx</title>
<para>
You can use other web servers by forwarding calls for
<filename>index.php</filename> and <filename>piwik.php</filename>
to the
<link linkend="opt-services.phpfpm.pools._name_.socket"><literal>services.phpfpm.pools.&lt;name&gt;.socket</literal></link>
fastcgi unix socket. You can use the nginx configuration in the
module code as a reference to what else should be configured.
</para>
</section>
</chapter>

View file

@ -1146,5 +1146,5 @@ in {
}
]);
meta.doc = ./nextcloud.xml;
meta.doc = ./nextcloud.md;
}

View file

@ -1,333 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-nextcloud">
<title>Nextcloud</title>
<para>
<link xlink:href="https://nextcloud.com/">Nextcloud</link> is an
open-source, self-hostable cloud platform. The server setup can be
automated using
<link linkend="opt-services.nextcloud.enable">services.nextcloud</link>.
A desktop client is packaged at
<literal>pkgs.nextcloud-client</literal>.
</para>
<para>
The current default by NixOS is <literal>nextcloud25</literal> which
is also the latest major version available.
</para>
<section xml:id="module-services-nextcloud-basic-usage">
<title>Basic usage</title>
<para>
Nextcloud is a PHP-based application which requires an HTTP server
(<link linkend="opt-services.nextcloud.enable"><literal>services.nextcloud</literal></link>
optionally supports
<link linkend="opt-services.nginx.enable"><literal>services.nginx</literal></link>)
and a database (it's recommended to use
<link linkend="opt-services.postgresql.enable"><literal>services.postgresql</literal></link>).
</para>
<para>
A very basic configuration may look like this:
</para>
<programlisting>
{ pkgs, ... }:
{
services.nextcloud = {
enable = true;
hostName = &quot;nextcloud.tld&quot;;
config = {
dbtype = &quot;pgsql&quot;;
dbuser = &quot;nextcloud&quot;;
dbhost = &quot;/run/postgresql&quot;; # nextcloud will add /.s.PGSQL.5432 by itself
dbname = &quot;nextcloud&quot;;
adminpassFile = &quot;/path/to/admin-pass-file&quot;;
adminuser = &quot;root&quot;;
};
};
services.postgresql = {
enable = true;
ensureDatabases = [ &quot;nextcloud&quot; ];
ensureUsers = [
{ name = &quot;nextcloud&quot;;
ensurePermissions.&quot;DATABASE nextcloud&quot; = &quot;ALL PRIVILEGES&quot;;
}
];
};
# ensure that postgres is running *before* running the setup
systemd.services.&quot;nextcloud-setup&quot; = {
requires = [&quot;postgresql.service&quot;];
after = [&quot;postgresql.service&quot;];
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
}
</programlisting>
<para>
The <literal>hostName</literal> option is used internally to
configure an HTTP server using
<link xlink:href="https://php-fpm.org/"><literal>PHP-FPM</literal></link>
and <literal>nginx</literal>. The <literal>config</literal>
attribute set is used by the imperative installer and all values
are written to an additional file to ensure that changes can be
applied by changing the module's options.
</para>
<para>
In case the application serves multiple domains (those are checked
with
<link xlink:href="http://php.net/manual/en/reserved.variables.server.php"><literal>$_SERVER['HTTP_HOST']</literal></link>)
they need to be added to
<link linkend="opt-services.nextcloud.config.extraTrustedDomains"><literal>services.nextcloud.config.extraTrustedDomains</literal></link>.
</para>
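<para>
For example, assuming the additional domain is
<literal>nextcloud.example.org</literal>:
</para>
<programlisting>
services.nextcloud.config.extraTrustedDomains = [ &quot;nextcloud.example.org&quot; ];
</programlisting>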
<para>
Auto updates for Nextcloud apps can be enabled using
<link linkend="opt-services.nextcloud.autoUpdateApps.enable"><literal>services.nextcloud.autoUpdateApps</literal></link>.
</para>
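<para>
For example:
</para>
<programlisting>
services.nextcloud.autoUpdateApps.enable = true;
</programlisting>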
</section>
<section xml:id="module-services-nextcloud-pitfalls-during-upgrade">
<title>Common problems</title>
<itemizedlist>
<listitem>
<para>
<emphasis role="strong">General notes.</emphasis>
Unfortunately Nextcloud appears to be very stateful when it
comes to managing its own configuration. The config file lives
in the home directory of the <literal>nextcloud</literal> user
(by default
<literal>/var/lib/nextcloud/config/config.php</literal>) and
is also used to track several states of the application (e.g.,
whether installed or not).
</para>
<para>
All configuration parameters are also stored in
<filename>/var/lib/nextcloud/config/override.config.php</filename>
which is generated by the module and linked from the store to
ensure that all values from <filename>config.php</filename>
can be modified by the module. However,
<filename>config.php</filename> manages the application's
state and shouldn't be touched manually because of that.
</para>
<warning>
<para>
Don't delete <filename>config.php</filename>! This file
tracks the application's state and a deletion can cause
unwanted side-effects!
</para>
</warning>
<warning>
<para>
Don't rerun
<literal>nextcloud-occ maintenance:install</literal>! This
command tries to install the application and can cause
unwanted side-effects!
</para>
</warning>
</listitem>
<listitem>
<para>
<emphasis role="strong">Multiple version upgrades.</emphasis>
Nextcloud doesn't allow moving more than one major version
forward. E.g., if you're on <literal>v16</literal>, you cannot
upgrade to <literal>v18</literal>, you need to upgrade to
<literal>v17</literal> first. This is ensured automatically as
long as the
<link linkend="opt-system.stateVersion">stateVersion</link> is
declared properly. In that case the oldest version available
(one major behind the one from the previous NixOS release)
will be selected by default and the module will generate a
warning that reminds the user to upgrade to the latest Nextcloud
<emphasis>after</emphasis> that deploy.
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong"><literal>Error: Command &quot;upgrade&quot; is not defined.</literal></emphasis>
This error usually occurs if the initial installation
(<command>nextcloud-occ maintenance:install</command>) has
failed. In that case, the application is not installed, but an
upgrade is attempted anyway. Further context can be
found in
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/111175">NixOS/nixpkgs#111175</link>.
</para>
<para>
First of all, it makes sense to find out what went wrong by
looking at the logs of the installation via
<command>journalctl -u nextcloud-setup</command> and try to
fix the underlying issue.
</para>
<itemizedlist>
<listitem>
<para>
If this occurs on an <emphasis>existing</emphasis> setup,
this is most likely because the maintenance mode is
active. It can be deactivated by running
<command>nextcloud-occ maintenance:mode --off</command>.
It's advisable, though, to check the logs first to see why the
maintenance mode was activated.
</para>
</listitem>
<listitem>
<warning>
<para>
Only perform the following measures on <emphasis>freshly
installed instances!</emphasis>
</para>
</warning>
<para>
A re-run of the installer can be forced by
<emphasis>deleting</emphasis>
<filename>/var/lib/nextcloud/config/config.php</filename>.
This is advisable only in this case because a fresh install
doesn't have any state that can be lost. In case that
doesn't help, an entire re-creation can be forced via
<command>rm -rf ~nextcloud/</command>.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
<emphasis role="strong">Server-side encryption.</emphasis>
Nextcloud supports
<link xlink:href="https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html">server-side
encryption (SSE)</link>. This is not end-to-end encryption,
but can be used to encrypt files that will be persisted to
external storage such as S3. Please note that this won't work
anymore when using OpenSSL 3 for PHP's openssl extension
because this is implemented using the legacy cipher RC4. If
<xref linkend="opt-system.stateVersion" /> is
<emphasis>above</emphasis> <literal>22.05</literal>, this is
disabled by default. To turn it on again and for further
information please refer to
<xref linkend="opt-services.nextcloud.enableBrokenCiphersForSSE" />.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-nextcloud-httpd">
<title>Using an alternative webserver as reverse-proxy (e.g.
<literal>httpd</literal>)</title>
<para>
By default, <literal>nginx</literal> is used as reverse-proxy for
<literal>nextcloud</literal>. However, it's possible to use e.g.
<literal>httpd</literal> by explicitly disabling
<literal>nginx</literal> using
<xref linkend="opt-services.nginx.enable" /> and fixing the
settings <literal>listen.owner</literal> &amp;
<literal>listen.group</literal> in the
<link linkend="opt-services.phpfpm.pools">corresponding
<literal>phpfpm</literal> pool</link>.
</para>
<para>
An example configuration may look like this:
</para>
<programlisting>
{ config, lib, pkgs, ... }: {
services.nginx.enable = false;
services.nextcloud = {
enable = true;
hostName = &quot;localhost&quot;;
/* further, required options */
};
services.phpfpm.pools.nextcloud.settings = {
&quot;listen.owner&quot; = config.services.httpd.user;
&quot;listen.group&quot; = config.services.httpd.group;
};
services.httpd = {
enable = true;
adminAddr = &quot;webmaster@localhost&quot;;
extraModules = [ &quot;proxy_fcgi&quot; ];
virtualHosts.&quot;localhost&quot; = {
documentRoot = config.services.nextcloud.package;
extraConfig = ''
&lt;Directory &quot;${config.services.nextcloud.package}&quot;&gt;
&lt;FilesMatch &quot;\.php$&quot;&gt;
&lt;If &quot;-f %{REQUEST_FILENAME}&quot;&gt;
SetHandler &quot;proxy:unix:${config.services.phpfpm.pools.nextcloud.socket}|fcgi://localhost/&quot;
&lt;/If&gt;
&lt;/FilesMatch&gt;
&lt;IfModule mod_rewrite.c&gt;
RewriteEngine On
RewriteBase /
RewriteRule ^index\.php$ - [L]
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.php [L]
&lt;/IfModule&gt;
DirectoryIndex index.php
Require all granted
Options +FollowSymLinks
&lt;/Directory&gt;
'';
};
};
}
</programlisting>
</section>
<section xml:id="installing-apps-php-extensions-nextcloud">
<title>Installing Apps and PHP extensions</title>
<para>
Nextcloud apps are installed statefully through the web interface.
Some apps may require extra PHP extensions to be installed. This
can be configured with the
<xref linkend="opt-services.nextcloud.phpExtraExtensions" />
setting.
</para>
<para>
Alternatively, extra apps can also be declared with the
<xref linkend="opt-services.nextcloud.extraApps" /> setting. When
using this setting, apps can no longer be managed statefully
because this can lead to Nextcloud updating apps that are managed
by Nix. If you want automatic updates, it is recommended that you
use the web interface to install apps.
</para>
</section>
<section xml:id="module-services-nextcloud-maintainer-info">
<title>Maintainer information</title>
<para>
As stated in the previous paragraph, we must provide a clean
upgrade-path for Nextcloud since it cannot move more than one
major version forward on a single upgrade. This chapter adds some
notes on how Nextcloud updates should be rolled out in the future.
</para>
<para>
While minor and patch-level updates are no problem and can be done
directly in the package-expression (and should be backported to
supported stable branches after that), major-releases should be
added in a new attribute (e.g. Nextcloud
<literal>v19.0.0</literal> should be available in
<literal>nixpkgs</literal> as
<literal>pkgs.nextcloud19</literal>). To provide simple upgrade
paths it's generally useful to backport those as well to stable
branches. As long as the package-default isn't altered, this won't
break existing setups. After that, the versioning-warning in the
<literal>nextcloud</literal>-module should be updated to make sure
that the
<link linkend="opt-services.nextcloud.package">package</link>-option
selects the latest version on fresh setups.
</para>
<para>
If major releases are abandoned by upstream, we should first check
whether they are needed in NixOS for a safe upgrade path before
removing them. In that case we should keep those packages, but
mark them as insecure in an expression like this (in
<literal>&lt;nixpkgs/pkgs/servers/nextcloud/default.nix&gt;</literal>):
</para>
<programlisting>
/* ... */
{
nextcloud17 = generic {
version = &quot;17.0.x&quot;;
sha256 = &quot;0000000000000000000000000000000000000000000000000000&quot;;
eol = true;
};
}
</programlisting>
<para>
Ideally we should make sure that it's possible to jump two NixOS
versions forward: i.e. the warnings and the logic in the module
should safely guide a user upgrading from a Nextcloud on e.g. 19.09 to a
Nextcloud on 20.09.
</para>
</section>
</chapter>

View file

@ -5,7 +5,7 @@ let
in
{
meta.maintainers = with maintainers; [ happysalada ];
meta.doc = ./pict-rs.xml;
meta.doc = ./pict-rs.md;
options.services.pict-rs = {
enable = mkEnableOption (lib.mdDoc "pict-rs server");

View file

@ -1,185 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-pict-rs">
<title>Pict-rs</title>
<para>
pict-rs is a simple image hosting service.
</para>
<section xml:id="module-services-pict-rs-quickstart">
<title>Quickstart</title>
<para>
The minimum needed to start pict-rs is
</para>
<programlisting language="nix">
services.pict-rs.enable = true;
</programlisting>
<para>
This will start the HTTP server on port 8080 by default.
</para>
</section>
<section xml:id="module-services-pict-rs-usage">
<title>Usage</title>
<para>
pict-rs offers the following endpoints:
</para>
<itemizedlist>
<listitem>
<para>
<literal>POST /image</literal> for uploading an image.
Uploaded content must be valid multipart/form-data with an
image array located within the <literal>images[]</literal> key
</para>
<para>
This endpoint returns the following JSON structure on success
with a 201 Created status
</para>
<programlisting language="json">
{
&quot;files&quot;: [
{
&quot;delete_token&quot;: &quot;JFvFhqJA98&quot;,
&quot;file&quot;: &quot;lkWZDRvugm.jpg&quot;
},
{
&quot;delete_token&quot;: &quot;kAYy9nk2WK&quot;,
&quot;file&quot;: &quot;8qFS0QooAn.jpg&quot;
},
{
&quot;delete_token&quot;: &quot;OxRpM3sf0Y&quot;,
&quot;file&quot;: &quot;1hJaYfGE01.jpg&quot;
}
],
&quot;msg&quot;: &quot;ok&quot;
}
</programlisting>
</listitem>
<listitem>
<para>
<literal>GET /image/download?url=...</literal> Download an
image from a remote server, returning the same JSON payload as
the <literal>POST</literal> endpoint
</para>
</listitem>
<listitem>
<para>
<literal>GET /image/original/{file}</literal> for getting a
full-resolution image. <literal>file</literal> here is the
<literal>file</literal> key from the <literal>/image</literal>
endpoint's JSON
</para>
</listitem>
<listitem>
<para>
<literal>GET /image/details/original/{file}</literal> for
getting the details of a full-resolution image. The returned
JSON is structured like so:
</para>
<programlisting language="json">
{
&quot;width&quot;: 800,
&quot;height&quot;: 537,
&quot;content_type&quot;: &quot;image/webp&quot;,
&quot;created_at&quot;: [
2020,
345,
67376,
394363487
]
}
</programlisting>
</listitem>
<listitem>
<para>
<literal>GET /image/process.{ext}?src={file}&amp;...</literal>
get a file with transformations applied. existing
transformations include
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>identity=true</literal>: apply no changes
</para>
</listitem>
<listitem>
<para>
<literal>blur={float}</literal>: apply a gaussian blur to
the file
</para>
</listitem>
<listitem>
<para>
<literal>thumbnail={int}</literal>: produce a thumbnail of
the image fitting inside an <literal>{int}</literal> by
<literal>{int}</literal> square using raw pixel sampling
</para>
</listitem>
<listitem>
<para>
<literal>resize={int}</literal>: produce a thumbnail of
the image fitting inside an <literal>{int}</literal> by
<literal>{int}</literal> square using a Lanczos2 filter.
This is slower than sampling but looks a bit better in
some cases
</para>
</listitem>
<listitem>
<para>
<literal>crop={int-w}x{int-h}</literal>: produce a cropped
version of the image with an <literal>{int-w}</literal> by
<literal>{int-h}</literal> aspect ratio. The resulting
crop will be centered on the image. Either the width or
height of the image will remain full-size, depending on
the image's aspect ratio and the requested aspect ratio.
For example, a 1600x900 image cropped with a 1x1 aspect
ratio will become 900x900. A 1600x1100 image cropped with
a 16x9 aspect ratio will become 1600x900.
</para>
</listitem>
</itemizedlist>
<para>
Supported <literal>ext</literal> file extensions include
<literal>png</literal>, <literal>jpg</literal>, and
<literal>webp</literal>
</para>
<para>
An example of usage could be
</para>
<programlisting>
GET /image/process.jpg?src=asdf.png&amp;thumbnail=256&amp;blur=3.0
</programlisting>
<para>
which would create a 256x256px JPEG thumbnail and blur it
</para>
</listitem>
<listitem>
<para>
<literal>GET /image/details/process.{ext}?src={file}&amp;...</literal>
for getting the details of a processed image. The returned
JSON is the same format as listed for the full-resolution
details endpoint.
</para>
</listitem>
<listitem>
<para>
<literal>DELETE /image/delete/{delete_token}/{file}</literal>
or <literal>GET /image/delete/{delete_token}/{file}</literal>
to delete a file, where <literal>delete_token</literal> and
<literal>file</literal> are from the <literal>/image</literal>
endpoints JSON
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-pict-rs-missing">
<title>Missing</title>
<itemizedlist spacing="compact">
<listitem>
<para>
Configuring the secure-api-key is not included yet. The
envisioned basic use case is consumption on localhost by other
services without exposing the service to the internet.
</para>
</listitem>
</itemizedlist>
</section>
</chapter>

View file

@ -292,5 +292,5 @@ in {
};
meta.maintainers = with maintainers; [ ma27 ];
meta.doc = ./plausible.xml;
meta.doc = ./plausible.md;
}

View file

@ -1,45 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-plausible">
<title>Plausible</title>
<para>
<link xlink:href="https://plausible.io/">Plausible</link> is a
privacy-friendly alternative to Google analytics.
</para>
<section xml:id="module-services-plausible-basic-usage">
<title>Basic Usage</title>
<para>
First, a secret key needs to be generated. This can be done
with e.g.
</para>
<programlisting>
$ openssl rand -base64 64
</programlisting>
<para>
After that, <literal>plausible</literal> can be deployed like
this:
</para>
<programlisting>
{
services.plausible = {
enable = true;
adminUser = {
# activate is used to skip the email verification of the admin-user that's
# automatically created by plausible. This is only supported if
# postgresql is configured by the module. This is done by default, but
# can be turned off with services.plausible.database.postgres.setup.
activate = true;
email = &quot;admin@localhost&quot;;
passwordFile = &quot;/run/secrets/plausible-admin-pwd&quot;;
};
server = {
baseUrl = &quot;http://analytics.example.org&quot;;
# secretKeybaseFile is a path to the file which contains the secret generated
# with openssl as described above.
secretKeybaseFile = &quot;/run/secrets/plausible-secret-key-base&quot;;
};
};
}
</programlisting>
</section>
</chapter>

View file

@ -9,7 +9,7 @@ let
in
{
meta = {
doc = ./garage.xml;
doc = ./garage.md;
maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
};

View file

@ -1,206 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-garage">
<title>Garage</title>
<para>
<link xlink:href="https://garagehq.deuxfleurs.fr/">Garage</link> is
an open-source, self-hostable S3 store, simpler than MinIO, for
geodistributed stores. The server setup can be automated using
<link linkend="opt-services.garage.enable">services.garage</link>. A
client configured to your local Garage instance is available in the
global environment as <literal>garage-manage</literal>.
</para>
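<para>
A minimal sketch of enabling the module may look like this; the
settings shown below are only placeholders, so consult the Garage
documentation and the module options for the values that are actually
required (e.g. replication mode and RPC configuration):
</para>
<programlisting>
services.garage = {
  enable = true;
  settings = {
    # replication mode, RPC secret and bind address, S3 API settings, …
  };
};
</programlisting>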
<para>
The current default in NixOS is <literal>garage_0_8</literal>, which
is also the latest major version available.
</para>
<section xml:id="module-services-garage-upgrade-scenarios">
<title>General considerations on upgrades</title>
<para>
Garage provides a cookbook documentation on how to upgrade:
<link xlink:href="https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/">https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/</link>
</para>
<warning>
<para>
Garage has two types of upgrades: patch-level upgrades and
minor/major version upgrades.
</para>
<para>
In all cases, you should read the changelog and ideally test the
upgrade on a staging cluster.
</para>
<para>
Checking the health of your cluster can be achieved using
<literal>garage-manage repair</literal>.
</para>
</warning>
<warning>
<para>
Until 1.0 is released, patch-level upgrades are considered
minor version upgrades, and minor version upgrades are considered
major version upgrades; i.e. 0.6 to 0.7 is a major version
upgrade.
</para>
</warning>
<itemizedlist spacing="compact">
<listitem>
<para>
<emphasis role="strong">Straightforward upgrades (patch-level
upgrades).</emphasis> Upgrades must be performed one by one,
i.e. for each node: stop it, upgrade it (change
<link linkend="opt-system.stateVersion">stateVersion</link> or
<link linkend="opt-services.garage.package">services.garage.package</link>),
and restart it if switching did not already do so.
</para>
</listitem>
<listitem>
<para>
<emphasis role="strong">Multiple version upgrades.</emphasis>
Garage does not provide any guarantee on moving more than one
major version forward. E.g., if you're on
<literal>0.7</literal>, you cannot upgrade to
<literal>0.9</literal>; you need to upgrade to
<literal>0.8</literal> first. As long as
<link linkend="opt-system.stateVersion">stateVersion</link> is
declared properly, this is enforced automatically. The module
will issue a warning to remind the user to upgrade to the latest
Garage <emphasis>after</emphasis> that deploy.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-garage-advanced-upgrades">
<title>Advanced upgrades (minor/major version upgrades)</title>
<para>
Here are some baseline instructions to handle advanced upgrades in
Garage; when in doubt, please refer to the upstream instructions.
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Disable API and web access to Garage.
</para>
</listitem>
<listitem>
<para>
Perform
<literal>garage-manage repair --all-nodes --yes tables</literal>
and
<literal>garage-manage repair --all-nodes --yes blocks</literal>.
</para>
</listitem>
<listitem>
<para>
Verify the resulting logs and check that data is synced
properly between all nodes. If you have time, do additional
checks (<literal>scrub</literal>,
<literal>block_refs</literal>, etc.).
</para>
</listitem>
<listitem>
<para>
Check that queues are empty with
<literal>garage-manage stats</literal> or through monitoring
tools.
</para>
</listitem>
<listitem>
<para>
Run <literal>systemctl stop garage</literal> to stop the
currently running Garage.
</para>
</listitem>
<listitem>
<para>
Back up the metadata folder of ALL your nodes, e.g. for a
metadata directory (the default one) in
<literal>/var/lib/garage/meta</literal>, you can run
<literal>pushd /var/lib/garage; tar -acf meta-v0.7.tar.zst meta/; popd</literal>.
</para>
</listitem>
<listitem>
<para>
Run the offline migration:
<literal>nix-shell -p garage_0_8 --run &quot;garage offline-repair --yes&quot;</literal>;
this can take some time depending on how many objects are
stored in your cluster.
</para>
</listitem>
<listitem>
<para>
Bump Garage version in your NixOS configuration, either by
changing
<link linkend="opt-system.stateVersion">stateVersion</link> or
bumping
<link linkend="opt-services.garage.package">services.garage.package</link>,
this should restart Garage automatically.
</para>
</listitem>
<listitem>
<para>
Perform
<literal>garage-manage repair --all-nodes --yes tables</literal>
and
<literal>garage-manage repair --all-nodes --yes blocks</literal>.
</para>
</listitem>
<listitem>
<para>
Wait for a full table sync to run.
</para>
</listitem>
</itemizedlist>
<para>
Your upgraded cluster should now be in a working state; re-enable API
and web access.
</para>
</section>
<section xml:id="module-services-garage-maintainer-info">
<title>Maintainer information</title>
<para>
As stated in the previous paragraph, we must provide a clean
upgrade-path for Garage since it cannot move more than one major
version forward on a single upgrade. This chapter adds some notes
on how Garage updates should be rolled out in the future. This is
inspired by how Nextcloud does it.
</para>
<para>
While patch-level updates are no problem and can be done directly
in the package-expression (and should be backported to supported
stable branches after that), major-releases should be added in a
new attribute (e.g. Garage <literal>v0.8.0</literal> should be
available in <literal>nixpkgs</literal> as
<literal>pkgs.garage_0_8_0</literal>). To provide simple upgrade
paths it's generally useful to backport those as well to stable
branches. As long as the package-default isn't altered, this won't
break existing setups. After that, the versioning-warning in the
<literal>garage</literal>-module should be updated to make sure
that the
<link linkend="opt-services.garage.package">package</link>-option
selects the latest version on fresh setups.
</para>
<para>
If major releases are abandoned by upstream, we should first check
whether they are needed in NixOS for a safe upgrade path before
removing them. In that case we should keep those packages, but
mark them as insecure in an expression like this (in
<literal>&lt;nixpkgs/pkgs/tools/filesystem/garage/default.nix&gt;</literal>):
</para>
<programlisting>
/* ... */
{
garage_0_7_3 = generic {
version = &quot;0.7.3&quot;;
sha256 = &quot;0000000000000000000000000000000000000000000000000000&quot;;
eol = true;
};
}
</programlisting>
<para>
Ideally we should make sure that it's possible to jump two NixOS
versions forward: i.e. the warnings and the logic in the module
should safely guide a user upgrading from a Garage on e.g. 22.11 to a
Garage on 23.11.
</para>
</section>
</chapter>

View file

@ -66,7 +66,7 @@ in
{
meta = {
doc = ./gnome.xml;
doc = ./gnome.md;
maintainers = teams.gnome.members;
};

View file

@ -1,261 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="chap-gnome">
<title>GNOME Desktop</title>
<para>
GNOME provides a simple, yet full-featured desktop environment with
a focus on productivity. Its Mutter compositor supports both Wayland
and X server, and the GNOME Shell user interface is fully
customizable by extensions.
</para>
<section xml:id="sec-gnome-enable">
<title>Enabling GNOME</title>
<para>
All of the core apps, optional apps, games, and core developer
tools from GNOME are available.
</para>
<para>
To enable the GNOME desktop use:
</para>
<programlisting>
services.xserver.desktopManager.gnome.enable = true;
services.xserver.displayManager.gdm.enable = true;
</programlisting>
<note>
<para>
While it is not strictly necessary to use GDM as the display
manager with GNOME, it is recommended, as some features such as
screen lock
<link linkend="sec-gnome-faq-can-i-use-lightdm-with-gnome">might
not work</link> without it.
</para>
</note>
<para>
The default applications used in NixOS are very minimal, inspired
by the defaults used in
<link xlink:href="https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/40.0/elements/core/meta-gnome-core-utilities.bst">gnome-build-meta</link>.
</para>
<section xml:id="sec-gnome-without-the-apps">
<title>GNOME without the apps</title>
<para>
If you'd like to use only the GNOME desktop and not the apps,
you can disable them with:
</para>
<programlisting>
services.gnome.core-utilities.enable = false;
</programlisting>
<para>
and none of them will be installed.
</para>
<para>
If you'd like to omit only a subset of the core utilities, you
can use
<xref linkend="opt-environment.gnome.excludePackages" />. Note
that this mechanism can only exclude core utilities, games and
core developer tools.
</para>
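<para>
For example (the excluded packages below are only illustrative):
</para>
<programlisting>
environment.gnome.excludePackages = with pkgs; [
  gnome-tour
  gnome.cheese
];
</programlisting>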
</section>
<section xml:id="sec-gnome-disabling-services">
<title>Disabling GNOME services</title>
<para>
It is also possible to disable many of the
<link xlink:href="https://github.com/NixOS/nixpkgs/blob/b8ec4fd2a4edc4e30d02ba7b1a2cc1358f3db1d5/nixos/modules/services/x11/desktop-managers/gnome.nix#L329-L348">core
services</link>. For example, if you do not need file indexing,
you can disable Tracker with:
</para>
<programlisting>
services.gnome.tracker-miners.enable = false;
services.gnome.tracker.enable = false;
</programlisting>
<para>
Note, however, that doing so is not supported and might break
some applications. Notably, GNOME Music cannot work without
Tracker.
</para>
</section>
<section xml:id="sec-gnome-games">
<title>GNOME games</title>
<para>
You can install all of the GNOME games with:
</para>
<programlisting>
services.gnome.games.enable = true;
</programlisting>
</section>
<section xml:id="sec-gnome-core-developer-tools">
<title>GNOME core developer tools</title>
<para>
You can install GNOME core developer tools with:
</para>
<programlisting>
services.gnome.core-developer-tools.enable = true;
</programlisting>
</section>
</section>
<section xml:id="sec-gnome-enable-flashback">
<title>Enabling GNOME Flashback</title>
<para>
GNOME Flashback provides a desktop environment based on the
classic GNOME 2 architecture. You can enable the default GNOME
Flashback session, which uses the Metacity window manager, with:
</para>
<programlisting>
services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
</programlisting>
<para>
It is also possible to create custom sessions that replace
Metacity with a different window manager using
<xref linkend="opt-services.xserver.desktopManager.gnome.flashback.customSessions" />.
</para>
<para>
The following example uses <literal>xmonad</literal> window
manager:
</para>
<programlisting>
services.xserver.desktopManager.gnome.flashback.customSessions = [
{
wmName = &quot;xmonad&quot;;
wmLabel = &quot;XMonad&quot;;
wmCommand = &quot;${pkgs.haskellPackages.xmonad}/bin/xmonad&quot;;
enableGnomePanel = false;
}
];
</programlisting>
</section>
<section xml:id="sec-gnome-icons-and-gtk-themes">
<title>Icons and GTK Themes</title>
<para>
Icon themes and GTK themes don't require any special option to
install in NixOS.
</para>
<para>
You can add them to
<xref linkend="opt-environment.systemPackages" /> and switch to
them with GNOME Tweaks. If you'd like to do this manually in
dconf, change the values of the following keys:
</para>
<programlisting>
/org/gnome/desktop/interface/gtk-theme
/org/gnome/desktop/interface/icon-theme
</programlisting>
<para>
in <literal>dconf-editor</literal>
</para>
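<para>
For example, to make an icon theme and a GTK theme available (the
package names below are only examples):
</para>
<programlisting>
environment.systemPackages = with pkgs; [
  papirus-icon-theme
  arc-theme
];
</programlisting>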
</section>
<section xml:id="sec-gnome-shell-extensions">
<title>Shell Extensions</title>
<para>
Most Shell extensions are packaged under the
<literal>gnomeExtensions</literal> attribute. Some packages that
include Shell extensions, like <literal>gnome.gpaste</literal>,
don't have their extension decoupled under this attribute.
</para>
<para>
You can install them like any other package:
</para>
<programlisting>
environment.systemPackages = [
gnomeExtensions.dash-to-dock
gnomeExtensions.gsconnect
gnomeExtensions.mpris-indicator-button
];
</programlisting>
<para>
Unfortunately, we lack a way for these to be managed in a
completely declarative way. So you have to enable them manually
with an Extensions application. It is possible to use a
<link linkend="sec-gnome-gsettings-overrides">GSettings
override</link> for this on
<literal>org.gnome.shell.enabled-extensions</literal>, but that
will only influence the default value.
</para>
</section>
<section xml:id="sec-gnome-gsettings-overrides">
<title>GSettings Overrides</title>
<para>
The majority of software built on the GNOME platform uses GLib's
<link xlink:href="https://developer.gnome.org/gio/unstable/GSettings.html">GSettings</link>
system to manage runtime configuration. For our purposes, the
system consists of XML schemas describing the individual
configuration options, stored in the package, and a settings
backend, where the values of the settings are stored. On NixOS,
like on most Linux distributions, the dconf database is used as the
backend.
</para>
<para>
<link xlink:href="https://developer.gnome.org/gio/unstable/GSettings.html#id-1.4.19.2.9.25">GSettings
vendor overrides</link> can be used to adjust the default values
for settings of the GNOME desktop and apps by replacing the
default values specified in the XML schemas. Using overrides will
allow you to pre-seed user settings before you even start the
session.
</para>
<warning>
<para>
Overrides really only change the default values for GSettings
keys, so if you or an application changes the setting value, the
value set by the override will be ignored. Until
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/54150">NixOS's
dconf module implements changing values</link>, you will either
need to keep that in mind and clear the setting from the backend
using the <literal>dconf reset</literal> command when that happens,
or use the
<link xlink:href="https://nix-community.github.io/home-manager/options.html#opt-dconf.settings">module
from home-manager</link>.
</para>
</warning>
<para>
You can override the default GSettings values using the
<xref linkend="opt-services.xserver.desktopManager.gnome.extraGSettingsOverrides" />
option.
</para>
<para>
Take note that whatever packages you want to override GSettings
for, you need to add them to
<xref linkend="opt-services.xserver.desktopManager.gnome.extraGSettingsOverridePackages" />.
</para>
<para>
You can use the <literal>dconf-editor</literal> tool to explore which
GSettings you can set.
</para>
<section xml:id="sec-gnome-gsettings-overrides-example">
<title>Example</title>
<programlisting>
services.xserver.desktopManager.gnome = {
extraGSettingsOverrides = ''
# Change default background
[org.gnome.desktop.background]
picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
# Favorite apps in gnome-shell
[org.gnome.shell]
favorite-apps=['org.gnome.Photos.desktop', 'org.gnome.Nautilus.desktop']
'';
extraGSettingsOverridePackages = [
pkgs.gsettings-desktop-schemas # for org.gnome.desktop
pkgs.gnome.gnome-shell # for org.gnome.shell
];
};
</programlisting>
</section>
</section>
<section xml:id="sec-gnome-faq">
<title>Frequently Asked Questions</title>
<section xml:id="sec-gnome-faq-can-i-use-lightdm-with-gnome">
<title>Can I use LightDM with GNOME?</title>
<para>
Yes, you can, and the same goes for any other display manager in NixOS.
</para>
<para>
However, it doesn't work correctly for the Wayland session of
GNOME Shell yet, and won't be able to lock your screen.
</para>
<para>
See
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/56342">this
issue.</link>
</para>
</section>
</section>
</chapter>

View file

@ -17,7 +17,7 @@ in
{
meta = {
doc = ./pantheon.xml;
doc = ./pantheon.md;
maintainers = teams.pantheon.members;
};

View file

@ -1,171 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="chap-pantheon">
<title>Pantheon Desktop</title>
<para>
Pantheon is the desktop environment created for the elementary OS
distribution. It is written from scratch in Vala, utilizing GNOME
technologies with GTK and Granite.
</para>
<section xml:id="sec-pantheon-enable">
<title>Enabling Pantheon</title>
<para>
All of Pantheon is working in NixOS and the applications should be
available, aside from a few
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/58161">exceptions</link>.
To enable Pantheon, set
</para>
<programlisting>
services.xserver.desktopManager.pantheon.enable = true;
</programlisting>
<para>
This automatically enables LightDM and Pantheon's LightDM greeter.
If you'd like to disable this, set
</para>
<programlisting>
services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
services.xserver.displayManager.lightdm.enable = false;
</programlisting>
<para>
but please be aware that using Pantheon without LightDM as a display
manager will break screen locking from the UI. The NixOS module for
Pantheon installs all of Pantheon's default applications. If you'd
like to not install Pantheon's apps, set
</para>
<programlisting>
services.pantheon.apps.enable = false;
</programlisting>
<para>
You can also use
<xref linkend="opt-environment.pantheon.excludePackages" /> to
remove any other app (like <literal>elementary-mail</literal>).
</para>
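<para>
For example, assuming the mail app is packaged as
<literal>pkgs.pantheon.elementary-mail</literal>:
</para>
<programlisting>
environment.pantheon.excludePackages = [ pkgs.pantheon.elementary-mail ];
</programlisting>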
</section>
<section xml:id="sec-pantheon-wingpanel-switchboard">
<title>Wingpanel and Switchboard plugins</title>
<para>
Wingpanel and Switchboard work differently than they do in other
distributions as far as plugins are concerned. You cannot install a
plugin globally (like with
<option>environment.systemPackages</option>) to start using it.
You should instead be using the following options:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<xref linkend="opt-services.xserver.desktopManager.pantheon.extraWingpanelIndicators" />
</para>
</listitem>
<listitem>
<para>
<xref linkend="opt-services.xserver.desktopManager.pantheon.extraSwitchboardPlugs" />
</para>
</listitem>
</itemizedlist>
<para>
to configure the programs with plugs or indicators.
</para>
<para>
The difference in NixOS is that both these programs are patched to load
plugins from a directory that is the value of an environment
variable, all of which is controlled in Nix. If you need to
configure the particular packages manually, you can override the
packages like this:
</para>
<programlisting>
wingpanel-with-indicators.override {
indicators = [
pkgs.some-special-indicator
];
};
switchboard-with-plugs.override {
plugs = [
pkgs.some-special-plug
];
};
</programlisting>
<para>
Please note that, as the NixOS options describe these as
extra plugins, this only adds to the default plugins included
with the programs. If for some reason you'd like to configure
exactly which plugins to use, both packages have an argument for
this:
</para>
<programlisting>
wingpanel-with-indicators.override {
useDefaultIndicators = false;
indicators = specialListOfIndicators;
};
switchboard-with-plugs.override {
useDefaultPlugs = false;
plugs = specialListOfPlugs;
};
</programlisting>
<para>
This could be most useful for testing a particular plug-in in
isolation.
</para>
</section>
<section xml:id="sec-pantheon-faq">
<title>FAQ</title>
<variablelist spacing="compact">
<varlistentry>
<term>
<anchor xml:id="sec-pantheon-faq-messed-up-theme" />I have
switched from a different desktop and Pantheon's theming looks
messed up.
</term>
<listitem>
<para>
Open Switchboard and go to: Administration → About → Restore
Default Settings → Restore Settings. This will reset any
dconf settings to their Pantheon defaults. Note this could
reset certain GNOME-specific preferences if that desktop was
used previously.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<anchor xml:id="sec-pantheon-faq-gnome-and-pantheon" />I
cannot enable both GNOME and Pantheon.
</term>
<listitem>
<para>
This is a known
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/64611">issue</link>
and there is no known workaround.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<anchor xml:id="sec-pantheon-faq-appcenter" />Does AppCenter
work, or is it available?
</term>
<listitem>
<para>
AppCenter has been available since 20.03. Starting from
21.11, the Flatpak backend should work, so you can install
some Flatpak applications using it. However, due to missing
AppStream metadata, the PackageKit backend does not function
currently. See this
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/15932">issue</link>.
</para>
<para>
If you are using Pantheon, AppCenter should be installed by
default if you have
<link linkend="module-services-flatpak">Flatpak
support</link> enabled. If you also wish to add the
<literal>appcenter</literal> Flatpak remote:
</para>
<programlisting>
$ flatpak remote-add --if-not-exists appcenter https://flatpak.elementary.io/repo.flatpakrepo
</programlisting>
</listitem>
</varlistentry>
</variablelist>
</section>
</chapter>

View file

@ -8,7 +8,7 @@ in
{
meta = {
maintainers = with maintainers; [ cole-h grahamc raitobezarius ];
doc = ./external.xml;
doc = ./external.md;
};
options.boot.loader.external = {

View file

@ -1,43 +0,0 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-bootloader-external">
<title>External Bootloader Backends</title>
<para>
NixOS has support for several bootloader backends by default:
systemd-boot, grub, uboot, etc. The built-in bootloader backend
support is generic and supports most use cases. Some users may
prefer to create advanced workflows around managing the bootloader
and bootable entries.
</para>
<para>
You can replace the built-in bootloader support with your own
tooling using the <quote>external</quote> bootloader option.
</para>
<para>
Imagine you have created a new package called FooBoot. FooBoot
provides a program at
<literal>${pkgs.fooboot}/bin/fooboot-install</literal> which takes
the system closure's path as its only argument and configures the
system's bootloader.
</para>
<para>
You can enable FooBoot like this:
</para>
<programlisting language="nix">
{ pkgs, ... }: {
boot.loader.external = {
enable = true;
installHook = &quot;${pkgs.fooboot}/bin/fooboot-install&quot;;
};
}
</programlisting>
<section xml:id="sec-bootloader-external-developing">
<title>Developing Custom Bootloader Backends</title>
<para>
Bootloaders should use
<link xlink:href="https://github.com/NixOS/rfcs/pull/125">RFC-0125</link>s
Bootspec format and synthesis tools to identify the key properties
for bootable system generations.
</para>
</section>
</chapter>

View file

@ -0,0 +1,63 @@
{ lib
, stdenv
, python3
, python3Minimal
}:
let
# python3Minimal can't be overridden with packages on Darwin, due to a missing framework.
# Instead of modifying stdenv, we take the easy way out, since most people on Darwin will
# just be hacking on the Nixpkgs manual (which also uses make-options-doc).
python = ((if stdenv.isDarwin then python3 else python3Minimal).override {
self = python;
includeSiteCustomize = true;
});
# TODO add our own small test suite, maybe add tests for these deps to channels?
markdown-it-py-no-tests = python.pkgs.markdown-it-py.override {
disableTests = true;
};
mdit-py-plugins-no-tests = python.pkgs.mdit-py-plugins.override {
markdown-it-py = markdown-it-py-no-tests;
disableTests = true;
};
in
python.pkgs.buildPythonApplication {
pname = "nixos-render-docs";
version = "0.0";
format = "pyproject";
src = lib.cleanSourceWith {
filter = name: type:
lib.cleanSourceFilter name type
&& ! (type == "directory"
&& builtins.elem
(baseNameOf name)
[
".pytest_cache"
".mypy_cache"
"__pycache__"
]);
src = ./src;
};
nativeBuildInputs = [
python.pkgs.setuptools
python.pkgs.pytestCheckHook
];
propagatedBuildInputs = [
markdown-it-py-no-tests
mdit-py-plugins-no-tests
python.pkgs.frozendict
];
pytestFlagsArray = [ "-vvrP" "tests/" ];
meta = with lib; {
description = "Renderer for NixOS manual and option docs";
license = licenses.mit;
maintainers = [ ];
};
}

View file

@ -0,0 +1,24 @@
import argparse
import os
import sys
from typing import Any, Dict
from .md import Converter
from . import manual
from . import options
def main() -> None:
parser = argparse.ArgumentParser(description='render nixos manual bits')
commands = parser.add_subparsers(dest='command', required=True)
options.build_cli(commands.add_parser('options'))
manual.build_cli(commands.add_parser('manual'))
args = parser.parse_args()
if args.command == 'options':
options.run_cli(args)
elif args.command == 'manual':
manual.run_cli(args)
else:
raise RuntimeError('command not hooked up', args)

View file

@ -0,0 +1,254 @@
from collections.abc import Mapping, MutableMapping, Sequence
from frozendict import frozendict # type: ignore[attr-defined]
from typing import Any, cast, Optional, NamedTuple
import markdown_it
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
from xml.sax.saxutils import escape, quoteattr
from .md import Renderer
_xml_id_translate_table = {
ord('*'): ord('_'),
ord('<'): ord('_'),
ord(' '): ord('_'),
ord('>'): ord('_'),
ord('['): ord('_'),
ord(']'): ord('_'),
ord(':'): ord('_'),
ord('"'): ord('_'),
}
def make_xml_id(s: str) -> str:
return s.translate(_xml_id_translate_table)
class Deflist:
has_dd = False
class Heading(NamedTuple):
container_tag: str
level: int
class DocBookRenderer(Renderer):
__output__ = "docbook"
_link_tags: list[str]
_deflists: list[Deflist]
_headings: list[Heading]
def __init__(self, manpage_urls: Mapping[str, str], parser: Optional[markdown_it.MarkdownIt] = None):
super().__init__(manpage_urls, parser)
self._link_tags = []
self._deflists = []
self._headings = []
def render(self, tokens: Sequence[Token], options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
result = super().render(tokens, options, env)
result += self._close_headings(None, env)
return result
def renderInline(self, tokens: Sequence[Token], options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
# HACK to support docbook links and xrefs. link handling is only necessary because the docbook
# manpage stylesheet converts - in urls to a mathematical minus, which may be somewhat incorrect.
for i, token in enumerate(tokens):
if token.type != 'link_open':
continue
token.tag = 'link'
# turn [](#foo) into xrefs
if token.attrs['href'][0:1] == '#' and tokens[i + 1].type == 'link_close': # type: ignore[index]
token.tag = "xref"
# turn <x> into links without contents
if tokens[i + 1].type == 'text' and tokens[i + 1].content == token.attrs['href']:
tokens[i + 1].content = ''
return super().renderInline(tokens, options, env)
def text(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return escape(token.content)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para>"
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</para>"
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<literallayout>\n</literallayout>"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
# should check options.breaks() and emit hard break if so
return "\n"
def code_inline(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f"<literal>{escape(token.content)}</literal>"
def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f"<programlisting>{escape(token.content)}</programlisting>"
def link_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
self._link_tags.append(token.tag)
href = cast(str, token.attrs['href'])
(attr, start) = ('linkend', 1) if href[0] == '#' else ('xlink:href', 0)
return f"<{token.tag} {attr}={quoteattr(href[start:])}>"
def link_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f"</{self._link_tags.pop()}>"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<listitem>"
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</listitem>\n"
# HACK open and close para for docbook change size. remove soon.
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
spacing = ' spacing="compact"' if token.attrs.get('compact', False) else ''
return f"<para><itemizedlist{spacing}>\n"
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "\n</itemizedlist></para>"
def em_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<emphasis>"
def em_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</emphasis>"
def strong_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<emphasis role=\"strong\">"
def strong_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</emphasis>"
def fence(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
info = f" language={quoteattr(token.info)}" if token.info != "" else ""
return f"<programlisting{info}>{escape(token.content)}</programlisting>"
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><blockquote>"
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</blockquote></para>"
def note_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><note>"
def note_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</note></para>"
def caution_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><caution>"
def caution_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</caution></para>"
def important_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><important>"
def important_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</important></para>"
def tip_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><tip>"
def tip_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</tip></para>"
def warning_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "<para><warning>"
def warning_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</warning></para>"
# markdown-it emits tokens based on the html syntax tree, but docbook is
# slightly different. html has <dl>{<dt/>{<dd/>}}</dl>,
# docbook has <variablelist>{<varlistentry><term/><listitem/></varlistentry>}<variablelist>
# we have to reject multiple definitions for the same term for time being.
def dl_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
self._deflists.append(Deflist())
return "<para><variablelist>"
def dl_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
self._deflists.pop()
return "</variablelist></para>"
def dt_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
self._deflists[-1].has_dd = False
return "<varlistentry><term>"
def dt_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</term>"
def dd_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
if self._deflists[-1].has_dd:
raise Exception("multiple definitions per term not supported")
self._deflists[-1].has_dd = True
return "<listitem>"
def dd_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "</listitem></varlistentry>"
def myst_role(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
if token.meta['name'] == 'command':
return f"<command>{escape(token.content)}</command>"
if token.meta['name'] == 'file':
return f"<filename>{escape(token.content)}</filename>"
if token.meta['name'] == 'var':
return f"<varname>{escape(token.content)}</varname>"
if token.meta['name'] == 'env':
return f"<envar>{escape(token.content)}</envar>"
if token.meta['name'] == 'option':
return f"<option>{escape(token.content)}</option>"
if token.meta['name'] == 'manpage':
[page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ]
section = section[:-1]
man = f"{page}({section})"
title = f"<refentrytitle>{escape(page)}</refentrytitle>"
vol = f"<manvolnum>{escape(section)}</manvolnum>"
ref = f"<citerefentry>{title}{vol}</citerefentry>"
if man in self._manpage_urls:
return f"<link xlink:href={quoteattr(self._manpage_urls[man])}>{ref}</link>"
else:
return ref
raise NotImplementedError("md node not supported yet", token)
def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f'<anchor xml:id={quoteattr(cast(str, token.attrs["id"]))} />'
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
start = f' startingnumber="{token.attrs["start"]}"' if 'start' in token.attrs else ""
spacing = ' spacing="compact"' if token.attrs.get('compact', False) else ''
return f"<orderedlist{start}{spacing}>"
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f"</orderedlist>"
def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
hlevel = int(token.tag[1:])
result = self._close_headings(hlevel, env)
(tag, attrs) = self._heading_tag(token, tokens, i, options, env)
self._headings.append(Heading(tag, hlevel))
attrs_str = "".join([ f" {k}={quoteattr(v)}" for k, v in attrs.items() ])
return result + f'<{tag}{attrs_str}>\n<title>'
def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return '</title>'
def _close_headings(self, level: Optional[int], env: MutableMapping[str, Any]) -> str:
# we rely on markdown-it producing h{1..6} tags in token.tag for this to work
result = []
while len(self._headings):
if level is None or self._headings[-1].level >= level:
result.append(f"</{self._headings[-1].container_tag}>")
self._headings.pop()
else:
break
return "\n".join(result)
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> tuple[str, dict[str, str]]:
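        # default heading container is a docbook <section>; subclasses may
        # override this, e.g. ManualDocBookRenderer emits a <chapter> for the
        # first heading of a chapter file.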
attrs = {}
if id := token.attrs.get('id'):
attrs['xml:id'] = cast(str, id)
return ("section", attrs)

View file

@ -0,0 +1,143 @@
import argparse
import json
from abc import abstractmethod
from collections.abc import MutableMapping, Sequence
from typing import Any, cast, NamedTuple, Optional, Union
from xml.sax.saxutils import escape, quoteattr
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
from .docbook import DocBookRenderer
from .md import Converter
class RenderedSection:
id: Optional[str]
chapters: list[str]
def __init__(self, id: Optional[str]) -> None:
self.id = id
self.chapters = []
class BaseConverter(Converter):
_sections: list[RenderedSection]
def __init__(self, manpage_urls: dict[str, str]):
super().__init__(manpage_urls)
self._sections = []
def add_section(self, id: Optional[str], chapters: list[str]) -> None:
self._sections.append(RenderedSection(id))
for content in chapters:
self._md.renderer._title_seen = False # type: ignore[attr-defined]
self._sections[-1].chapters.append(self._render(content))
@abstractmethod
def finalize(self) -> str: raise NotImplementedError()
class ManualDocBookRenderer(DocBookRenderer):
# needed to check correctness of chapters.
# we may want to use front matter instead of this kind of heuristic.
_title_seen = False
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> tuple[str, dict[str, str]]:
(tag, attrs) = super()._heading_tag(token, tokens, i, options, env)
if self._title_seen:
if token.tag == 'h1':
raise RuntimeError("only one title heading allowed", token)
return (tag, attrs)
self._title_seen = True
return ("chapter", attrs | {
'xmlns': "http://docbook.org/ns/docbook",
'xmlns:xlink': "http://www.w3.org/1999/xlink",
})
# TODO minimize docbook diffs with existing conversions. remove soon.
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return super().paragraph_open(token, tokens, i, options, env) + "\n "
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return "\n" + super().paragraph_close(token, tokens, i, options, env)
def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return f"<programlisting>\n{escape(token.content)}</programlisting>"
def fence(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
info = f" language={quoteattr(token.info)}" if token.info != "" else ""
return f"<programlisting{info}>\n{escape(token.content)}</programlisting>"
class DocBookConverter(BaseConverter):
__renderer__ = ManualDocBookRenderer
def finalize(self) -> str:
result = []
for section in self._sections:
id = "id=" + quoteattr(section.id) if section.id is not None else ""
result.append(f'<section {id}>')
result += section.chapters
result.append(f'</section>')
return "\n".join(result)
class Section:
id: Optional[str] = None
chapters: list[str]
def __init__(self) -> None:
self.chapters = []
class SectionAction(argparse.Action):
def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace,
values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None:
sections = getattr(ns, self.dest)
if sections is None: sections = []
sections.append(Section())
setattr(ns, self.dest, sections)
class SectionIDAction(argparse.Action):
def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace,
values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None:
sections = getattr(ns, self.dest)
if sections is None: raise argparse.ArgumentError(self, "no active section")
sections[-1].id = cast(str, values)
class ChaptersAction(argparse.Action):
def __call__(self, parser: argparse.ArgumentParser, ns: argparse.Namespace,
values: Union[str, Sequence[Any], None], opt_str: Optional[str] = None) -> None:
sections = getattr(ns, self.dest)
if sections is None: raise argparse.ArgumentError(self, "no active section")
sections[-1].chapters.extend(cast(Sequence[str], values))
def _build_cli_db(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument("outfile")
p.add_argument("--section", dest="contents", action=SectionAction, nargs=0)
p.add_argument("--section-id", dest="contents", action=SectionIDAction)
p.add_argument("--chapters", dest="contents", action=ChaptersAction, nargs='+')
def _run_cli_db(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = DocBookConverter(json.load(manpage_urls))
for section in args.contents:
chapters = []
for p in section.chapters:
with open(p, 'r') as f:
chapters.append(f.read())
md.add_section(section.id, chapters)
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def build_cli(p: argparse.ArgumentParser) -> None:
formats = p.add_subparsers(dest='format', required=True)
_build_cli_db(formats.add_parser('docbook'))
def run_cli(args: argparse.Namespace) -> None:
if args.format == 'docbook':
_run_cli_db(args)
else:
raise RuntimeError('format not hooked up', args)
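For orientation, a brief sketch of the argument grammar these actions implement (all file names and ids below are invented): each `--section` starts a new group, `--section-id` names it, and `--chapters` appends chapter files to it, so `finalize()` can wrap each group's rendered chapters in one `<section>` element.

```python
# Hypothetical arguments for the docbook subcommand defined above; only the
# shape of the flags matters, the concrete names are made up.
argv = [
    "docbook",
    "--manpage-urls", "manpage-urls.json",
    "out.xml",                                     # outfile
    "--section",                                   # start a new Section()
    "--section-id", "my-section",                  # give it an id
    "--chapters", "chapter-a.md", "chapter-b.md",  # chapter sources, in order
]
# After parsing, args.contents holds one Section with id "my-section" and the
# two chapter paths; DocBookConverter.finalize() then emits
#   <section id="my-section"> ...rendered chapters... </section>
```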

@@ -0,0 +1,385 @@
from abc import ABC
from collections.abc import Mapping, MutableMapping, Sequence
from frozendict import frozendict # type: ignore[attr-defined]
from typing import Any, Callable, Optional
import re
from .types import RenderFn
import markdown_it
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
from mdit_py_plugins.container import container_plugin # type: ignore[attr-defined]
from mdit_py_plugins.deflist import deflist_plugin # type: ignore[attr-defined]
from mdit_py_plugins.myst_role import myst_role_plugin # type: ignore[attr-defined]
_md_escape_table = {
ord('*'): '\\*',
ord('<'): '\\<',
ord('['): '\\[',
ord('`'): '\\`',
ord('.'): '\\.',
ord('#'): '\\#',
ord('&'): '\\&',
ord('\\'): '\\\\',
}
def md_escape(s: str) -> str:
return s.translate(_md_escape_table)
class Renderer(markdown_it.renderer.RendererProtocol):
_admonitions: dict[str, tuple[RenderFn, RenderFn]]
_admonition_stack: list[str]
def __init__(self, manpage_urls: Mapping[str, str], parser: Optional[markdown_it.MarkdownIt] = None):
self._manpage_urls = manpage_urls
self.rules = {
'text': self.text,
'paragraph_open': self.paragraph_open,
'paragraph_close': self.paragraph_close,
'hardbreak': self.hardbreak,
'softbreak': self.softbreak,
'code_inline': self.code_inline,
'code_block': self.code_block,
'link_open': self.link_open,
'link_close': self.link_close,
'list_item_open': self.list_item_open,
'list_item_close': self.list_item_close,
'bullet_list_open': self.bullet_list_open,
'bullet_list_close': self.bullet_list_close,
'em_open': self.em_open,
'em_close': self.em_close,
'strong_open': self.strong_open,
'strong_close': self.strong_close,
'fence': self.fence,
'blockquote_open': self.blockquote_open,
'blockquote_close': self.blockquote_close,
'dl_open': self.dl_open,
'dl_close': self.dl_close,
'dt_open': self.dt_open,
'dt_close': self.dt_close,
'dd_open': self.dd_open,
'dd_close': self.dd_close,
'myst_role': self.myst_role,
"container_admonition_open": self.admonition_open,
"container_admonition_close": self.admonition_close,
"inline_anchor": self.inline_anchor,
"heading_open": self.heading_open,
"heading_close": self.heading_close,
"ordered_list_open": self.ordered_list_open,
"ordered_list_close": self.ordered_list_close,
}
self._admonitions = {
"{.note}": (self.note_open, self.note_close),
"{.caution}": (self.caution_open,self.caution_close),
"{.tip}": (self.tip_open, self.tip_close),
"{.important}": (self.important_open, self.important_close),
"{.warning}": (self.warning_open, self.warning_close),
}
self._admonition_stack = []
def admonition_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
tag = token.info.strip()
self._admonition_stack.append(tag)
return self._admonitions[tag][0](token, tokens, i, options, env)
def admonition_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
return self._admonitions[self._admonition_stack.pop()][1](token, tokens, i, options, env)
def render(self, tokens: Sequence[Token], options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
def do_one(i: int, token: Token) -> str:
if token.type == "inline":
assert token.children is not None
return self.renderInline(token.children, options, env)
elif token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i, options, env)
else:
raise NotImplementedError("md token not supported yet", token)
return "".join(map(lambda arg: do_one(*arg), enumerate(tokens)))
def renderInline(self, tokens: Sequence[Token], options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
def do_one(i: int, token: Token) -> str:
if token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i, options, env)
else:
raise NotImplementedError("md token not supported yet", token)
return "".join(map(lambda arg: do_one(*arg), enumerate(tokens)))
def text(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def softbreak(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def code_inline(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def code_block(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def link_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def link_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def em_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def em_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def strong_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def strong_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def fence(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def note_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def note_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def caution_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def caution_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def important_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def important_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def tip_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def tip_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def warning_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def warning_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dl_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dl_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dt_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dt_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dd_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def dd_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def myst_role(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported", token)
def _is_escaped(src: str, pos: int) -> bool:
found = 0
while pos >= 0 and src[pos] == '\\':
found += 1
pos -= 1
return found % 2 == 1
_INLINE_ANCHOR_PATTERN = re.compile(r"\{\s*#([\w-]+)\s*\}")
def _inline_anchor_plugin(md: markdown_it.MarkdownIt) -> None:
def inline_anchor(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool:
if state.src[state.pos] != '[':
return False
if _is_escaped(state.src, state.pos - 1):
return False
# treat the inline span like a link label for simplicity.
label_begin = state.pos + 1
label_end = markdown_it.helpers.parseLinkLabel(state, state.pos)
input_end = state.posMax
if label_end < 0:
return False
# match id
match = _INLINE_ANCHOR_PATTERN.match(state.src[label_end + 1 : ])
if not match:
return False
if not silent:
token = state.push("inline_anchor", "", 0) # type: ignore[no-untyped-call]
token.attrs['id'] = match[1]
state.pos = label_begin
state.posMax = label_end
state.md.inline.tokenize(state)
state.pos = label_end + match.end() + 1
state.posMax = input_end
return True
md.inline.ruler.before("link", "inline_anchor", inline_anchor)
def _inline_comment_plugin(md: markdown_it.MarkdownIt) -> None:
def inline_comment(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool:
if state.src[state.pos : state.pos + 4] != '<!--':
return False
if _is_escaped(state.src, state.pos - 1):
return False
for i in range(state.pos + 4, state.posMax - 2):
if state.src[i : i + 3] == '-->': # -->
state.pos = i + 3
return True
return False
md.inline.ruler.after("autolink", "inline_comment", inline_comment)
def _block_comment_plugin(md: markdown_it.MarkdownIt) -> None:
def block_comment(state: markdown_it.rules_block.StateBlock, startLine: int, endLine: int,
silent: bool) -> bool:
pos = state.bMarks[startLine] + state.tShift[startLine]
posMax = state.eMarks[startLine]
if state.src[pos : pos + 4] != '<!--':
return False
nextLine = startLine
while nextLine < endLine:
pos = state.bMarks[nextLine] + state.tShift[nextLine]
posMax = state.eMarks[nextLine]
if state.src[posMax - 3 : posMax] == '-->':
state.line = nextLine + 1
return True
nextLine += 1
return False
md.block.ruler.after("code", "block_comment", block_comment)
_HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w-]+)\s*\}\s*$")
class Converter(ABC):
__renderer__: Callable[[Mapping[str, str], markdown_it.MarkdownIt], Renderer]
def __init__(self, manpage_urls: Mapping[str, str]):
self._manpage_urls = frozendict(manpage_urls)
self._md = markdown_it.MarkdownIt(
"commonmark",
{
'maxNesting': 100, # default is 20
'html': False, # not useful since we target many formats
'typographer': True, # required for smartquotes
},
renderer_cls=lambda parser: self.__renderer__(self._manpage_urls, parser)
)
self._md.use(
container_plugin,
name="admonition",
validate=lambda name, *args: (
name.strip() in self._md.renderer._admonitions # type: ignore[attr-defined]
)
)
self._md.use(deflist_plugin)
self._md.use(myst_role_plugin)
self._md.use(_inline_anchor_plugin)
self._md.use(_inline_comment_plugin)
self._md.use(_block_comment_plugin)
self._md.enable(["smartquotes", "replacements"])
def _post_parse(self, tokens: list[Token]) -> list[Token]:
for i in range(0, len(tokens)):
# parse header IDs. this is purposely simple and doesn't support
# classes or other kinds of attributes.
if tokens[i].type == 'heading_open':
children = tokens[i + 1].children
assert children is not None
if len(children) == 0 or children[-1].type != 'text':
continue
if m := _HEADER_ID_RE.search(children[-1].content):
tokens[i].attrs['id'] = m[1]
children[-1].content = children[-1].content[:-len(m[0])].rstrip()
# markdown-it signifies compact (tight) lists by setting the wrapper paragraphs
# of each item to hidden. this is not useful for our stylesheets, which
# signify this with a special css class on list elements instead.
wide_stack = []
for i in range(0, len(tokens)):
if tokens[i].type in [ 'bullet_list_open', 'ordered_list_open' ]:
wide_stack.append([i, True])
elif tokens[i].type in [ 'bullet_list_close', 'ordered_list_close' ]:
(idx, compact) = wide_stack.pop()
tokens[idx].attrs['compact'] = compact
elif len(wide_stack) > 0 and tokens[i].type == 'paragraph_open' and not tokens[i].hidden:
wide_stack[-1][1] = False
return tokens
def _parse(self, src: str, env: Optional[MutableMapping[str, Any]] = None) -> list[Token]:
tokens = self._md.parse(src, env if env is not None else {})
return self._post_parse(tokens)
def _render(self, src: str) -> str:
env: dict[str, Any] = {}
tokens = self._parse(src, env)
return self._md.renderer.render(tokens, self._md.options, env) # type: ignore[no-any-return]
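A minimal sketch of the header-id handling in `_post_parse` (the regex is copied from the module above; the heading text is invented): the trailing `{#id}` marker is stripped from the heading text and becomes the heading token's `id` attribute.

```python
import re

# Copied from the module above; the example heading is made up.
_HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w-]+)\s*\}\s*$")

heading = "An example heading {#sec-example}"
if m := _HEADER_ID_RE.search(heading):
    anchor = m[1]                              # "sec-example"
    title = heading[:-len(m[0])].rstrip()      # "An example heading"
    print(anchor, "|", title)
```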

@@ -0,0 +1,284 @@
import argparse
import json
from abc import abstractmethod
from collections.abc import MutableMapping, Sequence
from markdown_it.utils import OptionsDict
from markdown_it.token import Token
from typing import Any, Optional
from xml.sax.saxutils import escape, quoteattr
from .docbook import DocBookRenderer, make_xml_id
from .md import Converter, md_escape
from .types import OptionLoc, Option, RenderedOption
def option_is(option: Option, key: str, typ: str) -> Optional[dict[str, str]]:
if key not in option:
return None
if type(option[key]) != dict:
return None
if option[key].get('_type') != typ: # type: ignore[union-attr]
return None
return option[key] # type: ignore[return-value]
class BaseConverter(Converter):
_options: dict[str, RenderedOption]
def __init__(self, manpage_urls: dict[str, str],
revision: str,
document_type: str,
varlist_id: str,
id_prefix: str,
markdown_by_default: bool):
super().__init__(manpage_urls)
self._options = {}
self._revision = revision
self._document_type = document_type
self._varlist_id = varlist_id
self._id_prefix = id_prefix
self._markdown_by_default = markdown_by_default
def _format_decl_def_loc(self, loc: OptionLoc) -> tuple[Optional[str], str]:
# locations can be either plain strings (specific to nixpkgs), or attrsets
# { name = "foo/bar.nix"; url = "https://github.com/....."; }
if isinstance(loc, str):
# Hyperlink the filename either to the NixOS GitHub
# repository (if it's a module and we have a revision number),
# or to the local filesystem.
if not loc.startswith('/'):
if self._revision == 'local':
href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}"
else:
href = f"https://github.com/NixOS/nixpkgs/blob/{self._revision}/{loc}"
else:
href = f"file://{loc}"
# Print the filename and make it user-friendly by replacing the
# /nix/store/<hash> prefix by the default location of nixos
# sources.
if not loc.startswith('/'):
name = f"<nixpkgs/{loc}>"
elif 'nixops' in loc and '/nix/' in loc:
name = f"<nixops/{loc[loc.find('/nix/') + 5:]}>"
else:
name = loc
return (href, name)
else:
return (loc['url'] if 'url' in loc else None, loc['name'])
@abstractmethod
def _decl_def_header(self, header: str) -> list[str]: raise NotImplementedError()
@abstractmethod
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: raise NotImplementedError()
@abstractmethod
def _decl_def_footer(self) -> list[str]: raise NotImplementedError()
def _render_decl_def(self, header: str, locs: list[OptionLoc]) -> list[str]:
result = []
result += self._decl_def_header(header)
for loc in locs:
href, name = self._format_decl_def_loc(loc)
result += self._decl_def_entry(href, name)
result += self._decl_def_footer()
return result
def _render_code(self, option: Option, key: str) -> list[str]:
if lit := option_is(option, key, 'literalMD'):
return [ self._render(f"*{key.capitalize()}:*\n{lit['text']}") ]
elif lit := option_is(option, key, 'literalExpression'):
code = lit['text']
# for multi-line code blocks we only have to count ` runs at the beginning
# of a line, but this is much easier.
multiline = '\n' in code
longest, current = (0, 0)
for c in code:
current = current + 1 if c == '`' else 0
longest = max(current, longest)
# inline literals need a space to separate ticks from content, code blocks
# need newlines. inline literals need one extra tick, code blocks need three.
ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ')
code = f"{ticks}{sep}{code}{sep}{ticks}"
return [ self._render(f"*{key.capitalize()}:*\n{code}") ]
elif key in option:
raise Exception(f"{key} has unrecognized type", option[key])
else:
return []
def _render_description(self, desc: str | dict[str, str]) -> list[str]:
if isinstance(desc, str) and self._markdown_by_default:
return [ self._render(desc) ]
elif isinstance(desc, dict) and desc.get('_type') == 'mdDoc':
return [ self._render(desc['text']) ]
else:
raise Exception("description has unrecognized type", desc)
@abstractmethod
def _related_packages_header(self) -> list[str]: raise NotImplementedError()
def _convert_one(self, option: dict[str, Any]) -> list[str]:
result = []
if desc := option.get('description'):
result += self._render_description(desc)
if typ := option.get('type'):
ro = " *(read only)*" if option.get('readOnly', False) else ""
result.append(self._render(f"*Type:* {md_escape(typ)}{ro}"))
result += self._render_code(option, 'default')
result += self._render_code(option, 'example')
if related := option.get('relatedPackages'):
result += self._related_packages_header()
result.append(self._render(related))
if decl := option.get('declarations'):
result += self._render_decl_def("Declared by", decl)
if defs := option.get('definitions'):
result += self._render_decl_def("Defined by", defs)
return result
def add_options(self, options: dict[str, Any]) -> None:
for (name, option) in options.items():
try:
self._options[name] = RenderedOption(option['loc'], self._convert_one(option))
except Exception as e:
raise Exception(f"Failed to render option {name}") from e
@abstractmethod
def finalize(self) -> str: raise NotImplementedError()
class OptionsDocBookRenderer(DocBookRenderer):
def heading_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported in options doc", token)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise RuntimeError("md token not supported in options doc", token)
# TODO keep optionsDocBook diff small. remove soon if rendering is still good.
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
token.attrs['compact'] = False
return super().ordered_list_open(token, tokens, i, options, env)
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int, options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
token.attrs['compact'] = False
return super().bullet_list_open(token, tokens, i, options, env)
class DocBookConverter(BaseConverter):
__renderer__ = OptionsDocBookRenderer
def _render_code(self, option: dict[str, Any], key: str) -> list[str]:
if lit := option_is(option, key, 'literalDocBook'):
return [ f"<para><emphasis>{key.capitalize()}:</emphasis> {lit['text']}</para>" ]
else:
return super()._render_code(option, key)
def _render_description(self, desc: str | dict[str, Any]) -> list[str]:
if isinstance(desc, str) and not self._markdown_by_default:
return [ f"<nixos:option-description><para>{desc}</para></nixos:option-description>" ]
else:
return super()._render_description(desc)
def _related_packages_header(self) -> list[str]:
return [
"<para>",
" <emphasis>Related packages:</emphasis>",
"</para>",
]
def _decl_def_header(self, header: str) -> list[str]:
return [
f"<para><emphasis>{header}:</emphasis></para>",
"<simplelist>"
]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
if href is not None:
href = " xlink:href=" + quoteattr(href)
return [
f"<member><filename{href}>",
escape(name),
"</filename></member>"
]
def _decl_def_footer(self) -> list[str]:
return [ "</simplelist>" ]
def finalize(self) -> str:
keys = list(self._options.keys())
keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") else 2, p)
for p in self._options[opt].loc ])
result = []
result.append('<?xml version="1.0" encoding="UTF-8"?>')
if self._document_type == 'appendix':
result += [
'<appendix xmlns="http://docbook.org/ns/docbook"',
' xml:id="appendix-configuration-options">',
' <title>Configuration Options</title>',
]
result += [
f'<variablelist xmlns:xlink="http://www.w3.org/1999/xlink"',
' xmlns:nixos="tag:nixos.org"',
' xmlns="http://docbook.org/ns/docbook"',
f' xml:id="{self._varlist_id}">',
]
for name in keys:
id = make_xml_id(self._id_prefix + name)
result += [
"<varlistentry>",
# NOTE adding extra spaces here introduces spaces into xref link expansions
(f"<term xlink:href={quoteattr('#' + id)} xml:id={quoteattr(id)}>" +
f"<option>{escape(name)}</option></term>"),
"<listitem>"
]
result += self._options[name].lines
result += [
"</listitem>",
"</varlistentry>"
]
result.append("</variablelist>")
if self._document_type == 'appendix':
result.append("</appendix>")
return "\n".join(result)
def _build_cli_db(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument('--document-type', required=True)
p.add_argument('--varlist-id', required=True)
p.add_argument('--id-prefix', required=True)
p.add_argument('--markdown-by-default', default=False, action='store_true')
p.add_argument("infile")
p.add_argument("outfile")
def _run_cli_db(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = DocBookConverter(
json.load(manpage_urls),
revision = args.revision,
document_type = args.document_type,
varlist_id = args.varlist_id,
id_prefix = args.id_prefix,
markdown_by_default = args.markdown_by_default)
with open(args.infile, 'r') as f:
md.add_options(json.load(f))
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def build_cli(p: argparse.ArgumentParser) -> None:
formats = p.add_subparsers(dest='format', required=True)
_build_cli_db(formats.add_parser('docbook'))
def run_cli(args: argparse.Namespace) -> None:
if args.format == 'docbook':
_run_cli_db(args)
else:
raise RuntimeError('format not hooked up', args)
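For orientation, a minimal sketch (hypothetical option, written by hand rather than produced by the NixOS options exporter) of the structure `add_options` expects, shown as the equivalent Python dict:

```python
# Hypothetical input for DocBookConverter.add_options(); the option name and
# paths are invented, but the field shapes match what _convert_one() reads.
example_options = {
    "services.foo.enable": {
        "loc": ["services", "foo", "enable"],
        "type": "boolean",
        "readOnly": False,
        "description": {"_type": "mdDoc", "text": "Whether to enable *foo*."},
        "default": {"_type": "literalExpression", "text": "false"},
        "example": {"_type": "literalExpression", "text": "true"},
        "declarations": ["nixos/modules/services/misc/foo.nix"],
    },
}
# Each entry becomes one <varlistentry> inside the <variablelist> emitted by finalize().
```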

@@ -0,0 +1,13 @@
from collections.abc import Sequence, MutableMapping
from typing import Any, Callable, Optional, Tuple, NamedTuple
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
OptionLoc = str | dict[str, str]
Option = dict[str, str | dict[str, str] | list[OptionLoc]]
RenderedOption = NamedTuple('RenderedOption', [('loc', list[str]),
('lines', list[str])])
RenderFn = Callable[[Token, Sequence[Token], int, OptionsDict, MutableMapping[str, Any]], str]
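A small sketch (values invented) of the two `OptionLoc` shapes this alias admits, matching the handling in `_format_decl_def_loc` above: a plain nixpkgs-relative path, or an attrset-style entry with a display name and optional URL.

```python
# Both shapes may appear in an option's declarations/definitions lists;
# the concrete values here are made up.
loc_as_path: str = "nixos/modules/services/misc/foo.nix"
loc_as_attrs: dict[str, str] = {
    "name": "<foo/module.nix>",
    "url": "https://example.org/foo/module.nix",
}
```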

@@ -0,0 +1,15 @@
[project]
name = "nixos-render-docs"
version = "0.0"
description = "Renderer for NixOS manual and option docs"
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
[project.scripts]
nixos-render-docs = "nixos_render_docs:main"
[build-system]
requires = ["setuptools"]
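The `nixos_render_docs:main` entry point itself is not part of this excerpt; below is a hedged sketch of how it could dispatch to the `build_cli`/`run_cli` pairs defined in the modules above (the subcommand names are assumptions):

```python
# Hedged sketch of a possible nixos_render_docs/__init__.py; the real file is
# not shown in this diff, and the "manual"/"options" subcommand names are assumed.
import argparse

from . import manual, options  # the modules shown earlier in this diff

def main() -> None:
    parser = argparse.ArgumentParser(description="renderer for NixOS manual and option docs")
    commands = parser.add_subparsers(dest="command", required=True)
    manual.build_cli(commands.add_parser("manual"))
    options.build_cli(commands.add_parser("options"))
    args = parser.parse_args()
    if args.command == "manual":
        manual.run_cli(args)
    else:
        options.run_cli(args)
```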

Some files were not shown because too many files have changed in this diff.