Merge pull request 'style: check formatting using nixpkgs standard and fail early in CI to enforce it' (#183) from ci/check-formatting into main

Reviewed-on: #183
Reviewed-by: b12f <b12f@noreply.git.pub.solar>
This commit is contained in: main
teutat3s 2024-05-08 21:00:50 +00:00
commit 599e69fcaf
Signed by: pub.solar gitea
GPG key ID: F0332B04B7054873
76 changed files with 974 additions and 1319 deletions

@@ -46,6 +46,10 @@ jobs:
           authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
           useDaemon: false
+      - name: Check formatting
+        run: |
+          nix --accept-flake-config --access-tokens '' develop --command treefmt --fail-on-change
       - name: Run flake checks
         run: |
           # Prevent cache garbage collection by creating GC roots
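The new step can be reproduced locally before pushing. A minimal sketch, assuming the flake's development shell (see `flake.nix` below) puts `treefmt` on PATH:

```
# Format the working tree with the repo's treefmt setup.
nix develop --command treefmt

# CI-equivalent check: exit non-zero if any file would be reformatted.
nix --accept-flake-config --access-tokens '' develop --command treefmt --fail-on-change
```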

@@ -1,9 +1,12 @@
# Process for handling a deletion request
### Keycloak
Required:
- auth.pub.solar ops user credentials
- SSH access to host nachtigall
```
ssh barkeeper@nachtigall.pub.solar
@@ -20,8 +23,8 @@ sudo --user keycloak kcadm.sh update --config /tmp/kcadm.config users/2ec6f173-3
Docs: https://www.keycloak.org/docs/latest/server_admin/index.html#updating-a-user
### Nextcloud
```
ssh barkeeper@nachtigall.pub.solar
nextcloud-occ user:delete <username>
@@ -29,8 +32,8 @@ nextcloud-occ user:delete <username>
Docs: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/occ_command.html#user-commands-label
### Mastodon
```
ssh barkeeper@nachtigall.pub.solar
sudo -u mastodon mastodon-tootctl accounts delete --email <mail-address>
@@ -38,8 +41,8 @@ sudo -u mastodon mastodon-tootctl accounts delete --email <mail-address>
Docs: https://docs.joinmastodon.org/admin/tootctl/#accounts-delete
### Forgejo
```
ssh barkeeper@nachtigall.pub.solar
sudo -u gitea gitea admin user delete --config /var/lib/forgejo/custom/conf/app.ini --purge --email <mail-address>
@@ -47,8 +50,8 @@ sudo -u gitea gitea admin user delete --config /var/lib/forgejo/custom/conf/app.
Docs: https://forgejo.org/docs/latest/admin/command-line/#delete
### Matrix
```
ssh bartender@matrix.pub.solar -p 2020
curl --header "Authorization: Bearer <admin-access-token>" --request POST http://172.18.0.3:8008/_synapse/admin/v1/deactivate/@<username>:pub.solar --data '{"erase": true}'
@@ -56,6 +59,6 @@ curl --header "Authorization: Bearer <admin-access-token>" --request POST http:/
Docs: https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#deactivate-account
### OpenBikeSensor
Not implemented, see: https://github.com/openbikesensor/portal/issues/95

@@ -8,11 +8,13 @@ To deploy, make sure you have a [working development shell](./development-shell.
Then, run `deploy-rs` with the hostname of the server you want to deploy:
For nachtigall.pub.solar:
```
deploy --targets '.#nachtigall' --magic-rollback false --auto-rollback false
```
For flora-6.pub.solar:
```
deploy --targets '.#flora-6' --magic-rollback false --auto-rollback false
```
@@ -29,4 +31,5 @@ to enable switching to the new config quickly at a later moment.
You'll need to have SSH Access to the boxes to be able to run `deploy`.
### Getting SSH access
See [administrative-access.md](./administrative-access.md).
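A sanity check before running `deploy` can be sketched as follows; the attribute path is an assumption based on the flake layout implied by `deploy --targets '.#nachtigall'`, not part of the documented procedure:

```
# Optional pre-deploy check: build the nachtigall system closure locally
# (illustrative attribute path, assuming nixosConfigurations.nachtigall exists).
nix build .#nixosConfigurations.nachtigall.config.system.build.toplevel
```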

@@ -15,12 +15,15 @@ Please follow https://docs.greenbaum.cloud/en/devops/triton-cli.html for the det
You will need to setup the following [namecheap API credentials](https://www.namecheap.com/support/api/intro),
look for "namecheap API key" in the pub.solar Keepass database.
```
NAMECHEAP_API_KEY
NAMECHEAP_API_USER
NAMECHEAP_USER_NAME
```
You will probably also need to add your external IP to the [API allow list](https://ap.www.namecheap.com/settings/tools/apiaccess/whitelisted-ips).
```
dig -4 ip @dns.toys
```
@@ -35,16 +38,19 @@ terraform init
```
Make your changes, e.g. in `dns.tf`.
```
$EDITOR dns.tf
```
Plan your changes using:
```
terraform plan -out pub-solar-infra.plan
```
After verification, apply your changes with:
```
terraform apply "pub-solar-infra.plan"
```
@@ -52,7 +58,9 @@ terraform apply "pub-solar-infra.plan"
### Useful links
We use the Manta remote backend to save the terraform state for collaboration.
- https://www.terraform.io/language/v1.2.x/settings/backends/manta
Namecheap Terraform provider docs:
- https://registry.terraform.io/providers/namecheap/namecheap/latest/docs

@@ -1,9 +1,12 @@
# Process for getting a list of email addresses of all keycloak users
### Keycloak
Required:
- auth.pub.solar ops user credentials
- SSH access to host nachtigall
```
ssh barkeeper@nachtigall.pub.solar

@@ -1,9 +1,12 @@
# Process for resetting keycloak user passwords
### Keycloak
Required:
- auth.pub.solar ops user credentials
- SSH access to host nachtigall
```
ssh barkeeper@nachtigall.pub.solar

@@ -1,9 +1,12 @@
# Process for updating a keycloak realm via CLI
### Keycloak
Required:
- auth.pub.solar ops user credentials
- SSH access to host nachtigall
```
ssh barkeeper@nachtigall.pub.solar

@@ -24,6 +24,7 @@ deploy --targets '.#nachtigall'
```
Then, finalize the update by running the database migration script:
```
ssh barkeeper@nachtigall.pub.solar
docker exec -it mediawiki bash

@@ -1,10 +1,10 @@
# OpenBikeSensor Portal
## Docker Containers
-* portal
-* worker
-* db
+- portal
+- worker
+- db
## Run database migrations

@@ -1,3 +1 @@
# Reverting to an old version

@@ -6,4 +6,4 @@ After a boot, the encrypted root partition will have to be unlocked. This is don
ssh root@nachtigall.pub.solar -p2222
```
After connecting, paste the crypt passphrase you can find in the shared keepass. This will disconnect the SSH session right away and the server will keep booting into stage 2.

@ -40,9 +40,13 @@
element-stickers.inputs.nixpkgs.follows = "nixpkgs"; element-stickers.inputs.nixpkgs.follows = "nixpkgs";
}; };
outputs = inputs@{ self, ... }: outputs =
inputs@{ self, ... }:
inputs.flake-parts.lib.mkFlake { inherit inputs; } { inputs.flake-parts.lib.mkFlake { inherit inputs; } {
systems = [ "x86_64-linux" "aarch64-linux" ]; systems = [
"x86_64-linux"
"aarch64-linux"
];
imports = [ imports = [
inputs.nixos-flake.flakeModule inputs.nixos-flake.flakeModule
@ -52,37 +56,42 @@
./hosts ./hosts
]; ];
perSystem = { system, pkgs, config, ... }: { perSystem =
_module.args = { {
inherit inputs; system,
pkgs = import inputs.nixpkgs { pkgs,
inherit system; config,
overlays = [ ...
inputs.agenix.overlays.default }:
{
_module.args = {
inherit inputs;
pkgs = import inputs.nixpkgs {
inherit system;
overlays = [ inputs.agenix.overlays.default ];
};
unstable = import inputs.unstable { inherit system; };
master = import inputs.master { inherit system; };
};
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
deploy-rs
nixpkgs-fmt
agenix
age-plugin-yubikey
cachix
editorconfig-checker
nodePackages.prettier
nvfetcher
shellcheck
shfmt
treefmt
nixos-generators
inputs.nixpkgs-2205.legacyPackages.${system}.terraform
jq
]; ];
}; };
unstable = import inputs.unstable { inherit system; };
master = import inputs.master { inherit system; };
}; };
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
deploy-rs
nixpkgs-fmt
agenix
age-plugin-yubikey
cachix
editorconfig-checker
nodePackages.prettier
nvfetcher
shellcheck
shfmt
treefmt
nixos-generators
inputs.nixpkgs-2205.legacyPackages.${system}.terraform
jq
];
};
};
flake = flake =
let let
@ -92,19 +101,15 @@
inherit username; inherit username;
nixosModules = builtins.listToAttrs ( nixosModules = builtins.listToAttrs (
map map (x: {
(x: { name = x;
name = x; value = import (./modules + "/${x}");
value = import (./modules + "/${x}"); }) (builtins.attrNames (builtins.readDir ./modules))
})
(builtins.attrNames (builtins.readDir ./modules))
); );
checks = builtins.mapAttrs checks = builtins.mapAttrs (
( system: deployLib: deployLib.deployChecks self.deploy
system: deployLib: deployLib.deployChecks self.deploy ) inputs.deploy-rs.lib;
)
inputs.deploy-rs.lib;
formatter."x86_64-linux" = inputs.unstable.legacyPackages."x86_64-linux".nixfmt-rfc-style; formatter."x86_64-linux" = inputs.unstable.legacyPackages."x86_64-linux".nixfmt-rfc-style;
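The `formatter."x86_64-linux"` output added above also makes the standard `nix fmt` entry point available outside of treefmt. A hedged sketch; the exact argument handling of `nix fmt` for this flake is assumed:

```
# Format a single file through the flake's formatter output
# (assumes `nix fmt` forwards the path to nixfmt-rfc-style).
nix fmt flake.nix

# Equivalent direct invocation of the same formatter package from nixpkgs.
nix run nixpkgs#nixfmt-rfc-style -- flake.nix
```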

@ -1,8 +1,9 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
...
}: }:
let let
psCfg = config.pub-solar; psCfg = config.pub-solar;

@ -1,12 +1,11 @@
{ ... }: { ... }:
{ {
imports = imports = [
[ # Include the results of the hardware scan.
# Include the results of the hardware scan. ./hardware-configuration.nix
./hardware-configuration.nix ./configuration.nix
./configuration.nix ./triton-vmtools.nix
./triton-vmtools.nix ./wireguard.nix
./wireguard.nix ];
];
} }

@ -1,15 +1,23 @@
# Do not modify this file! It was generated by nixos-generate-config # Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config {
, lib config,
, pkgs lib,
, modulesPath pkgs,
, ... modulesPath,
}: { ...
}:
{
imports = [ ]; imports = [ ];
boot.initrd.availableKernelModules = [ "ahci" "virtio_pci" "xhci_pci" "sr_mod" "virtio_blk" ]; boot.initrd.availableKernelModules = [
"ahci"
"virtio_pci"
"xhci_pci"
"sr_mod"
"virtio_blk"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ]; boot.kernelModules = [ ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];

@ -1,7 +1,5 @@
{ pkgs { pkgs, flake, ... }:
, flake {
, ...
}: {
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
flake.inputs.triton-vmtools.packages.${pkgs.system}.default flake.inputs.triton-vmtools.packages.${pkgs.system}.default
]; ];

@ -2,7 +2,8 @@
config, config,
pkgs, pkgs,
flake, flake,
... }: ...
}:
{ {
networking.firewall.allowedUDPPorts = [ 51820 ]; networking.firewall.allowedUDPPorts = [ 51820 ];
@ -18,10 +19,14 @@
]; ];
privateKeyFile = config.age.secrets.wg-private-key.path; privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.admins.wireguardDevices ++ [ peers = flake.self.logins.admins.wireguardDevices ++ [
{ # nachtigall.pub.solar {
# nachtigall.pub.solar
endpoint = "138.201.80.102:51820"; endpoint = "138.201.80.102:51820";
publicKey = "qzNywKY9RvqTnDO8eLik75/SHveaSk9OObilDzv+xkk="; publicKey = "qzNywKY9RvqTnDO8eLik75/SHveaSk9OObilDzv+xkk=";
allowedIPs = [ "10.7.6.1/32" "fd00:fae:fae:fae:fae:1::/96" ]; allowedIPs = [
"10.7.6.1/32"
"fd00:fae:fae:fae:fae:1::/96"
];
} }
]; ];
}; };

@ -1,4 +1,5 @@
{ flake, ... }: { { flake, ... }:
{
age.secrets."restic-repo-droppie" = { age.secrets."restic-repo-droppie" = {
file = "${flake.self}/secrets/restic-repo-droppie.age"; file = "${flake.self}/secrets/restic-repo-droppie.age";
mode = "400"; mode = "400";

@ -1,8 +1,10 @@
{ flake {
, config flake,
, pkgs config,
, ... pkgs,
}: { ...
}:
{
# Use GRUB2 as the boot loader. # Use GRUB2 as the boot loader.
# We don't use systemd-boot because Hetzner uses BIOS legacy boot. # We don't use systemd-boot because Hetzner uses BIOS legacy boot.
boot.loader.systemd-boot.enable = false; boot.loader.systemd-boot.enable = false;
@ -11,15 +13,11 @@
efiSupport = false; efiSupport = false;
mirroredBoots = [ mirroredBoots = [
{ {
devices = [ devices = [ "/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371" ];
"/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371"
];
path = "/boot1"; path = "/boot1";
} }
{ {
devices = [ devices = [ "/dev/disk/by-id/nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL" ];
"/dev/disk/by-id/nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL"
];
path = "/boot2"; path = "/boot2";
} }
]; ];

@ -1,15 +1,13 @@
{ flake, ... }: { flake, ... }:
{ {
imports = imports = [
[ # Include the results of the hardware scan.
# Include the results of the hardware scan. ./hardware-configuration.nix
./hardware-configuration.nix ./configuration.nix
./configuration.nix
./networking.nix ./networking.nix
./wireguard.nix ./wireguard.nix
./backups.nix ./backups.nix
];
];
} }

@ -1,54 +1,54 @@
# Do not modify this file! It was generated by nixos-generate-config # Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];
[
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [ "ahci" "nvme" ]; boot.initrd.availableKernelModules = [
"ahci"
"nvme"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ]; boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "root_pool/root";
device = "root_pool/root"; fsType = "zfs";
fsType = "zfs"; };
};
fileSystems."/var/lib" = fileSystems."/var/lib" = {
{ device = "root_pool/data";
device = "root_pool/data"; fsType = "zfs";
fsType = "zfs"; };
};
fileSystems."/var/lib/postgresql" = fileSystems."/var/lib/postgresql" = {
{ device = "root_pool/data/postgresql";
device = "root_pool/data/postgresql"; fsType = "zfs";
fsType = "zfs"; };
};
fileSystems."/var/lib/docker" = fileSystems."/var/lib/docker" = {
{ device = "root_pool/data/docker";
device = "root_pool/data/docker"; fsType = "zfs";
fsType = "zfs"; };
};
fileSystems."/boot1" = fileSystems."/boot1" = {
{ device = "/dev/disk/by-uuid/5493-EFF5";
device = "/dev/disk/by-uuid/5493-EFF5"; fsType = "vfat";
fsType = "vfat"; };
};
fileSystems."/boot2" = fileSystems."/boot2" = {
{ device = "/dev/disk/by-uuid/5494-BA1E";
device = "/dev/disk/by-uuid/5494-BA1E"; fsType = "vfat";
fsType = "vfat"; };
};
swapDevices = [ ]; swapDevices = [ ];

@ -2,7 +2,8 @@
config, config,
pkgs, pkgs,
flake, flake,
... }: ...
}:
{ {
networking.hostName = "nachtigall"; networking.hostName = "nachtigall";
@ -24,5 +25,8 @@
} }
]; ];
networking.defaultGateway = "138.201.80.65"; networking.defaultGateway = "138.201.80.65";
networking.defaultGateway6 = { address = "fe80::1"; interface = "enp35s0"; }; networking.defaultGateway6 = {
address = "fe80::1";
interface = "enp35s0";
};
} }

@ -2,7 +2,8 @@
config, config,
pkgs, pkgs,
flake, flake,
... }: ...
}:
{ {
networking.firewall.allowedUDPPorts = [ 51820 ]; networking.firewall.allowedUDPPorts = [ 51820 ];
@ -18,10 +19,14 @@
]; ];
privateKeyFile = config.age.secrets.wg-private-key.path; privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.admins.wireguardDevices ++ [ peers = flake.self.logins.admins.wireguardDevices ++ [
{ # flora-6.pub.solar {
# flora-6.pub.solar
endpoint = "80.71.153.210:51820"; endpoint = "80.71.153.210:51820";
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU="; publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [ "10.7.6.2/32" "fd00:fae:fae:fae:fae:2::/96" ]; allowedIPs = [
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
} }
]; ];
}; };

@ -1,16 +1,16 @@
let let
lock = builtins.fromJSON (builtins.readFile builtins.path { lock = builtins.fromJSON (
path = ../../flake.lock; builtins.readFile builtins.path {
name = "lockPath"; path = ../../flake.lock;
}); name = "lockPath";
}
);
flake = flake =
import import
( (fetchTarball {
fetchTarball { url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; sha256 = lock.nodes.flake-compat.locked.narHash;
sha256 = lock.nodes.flake-compat.locked.narHash; })
}
)
{ {
src = builtins.path { src = builtins.path {
path = ../../.; path = ../../.;

@ -1,4 +1,10 @@
{ self, lib, inputs, ... }: { {
self,
lib,
inputs,
...
}:
{
# Configuration common to all Linux systems # Configuration common to all Linux systems
flake = { flake = {
lib = lib =

@ -1,9 +1,9 @@
/* /*
* The contents of this file are adapted from digga The contents of this file are adapted from digga
* https://github.com/divnix/digga https://github.com/divnix/digga
*
* Licensed under the MIT license Licensed under the MIT license
*/ */
{ lib, inputs }: { lib, inputs }:
let let
@ -14,62 +14,61 @@ let
inherit system; inherit system;
overlays = [ overlays = [
inputs.deploy-rs.overlay inputs.deploy-rs.overlay
(self: super: { deploy-rs = { inherit (pkgs) deploy-rs; lib = super.deploy-rs.lib; }; }) (self: super: {
deploy-rs = {
inherit (pkgs) deploy-rs;
lib = super.deploy-rs.lib;
};
})
]; ];
}; };
getFqdn = c: getFqdn =
c:
let let
net = c.config.networking; net = c.config.networking;
fqdn = fqdn =
if (net ? domain) && (net.domain != null) if (net ? domain) && (net.domain != null) then "${net.hostName}.${net.domain}" else net.hostName;
then "${net.hostName}.${net.domain}"
else net.hostName;
in in
fqdn; fqdn;
in in
{ {
mkDeployNodes = systemConfigurations: extraConfig: mkDeployNodes =
systemConfigurations: extraConfig:
/* /*
* *
Synopsis: mkNodes _systemConfigurations_ _extraConfig_ Synopsis: mkNodes _systemConfigurations_ _extraConfig_
Generate the `nodes` attribute expected by deploy-rs Generate the `nodes` attribute expected by deploy-rs
where _systemConfigurations_ are `nodes`. where _systemConfigurations_ are `nodes`.
_systemConfigurations_ should take the form of a flake's _systemConfigurations_ should take the form of a flake's
_nixosConfigurations_. Note that deploy-rs does not currently support _nixosConfigurations_. Note that deploy-rs does not currently support
deploying to darwin hosts. deploying to darwin hosts.
_extraConfig_, if specified, will be merged into each of the _extraConfig_, if specified, will be merged into each of the
nodes' configurations. nodes' configurations.
Example _systemConfigurations_ input: Example _systemConfigurations_ input:
``` ```
{ {
hostname-1 = { hostname-1 = {
fastConnection = true; fastConnection = true;
sshOpts = [ "-p" "25" ]; sshOpts = [ "-p" "25" ];
}; };
hostname-2 = { hostname-2 = {
sshOpts = [ "-p" "19999" ]; sshOpts = [ "-p" "19999" ];
sshUser = "root"; sshUser = "root";
}; };
} }
``` ```
* *
*/ */
lib.recursiveUpdate lib.recursiveUpdate (lib.mapAttrs (_: c: {
(lib.mapAttrs hostname = getFqdn c;
( profiles.system = {
_: c: { user = "root";
hostname = getFqdn c; path = deployPkgs.deploy-rs.lib.activate.nixos c;
profiles.system = { };
user = "root"; }) systemConfigurations) extraConfig;
path = deployPkgs.deploy-rs.lib.activate.nixos c;
};
}
)
systemConfigurations)
extraConfig;
} }

@ -10,7 +10,10 @@
{ {
# tuxnix # tuxnix
publicKey = "fTvULvdsc92binFaBV+uWwFi33bi8InShcaPnoxUZEA="; publicKey = "fTvULvdsc92binFaBV+uWwFi33bi8InShcaPnoxUZEA=";
allowedIPs = [ "10.7.6.203/32" "fd00:fae:fae:fae:fae:203::/96" ]; allowedIPs = [
"10.7.6.203/32"
"fd00:fae:fae:fae:fae:203::/96"
];
} }
]; ];
}; };
@ -27,9 +30,13 @@
} // sshPubKeys; } // sshPubKeys;
wireguardDevices = [ wireguardDevices = [
{ # stroopwafel {
# stroopwafel
publicKey = "NNb7T8Jmn+V2dTZ8T6Fcq7hGomHGDckKoV3kK2oAhSE="; publicKey = "NNb7T8Jmn+V2dTZ8T6Fcq7hGomHGDckKoV3kK2oAhSE=";
allowedIPs = [ "10.7.6.200/32" "fd00:fae:fae:fae:fae:200::/96" ]; allowedIPs = [
"10.7.6.200/32"
"fd00:fae:fae:fae:fae:200::/96"
];
} }
]; ];
}; };
@ -42,9 +49,13 @@
secretEncryptionKeys = sshPubKeys; secretEncryptionKeys = sshPubKeys;
wireguardDevices = [ wireguardDevices = [
{ # judy {
# judy
publicKey = "I+gN7v1VXkAGoSir6L8aebtLbguvy5nAx1QVDTzdckk="; publicKey = "I+gN7v1VXkAGoSir6L8aebtLbguvy5nAx1QVDTzdckk=";
allowedIPs = [ "10.7.6.202/32" "fd00:fae:fae:fae:fae:202::/96" ]; allowedIPs = [
"10.7.6.202/32"
"fd00:fae:fae:fae:fae:202::/96"
];
} }
]; ];
}; };
@ -59,13 +70,21 @@
}; };
wireguardDevices = [ wireguardDevices = [
{ # dumpyourvms {
# dumpyourvms
publicKey = "3UrVLQrwXnPAVXPiTAd7eM3fZYxnFSYgKAGpNMUwnUk="; publicKey = "3UrVLQrwXnPAVXPiTAd7eM3fZYxnFSYgKAGpNMUwnUk=";
allowedIPs = [ "10.7.6.201/32" "fd00:fae:fae:fae:fae:201::/96" ]; allowedIPs = [
"10.7.6.201/32"
"fd00:fae:fae:fae:fae:201::/96"
];
} }
{ # ryzensun {
# ryzensun
publicKey = "oVF2/s7eIxyVjtG0MhKPx5SZ1JllZg+ZFVF2eVYtPGo="; publicKey = "oVF2/s7eIxyVjtG0MhKPx5SZ1JllZg+ZFVF2eVYtPGo=";
allowedIPs = [ "10.7.6.204/32" "fd00:fae:fae:fae:fae:204::/96" ]; allowedIPs = [
"10.7.6.204/32"
"fd00:fae:fae:fae:fae:204::/96"
];
} }
]; ];
}; };

@ -1,13 +1,24 @@
{ lib, ... }: let { lib, ... }:
let
admins = import ./admins.nix; admins = import ./admins.nix;
robots = import ./robots.nix; robots = import ./robots.nix;
in { in
{
flake = { flake = {
logins = { logins = {
admins = lib.lists.foldl (logins: adminConfig: { admins =
sshPubKeys = logins.sshPubKeys ++ (lib.attrsets.attrValues adminConfig.sshPubKeys); lib.lists.foldl
wireguardDevices = logins.wireguardDevices ++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else []); (logins: adminConfig: {
}) { sshPubKeys = []; wireguardDevices = []; } (lib.attrsets.attrValues admins); sshPubKeys = logins.sshPubKeys ++ (lib.attrsets.attrValues adminConfig.sshPubKeys);
wireguardDevices =
logins.wireguardDevices
++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else [ ]);
})
{
sshPubKeys = [ ];
wireguardDevices = [ ];
}
(lib.attrsets.attrValues admins);
robots.sshPubKeys = lib.attrsets.attrValues robots; robots.sshPubKeys = lib.attrsets.attrValues robots;
}; };
}; };

@ -1,11 +1,12 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
...
}: }:
{ {
services.caddy = { services.caddy = {
enable = lib.mkForce true; enable = lib.mkForce true;
group = config.pub-solar-os.authentication.robot.username; group = config.pub-solar-os.authentication.robot.username;
email = config.pub-solar-os.adminEmail; email = config.pub-solar-os.adminEmail;
@ -14,5 +15,8 @@
grace_period 60s grace_period 60s
''; '';
}; };
networking.firewall.allowedTCPPorts = [ 80 443 ]; networking.firewall.allowedTCPPorts = [
80
443
];
} }

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, self pkgs,
, ... self,
}: { ...
}:
{
services.nginx.virtualHosts."collabora.${config.pub-solar-os.networking.domain}" = { services.nginx.virtualHosts."collabora.${config.pub-solar-os.networking.domain}" = {
enableACME = true; enableACME = true;
forceSSL = true; forceSSL = true;
@ -24,9 +26,7 @@
containers."collabora" = { containers."collabora" = {
image = "collabora/code"; image = "collabora/code";
autoStart = true; autoStart = true;
ports = [ ports = [ "127.0.0.1:9980:9980" ];
"127.0.0.1:9980:9980"
];
extraOptions = [ extraOptions = [
"--cap-add=MKNOD" "--cap-add=MKNOD"
"--pull=always" "--pull=always"

@ -1,4 +1,11 @@
{ pkgs, config, flake, lib, ... }: { {
pkgs,
config,
flake,
lib,
...
}:
{
imports = [ imports = [
./nix.nix ./nix.nix
./networking.nix ./networking.nix
@ -29,7 +36,11 @@
config = { config = {
environment = { environment = {
# Just a couple of global packages to make our lives easier # Just a couple of global packages to make our lives easier
systemPackages = with pkgs; [ git vim wget ]; systemPackages = with pkgs; [
git
vim
wget
];
}; };
# Select internationalization properties # Select internationalization properties

@ -3,7 +3,8 @@
lib, lib,
config, config,
... ...
}: { }:
{
options.pub-solar-os.networking = with lib; { options.pub-solar-os.networking = with lib; {
domain = mkOption { domain = mkOption {
description = "domain on which all services should run. This defaults to pub.solar"; description = "domain on which all services should run. This defaults to pub.solar";
@ -23,8 +24,8 @@
networking.firewall.interfaces.wg-ssh.allowedTCPPorts = [ 22 ]; networking.firewall.interfaces.wg-ssh.allowedTCPPorts = [ 22 ];
networking.hosts = { networking.hosts = {
"10.7.6.1" = ["nachtigall.${config.pub-solar-os.networking.domain}"]; "10.7.6.1" = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
"10.7.6.2" = ["flora-6.${config.pub-solar-os.networking.domain}"]; "10.7.6.2" = [ "flora-6.${config.pub-solar-os.networking.domain}" ];
}; };
services.openssh = { services.openssh = {

@ -1,11 +1,12 @@
{ config {
, pkgs config,
, lib pkgs,
, flake lib,
, ... flake,
}: { ...
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ }:
]; {
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ ];
nix = { nix = {
# Use default version alias for nix package # Use default version alias for nix package
@ -25,7 +26,10 @@
# Prevents impurities in builds # Prevents impurities in builds
sandbox = true; sandbox = true;
# Give root and @wheel special privileges with nix # Give root and @wheel special privileges with nix
trusted-users = [ "root" "@wheel" ]; trusted-users = [
"root"
"@wheel"
];
# Allow only group wheel to connect to the nix daemon # Allow only group wheel to connect to the nix daemon
allowed-users = [ "@wheel" ]; allowed-users = [ "@wheel" ];
}; };

@ -1,4 +1,5 @@
{ flake, config, ... }: { { flake, config, ... }:
{
home-manager.users.${config.pub-solar-os.authentication.username} = { home-manager.users.${config.pub-solar-os.authentication.username} = {
programs.git.enable = true; programs.git.enable = true;
programs.starship.enable = true; programs.starship.enable = true;

@ -4,7 +4,8 @@
lib, lib,
config, config,
... ...
}: { }:
{
options.pub-solar-os.authentication = with lib; { options.pub-solar-os.authentication = with lib; {
username = mkOption { username = mkOption {
description = "Username for the adminstrative user"; description = "Username for the adminstrative user";
@ -41,7 +42,10 @@
users.users.${config.pub-solar-os.authentication.username} = { users.users.${config.pub-solar-os.authentication.username} = {
name = config.pub-solar-os.authentication.username; name = config.pub-solar-os.authentication.username;
group = config.pub-solar-os.authentication.username; group = config.pub-solar-os.authentication.username;
extraGroups = [ "wheel" "docker" ]; extraGroups = [
"wheel"
"docker"
];
isNormalUser = true; isNormalUser = true;
openssh.authorizedKeys.keys = config.pub-solar-os.authentication.sshPubKeys; openssh.authorizedKeys.keys = config.pub-solar-os.authentication.sshPubKeys;
}; };
@ -63,7 +67,8 @@
users.groups.${config.pub-solar-os.authentication.robot.username} = { }; users.groups.${config.pub-solar-os.authentication.robot.username} = { };
users.users.root.initialHashedPassword = config.pub-solar-os.authentication.root.initialHashedPassword; users.users.root.initialHashedPassword =
config.pub-solar-os.authentication.root.initialHashedPassword;
security.sudo.wheelNeedsPassword = false; security.sudo.wheelNeedsPassword = false;
}; };

@ -1,4 +1,9 @@
{ flake, config, lib, ... }: {
flake,
config,
lib,
...
}:
{ {
age.secrets."coturn-static-auth-secret" = { age.secrets."coturn-static-auth-secret" = {
file = "${flake.self}/secrets/coturn-static-auth-secret.age"; file = "${flake.self}/secrets/coturn-static-auth-secret.age";
@ -19,8 +24,12 @@
pkey = "${config.security.acme.certs.${realm}.directory}/key.pem"; pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";
extraConfig = extraConfig =
let let
externalIPv4s = lib.strings.concatMapStringsSep "\n" ({ address, ... }: "external-ip=${address}") config.networking.interfaces.enp35s0.ipv4.addresses; externalIPv4s = lib.strings.concatMapStringsSep "\n" (
externalIPv6s = lib.strings.concatMapStringsSep "\n" ({ address, ... }: "external-ip=${address}") config.networking.interfaces.enp35s0.ipv6.addresses; { address, ... }: "external-ip=${address}"
) config.networking.interfaces.enp35s0.ipv4.addresses;
externalIPv6s = lib.strings.concatMapStringsSep "\n" (
{ address, ... }: "external-ip=${address}"
) config.networking.interfaces.enp35s0.ipv6.addresses;
in in
'' ''
${externalIPv4s} ${externalIPv4s}
@ -61,28 +70,35 @@
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
''; '';
}; };
networking.firewall = { networking.firewall = {
interfaces.enp35s0 = interfaces.enp35s0 =
let let
range = with config.services.coturn; [{ range = with config.services.coturn; [
from = min-port; {
to = max-port; from = min-port;
}]; to = max-port;
}
];
in in
{ {
allowedUDPPortRanges = range; allowedUDPPortRanges = range;
allowedUDPPorts = [ 3478 5349 ]; allowedUDPPorts = [
3478
5349
];
allowedTCPPortRanges = [ ]; allowedTCPPortRanges = [ ];
allowedTCPPorts = [ 3478 5349 ]; allowedTCPPorts = [
3478
5349
];
}; };
}; };
# get a certificate # get a certificate
security.acme.certs.${config.services.coturn.realm} = { security.acme.certs.${config.services.coturn.realm} = {
/* insert here the right configuration to obtain a certificate */ # insert here the right configuration to obtain a certificate
postRun = "systemctl restart coturn.service"; postRun = "systemctl restart coturn.service";
group = "turnserver"; group = "turnserver";
}; };

@ -1,4 +1,5 @@
{ pkgs, ... }: { { pkgs, ... }:
{
virtualisation.docker = { virtualisation.docker = {
enable = true; enable = true;
extraOptions = '' extraOptions = ''

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
}: { ...
}:
{
age.secrets.drone-secrets = { age.secrets.drone-secrets = {
file = "${flake.self}/secrets/drone-secrets.age"; file = "${flake.self}/secrets/drone-secrets.age";
mode = "600"; mode = "600";
@ -26,9 +28,7 @@
users.groups.drone = { }; users.groups.drone = { };
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [ "d '/var/lib/drone-db' 0750 drone drone - -" ];
"d '/var/lib/drone-db' 0750 drone drone - -"
];
services.caddy.virtualHosts."ci.${config.pub-solar-os.networking.domain}" = { services.caddy.virtualHosts."ci.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce '' logFormat = lib.mkForce ''
@ -66,23 +66,15 @@
image = "postgres:14"; image = "postgres:14";
autoStart = true; autoStart = true;
user = "994"; user = "994";
volumes = [ volumes = [ "/var/lib/drone-db:/var/lib/postgresql/data" ];
"/var/lib/drone-db:/var/lib/postgresql/data" extraOptions = [ "--network=drone-net" ];
]; environmentFiles = [ config.age.secrets.drone-db-secrets.path ];
extraOptions = [
"--network=drone-net"
];
environmentFiles = [
config.age.secrets.drone-db-secrets.path
];
}; };
containers."drone-server" = { containers."drone-server" = {
image = "drone/drone:2"; image = "drone/drone:2";
autoStart = true; autoStart = true;
user = "994"; user = "994";
ports = [ ports = [ "127.0.0.1:4000:80" ];
"127.0.0.1:4000:80"
];
dependsOn = [ "drone-db" ]; dependsOn = [ "drone-db" ];
extraOptions = [ extraOptions = [
"--network=drone-net" "--network=drone-net"
@ -95,18 +87,14 @@
DRONE_SERVER_PROTO = "https"; DRONE_SERVER_PROTO = "https";
DRONE_DATABASE_DRIVER = "postgres"; DRONE_DATABASE_DRIVER = "postgres";
}; };
environmentFiles = [ environmentFiles = [ config.age.secrets.drone-secrets.path ];
config.age.secrets.drone-secrets.path
];
}; };
containers."drone-docker-runner" = { containers."drone-docker-runner" = {
image = "drone/drone-runner-docker:1"; image = "drone/drone-runner-docker:1";
autoStart = true; autoStart = true;
# needs to run as root # needs to run as root
#user = "994"; #user = "994";
volumes = [ volumes = [ "/var/run/docker.sock:/var/run/docker.sock" ];
"/var/run/docker.sock:/var/run/docker.sock"
];
dependsOn = [ "drone-db" ]; dependsOn = [ "drone-db" ];
extraOptions = [ extraOptions = [
"--network=drone-net" "--network=drone-net"
@ -119,9 +107,7 @@
DRONE_RUNNER_CAPACITY = "2"; DRONE_RUNNER_CAPACITY = "2";
DRONE_RUNNER_NAME = "flora-6-docker-runner"; DRONE_RUNNER_NAME = "flora-6-docker-runner";
}; };
environmentFiles = [ environmentFiles = [ config.age.secrets.drone-secrets.path ];
config.age.secrets.drone-secrets.path
];
}; };
}; };
}; };

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
}: { ...
}:
{
age.secrets.forgejo-actions-runner-token = { age.secrets.forgejo-actions-runner-token = {
file = "${flake.self}/secrets/forgejo-actions-runner-token.age"; file = "${flake.self}/secrets/forgejo-actions-runner-token.age";
mode = "644"; mode = "644";
@ -20,7 +22,7 @@
isSystemUser = true; isSystemUser = true;
}; };
users.groups.gitea-runner = {}; users.groups.gitea-runner = { };
systemd.services."gitea-runner-flora\\x2d6".serviceConfig = { systemd.services."gitea-runner-flora\\x2d6".serviceConfig = {
DynamicUser = lib.mkForce false; DynamicUser = lib.mkForce false;

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
}: { ...
}:
{
age.secrets.forgejo-database-password = { age.secrets.forgejo-database-password = {
file = "${flake.self}/secrets/forgejo-database-password.age"; file = "${flake.self}/secrets/forgejo-database-password.age";
mode = "600"; mode = "600";
@ -52,7 +54,7 @@
isSystemUser = true; isSystemUser = true;
}; };
users.groups.gitea = {}; users.groups.gitea = { };
# Expose SSH port only for forgejo SSH # Expose SSH port only for forgejo SSH
networking.firewall.interfaces.enp35s0.allowedTCPPorts = [ 2223 ]; networking.firewall.interfaces.enp35s0.allowedTCPPorts = [ 2223 ];

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
}: { ...
}:
{
age.secrets.grafana-admin-password = { age.secrets.grafana-admin-password = {
file = "${flake.self}/secrets/grafana-admin-password.age"; file = "${flake.self}/secrets/grafana-admin-password.age";
mode = "644"; mode = "644";

@ -70,9 +70,7 @@
"icon": "external link", "icon": "external link",
"includeVars": true, "includeVars": true,
"keepTime": true, "keepTime": true,
"tags": [ "tags": ["matrix"],
"matrix"
],
"title": "Dashboards", "title": "Dashboards",
"type": "dashboards" "type": "dashboards"
} }
@ -4313,9 +4311,7 @@
"id": "byNames", "id": "byNames",
"options": { "options": {
"mode": "exclude", "mode": "exclude",
"names": [ "names": ["libera.chat "],
"libera.chat "
],
"prefix": "All except:", "prefix": "All except:",
"readOnly": true "readOnly": true
} }
@ -4435,9 +4431,7 @@
"id": "byNames", "id": "byNames",
"options": { "options": {
"mode": "exclude", "mode": "exclude",
"names": [ "names": ["libera.chat"],
"libera.chat"
],
"prefix": "All except:", "prefix": "All except:",
"readOnly": true "readOnly": true
} }
@ -13266,9 +13260,7 @@
"refresh": false, "refresh": false,
"schemaVersion": 37, "schemaVersion": 37,
"style": "dark", "style": "dark",
"tags": [ "tags": ["matrix"],
"matrix"
],
"templating": { "templating": {
"list": [ "list": [
{ {
@ -13453,17 +13445,7 @@
"2h", "2h",
"1d" "1d"
], ],
"time_options": [ "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
}, },
"timezone": "", "timezone": "",
"title": "Synapse", "title": "Synapse",

@ -1,13 +1,15 @@
{ flake {
, config flake,
, lib config,
, pkgs lib,
, ... pkgs,
}: { ...
}:
{
options.pub-solar-os.auth = with lib; { options.pub-solar-os.auth = with lib; {
enable = mkEnableOption "Enable keycloak to run on the node"; enable = mkEnableOption "Enable keycloak to run on the node";
realm = mkOption { realm = mkOption {
description = "Name of the realm"; description = "Name of the realm";
type = types.str; type = types.str;
default = config.pub-solar-os.networking.domain; default = config.pub-solar-os.networking.domain;
@ -53,14 +55,13 @@
features = "declarative-user-profile"; features = "declarative-user-profile";
}; };
themes = { themes = {
"pub.solar" = flake.inputs.keycloak-theme-pub-solar.legacyPackages.${pkgs.system}.keycloak-theme-pub-solar; "pub.solar" =
flake.inputs.keycloak-theme-pub-solar.legacyPackages.${pkgs.system}.keycloak-theme-pub-solar;
}; };
}; };
services.restic.backups.keycloak-droppie = { services.restic.backups.keycloak-droppie = {
paths = [ paths = [ "/tmp/keycloak-backup.sql" ];
"/tmp/keycloak-backup.sql"
];
timerConfig = { timerConfig = {
OnCalendar = "*-*-* 02:00:00 Etc/UTC"; OnCalendar = "*-*-* 02:00:00 Etc/UTC";
# droppie will be offline if nachtigall misses the timer # droppie will be offline if nachtigall misses the timer
@ -83,9 +84,7 @@
}; };
services.restic.backups.keycloak-storagebox = { services.restic.backups.keycloak-storagebox = {
paths = [ paths = [ "/tmp/keycloak-backup.sql" ];
"/tmp/keycloak-backup.sql"
];
timerConfig = { timerConfig = {
OnCalendar = "*-*-* 04:10:00 Etc/UTC"; OnCalendar = "*-*-* 04:10:00 Etc/UTC";
}; };

@ -1,9 +1,11 @@
{ config {
, lib config,
, pkgs lib,
, flake pkgs,
, ... flake,
}: { ...
}:
{
services.caddy.virtualHosts = { services.caddy.virtualHosts = {
"flora-6.${config.pub-solar-os.networking.domain}" = { "flora-6.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce '' logFormat = lib.mkForce ''
@ -51,16 +53,18 @@
retention_delete_worker_count = 150; retention_delete_worker_count = 150;
}; };
schema_config = { schema_config = {
configs = [{ configs = [
from = "2020-05-15"; {
store = "boltdb-shipper"; from = "2020-05-15";
object_store = "filesystem"; store = "boltdb-shipper";
schema = "v11"; object_store = "filesystem";
index = { schema = "v11";
prefix = "index_"; index = {
period = "24h"; prefix = "index_";
}; period = "24h";
}]; };
}
];
}; };
}; };
}; };
@ -75,23 +79,29 @@
positions = { positions = {
filename = "/tmp/positions.yaml"; filename = "/tmp/positions.yaml";
}; };
clients = [{ clients = [
url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push"; {
}]; url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
scrape_configs = [{ }
job_name = "journal"; ];
journal = { scrape_configs = [
max_age = "24h"; {
labels = { job_name = "journal";
job = "systemd-journal"; journal = {
host = "flora-6"; max_age = "24h";
labels = {
job = "systemd-journal";
host = "flora-6";
};
}; };
}; relabel_configs = [
relabel_configs = [{ {
source_labels = [ "__journal__systemd_unit" ]; source_labels = [ "__journal__systemd_unit" ];
target_label = "unit"; target_label = "unit";
}]; }
}]; ];
}
];
}; };
}; };
} }

@ -1,8 +1,9 @@
{ flake {
, config flake,
, lib config,
, pkgs lib,
, ... pkgs,
...
}: }:
{ {
networking.firewall.allowedTCPPorts = [ 25 ]; networking.firewall.allowedTCPPorts = [ 25 ];

@ -1,4 +1,10 @@
{ config, pkgs, flake, inputs, ... }: {
config,
pkgs,
flake,
inputs,
...
}:
{ {
age.secrets."mastodon-secret-key-base" = { age.secrets."mastodon-secret-key-base" = {
@ -64,9 +70,7 @@
mediaAutoRemove = { mediaAutoRemove = {
olderThanDays = 7; olderThanDays = 7;
}; };
extraEnvFiles = [ extraEnvFiles = [ "/run/agenix/mastodon-extra-env-secrets" ];
"/run/agenix/mastodon-extra-env-secrets"
];
extraConfig = { extraConfig = {
WEB_DOMAIN = "mastodon.${config.pub-solar-os.networking.domain}"; WEB_DOMAIN = "mastodon.${config.pub-solar-os.networking.domain}";
# Defined in ./opensearch.nix # Defined in ./opensearch.nix
@ -97,9 +101,7 @@
}; };
services.restic.backups.mastodon-droppie = { services.restic.backups.mastodon-droppie = {
paths = [ paths = [ "/tmp/mastodon-backup.sql" ];
"/tmp/mastodon-backup.sql"
];
timerConfig = { timerConfig = {
OnCalendar = "*-*-* 02:00:00 Etc/UTC"; OnCalendar = "*-*-* 02:00:00 Etc/UTC";
# droppie will be offline if nachtigall misses the timer # droppie will be offline if nachtigall misses the timer
@ -122,9 +124,7 @@
}; };
services.restic.backups.mastodon-storagebox = { services.restic.backups.mastodon-storagebox = {
paths = [ paths = [ "/tmp/mastodon-backup.sql" ];
"/tmp/mastodon-backup.sql"
];
timerConfig = { timerConfig = {
OnCalendar = "*-*-* 04:05:00 Etc/UTC"; OnCalendar = "*-*-* 04:05:00 Etc/UTC";
}; };

@ -1,15 +1,18 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
let let
# Find element in list config.services.matrix-synapse.settings.listeners.*.resources # Find element in list config.services.matrix-synapse.settings.listeners.*.resources
# that sets names = "client" # that sets names = "client"
nameHasClient = name: name == "client"; nameHasClient = name: name == "client";
resourceHasClient = resource: builtins.any nameHasClient resource.names; resourceHasClient = resource: builtins.any nameHasClient resource.names;
listenerWithClient = lib.findFirst listenerWithClient =
(listener: lib.findFirst (listener: builtins.any resourceHasClient listener.resources)
builtins.any resourceHasClient listener.resources) (throw "Found no matrix-synapse.settings.listeners.*.resources.*.names containing string client")
(throw "Found no matrix-synapse.settings.listeners.*.resources.*.names containing string client") config.services.matrix-synapse.settings.listeners;
config.services.matrix-synapse.settings.listeners
;
synapseClientPort = "${toString listenerWithClient.port}"; synapseClientPort = "${toString listenerWithClient.port}";
in in
{ {
@ -46,7 +49,11 @@ in
}; };
metrics = { metrics = {
enabled = true; enabled = true;
remoteUserAgeBuckets = [ "1h" "1d" "1w" ]; remoteUserAgeBuckets = [
"1h"
"1d"
"1w"
];
}; };
provisioning = { provisioning = {
enabled = false; enabled = false;
@ -122,4 +129,3 @@ in
}; };
}; };
} }

@ -1,4 +1,10 @@
{ flake, lib, config, pkgs, ... }: {
flake,
lib,
config,
pkgs,
...
}:
{ {
age.secrets."matrix-mautrix-telegram-env-file" = { age.secrets."matrix-mautrix-telegram-env-file" = {
file = "${flake.self}/secrets/matrix-mautrix-telegram-env-file.age"; file = "${flake.self}/secrets/matrix-mautrix-telegram-env-file.age";

@ -1,4 +1,9 @@
{ flake, config, pkgs, ... }: {
flake,
config,
pkgs,
...
}:
let let
publicDomain = "matrix.${config.pub-solar-os.networking.domain}"; publicDomain = "matrix.${config.pub-solar-os.networking.domain}";
serverDomain = "${config.pub-solar-os.networking.domain}"; serverDomain = "${config.pub-solar-os.networking.domain}";
@ -40,22 +45,16 @@ in
}; };
listeners = [ listeners = [
{ {
bind_addresses = [ bind_addresses = [ "127.0.0.1" ];
"127.0.0.1"
];
port = 8008; port = 8008;
resources = [ resources = [
{ {
compress = true; compress = true;
names = [ names = [ "client" ];
"client"
];
} }
{ {
compress = false; compress = false;
names = [ names = [ "federation" ];
"federation"
];
} }
]; ];
tls = false; tls = false;
@ -63,29 +62,23 @@ in
x_forwarded = true; x_forwarded = true;
} }
{ {
bind_addresses = [ bind_addresses = [ "127.0.0.1" ];
"127.0.0.1"
];
port = 8012; port = 8012;
resources = [ resources = [ { names = [ "metrics" ]; } ];
{
names = [
"metrics"
];
}
];
tls = false; tls = false;
type = "metrics"; type = "metrics";
} }
]; ];
account_threepid_delegates.msisdn = ""; account_threepid_delegates.msisdn = "";
alias_creation_rules = [{ alias_creation_rules = [
action = "allow"; {
alias = "*"; action = "allow";
room_id = "*"; alias = "*";
user_id = "*"; room_id = "*";
}]; user_id = "*";
}
];
allow_guest_access = false; allow_guest_access = false;
allow_public_rooms_over_federation = true; allow_public_rooms_over_federation = true;
allow_public_rooms_without_auth = false; allow_public_rooms_without_auth = false;
@ -152,7 +145,7 @@ in
}; };
per_user = { per_user = {
burst_count = 5; burst_count = 5;
per_second = 0.003; per_second = 3.0e-3;
}; };
}; };
rc_joins = { rc_joins = {
@ -162,7 +155,7 @@ in
}; };
remote = { remote = {
burst_count = 10; burst_count = 10;
per_second = 0.01; per_second = 1.0e-2;
}; };
}; };
rc_login = { rc_login = {
@ -194,17 +187,19 @@ in
registrations_require_3pid = [ "email" ]; registrations_require_3pid = [ "email" ];
report_stats = false; report_stats = false;
require_auth_for_profile_requests = false; require_auth_for_profile_requests = false;
room_list_publication_rules = [{ room_list_publication_rules = [
action = "allow"; {
alias = "*"; action = "allow";
room_id = "*"; alias = "*";
user_id = "*"; room_id = "*";
}]; user_id = "*";
}
];
signing_key_path = "/run/agenix/matrix-synapse-signing-key"; signing_key_path = "/run/agenix/matrix-synapse-signing-key";
stream_writers = { }; stream_writers = { };
trusted_key_servers = [{ server_name = "matrix.org"; }]; trusted_key_servers = [ { server_name = "matrix.org"; } ];
suppress_key_server_warning = true; suppress_key_server_warning = true;
turn_allow_guests = false; turn_allow_guests = false;
@ -276,9 +271,7 @@ in
"redis" "redis"
]; ];
plugins = [ plugins = [ config.services.matrix-synapse.package.plugins.matrix-synapse-shared-secret-auth ];
config.services.matrix-synapse.package.plugins.matrix-synapse-shared-secret-auth
];
sliding-sync = { sliding-sync = {
enable = true; enable = true;

@ -2,7 +2,7 @@ version: 1
formatters: formatters:
precise: precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' format: "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"
filters: filters:
context: context:

@ -1,8 +1,9 @@
{ flake {
, config flake,
, lib config,
, pkgs lib,
, ... pkgs,
...
}: }:
let let
localSettingsPHP = pkgs.writeScript "LocalSettings.php" '' localSettingsPHP = pkgs.writeScript "LocalSettings.php" ''
@ -201,7 +202,9 @@ in
group = "mediawiki"; group = "mediawiki";
inherit uid; inherit uid;
}; };
users.groups.mediawiki = { inherit gid; }; users.groups.mediawiki = {
inherit gid;
};
virtualisation = { virtualisation = {
oci-containers = { oci-containers = {
@ -212,9 +215,7 @@ in
user = "1000:${builtins.toString gid}"; user = "1000:${builtins.toString gid}";
autoStart = true; autoStart = true;
ports = [ ports = [ "127.0.0.1:8293:80" ];
"127.0.0.1:8293:80"
];
extraOptions = [ extraOptions = [
"--add-host=host.docker.internal:host-gateway" "--add-host=host.docker.internal:host-gateway"

@ -1,7 +1,8 @@
{ config {
, pkgs config,
, flake pkgs,
, ... flake,
...
}: }:
{ {
age.secrets."nextcloud-secrets" = { age.secrets."nextcloud-secrets" = {

@ -1,7 +1,4 @@
{ { config, ... }:
config,
...
}:
let let
objStorHost = "link.tardigradeshare.io"; objStorHost = "link.tardigradeshare.io";

@ -30,7 +30,12 @@ in
}; };
locations."@proxy" = { locations."@proxy" = {
proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString(cfg.webPort)}"); proxyPass = (
if cfg.enableUnixSocket then
"http://unix:/run/mastodon-web/web.socket"
else
"http://127.0.0.1:${toString (cfg.webPort)}"
);
proxyWebsockets = true; proxyWebsockets = true;
}; };
@ -45,13 +50,12 @@ in
extraConfig = '' extraConfig = ''
least_conn; least_conn;
''; '';
servers = builtins.listToAttrs servers = builtins.listToAttrs (
(map map (i: {
(i: { name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket"; value = { };
value = { }; }) (lib.range 1 cfg.streamingProcesses)
}) );
(lib.range 1 cfg.streamingProcesses));
}; };
}; };
} }

@ -1,4 +1,9 @@
{ lib, pkgs, config, ... }: {
lib,
pkgs,
config,
...
}:
let let
commonHeaders = '' commonHeaders = ''
add_header Permissions-Policy interest-cohort=() always; add_header Permissions-Policy interest-cohort=() always;
@ -71,9 +76,7 @@ in
"chat.${config.pub-solar-os.networking.domain}" = { "chat.${config.pub-solar-os.networking.domain}" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
root = pkgs.element-web.override { root = pkgs.element-web.override { conf = clientConfig; };
conf = clientConfig;
};
}; };
"stickers.chat.${config.pub-solar-os.networking.domain}" = { "stickers.chat.${config.pub-solar-os.networking.domain}" = {
@ -126,16 +129,18 @@ in
serverName = "matrix.${config.pub-solar-os.networking.domain}"; serverName = "matrix.${config.pub-solar-os.networking.domain}";
forceSSL = lib.mkDefault true; forceSSL = lib.mkDefault true;
enableACME = lib.mkDefault true; enableACME = lib.mkDefault true;
listen = [{ listen = [
port = 8448; {
addr = "0.0.0.0"; port = 8448;
ssl = true; addr = "0.0.0.0";
} ssl = true;
}
{ {
port = 8448; port = 8448;
addr = "[::]"; addr = "[::]";
ssl = true; ssl = true;
}]; }
];
root = "/dev/null"; root = "/dev/null";
extraConfig = '' extraConfig = ''
server_tokens off; server_tokens off;
@ -159,4 +164,3 @@ in
}; };
networking.firewall.allowedTCPPorts = [ 8448 ]; networking.firewall.allowedTCPPorts = [ 8448 ];
} }

@ -1,4 +1,5 @@
{ pkgs, lib, ... }: { { pkgs, lib, ... }:
{
default_server_config = { default_server_config = {
"m.homeserver" = { "m.homeserver" = {
base_url = "https://matrix.pub.solar"; base_url = "https://matrix.pub.solar";
@ -35,10 +36,12 @@
homeUrl = ""; homeUrl = "";
}; };
branding = { branding = {
auth_footer_links = [{ auth_footer_links = [
text = "Privacy"; {
url = "https://pub.solar/privacy"; text = "Privacy";
}]; url = "https://pub.solar/privacy";
}
];
# FUTUREWORK: Replace with pub.solar logo # FUTUREWORK: Replace with pub.solar logo
auth_header_logo_url = "themes/element/img/logos/element-logo.svg"; auth_header_logo_url = "themes/element/img/logos/element-logo.svg";
}; };

@ -1,13 +1,16 @@
{ config, flake, lib, ... }: {
config,
flake,
lib,
...
}:
let let
# Find element in list config.services.matrix-synapse.settings.listeners # Find element in list config.services.matrix-synapse.settings.listeners
# that sets type = "metrics" # that sets type = "metrics"
listenerWithMetrics = lib.findFirst listenerWithMetrics =
(listener: lib.findFirst (listener: listener.type == "metrics")
listener.type == "metrics") (throw "Found no matrix-synapse.settings.listeners.*.type containing string metrics")
(throw "Found no matrix-synapse.settings.listeners.*.type containing string metrics") config.services.matrix-synapse.settings.listeners;
config.services.matrix-synapse.settings.listeners
;
synapseMetricsPort = "${toString listenerWithMetrics.port}"; synapseMetricsPort = "${toString listenerWithMetrics.port}";
in in
{ {
@ -22,7 +25,7 @@ in
addSSL = true; addSSL = true;
basicAuthFile = "${config.age.secrets.nachtigall-metrics-nginx-basic-auth.path}"; basicAuthFile = "${config.age.secrets.nachtigall-metrics-nginx-basic-auth.path}";
locations."/metrics" = { locations."/metrics" = {
proxyPass = "http://127.0.0.1:${toString(config.services.prometheus.exporters.node.port)}"; proxyPass = "http://127.0.0.1:${toString (config.services.prometheus.exporters.node.port)}";
}; };
locations."/_synapse/metrics" = { locations."/_synapse/metrics" = {
proxyPass = "http://127.0.0.1:${synapseMetricsPort}"; proxyPass = "http://127.0.0.1:${synapseMetricsPort}";

@ -1,9 +1,7 @@
{ ... }: { ... }:
{ {
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [ "d '/srv/www/miom.space' 0750 hakkonaut hakkonaut - -" ];
"d '/srv/www/miom.space' 0750 hakkonaut hakkonaut - -"
];
services.nginx.virtualHosts = { services.nginx.virtualHosts = {
"www.miom.space" = { "www.miom.space" = {

@ -1,5 +1,5 @@
{ lib, config, ... }:
{ {
lib, config, ... }: {
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
"d '/srv/www/${config.pub-solar-os.networking.domain}' 0750 hakkonaut hakkonaut - -" "d '/srv/www/${config.pub-solar-os.networking.domain}' 0750 hakkonaut hakkonaut - -"
]; ];
@ -54,7 +54,8 @@
}; };
# Responsible disclosure information https://securitytxt.org/ # Responsible disclosure information https://securitytxt.org/
"/.well-known/security.txt" = let "/.well-known/security.txt" =
let
securityTXT = lib.lists.foldr (a: b: a + "\n" + b) "" [ securityTXT = lib.lists.foldr (a: b: a + "\n" + b) "" [
"Contact: mailto:admins@pub.solar" "Contact: mailto:admins@pub.solar"
"Expires: 2025-01-04T23:00:00.000Z" "Expires: 2025-01-04T23:00:00.000Z"
@ -62,12 +63,13 @@
"Preferred-Languages: en,de" "Preferred-Languages: en,de"
"Canonical: https://${config.pub-solar-os.networking.domain}/.well-known/security.txt" "Canonical: https://${config.pub-solar-os.networking.domain}/.well-known/security.txt"
]; ];
in { in
extraConfig = '' {
add_header Content-Type text/plain; extraConfig = ''
return 200 '${securityTXT}'; add_header Content-Type text/plain;
''; return 200 '${securityTXT}';
}; '';
};
"/satzung" = { "/satzung" = {
extraConfig = '' extraConfig = ''

@ -1,8 +1,9 @@
{ config {
, lib config,
, pkgs lib,
, self pkgs,
, ... self,
...
}: }:
let let
acmeEmailAddress = config.pub-solar-os.adminEmail; acmeEmailAddress = config.pub-solar-os.adminEmail;
@ -38,5 +39,8 @@ in
defaults.email = acmeEmailAddress; defaults.email = acmeEmailAddress;
}; };
networking.firewall.allowedTCPPorts = [ 80 443 ]; networking.firewall.allowedTCPPorts = [
80
443
];
} }

@@ -1,29 +1,31 @@
-{ config
-, lib
-, pkgs
-, self
-, flake
-, ...
-}: let
+{
+config,
+lib,
+pkgs,
+self,
+flake,
+...
+}:
+let
configPy = pkgs.writeText "obs-portal-config.py" ''
DEBUG = False
VERBOSE = DEBUG
AUTO_RESTART = DEBUG
LEAN_MODE = False
FRONTEND_URL = None
FRONTEND_HTTPS = True
FRONTEND_DIR = "../frontend/build/"
FRONTEND_CONFIG = {
"imprintUrl": "${config.pub-solar-os.imprintUrl}",
"privacyPolicyUrl": "${config.pub-solar-os.privacyPolicyUrl}",
"mapHome": {"zoom": 12, "latitude": 50.93, "longitude": 6.97},
"banner": {
"text": "This is an installation serving the Cologne/Bonn region run for Team OBSKöln by pub.solar n.e.V.",
"style": "info"
},
}
TILES_FILE = None
ADDITIONAL_CORS_ORIGINS = None
'';
env = {
@@ -41,7 +43,8 @@ ADDITIONAL_CORS_ORIGINS = None
OBS_DATA_DIR = "/data";
OBS_PROXIES_COUNT = "1";
};
-in {
+in
+{
age.secrets.obs-portal-env = {
file = "${flake.self}/secrets/obs-portal-env.age";
mode = "600";
@@ -59,8 +62,16 @@ in {
in
{
serviceConfig.Type = "oneshot";
-before = [ "docker-obs-portal.service" "docker-obs-portal-db.service" "docker-obs-portal-worker.service" ];
-requiredBy = [ "docker-obs-portal.service" "docker-obs-portal-db.service" "docker-obs-portal-worker.service" ];
+before = [
+"docker-obs-portal.service"
+"docker-obs-portal-db.service"
+"docker-obs-portal-worker.service"
+];
+requiredBy = [
+"docker-obs-portal.service"
+"docker-obs-portal-db.service"
+"docker-obs-portal-worker.service"
+];
script = ''
${dockerBin} network inspect obs-portal-net >/dev/null 2>&1 || ${dockerBin} network create obs-portal-net --subnet 172.20.0.0/24
'';
@@ -101,16 +112,17 @@ in {
"/var/lib/obs-portal/pbf/:/pbf"
];
-extraOptions = [
-"--network=obs-portal-net"
-];
+extraOptions = [ "--network=obs-portal-net" ];
};
containers."obs-portal-worker" = {
image = "git.pub.solar/pub-solar/obs-portal:latest";
autoStart = true;
-cmd = [ "python" "tools/process_track.py" ];
+cmd = [
+"python"
+"tools/process_track.py"
+];
environment = env;
environmentFiles = [ config.age.secrets.obs-portal-env.path ];
@@ -120,9 +132,7 @@ in {
"/var/lib/obs-portal${env.OBS_DATA_DIR}:${env.OBS_DATA_DIR}"
];
-extraOptions = [
-"--network=obs-portal-net"
-];
+extraOptions = [ "--network=obs-portal-net" ];
};
containers."obs-portal-db" = {
@@ -131,13 +141,9 @@ in {
environmentFiles = [ config.age.secrets.obs-portal-database-env.path ];
-volumes = [
-"/var/lib/postgres-obs-portal/data:/var/lib/postgresql/data"
-];
+volumes = [ "/var/lib/postgres-obs-portal/data:/var/lib/postgresql/data" ];
-extraOptions = [
-"--network=obs-portal-net"
-];
+extraOptions = [ "--network=obs-portal-net" ];
};
};
};

View file
@@ -1,9 +1,11 @@
-{ flake
-, config
-, lib
-, pkgs
-, ...
-}: {
+{
+flake,
+config,
+lib,
+pkgs,
+...
+}:
+{
services.nginx.virtualHosts."stream.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;

View file
}; };
systemd.services.postgresql = { systemd.services.postgresql = {
after = [ after = [ "var-lib-postgresql.mount" ];
"var-lib-postgresql.mount" requisite = [ "var-lib-postgresql.mount" ];
];
requisite = [
"var-lib-postgresql.mount"
];
}; };
} }

View file

@@ -1,6 +1,5 @@
-{ config
-, ...
-}: {
+{ config, ... }:
+{
services.prometheus = {
exporters = {
node = {

View file
@@ -1,9 +1,11 @@
-{ config
-, lib
-, pkgs
-, flake
-, ...
-}: {
+{
+config,
+lib,
+pkgs,
+flake,
+...
+}:
+{
age.secrets.nachtigall-metrics-prometheus-basic-auth-password = {
file = "${flake.self}/secrets/nachtigall-metrics-prometheus-basic-auth-password.age";
mode = "600";
@@ -27,12 +29,14 @@
scrapeConfigs = [
{
job_name = "node-exporter-http";
-static_configs = [{
-targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
-labels = {
-instance = "flora-6";
-};
-}];
+static_configs = [
+{
+targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
+labels = {
+instance = "flora-6";
+};
+}
+];
}
{
job_name = "node-exporter-https";
@@ -42,12 +46,14 @@
username = "hakkonaut";
password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
};
-static_configs = [{
-targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
-labels = {
-instance = "nachtigall";
-};
-}];
+static_configs = [
+{
+targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
+labels = {
+instance = "nachtigall";
+};
+}
+];
}
{
job_name = "matrix-synapse";
@@ -57,12 +63,14 @@
username = "hakkonaut";
password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
};
-static_configs = [{
-targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
-labels = {
-instance = "nachtigall";
-};
-}];
+static_configs = [
+{
+targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
+labels = {
+instance = "nachtigall";
+};
+}
+];
}
];
};

View file
@@ -1,9 +1,11 @@
-{ config
-, lib
-, pkgs
-, flake
-, ...
-}: {
+{
+config,
+lib,
+pkgs,
+flake,
+...
+}:
+{
age.secrets.nachtigall-metrics-prometheus-basic-auth-password = {
file = "${flake.self}/secrets/nachtigall-metrics-prometheus-basic-auth-password.age";
mode = "600";
@@ -20,27 +22,33 @@
positions = {
filename = "/tmp/positions.yaml";
};
-clients = [{
-url = "https://flora-6.${config.pub-solar-os.networking.domain}/loki/api/v1/push";
-basic_auth = {
-username = "hakkonaut";
-password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
-};
-}];
-scrape_configs = [{
-job_name = "journal";
-journal = {
-max_age = "24h";
-labels = {
-job = "systemd-journal";
-host = "nachtigall";
-};
-};
-relabel_configs = [{
-source_labels = [ "__journal__systemd_unit" ];
-target_label = "unit";
-}];
-}];
+clients = [
+{
+url = "https://flora-6.${config.pub-solar-os.networking.domain}/loki/api/v1/push";
+basic_auth = {
+username = "hakkonaut";
+password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
+};
+}
+];
+scrape_configs = [
+{
+job_name = "journal";
+journal = {
+max_age = "24h";
+labels = {
+job = "systemd-journal";
+host = "nachtigall";
+};
+};
+relabel_configs = [
+{
+source_labels = [ "__journal__systemd_unit" ];
+target_label = "unit";
+}
+];
+}
+];
};
};
}

View file
@@ -1,8 +1,9 @@
-{ flake
-, config
-, lib
-, pkgs
-, ...
+{
+flake,
+config,
+lib,
+pkgs,
+...
}:
{
age.secrets.searx-environment = {
@@ -64,9 +65,18 @@
};
engine = [
-{ engine = "startpage"; disabled = false; }
-{ engine = "yahoo"; disabled = false; }
-{ engine = "tagesschau"; disabled = false; }
+{
+engine = "startpage";
+disabled = false;
+}
+{
+engine = "yahoo";
+disabled = false;
+}
+{
+engine = "tagesschau";
+disabled = false;
+}
];
ui = {

View file
@@ -1,4 +1,4 @@
-{ config,... }:
+{ config, ... }:
{
services.tmate-ssh-server = {
enable = true;

View file
@@ -1,4 +1,5 @@
-{ flake, config, ... }: {
+{ flake, config, ... }:
+{
# From https://nixos.wiki/wiki/ZFS#Unlock_encrypted_zfs_via_ssh_on_boot
boot.initrd.network = {
enable = true;

View file
@@ -1,24 +1,27 @@
-{ self
-, inputs
-, ...
-}: {
+{ self, inputs, ... }:
+{
flake = {
nixosModules = rec {
-overlays = ({ ... }: {
-nixpkgs.overlays = [
-(final: prev:
-let
-unstable = import inputs.unstable {
-system = prev.system;
-};
-in
-{
-forgejo-runner = unstable.forgejo-runner;
-element-themes = prev.callPackage ./pkgs/element-themes { inherit (inputs) element-themes; };
-element-stickerpicker = prev.callPackage ./pkgs/element-stickerpicker { inherit (inputs) element-stickers maunium-stickerpicker; };
-})
-];
-});
+overlays = (
+{ ... }:
+{
+nixpkgs.overlays = [
+(
+final: prev:
+let
+unstable = import inputs.unstable { system = prev.system; };
+in
+{
+forgejo-runner = unstable.forgejo-runner;
+element-themes = prev.callPackage ./pkgs/element-themes { inherit (inputs) element-themes; };
+element-stickerpicker = prev.callPackage ./pkgs/element-stickerpicker {
+inherit (inputs) element-stickers maunium-stickerpicker;
+};
+}
+)
+];
+}
+);
};
};
}

View file
@@ -1,4 +1,8 @@
-{ stdenvNoCC, element-stickers, maunium-stickerpicker }:
+{
+stdenvNoCC,
+element-stickers,
+maunium-stickerpicker,
+}:
stdenvNoCC.mkDerivation {
src = maunium-stickerpicker;
name = "element-stickers";

View file
@@ -1,4 +1,8 @@
-{ stdenvNoCC, jq, element-themes }:
+{
+stdenvNoCC,
+jq,
+element-themes,
+}:
stdenvNoCC.mkDerivation {
src = element-themes;
name = "element-themes";

View file
@@ -4,15 +4,13 @@ let
nachtigall-host = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP7G0ufi+MNvaAZLDgpieHrABPGN7e/kD5kMFwSk4ABj root@nachtigall";
flora-6-host = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGP1InpTBN4AlF/4V8HHumAMLJzeO8DpzjUv9Co/+J09 root@flora-6";
-adminKeys = builtins.foldl' (keys: login: keys ++ (builtins.attrValues login.secretEncryptionKeys)) [] (builtins.attrValues admins);
+adminKeys = builtins.foldl' (
+keys: login: keys ++ (builtins.attrValues login.secretEncryptionKeys)
+) [ ] (builtins.attrValues admins);
-nachtigallKeys = [
-nachtigall-host
-];
+nachtigallKeys = [ nachtigall-host ];
-flora6Keys = [
-flora-6-host
-];
+flora6Keys = [ flora-6-host ];
in
{
# ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBB5XaH02a6+TchnyQED2VwaltPgeFCbildbE2h6nF5e root@nachtigall
@@ -63,7 +61,8 @@ in
"grafana-smtp-password.age".publicKeys = flora6Keys ++ adminKeys;
"nachtigall-metrics-nginx-basic-auth.age".publicKeys = nachtigallKeys ++ adminKeys;
-"nachtigall-metrics-prometheus-basic-auth-password.age".publicKeys = flora6Keys ++ nachtigallKeys ++ adminKeys;
+"nachtigall-metrics-prometheus-basic-auth-password.age".publicKeys =
+flora6Keys ++ nachtigallKeys ++ adminKeys;
"obs-portal-env.age".publicKeys = nachtigallKeys ++ adminKeys;
"obs-portal-database-env.age".publicKeys = nachtigallKeys ++ adminKeys;

View file
@@ -4,7 +4,8 @@
lib,
config,
...
-}: {
+}:
+{
name = "website";
nodes.nachtigall-test = self.nixosConfigurations.nachtigall-test;

treefmt.toml Normal file
View file
@@ -0,0 +1,24 @@
[formatter.nix]
command = "nix"
options = ["fmt"]
includes = ["*.nix"]
excludes = []

[formatter.prettier]
command = "prettier"
options = ["--write"]
includes = [
"*.json",
"*.yaml",
"*.md",
]

[formatter.shell]
command = "shfmt"
options = [
"-s",
"-w",
"-i",
"2",
]
includes = ["*.sh"]
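
With this configuration in place, the same formatting check can be reproduced locally before pushing. A minimal sketch, assuming the flake's dev shell puts treefmt and the formatters it references (nix fmt for Nix files, prettier, shfmt) on PATH:

```
# Enter the flake's dev shell and rewrite the working tree in place.
nix develop --command treefmt

# Check-only mode: exit non-zero if any file would be rewritten.
nix develop --command treefmt --fail-on-change

# Format only the Nix files via the flake's formatter
# (this is what the [formatter.nix] entry above invokes per file).
nix fmt
```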