forked from pub-solar/infra
style: run treefmt
This commit is contained in:
parent affdc02afe
commit 2ca0bd7c3e
@@ -1,9 +1,12 @@
# Process for handling a deletion request

### Keycloak

Required:

- auth.pub.solar ops user credentials
- SSH access to host nachtigall

```
ssh barkeeper@nachtigall.pub.solar
```

@@ -20,8 +23,8 @@ sudo --user keycloak kcadm.sh update --config /tmp/kcadm.config users/2ec6f173-3

Docs: https://www.keycloak.org/docs/latest/server_admin/index.html#updating-a-user
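For context, the `kcadm.sh update users/<id>` call in the hunk header above needs the user's ID, which is usually looked up by email first. A sketch assuming the same authenticated `/tmp/kcadm.config` session (the query and field flags may differ between Keycloak versions):

```
sudo --user keycloak kcadm.sh get users --config /tmp/kcadm.config -q email=<mail-address> --fields id,email
```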
### Nextcloud

```
ssh barkeeper@nachtigall.pub.solar
nextcloud-occ user:delete <username>
```

@@ -29,8 +32,8 @@ nextcloud-occ user:delete <username>

Docs: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/occ_command.html#user-commands-label

### Mastodon

```
ssh barkeeper@nachtigall.pub.solar
sudo -u mastodon mastodon-tootctl accounts delete --email <mail-address>
```

@@ -38,8 +41,8 @@ sudo -u mastodon mastodon-tootctl accounts delete --email <mail-address>

Docs: https://docs.joinmastodon.org/admin/tootctl/#accounts-delete

### Forgejo

```
ssh barkeeper@nachtigall.pub.solar
sudo -u gitea gitea admin user delete --config /var/lib/forgejo/custom/conf/app.ini --purge --email <mail-address>
```

@@ -47,8 +50,8 @@ sudo -u gitea gitea admin user delete --config /var/lib/forgejo/custom/conf/app.

Docs: https://forgejo.org/docs/latest/admin/command-line/#delete

### Matrix

```
ssh bartender@matrix.pub.solar -p 2020
curl --header "Authorization: Bearer <admin-access-token>" --request POST http://172.18.0.3:8008/_synapse/admin/v1/deactivate/@<username>:pub.solar --data '{"erase": true}'
```

@@ -56,6 +59,6 @@ curl --header "Authorization: Bearer <admin-access-token>" --request POST http:/

Docs: https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#deactivate-account

### OpenBikeSensor

Not implemented, see: https://github.com/openbikesensor/portal/issues/95
@@ -8,11 +8,13 @@ To deploy, make sure you have a [working development shell](./development-shell.

Then, run `deploy-rs` with the hostname of the server you want to deploy:

For nachtigall.pub.solar:

```
deploy --targets '.#nachtigall' --magic-rollback false --auto-rollback false
```

For flora-6.pub.solar:

```
deploy --targets '.#flora-6' --magic-rollback false --auto-rollback false
```
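To preview what a deployment would change without switching the running system, deploy-rs also has a dry activation mode; a minimal sketch, assuming the pinned deploy-rs version exposes the flag:

```
deploy --targets '.#nachtigall' --dry-activate
```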
@@ -29,4 +31,5 @@ to enable switching to the new config quickly at a later moment.

You'll need to have SSH access to the boxes to be able to run `deploy`.

### Getting SSH access

See [administrative-access.md](./administrative-access.md).
@@ -15,12 +15,15 @@ Please follow https://docs.greenbaum.cloud/en/devops/triton-cli.html for the det

You will need to set up the following [namecheap API credentials](https://www.namecheap.com/support/api/intro);
look for "namecheap API key" in the pub.solar Keepass database.

```
NAMECHEAP_API_KEY
NAMECHEAP_API_USER
NAMECHEAP_USER_NAME
```
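The provider reads these from the environment, so one way to set them for the current shell session (the values are placeholders for the Keepass entry):

```
export NAMECHEAP_API_KEY=<api-key>
export NAMECHEAP_API_USER=<api-user>
export NAMECHEAP_USER_NAME=<user-name>
```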
You will probably also need to add your external IP to the [API allow list](https://ap.www.namecheap.com/settings/tools/apiaccess/whitelisted-ips).

```
dig -4 ip @dns.toys
```

@@ -35,16 +38,19 @@ terraform init
```

Make your changes, e.g. in `dns.tf`.

```
$EDITOR dns.tf
```

Plan your changes using:

```
terraform plan -out pub-solar-infra.plan
```

After verification, apply your changes with:

```
terraform apply "pub-solar-infra.plan"
```
@@ -52,7 +58,9 @@ terraform apply "pub-solar-infra.plan"

### Useful links

We use the Manta remote backend to save the terraform state for collaboration.

- https://www.terraform.io/language/v1.2.x/settings/backends/manta

Namecheap Terraform provider docs:

- https://registry.terraform.io/providers/namecheap/namecheap/latest/docs
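Since the state lives in the remote Manta backend, a quick read-only check that the backend is reachable after `terraform init` is to list the tracked resources:

```
terraform state list
```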
@@ -1,9 +1,12 @@
# Process for getting a list of email addresses of all keycloak users

### Keycloak

Required:

- auth.pub.solar ops user credentials
- SSH access to host nachtigall

```
ssh barkeeper@nachtigall.pub.solar
```
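The listing step itself is truncated in this excerpt; a sketch of one way to pull all user email addresses with kcadm, assuming an authenticated `/tmp/kcadm.config` session as in the other Keycloak runbooks (the `--fields` and `-q max=` flags may vary by Keycloak version):

```
sudo --user keycloak kcadm.sh get users --config /tmp/kcadm.config --fields email -q max=1000
```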
@@ -1,9 +1,12 @@
# Process for resetting keycloak user passwords

### Keycloak

Required:

- auth.pub.solar ops user credentials
- SSH access to host nachtigall

```
ssh barkeeper@nachtigall.pub.solar
```
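The reset step is truncated here; for reference, kcadm ships a `set-password` command. A minimal sketch assuming the same authenticated session (`--temporary` forces a password change at next login):

```
sudo --user keycloak kcadm.sh set-password --config /tmp/kcadm.config --username <username> --new-password <new-password> --temporary
```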
@@ -1,9 +1,12 @@
# Process for updating a keycloak realm via CLI

### Keycloak

Required:

- auth.pub.solar ops user credentials
- SSH access to host nachtigall

```
ssh barkeeper@nachtigall.pub.solar
```
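The update step is truncated here; a sketch of how a realm update via kcadm typically looks, assuming a prepared `realm.json` export and the same authenticated session:

```
sudo --user keycloak kcadm.sh update realms/<realm-name> --config /tmp/kcadm.config -f realm.json
```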
@@ -24,6 +24,7 @@ deploy --targets '.#nachtigall'
```

Then, finalize the update by running the database migration script:

```
ssh barkeeper@nachtigall.pub.solar
docker exec -it mediawiki bash
```
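Inside the container, MediaWiki's standard migration entry point is `maintenance/update.php`; a sketch assuming the usual MediaWiki container layout:

```
php maintenance/update.php --quick
```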
@@ -1,10 +1,10 @@
# OpenBikeSensor Portal

## Docker Containers

* portal
* worker
* db

- portal
- worker
- db

## Run database migrations
@@ -1,3 +1 @@
# Reverting to an old version
@@ -6,4 +6,4 @@ After a boot, the encrypted root partition will have to be unlocked. This is don
ssh root@nachtigall.pub.solar -p2222
```

After connecting, paste the crypt passphrase you can find in the shared keepass. This will disconnect the SSH session right away and the server will keep booting into stage 2.
flake.nix
@@ -40,9 +40,13 @@
element-stickers.inputs.nixpkgs.follows = "nixpkgs";
};

outputs = inputs@{ self, ... }:
outputs =
inputs@{ self, ... }:
inputs.flake-parts.lib.mkFlake { inherit inputs; } {
systems = [ "x86_64-linux" "aarch64-linux" ];
systems = [
"x86_64-linux"
"aarch64-linux"
];

imports = [
inputs.nixos-flake.flakeModule

@@ -52,37 +56,42 @@
./hosts
];

perSystem = { system, pkgs, config, ... }: {
_module.args = {
inherit inputs;
pkgs = import inputs.nixpkgs {
inherit system;
overlays = [
inputs.agenix.overlays.default
perSystem =
{
system,
pkgs,
config,
...
}:
{
_module.args = {
inherit inputs;
pkgs = import inputs.nixpkgs {
inherit system;
overlays = [ inputs.agenix.overlays.default ];
};
unstable = import inputs.unstable { inherit system; };
master = import inputs.master { inherit system; };
};
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
deploy-rs
nixpkgs-fmt
agenix
age-plugin-yubikey
cachix
editorconfig-checker
nodePackages.prettier
nvfetcher
shellcheck
shfmt
treefmt
nixos-generators
inputs.nixpkgs-2205.legacyPackages.${system}.terraform
jq
];
};
unstable = import inputs.unstable { inherit system; };
master = import inputs.master { inherit system; };
};
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
deploy-rs
nixpkgs-fmt
agenix
age-plugin-yubikey
cachix
editorconfig-checker
nodePackages.prettier
nvfetcher
shellcheck
shfmt
treefmt
nixos-generators
inputs.nixpkgs-2205.legacyPackages.${system}.terraform
jq
];
};
};

flake =
let

@@ -92,19 +101,15 @@
inherit username;

nixosModules = builtins.listToAttrs (
map
(x: {
name = x;
value = import (./modules + "/${x}");
})
(builtins.attrNames (builtins.readDir ./modules))
map (x: {
name = x;
value = import (./modules + "/${x}");
}) (builtins.attrNames (builtins.readDir ./modules))
);

checks = builtins.mapAttrs
(
system: deployLib: deployLib.deployChecks self.deploy
)
inputs.deploy-rs.lib;
checks = builtins.mapAttrs (
system: deployLib: deployLib.deployChecks self.deploy
) inputs.deploy-rs.lib;

formatter."x86_64-linux" = inputs.unstable.legacyPackages."x86_64-linux".nixfmt-rfc-style;
@@ -1,8 +1,9 @@
{ config
, lib
, pkgs
, flake
, ...
{
config,
lib,
pkgs,
flake,
...
}:
let
psCfg = config.pub-solar;
@@ -1,12 +1,11 @@
{ ... }:

{
imports =
[
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix
./triton-vmtools.nix
./wireguard.nix
];
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix
./triton-vmtools.nix
./wireguard.nix
];
}
@@ -1,15 +1,23 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config
, lib
, pkgs
, modulesPath
, ...
}: {
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [ ];

boot.initrd.availableKernelModules = [ "ahci" "virtio_pci" "xhci_pci" "sr_mod" "virtio_blk" ];
boot.initrd.availableKernelModules = [
"ahci"
"virtio_pci"
"xhci_pci"
"sr_mod"
"virtio_blk"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
@@ -1,7 +1,5 @@
{ pkgs
, flake
, ...
}: {
{ pkgs, flake, ... }:
{
environment.systemPackages = with pkgs; [
flake.inputs.triton-vmtools.packages.${pkgs.system}.default
];
@@ -2,7 +2,8 @@
config,
pkgs,
flake,
... }:
...
}:
{
networking.firewall.allowedUDPPorts = [ 51820 ];

@@ -18,16 +19,20 @@
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.admins.wireguardDevices ++ [
{ # nachtigall.pub.solar
{
# nachtigall.pub.solar
endpoint = "138.201.80.102:51820";
publicKey = "qzNywKY9RvqTnDO8eLik75/SHveaSk9OObilDzv+xkk=";
allowedIPs = [ "10.7.6.1/32" "fd00:fae:fae:fae:fae:1::/96" ];
allowedIPs = [
"10.7.6.1/32"
"fd00:fae:fae:fae:fae:1::/96"
];
}
];
};
};

services.openssh.listenAddresses = [
services.openssh.listenAddresses = [
{
addr = "10.7.6.2";
port = 22;
@@ -1,4 +1,5 @@
{ flake, ... }: {
{ flake, ... }:
{
age.secrets."restic-repo-droppie" = {
file = "${flake.self}/secrets/restic-repo-droppie.age";
mode = "400";
@@ -1,8 +1,10 @@
{ flake
, config
, pkgs
, ...
}: {
{
flake,
config,
pkgs,
...
}:
{
# Use GRUB2 as the boot loader.
# We don't use systemd-boot because Hetzner uses BIOS legacy boot.
boot.loader.systemd-boot.enable = false;

@@ -11,15 +13,11 @@
efiSupport = false;
mirroredBoots = [
{
devices = [
"/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371"
];
devices = [ "/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371" ];
path = "/boot1";
}
{
devices = [
"/dev/disk/by-id/nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL"
];
devices = [ "/dev/disk/by-id/nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL" ];
path = "/boot2";
}
];
@@ -1,15 +1,13 @@
{ flake, ... }:

{
imports =
[
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix

./networking.nix
./wireguard.nix
./backups.nix

];
./networking.nix
./wireguard.nix
./backups.nix
];
}
@@ -1,54 +1,54 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:

{
imports =
[
(modulesPath + "/installer/scan/not-detected.nix")
];
imports = [ (modulesPath + "/installer/scan/not-detected.nix") ];

boot.initrd.availableKernelModules = [ "ahci" "nvme" ];
boot.initrd.availableKernelModules = [
"ahci"
"nvme"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];

fileSystems."/" =
{
device = "root_pool/root";
fsType = "zfs";
};
fileSystems."/" = {
device = "root_pool/root";
fsType = "zfs";
};

fileSystems."/var/lib" =
{
device = "root_pool/data";
fsType = "zfs";
};
fileSystems."/var/lib" = {
device = "root_pool/data";
fsType = "zfs";
};

fileSystems."/var/lib/postgresql" =
{
device = "root_pool/data/postgresql";
fsType = "zfs";
};
fileSystems."/var/lib/postgresql" = {
device = "root_pool/data/postgresql";
fsType = "zfs";
};

fileSystems."/var/lib/docker" =
{
device = "root_pool/data/docker";
fsType = "zfs";
};
fileSystems."/var/lib/docker" = {
device = "root_pool/data/docker";
fsType = "zfs";
};

fileSystems."/boot1" =
{
device = "/dev/disk/by-uuid/5493-EFF5";
fsType = "vfat";
};
fileSystems."/boot1" = {
device = "/dev/disk/by-uuid/5493-EFF5";
fsType = "vfat";
};

fileSystems."/boot2" =
{
device = "/dev/disk/by-uuid/5494-BA1E";
fsType = "vfat";
};
fileSystems."/boot2" = {
device = "/dev/disk/by-uuid/5494-BA1E";
fsType = "vfat";
};

swapDevices = [ ];
@@ -2,7 +2,8 @@
config,
pkgs,
flake,
... }:
...
}:
{

networking.hostName = "nachtigall";

@@ -24,5 +25,8 @@
}
];
networking.defaultGateway = "138.201.80.65";
networking.defaultGateway6 = { address = "fe80::1"; interface = "enp35s0"; };
networking.defaultGateway6 = {
address = "fe80::1";
interface = "enp35s0";
};
}
@@ -2,7 +2,8 @@
config,
pkgs,
flake,
... }:
...
}:
{
networking.firewall.allowedUDPPorts = [ 51820 ];

@@ -18,16 +19,20 @@
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.admins.wireguardDevices ++ [
{ # flora-6.pub.solar
{
# flora-6.pub.solar
endpoint = "80.71.153.210:51820";
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [ "10.7.6.2/32" "fd00:fae:fae:fae:fae:2::/96" ];
allowedIPs = [
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
}
];
};
};

services.openssh.listenAddresses = [
services.openssh.listenAddresses = [
{
addr = "10.7.6.1";
port = 22;
@@ -1,16 +1,16 @@
let
lock = builtins.fromJSON (builtins.readFile builtins.path {
path = ../../flake.lock;
name = "lockPath";
});
lock = builtins.fromJSON (
builtins.readFile builtins.path {
path = ../../flake.lock;
name = "lockPath";
}
);
flake =
import
(
fetchTarball {
url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
}
)
(fetchTarball {
url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
})
{
src = builtins.path {
path = ../../.;
@@ -1,4 +1,10 @@
{ self, lib, inputs, ... }: {
{
self,
lib,
inputs,
...
}:
{
# Configuration common to all Linux systems
flake = {
lib =
@@ -1,9 +1,9 @@
/*
* The contents of this file are adapted from digga
* https://github.com/divnix/digga
*
* Licensed under the MIT license
*/
The contents of this file are adapted from digga
https://github.com/divnix/digga

Licensed under the MIT license
*/

{ lib, inputs }:
let

@@ -14,62 +14,61 @@ let
inherit system;
overlays = [
inputs.deploy-rs.overlay
(self: super: { deploy-rs = { inherit (pkgs) deploy-rs; lib = super.deploy-rs.lib; }; })
(self: super: {
deploy-rs = {
inherit (pkgs) deploy-rs;
lib = super.deploy-rs.lib;
};
})
];
};
getFqdn = c:
getFqdn =
c:
let
net = c.config.networking;
fqdn =
if (net ? domain) && (net.domain != null)
then "${net.hostName}.${net.domain}"
else net.hostName;
if (net ? domain) && (net.domain != null) then "${net.hostName}.${net.domain}" else net.hostName;
in
fqdn;
in
{
mkDeployNodes = systemConfigurations: extraConfig:
mkDeployNodes =
systemConfigurations: extraConfig:
/*
*
Synopsis: mkNodes _systemConfigurations_ _extraConfig_
*
Synopsis: mkNodes _systemConfigurations_ _extraConfig_

Generate the `nodes` attribute expected by deploy-rs
where _systemConfigurations_ are `nodes`.
Generate the `nodes` attribute expected by deploy-rs
where _systemConfigurations_ are `nodes`.

_systemConfigurations_ should take the form of a flake's
_nixosConfigurations_. Note that deploy-rs does not currently support
deploying to darwin hosts.
_systemConfigurations_ should take the form of a flake's
_nixosConfigurations_. Note that deploy-rs does not currently support
deploying to darwin hosts.

_extraConfig_, if specified, will be merged into each of the
nodes' configurations.
_extraConfig_, if specified, will be merged into each of the
nodes' configurations.

Example _systemConfigurations_ input:
Example _systemConfigurations_ input:

```
{
hostname-1 = {
fastConnection = true;
sshOpts = [ "-p" "25" ];
};
hostname-2 = {
sshOpts = [ "-p" "19999" ];
sshUser = "root";
};
}
```
*
*/
lib.recursiveUpdate
(lib.mapAttrs
(
_: c: {
hostname = getFqdn c;
profiles.system = {
user = "root";
path = deployPkgs.deploy-rs.lib.activate.nixos c;
};
}
)
systemConfigurations)
extraConfig;
```
{
hostname-1 = {
fastConnection = true;
sshOpts = [ "-p" "25" ];
};
hostname-2 = {
sshOpts = [ "-p" "19999" ];
sshUser = "root";
};
}
```
*
*/
lib.recursiveUpdate (lib.mapAttrs (_: c: {
hostname = getFqdn c;
profiles.system = {
user = "root";
path = deployPkgs.deploy-rs.lib.activate.nixos c;
};
}) systemConfigurations) extraConfig;
}
@@ -10,7 +10,10 @@
{
# tuxnix
publicKey = "fTvULvdsc92binFaBV+uWwFi33bi8InShcaPnoxUZEA=";
allowedIPs = [ "10.7.6.203/32" "fd00:fae:fae:fae:fae:203::/96" ];
allowedIPs = [
"10.7.6.203/32"
"fd00:fae:fae:fae:fae:203::/96"
];
}
];
};

@@ -27,9 +30,13 @@
} // sshPubKeys;

wireguardDevices = [
{ # stroopwafel
{
# stroopwafel
publicKey = "NNb7T8Jmn+V2dTZ8T6Fcq7hGomHGDckKoV3kK2oAhSE=";
allowedIPs = [ "10.7.6.200/32" "fd00:fae:fae:fae:fae:200::/96" ];
allowedIPs = [
"10.7.6.200/32"
"fd00:fae:fae:fae:fae:200::/96"
];
}
];
};

@@ -42,9 +49,13 @@

secretEncryptionKeys = sshPubKeys;
wireguardDevices = [
{ # judy
{
# judy
publicKey = "I+gN7v1VXkAGoSir6L8aebtLbguvy5nAx1QVDTzdckk=";
allowedIPs = [ "10.7.6.202/32" "fd00:fae:fae:fae:fae:202::/96" ];
allowedIPs = [
"10.7.6.202/32"
"fd00:fae:fae:fae:fae:202::/96"
];
}
];
};

@@ -59,13 +70,21 @@
};

wireguardDevices = [
{ # dumpyourvms
{
# dumpyourvms
publicKey = "3UrVLQrwXnPAVXPiTAd7eM3fZYxnFSYgKAGpNMUwnUk=";
allowedIPs = [ "10.7.6.201/32" "fd00:fae:fae:fae:fae:201::/96" ];
allowedIPs = [
"10.7.6.201/32"
"fd00:fae:fae:fae:fae:201::/96"
];
}
{ # ryzensun
{
# ryzensun
publicKey = "oVF2/s7eIxyVjtG0MhKPx5SZ1JllZg+ZFVF2eVYtPGo=";
allowedIPs = [ "10.7.6.204/32" "fd00:fae:fae:fae:fae:204::/96" ];
allowedIPs = [
"10.7.6.204/32"
"fd00:fae:fae:fae:fae:204::/96"
];
}
];
};
@@ -1,13 +1,24 @@
{ lib, ... }: let
{ lib, ... }:
let
admins = import ./admins.nix;
robots = import ./robots.nix;
in {
in
{
flake = {
logins = {
admins = lib.lists.foldl (logins: adminConfig: {
sshPubKeys = logins.sshPubKeys ++ (lib.attrsets.attrValues adminConfig.sshPubKeys);
wireguardDevices = logins.wireguardDevices ++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else []);
}) { sshPubKeys = []; wireguardDevices = []; } (lib.attrsets.attrValues admins);
admins =
lib.lists.foldl
(logins: adminConfig: {
sshPubKeys = logins.sshPubKeys ++ (lib.attrsets.attrValues adminConfig.sshPubKeys);
wireguardDevices =
logins.wireguardDevices
++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else [ ]);
})
{
sshPubKeys = [ ];
wireguardDevices = [ ];
}
(lib.attrsets.attrValues admins);
robots.sshPubKeys = lib.attrsets.attrValues robots;
};
};
@@ -1,11 +1,12 @@
{ config
, lib
, pkgs
, flake
, ...
{
config,
lib,
pkgs,
flake,
...
}:
{
services.caddy = {
services.caddy = {
enable = lib.mkForce true;
group = config.pub-solar-os.authentication.robot.username;
email = config.pub-solar-os.adminEmail;

@@ -14,5 +15,8 @@
grace_period 60s
'';
};
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedTCPPorts = [
80
443
];
}
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, self
, ...
}: {
{
config,
lib,
pkgs,
self,
...
}:
{
services.nginx.virtualHosts."collabora.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;

@@ -24,9 +26,7 @@
containers."collabora" = {
image = "collabora/code";
autoStart = true;
ports = [
"127.0.0.1:9980:9980"
];
ports = [ "127.0.0.1:9980:9980" ];
extraOptions = [
"--cap-add=MKNOD"
"--pull=always"
@@ -1,4 +1,11 @@
{ pkgs, config, flake, lib, ... }: {
{
pkgs,
config,
flake,
lib,
...
}:
{
imports = [
./nix.nix
./networking.nix

@@ -29,7 +36,11 @@
config = {
environment = {
# Just a couple of global packages to make our lives easier
systemPackages = with pkgs; [ git vim wget ];
systemPackages = with pkgs; [
git
vim
wget
];
};

# Select internationalization properties
@@ -3,7 +3,8 @@
lib,
config,
...
}: {
}:
{
options.pub-solar-os.networking = with lib; {
domain = mkOption {
description = "domain on which all services should run. This defaults to pub.solar";

@@ -23,8 +24,8 @@
networking.firewall.interfaces.wg-ssh.allowedTCPPorts = [ 22 ];

networking.hosts = {
"10.7.6.1" = ["nachtigall.${config.pub-solar-os.networking.domain}"];
"10.7.6.2" = ["flora-6.${config.pub-solar-os.networking.domain}"];
"10.7.6.1" = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
"10.7.6.2" = [ "flora-6.${config.pub-solar-os.networking.domain}" ];
};

services.openssh = {
@@ -1,11 +1,12 @@
{ config
, pkgs
, lib
, flake
, ...
}: {
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
];
{
config,
pkgs,
lib,
flake,
...
}:
{
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ ];

nix = {
# Use default version alias for nix package

@@ -25,7 +26,10 @@
# Prevents impurities in builds
sandbox = true;
# Give root and @wheel special privileges with nix
trusted-users = [ "root" "@wheel" ];
trusted-users = [
"root"
"@wheel"
];
# Allow only group wheel to connect to the nix daemon
allowed-users = [ "@wheel" ];
};
@@ -1,4 +1,5 @@
{ flake, config, ... }: {
{ flake, config, ... }:
{
home-manager.users.${config.pub-solar-os.authentication.username} = {
programs.git.enable = true;
programs.starship.enable = true;
@@ -4,7 +4,8 @@
lib,
config,
...
}: {
}:
{
options.pub-solar-os.authentication = with lib; {
username = mkOption {
description = "Username for the adminstrative user";

@@ -41,7 +42,10 @@
users.users.${config.pub-solar-os.authentication.username} = {
name = config.pub-solar-os.authentication.username;
group = config.pub-solar-os.authentication.username;
extraGroups = [ "wheel" "docker" ];
extraGroups = [
"wheel"
"docker"
];
isNormalUser = true;
openssh.authorizedKeys.keys = config.pub-solar-os.authentication.sshPubKeys;
};

@@ -63,7 +67,8 @@

users.groups.${config.pub-solar-os.authentication.robot.username} = { };

users.users.root.initialHashedPassword = config.pub-solar-os.authentication.root.initialHashedPassword;
users.users.root.initialHashedPassword =
config.pub-solar-os.authentication.root.initialHashedPassword;

security.sudo.wheelNeedsPassword = false;
};
@@ -1,4 +1,9 @@
{ flake, config, lib, ... }:
{
flake,
config,
lib,
...
}:
{
age.secrets."coturn-static-auth-secret" = {
file = "${flake.self}/secrets/coturn-static-auth-secret.age";

@@ -19,8 +24,12 @@
pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";
extraConfig =
let
externalIPv4s = lib.strings.concatMapStringsSep "\n" ({ address, ... }: "external-ip=${address}") config.networking.interfaces.enp35s0.ipv4.addresses;
externalIPv6s = lib.strings.concatMapStringsSep "\n" ({ address, ... }: "external-ip=${address}") config.networking.interfaces.enp35s0.ipv6.addresses;
externalIPv4s = lib.strings.concatMapStringsSep "\n" (
{ address, ... }: "external-ip=${address}"
) config.networking.interfaces.enp35s0.ipv4.addresses;
externalIPv6s = lib.strings.concatMapStringsSep "\n" (
{ address, ... }: "external-ip=${address}"
) config.networking.interfaces.enp35s0.ipv6.addresses;
in
''
${externalIPv4s}

@@ -61,28 +70,35 @@
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
'';

};

networking.firewall = {
interfaces.enp35s0 =
let
range = with config.services.coturn; [{
from = min-port;
to = max-port;
}];
range = with config.services.coturn; [
{
from = min-port;
to = max-port;
}
];
in
{
allowedUDPPortRanges = range;
allowedUDPPorts = [ 3478 5349 ];
allowedUDPPorts = [
3478
5349
];
allowedTCPPortRanges = [ ];
allowedTCPPorts = [ 3478 5349 ];
allowedTCPPorts = [
3478
5349
];
};
};

# get a certificate
security.acme.certs.${config.services.coturn.realm} = {
/* insert here the right configuration to obtain a certificate */
# insert here the right configuration to obtain a certificate
postRun = "systemctl restart coturn.service";
group = "turnserver";
};
@@ -1,4 +1,5 @@
{ pkgs, ... }: {
{ pkgs, ... }:
{
virtualisation.docker = {
enable = true;
extraOptions = ''
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, flake
, ...
}: {
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets.drone-secrets = {
file = "${flake.self}/secrets/drone-secrets.age";
mode = "600";

@@ -26,9 +28,7 @@

users.groups.drone = { };

systemd.tmpfiles.rules = [
"d '/var/lib/drone-db' 0750 drone drone - -"
];
systemd.tmpfiles.rules = [ "d '/var/lib/drone-db' 0750 drone drone - -" ];

services.caddy.virtualHosts."ci.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''

@@ -66,23 +66,15 @@
image = "postgres:14";
autoStart = true;
user = "994";
volumes = [
"/var/lib/drone-db:/var/lib/postgresql/data"
];
extraOptions = [
"--network=drone-net"
];
environmentFiles = [
config.age.secrets.drone-db-secrets.path
];
volumes = [ "/var/lib/drone-db:/var/lib/postgresql/data" ];
extraOptions = [ "--network=drone-net" ];
environmentFiles = [ config.age.secrets.drone-db-secrets.path ];
};
containers."drone-server" = {
image = "drone/drone:2";
autoStart = true;
user = "994";
ports = [
"127.0.0.1:4000:80"
];
ports = [ "127.0.0.1:4000:80" ];
dependsOn = [ "drone-db" ];
extraOptions = [
"--network=drone-net"

@@ -95,18 +87,14 @@
DRONE_SERVER_PROTO = "https";
DRONE_DATABASE_DRIVER = "postgres";
};
environmentFiles = [
config.age.secrets.drone-secrets.path
];
environmentFiles = [ config.age.secrets.drone-secrets.path ];
};
containers."drone-docker-runner" = {
image = "drone/drone-runner-docker:1";
autoStart = true;
# needs to run as root
#user = "994";
volumes = [
"/var/run/docker.sock:/var/run/docker.sock"
];
volumes = [ "/var/run/docker.sock:/var/run/docker.sock" ];
dependsOn = [ "drone-db" ];
extraOptions = [
"--network=drone-net"

@@ -119,9 +107,7 @@
DRONE_RUNNER_CAPACITY = "2";
DRONE_RUNNER_NAME = "flora-6-docker-runner";
};
environmentFiles = [
config.age.secrets.drone-secrets.path
];
environmentFiles = [ config.age.secrets.drone-secrets.path ];
};
};
};
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, flake
, ...
}: {
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets.forgejo-actions-runner-token = {
file = "${flake.self}/secrets/forgejo-actions-runner-token.age";
mode = "644";

@@ -20,7 +22,7 @@
isSystemUser = true;
};

users.groups.gitea-runner = {};
users.groups.gitea-runner = { };

systemd.services."gitea-runner-flora\\x2d6".serviceConfig = {
DynamicUser = lib.mkForce false;
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, flake
, ...
}: {
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets.forgejo-database-password = {
file = "${flake.self}/secrets/forgejo-database-password.age";
mode = "600";

@@ -52,7 +54,7 @@
isSystemUser = true;
};

users.groups.gitea = {};
users.groups.gitea = { };

# Expose SSH port only for forgejo SSH
networking.firewall.interfaces.enp35s0.allowedTCPPorts = [ 2223 ];
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, flake
, ...
}: {
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets.grafana-admin-password = {
file = "${flake.self}/secrets/grafana-admin-password.age";
mode = "644";
File diff suppressed because it is too large
@@ -70,9 +70,7 @@
"icon": "external link",
"includeVars": true,
"keepTime": true,
"tags": [
"matrix"
],
"tags": ["matrix"],
"title": "Dashboards",
"type": "dashboards"
}

@@ -4313,9 +4311,7 @@
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"libera.chat "
],
"names": ["libera.chat "],
"prefix": "All except:",
"readOnly": true
}

@@ -4435,9 +4431,7 @@
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"libera.chat"
],
"names": ["libera.chat"],
"prefix": "All except:",
"readOnly": true
}

@@ -13266,9 +13260,7 @@
"refresh": false,
"schemaVersion": 37,
"style": "dark",
"tags": [
"matrix"
],
"tags": ["matrix"],
"templating": {
"list": [
{

@@ -13453,17 +13445,7 @@
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
"time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"]
},
"timezone": "",
"title": "Synapse",
@@ -1,13 +1,15 @@
{ flake
, config
, lib
, pkgs
, ...
}: {
{
flake,
config,
lib,
pkgs,
...
}:
{
options.pub-solar-os.auth = with lib; {
enable = mkEnableOption "Enable keycloak to run on the node";
enable = mkEnableOption "Enable keycloak to run on the node";

realm = mkOption {
realm = mkOption {
description = "Name of the realm";
type = types.str;
default = config.pub-solar-os.networking.domain;

@@ -53,14 +55,13 @@
features = "declarative-user-profile";
};
themes = {
"pub.solar" = flake.inputs.keycloak-theme-pub-solar.legacyPackages.${pkgs.system}.keycloak-theme-pub-solar;
"pub.solar" =
flake.inputs.keycloak-theme-pub-solar.legacyPackages.${pkgs.system}.keycloak-theme-pub-solar;
};
};

services.restic.backups.keycloak-droppie = {
paths = [
"/tmp/keycloak-backup.sql"
];
paths = [ "/tmp/keycloak-backup.sql" ];
timerConfig = {
OnCalendar = "*-*-* 02:00:00 Etc/UTC";
# droppie will be offline if nachtigall misses the timer

@@ -83,9 +84,7 @@
};

services.restic.backups.keycloak-storagebox = {
paths = [
"/tmp/keycloak-backup.sql"
];
paths = [ "/tmp/keycloak-backup.sql" ];
timerConfig = {
OnCalendar = "*-*-* 04:10:00 Etc/UTC";
};
@@ -1,9 +1,11 @@
{ config
, lib
, pkgs
, flake
, ...
}: {
{
config,
lib,
pkgs,
flake,
...
}:
{
services.caddy.virtualHosts = {
"flora-6.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''

@@ -51,16 +53,18 @@
retention_delete_worker_count = 150;
};
schema_config = {
configs = [{
from = "2020-05-15";
store = "boltdb-shipper";
object_store = "filesystem";
schema = "v11";
index = {
prefix = "index_";
period = "24h";
};
}];
configs = [
{
from = "2020-05-15";
store = "boltdb-shipper";
object_store = "filesystem";
schema = "v11";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
};
};

@@ -75,23 +79,29 @@
positions = {
filename = "/tmp/positions.yaml";
};
clients = [{
url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
}];
scrape_configs = [{
job_name = "journal";
journal = {
max_age = "24h";
labels = {
job = "systemd-journal";
host = "flora-6";
clients = [
{
url = "http://127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
}
];
scrape_configs = [
{
job_name = "journal";
journal = {
max_age = "24h";
labels = {
job = "systemd-journal";
host = "flora-6";
};
};
};
relabel_configs = [{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}];
}];
relabel_configs = [
{
source_labels = [ "__journal__systemd_unit" ];
target_label = "unit";
}
];
}
];
};
};
}
@@ -1,8 +1,9 @@
{ flake
, config
, lib
, pkgs
, ...
{
flake,
config,
lib,
pkgs,
...
}:
{
networking.firewall.allowedTCPPorts = [ 25 ];
@@ -1,4 +1,10 @@
{ config, pkgs, flake, inputs, ... }:
{
config,
pkgs,
flake,
inputs,
...
}:

{
age.secrets."mastodon-secret-key-base" = {

@@ -64,9 +70,7 @@
mediaAutoRemove = {
olderThanDays = 7;
};
extraEnvFiles = [
"/run/agenix/mastodon-extra-env-secrets"
];
extraEnvFiles = [ "/run/agenix/mastodon-extra-env-secrets" ];
extraConfig = {
WEB_DOMAIN = "mastodon.${config.pub-solar-os.networking.domain}";
# Defined in ./opensearch.nix

@@ -97,9 +101,7 @@
};

services.restic.backups.mastodon-droppie = {
paths = [
"/tmp/mastodon-backup.sql"
];
paths = [ "/tmp/mastodon-backup.sql" ];
timerConfig = {
OnCalendar = "*-*-* 02:00:00 Etc/UTC";
# droppie will be offline if nachtigall misses the timer

@@ -122,9 +124,7 @@
};

services.restic.backups.mastodon-storagebox = {
paths = [
"/tmp/mastodon-backup.sql"
];
paths = [ "/tmp/mastodon-backup.sql" ];
timerConfig = {
OnCalendar = "*-*-* 04:05:00 Etc/UTC";
};
@@ -1,15 +1,18 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
# Find element in list config.services.matrix-synapse.settings.listeners.*.resources
# that sets names = "client"
nameHasClient = name: name == "client";
resourceHasClient = resource: builtins.any nameHasClient resource.names;
listenerWithClient = lib.findFirst
(listener:
builtins.any resourceHasClient listener.resources)
(throw "Found no matrix-synapse.settings.listeners.*.resources.*.names containing string client")
config.services.matrix-synapse.settings.listeners
;
listenerWithClient =
lib.findFirst (listener: builtins.any resourceHasClient listener.resources)
(throw "Found no matrix-synapse.settings.listeners.*.resources.*.names containing string client")
config.services.matrix-synapse.settings.listeners;
synapseClientPort = "${toString listenerWithClient.port}";
in
{

@@ -46,7 +49,11 @@ in
};
metrics = {
enabled = true;
remoteUserAgeBuckets = [ "1h" "1d" "1w" ];
remoteUserAgeBuckets = [
"1h"
"1d"
"1w"
];
};
provisioning = {
enabled = false;

@@ -122,4 +129,3 @@ in
};
};
}
@@ -1,4 +1,10 @@
{ flake, lib, config, pkgs, ... }:
{
flake,
lib,
config,
pkgs,
...
}:
{
age.secrets."matrix-mautrix-telegram-env-file" = {
file = "${flake.self}/secrets/matrix-mautrix-telegram-env-file.age";
@@ -1,4 +1,9 @@
{ flake, config, pkgs, ... }:
{
flake,
config,
pkgs,
...
}:
let
publicDomain = "matrix.${config.pub-solar-os.networking.domain}";
serverDomain = "${config.pub-solar-os.networking.domain}";

@@ -40,22 +45,16 @@ in
};
listeners = [
{
bind_addresses = [
"127.0.0.1"
];
bind_addresses = [ "127.0.0.1" ];
port = 8008;
resources = [
{
compress = true;
names = [
"client"
];
names = [ "client" ];
}
{
compress = false;
names = [
"federation"
];
names = [ "federation" ];
}
];
tls = false;

@@ -63,29 +62,23 @@ in
x_forwarded = true;
}
{
bind_addresses = [
"127.0.0.1"
];
bind_addresses = [ "127.0.0.1" ];
port = 8012;
resources = [
{
names = [
"metrics"
];
}
];
resources = [ { names = [ "metrics" ]; } ];
tls = false;
type = "metrics";
}
];

account_threepid_delegates.msisdn = "";
alias_creation_rules = [{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}];
alias_creation_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];
allow_guest_access = false;
allow_public_rooms_over_federation = true;
allow_public_rooms_without_auth = false;

@@ -152,7 +145,7 @@ in
};
per_user = {
burst_count = 5;
per_second = 0.003;
per_second = 3.0e-3;
};
};
rc_joins = {

@@ -162,7 +155,7 @@ in
};
remote = {
burst_count = 10;
per_second = 0.01;
per_second = 1.0e-2;
};
};
rc_login = {

@@ -194,17 +187,19 @@ in
registrations_require_3pid = [ "email" ];
report_stats = false;
require_auth_for_profile_requests = false;
room_list_publication_rules = [{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}];
room_list_publication_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];

signing_key_path = "/run/agenix/matrix-synapse-signing-key";

stream_writers = { };
trusted_key_servers = [{ server_name = "matrix.org"; }];
trusted_key_servers = [ { server_name = "matrix.org"; } ];
suppress_key_server_warning = true;

turn_allow_guests = false;

@@ -276,9 +271,7 @@ in
"redis"
];

plugins = [
config.services.matrix-synapse.package.plugins.matrix-synapse-shared-secret-auth
];
plugins = [ config.services.matrix-synapse.package.plugins.matrix-synapse-shared-secret-auth ];

sliding-sync = {
enable = true;
@@ -2,7 +2,7 @@ version: 1

formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
format: "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s"

filters:
context:
@@ -1,8 +1,9 @@
{ flake
, config
, lib
, pkgs
, ...
{
flake,
config,
lib,
pkgs,
...
}:
let
localSettingsPHP = pkgs.writeScript "LocalSettings.php" ''

@@ -201,7 +202,9 @@ in
group = "mediawiki";
inherit uid;
};
users.groups.mediawiki = { inherit gid; };
users.groups.mediawiki = {
inherit gid;
};

virtualisation = {
oci-containers = {

@@ -212,9 +215,7 @@ in
user = "1000:${builtins.toString gid}";
autoStart = true;

ports = [
"127.0.0.1:8293:80"
];
ports = [ "127.0.0.1:8293:80" ];

extraOptions = [
"--add-host=host.docker.internal:host-gateway"
@@ -1,7 +1,8 @@
{ config
, pkgs
, flake
, ...
{
config,
pkgs,
flake,
...
}:
{
age.secrets."nextcloud-secrets" = {
@@ -1,7 +1,4 @@
{
config,
...
}:
{ config, ... }:

let
objStorHost = "link.tardigradeshare.io";
@@ -30,7 +30,12 @@ in
};

locations."@proxy" = {
proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString(cfg.webPort)}");
proxyPass = (
if cfg.enableUnixSocket then
"http://unix:/run/mastodon-web/web.socket"
else
"http://127.0.0.1:${toString (cfg.webPort)}"
);
proxyWebsockets = true;
};

@@ -45,13 +50,12 @@ in
extraConfig = ''
least_conn;
'';
servers = builtins.listToAttrs
(map
(i: {
name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
value = { };
})
(lib.range 1 cfg.streamingProcesses));
servers = builtins.listToAttrs (
map (i: {
name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
value = { };
}) (lib.range 1 cfg.streamingProcesses)
);
};
};
}
@@ -1,4 +1,9 @@
{ lib, pkgs, config, ... }:
{
lib,
pkgs,
config,
...
}:
let
commonHeaders = ''
add_header Permissions-Policy interest-cohort=() always;

@@ -71,9 +76,7 @@ in
"chat.${config.pub-solar-os.networking.domain}" = {
forceSSL = true;
enableACME = true;
root = pkgs.element-web.override {
conf = clientConfig;
};
root = pkgs.element-web.override { conf = clientConfig; };
};

"stickers.chat.${config.pub-solar-os.networking.domain}" = {

@@ -126,16 +129,18 @@ in
serverName = "matrix.${config.pub-solar-os.networking.domain}";
forceSSL = lib.mkDefault true;
enableACME = lib.mkDefault true;
listen = [{
port = 8448;
addr = "0.0.0.0";
ssl = true;
}
listen = [
{
port = 8448;
addr = "0.0.0.0";
ssl = true;
}
{
port = 8448;
addr = "[::]";
ssl = true;
}];
}
];
root = "/dev/null";
extraConfig = ''
server_tokens off;

@@ -159,4 +164,3 @@ in
};
networking.firewall.allowedTCPPorts = [ 8448 ];
}
@@ -1,4 +1,5 @@
{ pkgs, lib, ... }: {
{ pkgs, lib, ... }:
{
default_server_config = {
"m.homeserver" = {
base_url = "https://matrix.pub.solar";

@@ -35,10 +36,12 @@
homeUrl = "";
};
branding = {
auth_footer_links = [{
text = "Privacy";
url = "https://pub.solar/privacy";
}];
auth_footer_links = [
{
text = "Privacy";
url = "https://pub.solar/privacy";
}
];
# FUTUREWORK: Replace with pub.solar logo
auth_header_logo_url = "themes/element/img/logos/element-logo.svg";
};
@@ -1,13 +1,16 @@
{ config, flake, lib, ... }:
{
config,
flake,
lib,
...
}:
let
# Find element in list config.services.matrix-synapse.settings.listeners
# that sets type = "metrics"
listenerWithMetrics = lib.findFirst
(listener:
listener.type == "metrics")
(throw "Found no matrix-synapse.settings.listeners.*.type containing string metrics")
config.services.matrix-synapse.settings.listeners
;
listenerWithMetrics =
lib.findFirst (listener: listener.type == "metrics")
(throw "Found no matrix-synapse.settings.listeners.*.type containing string metrics")
config.services.matrix-synapse.settings.listeners;
synapseMetricsPort = "${toString listenerWithMetrics.port}";
in
{

@@ -22,7 +25,7 @@ in
addSSL = true;
basicAuthFile = "${config.age.secrets.nachtigall-metrics-nginx-basic-auth.path}";
locations."/metrics" = {
proxyPass = "http://127.0.0.1:${toString(config.services.prometheus.exporters.node.port)}";
proxyPass = "http://127.0.0.1:${toString (config.services.prometheus.exporters.node.port)}";
};
locations."/_synapse/metrics" = {
proxyPass = "http://127.0.0.1:${synapseMetricsPort}";
@@ -1,9 +1,7 @@
{ ... }:

{
systemd.tmpfiles.rules = [
"d '/srv/www/miom.space' 0750 hakkonaut hakkonaut - -"
];
systemd.tmpfiles.rules = [ "d '/srv/www/miom.space' 0750 hakkonaut hakkonaut - -" ];

services.nginx.virtualHosts = {
"www.miom.space" = {
@@ -1,5 +1,5 @@
{ lib, config, ... }:
{
lib, config, ... }: {
systemd.tmpfiles.rules = [
"d '/srv/www/${config.pub-solar-os.networking.domain}' 0750 hakkonaut hakkonaut - -"
];

@@ -54,7 +54,8 @@
};

# Responsible disclosure information https://securitytxt.org/
"/.well-known/security.txt" = let
"/.well-known/security.txt" =
let
securityTXT = lib.lists.foldr (a: b: a + "\n" + b) "" [
"Contact: mailto:admins@pub.solar"
"Expires: 2025-01-04T23:00:00.000Z"

@@ -62,12 +63,13 @@
"Preferred-Languages: en,de"
"Canonical: https://${config.pub-solar-os.networking.domain}/.well-known/security.txt"
];
in {
extraConfig = ''
add_header Content-Type text/plain;
return 200 '${securityTXT}';
'';
};
in
{
extraConfig = ''
add_header Content-Type text/plain;
return 200 '${securityTXT}';
'';
};

"/satzung" = {
extraConfig = ''
@@ -1,8 +1,9 @@
{ config
, lib
, pkgs
, self
, ...
{
config,
lib,
pkgs,
self,
...
}:
let
acmeEmailAddress = config.pub-solar-os.adminEmail;

@@ -38,5 +39,8 @@ in
defaults.email = acmeEmailAddress;
};

networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedTCPPorts = [
80
443
];
}
@@ -1,29 +1,31 @@
{ config
, lib
, pkgs
, self
, flake
, ...
}: let
{
config,
lib,
pkgs,
self,
flake,
...
}:
let
configPy = pkgs.writeText "obs-portal-config.py" ''
DEBUG = False
VERBOSE = DEBUG
AUTO_RESTART = DEBUG
LEAN_MODE = False
FRONTEND_URL = None
FRONTEND_HTTPS = True
FRONTEND_DIR = "../frontend/build/"
FRONTEND_CONFIG = {
"imprintUrl": "${config.pub-solar-os.imprintUrl}",
"privacyPolicyUrl": "${config.pub-solar-os.privacyPolicyUrl}",
"mapHome": {"zoom": 12, "latitude": 50.93, "longitude": 6.97},
"banner": {
"text": "This is an installation serving the Cologne/Bonn region run for Team OBSKöln by pub.solar n.e.V.",
"style": "info"
},
}
TILES_FILE = None
ADDITIONAL_CORS_ORIGINS = None
DEBUG = False
VERBOSE = DEBUG
AUTO_RESTART = DEBUG
LEAN_MODE = False
FRONTEND_URL = None
FRONTEND_HTTPS = True
FRONTEND_DIR = "../frontend/build/"
FRONTEND_CONFIG = {
"imprintUrl": "${config.pub-solar-os.imprintUrl}",
"privacyPolicyUrl": "${config.pub-solar-os.privacyPolicyUrl}",
"mapHome": {"zoom": 12, "latitude": 50.93, "longitude": 6.97},
"banner": {
"text": "This is an installation serving the Cologne/Bonn region run for Team OBSKöln by pub.solar n.e.V.",
"style": "info"
},
}
TILES_FILE = None
ADDITIONAL_CORS_ORIGINS = None
'';

env = {

@@ -41,7 +43,8 @@ ADDITIONAL_CORS_ORIGINS = None
OBS_DATA_DIR = "/data";
OBS_PROXIES_COUNT = "1";
};
in {
in
{
age.secrets.obs-portal-env = {
file = "${flake.self}/secrets/obs-portal-env.age";
mode = "600";

@@ -59,8 +62,16 @@ in {
in
{
serviceConfig.Type = "oneshot";
before = [ "docker-obs-portal.service" "docker-obs-portal-db.service" "docker-obs-portal-worker.service" ];
requiredBy = [ "docker-obs-portal.service" "docker-obs-portal-db.service" "docker-obs-portal-worker.service" ];
before = [
"docker-obs-portal.service"
"docker-obs-portal-db.service"
"docker-obs-portal-worker.service"
];
requiredBy = [
"docker-obs-portal.service"
"docker-obs-portal-db.service"
"docker-obs-portal-worker.service"
];
script = ''
${dockerBin} network inspect obs-portal-net >/dev/null 2>&1 || ${dockerBin} network create obs-portal-net --subnet 172.20.0.0/24
'';

@@ -101,16 +112,17 @@ in {
"/var/lib/obs-portal/pbf/:/pbf"
];

extraOptions = [
"--network=obs-portal-net"
];
extraOptions = [ "--network=obs-portal-net" ];
};

containers."obs-portal-worker" = {
image = "git.pub.solar/pub-solar/obs-portal:latest";
autoStart = true;

cmd = [ "python" "tools/process_track.py" ];
cmd = [
"python"
"tools/process_track.py"
];

environment = env;
environmentFiles = [ config.age.secrets.obs-portal-env.path ];

@@ -120,9 +132,7 @@ in {
"/var/lib/obs-portal${env.OBS_DATA_DIR}:${env.OBS_DATA_DIR}"
];

extraOptions = [
"--network=obs-portal-net"
];
extraOptions = [ "--network=obs-portal-net" ];
};

containers."obs-portal-db" = {

@@ -131,13 +141,9 @@ in {

environmentFiles = [ config.age.secrets.obs-portal-database-env.path ];

volumes = [
"/var/lib/postgres-obs-portal/data:/var/lib/postgresql/data"
];
volumes = [ "/var/lib/postgres-obs-portal/data:/var/lib/postgresql/data" ];

extraOptions = [
"--network=obs-portal-net"
];
extraOptions = [ "--network=obs-portal-net" ];
};
};
};
|
@@ -1,9 +1,11 @@
-{ flake
-, config
-, lib
-, pkgs
-, ...
-}: {
+{
+  flake,
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+{
   services.nginx.virtualHosts."stream.${config.pub-solar-os.networking.domain}" = {
     enableACME = true;
     forceSSL = true;
@@ -7,11 +7,7 @@
   };

   systemd.services.postgresql = {
-    after = [
-      "var-lib-postgresql.mount"
-    ];
-    requisite = [
-      "var-lib-postgresql.mount"
-    ];
+    after = [ "var-lib-postgresql.mount" ];
+    requisite = [ "var-lib-postgresql.mount" ];
   };
 }
@@ -1,6 +1,5 @@
-{ config
-, ...
-}: {
+{ config, ... }:
+{
   services.prometheus = {
     exporters = {
       node = {
@@ -1,9 +1,11 @@
-{ config
-, lib
-, pkgs
-, flake
-, ...
-}: {
+{
+  config,
+  lib,
+  pkgs,
+  flake,
+  ...
+}:
+{
   age.secrets.nachtigall-metrics-prometheus-basic-auth-password = {
     file = "${flake.self}/secrets/nachtigall-metrics-prometheus-basic-auth-password.age";
     mode = "600";
@@ -27,12 +29,14 @@
     scrapeConfigs = [
       {
         job_name = "node-exporter-http";
-        static_configs = [{
-          targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
-          labels = {
-            instance = "flora-6";
-          };
-        }];
+        static_configs = [
+          {
+            targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
+            labels = {
+              instance = "flora-6";
+            };
+          }
+        ];
       }
       {
         job_name = "node-exporter-https";
@@ -42,12 +46,14 @@
           username = "hakkonaut";
           password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
         };
-        static_configs = [{
-          targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
-          labels = {
-            instance = "nachtigall";
-          };
-        }];
+        static_configs = [
+          {
+            targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
+            labels = {
+              instance = "nachtigall";
+            };
+          }
+        ];
       }
       {
         job_name = "matrix-synapse";
@@ -57,12 +63,14 @@
           username = "hakkonaut";
           password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
         };
-        static_configs = [{
-          targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
-          labels = {
-            instance = "nachtigall";
-          };
-        }];
+        static_configs = [
+          {
+            targets = [ "nachtigall.${config.pub-solar-os.networking.domain}" ];
+            labels = {
+              instance = "nachtigall";
+            };
+          }
+        ];
       }
     ];
   };
@@ -1,9 +1,11 @@
-{ config
-, lib
-, pkgs
-, flake
-, ...
-}: {
+{
+  config,
+  lib,
+  pkgs,
+  flake,
+  ...
+}:
+{
   age.secrets.nachtigall-metrics-prometheus-basic-auth-password = {
     file = "${flake.self}/secrets/nachtigall-metrics-prometheus-basic-auth-password.age";
     mode = "600";
@@ -20,27 +22,33 @@
     positions = {
       filename = "/tmp/positions.yaml";
     };
-    clients = [{
-      url = "https://flora-6.${config.pub-solar-os.networking.domain}/loki/api/v1/push";
-      basic_auth = {
-        username = "hakkonaut";
-        password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
-      };
-    }];
-    scrape_configs = [{
-      job_name = "journal";
-      journal = {
-        max_age = "24h";
-        labels = {
-          job = "systemd-journal";
-          host = "nachtigall";
-        };
-      };
-      relabel_configs = [{
-        source_labels = [ "__journal__systemd_unit" ];
-        target_label = "unit";
-      }];
-    }];
+    clients = [
+      {
+        url = "https://flora-6.${config.pub-solar-os.networking.domain}/loki/api/v1/push";
+        basic_auth = {
+          username = "hakkonaut";
+          password_file = "${config.age.secrets.nachtigall-metrics-prometheus-basic-auth-password.path}";
+        };
+      }
+    ];
+    scrape_configs = [
+      {
+        job_name = "journal";
+        journal = {
+          max_age = "24h";
+          labels = {
+            job = "systemd-journal";
+            host = "nachtigall";
+          };
+        };
+        relabel_configs = [
+          {
+            source_labels = [ "__journal__systemd_unit" ];
+            target_label = "unit";
+          }
+        ];
+      }
+    ];
   };
 };
 }
@@ -1,8 +1,9 @@
-{ flake
-, config
-, lib
-, pkgs
-, ...
+{
+  flake,
+  config,
+  lib,
+  pkgs,
+  ...
 }:
 {
   age.secrets.searx-environment = {
@@ -64,9 +65,18 @@
   };

   engine = [
-    { engine = "startpage"; disabled = false; }
-    { engine = "yahoo"; disabled = false; }
-    { engine = "tagesschau"; disabled = false; }
+    {
+      engine = "startpage";
+      disabled = false;
+    }
+    {
+      engine = "yahoo";
+      disabled = false;
+    }
+    {
+      engine = "tagesschau";
+      disabled = false;
+    }
   ];

   ui = {
@@ -1,4 +1,4 @@
-{ config,... }:
+{ config, ... }:
 {
   services.tmate-ssh-server = {
     enable = true;
@@ -1,4 +1,5 @@
-{ flake, config, ... }: {
+{ flake, config, ... }:
+{
   # From https://nixos.wiki/wiki/ZFS#Unlock_encrypted_zfs_via_ssh_on_boot
   boot.initrd.network = {
     enable = true;
@@ -1,24 +1,27 @@
-{ self
-, inputs
-, ...
-}: {
+{ self, inputs, ... }:
+{
   flake = {
     nixosModules = rec {
-      overlays = ({ ... }: {
-        nixpkgs.overlays = [
-          (final: prev:
-            let
-              unstable = import inputs.unstable {
-                system = prev.system;
-              };
-            in
-            {
-              forgejo-runner = unstable.forgejo-runner;
-              element-themes = prev.callPackage ./pkgs/element-themes { inherit (inputs) element-themes; };
-              element-stickerpicker = prev.callPackage ./pkgs/element-stickerpicker { inherit (inputs) element-stickers maunium-stickerpicker; };
-            })
-        ];
-      });
+      overlays = (
+        { ... }:
+        {
+          nixpkgs.overlays = [
+            (
+              final: prev:
+              let
+                unstable = import inputs.unstable { system = prev.system; };
+              in
+              {
+                forgejo-runner = unstable.forgejo-runner;
+                element-themes = prev.callPackage ./pkgs/element-themes { inherit (inputs) element-themes; };
+                element-stickerpicker = prev.callPackage ./pkgs/element-stickerpicker {
+                  inherit (inputs) element-stickers maunium-stickerpicker;
+                };
+              }
+            )
+          ];
+        }
+      );
     };
   };
 }
@@ -1,4 +1,8 @@
-{ stdenvNoCC, element-stickers, maunium-stickerpicker }:
+{
+  stdenvNoCC,
+  element-stickers,
+  maunium-stickerpicker,
+}:
 stdenvNoCC.mkDerivation {
   src = maunium-stickerpicker;
   name = "element-stickers";
@@ -1,4 +1,8 @@
-{ stdenvNoCC, jq, element-themes }:
+{
+  stdenvNoCC,
+  jq,
+  element-themes,
+}:
 stdenvNoCC.mkDerivation {
   src = element-themes;
   name = "element-themes";
@@ -4,15 +4,13 @@ let
   nachtigall-host = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIP7G0ufi+MNvaAZLDgpieHrABPGN7e/kD5kMFwSk4ABj root@nachtigall";
   flora-6-host = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGP1InpTBN4AlF/4V8HHumAMLJzeO8DpzjUv9Co/+J09 root@flora-6";

-  adminKeys = builtins.foldl' (keys: login: keys ++ (builtins.attrValues login.secretEncryptionKeys)) [] (builtins.attrValues admins);
+  adminKeys = builtins.foldl' (
+    keys: login: keys ++ (builtins.attrValues login.secretEncryptionKeys)
+  ) [ ] (builtins.attrValues admins);

-  nachtigallKeys = [
-    nachtigall-host
-  ];
+  nachtigallKeys = [ nachtigall-host ];

-  flora6Keys = [
-    flora-6-host
-  ];
+  flora6Keys = [ flora-6-host ];
 in
 {
   # ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBB5XaH02a6+TchnyQED2VwaltPgeFCbildbE2h6nF5e root@nachtigall
@@ -63,7 +61,8 @@ in
   "grafana-smtp-password.age".publicKeys = flora6Keys ++ adminKeys;

   "nachtigall-metrics-nginx-basic-auth.age".publicKeys = nachtigallKeys ++ adminKeys;
-  "nachtigall-metrics-prometheus-basic-auth-password.age".publicKeys = flora6Keys ++ nachtigallKeys ++ adminKeys;
+  "nachtigall-metrics-prometheus-basic-auth-password.age".publicKeys =
+    flora6Keys ++ nachtigallKeys ++ adminKeys;

   "obs-portal-env.age".publicKeys = nachtigallKeys ++ adminKeys;
   "obs-portal-database-env.age".publicKeys = nachtigallKeys ++ adminKeys;
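Aside: the `adminKeys` fold in the hunk above flattens every admin's `secretEncryptionKeys` attribute set into a single list, so each secret file can be encrypted to all admin keys plus the relevant host keys. A sketch with hypothetical stand-in data (the keys and logins below are illustrative, not the real values), runnable with `nix eval --expr`:

```
let
  # illustrative stand-ins for the real admins attrset
  admins = {
    alice = { secretEncryptionKeys = { laptop = "age1alice..."; }; };
    bob = { secretEncryptionKeys = { yubikey = "age1bob..."; }; };
  };
in
# builtins.attrValues returns values sorted by attribute name;
# the fold appends each login's key values to the accumulator
builtins.foldl' (
  keys: login: keys ++ (builtins.attrValues login.secretEncryptionKeys)
) [ ] (builtins.attrValues admins)
# => [ "age1alice..." "age1bob..." ]
```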
@@ -4,7 +4,8 @@
   lib,
   config,
   ...
-}: {
+}:
+{
   name = "website";

   nodes.nachtigall-test = self.nixosConfigurations.nachtigall-test;