Compare commits

1 commit

7ca53774ce  wip: matrix moderation with draupnir  (2024-07-04 13:08:20 +02:00)
201 changed files with 2300 additions and 7170 deletions
.editorconfig
.forgejo/workflows
docs
flake.lock
flake.nix
hosts
logins
modules


@@ -20,8 +20,41 @@ indent_style = unset
indent_size = unset
[{.*,secrets}/**]
end_of_line = false
insert_final_newline = false
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
charset = unset
indent_style = unset
indent_size = unset
[*.rom]
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
charset = unset
indent_style = unset
indent_size = unset
[*.py]
indent_size = 4
[*.md]
max_line_length = off
trim_trailing_whitespace = false
# Ignore diffs/patches
[*.{diff,patch}]
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
indent_size = unset
charset = unset
indent_style = unset
indent_size = unset
[{.*,secrets}/**]
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
charset = unset
indent_style = unset


@@ -10,7 +10,7 @@ jobs:
- name: Check formatting
run: |
nix --accept-flake-config --access-tokens '' develop --command treefmt --ci
nix --accept-flake-config --access-tokens '' develop --command treefmt --fail-on-change
- name: Run flake checks
run: |
@@ -18,7 +18,14 @@ jobs:
# Prevent cache garbage collection by creating GC roots
mkdir -p /var/lib/gitea-runner/tankstelle/.local/state/nix/results
sed -i 's/virtualisation.cores .*/virtualisation.cores = 16;/' tests/keycloak.nix
sed -i 's/virtualisation.memorySize .*/virtualisation.memorySize = 16384;/' tests/keycloak.nix
# 1 eval-worker needs about 13GB of memory
nix --accept-flake-config --access-tokens '' develop --command nix-fast-build --no-nom --skip-cached --systems "x86_64-linux" --max-jobs 10 --eval-workers 2 --out-link /var/lib/gitea-runner/tankstelle/.local/state/nix/results/nix-fast-build
for target in $(nix flake show --json --all-systems | jq '
.["nixosConfigurations"] |
to_entries[] |
.key
' | tr -d '"'
); do
nix --print-build-logs --verbose --accept-flake-config --access-tokens '' \
build --out-link /var/lib/gitea-runner/tankstelle/.local/state/nix/results/"$target" ".#nixosConfigurations.${target}.config.system.build.toplevel"
done
nix --print-build-logs --verbose --accept-flake-config --access-tokens '' flake check


@@ -1,7 +0,0 @@
# pub.solar documentation
This directory holds a collection of notes and documentation for pub.solar admins.
### Systems Overview
To get a first overview of existing pub.solar systems, please see the [pub.solar systems overview](./systems-overview.md).


@@ -28,18 +28,18 @@ People with admin access to the infrastructure are added to [`logins/admins.nix`
SSH is not reachable from the open internet. Instead, SSH Port 22 is protected by a wireguard VPN network. Thus, to get root access on the servers, at least two pieces of information have to be added to the admins config:
1. **SSH Public key**: self-explanatory. Add your public key to your user attrset under `sshPubKeys`.
2. **Wireguard device**: each wireguard device has two parts: the public key and the IP addresses it should have in the wireguard network. The pub.solar wireguard network uses the subnets `10.7.6.0/24` and `fd00:fae:fae:fae:fae::/80`. To add your device, it's best to choose a free number between 200 and 255 and use that in both the ipv4 and ipv6 ranges: `10.7.6.<ip-address>/32` `fd00:fae:fae:fae:fae:<ip-address>::/96`. For more information on how to generate keypairs, see [the NixOS Wireguard docs](https://nixos.wiki/wiki/WireGuard#Generate_keypair).
2. **Wireguard device**: each wireguard device has two parts: the public key and the IP addresses it should have in the wireguard network. The pub.solar wireguard network spans `10.7.6.0/24` and `fd00:fae:fae:fae:fae::/80`. To add your device, it's best to choose a free number between 200 and 255 and use that in both the ipv4 and ipv6 ranges: `10.7.6.<ip-address>/32` `fd00:fae:fae:fae:fae:<ip-address>::/96`. For more information on how to generate keypairs, see [the NixOS Wireguard docs](https://nixos.wiki/wiki/WireGuard#Generate_keypair).
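For reference, generating such a keypair with the standard `wg` tool looks like this (a minimal sketch; the file names are arbitrary):
```
# keep the private key on your device; only the public key goes into logins/admins.nix
umask 077
wg genkey | tee wg-private.key | wg pubkey > wg-public.key
cat wg-public.key
```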
One can access our hosts using this domain scheme:
```
ssh <unix-username>@<hostname>.wg.pub.solar
ssh barkeeper@<hostname>.wg.pub.solar
```
So, for example for `nachtigall`:
```
ssh teutat3s@nachtigall.wg.pub.solar
ssh barkeeper@nachtigall.wg.pub.solar
```
Example NixOS snippet for WireGuard client config
@@ -63,6 +63,12 @@ Example NixOS snippet for WireGuard client config
#endpoint = "138.201.80.102:51820";
persistentKeepalive = 15;
}
{ # flora-6.pub.solar
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [ "10.7.6.2/32" "fd00:fae:fae:fae:fae:2::/96" ];
endpoint = "80.71.153.210:51820";
persistentKeepalive = 15;
}
{ # metronom.pub.solar
publicKey = "zOSYGO7MfnOOUnzaTcWiKRQM0qqxR3JQrwx/gtEtHmo=";
allowedIPs = [ "10.7.6.3/32" "fd00:fae:fae:fae:fae:3::/96" ];
@@ -79,39 +85,6 @@ Example NixOS snippet for WireGuard client config
#endpoint = "80.244.242.5:51820";
persistentKeepalive = 15;
}
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
{
# delite.pub.solar
publicKey = "ZT2qGWgMPwHRUOZmTQHWCRX4m14YwOsiszjsA5bpc2k=";
allowedIPs = [
"10.7.6.6/32"
"fd00:fae:fae:fae:fae:6::/96"
];
#endpoint = "5.255.119.132:51820";
endpoint = "[2a04:52c0:124:9d8c::2]:51820";
persistentKeepalive = 15;
}
{
# blue-shell.pub.solar
publicKey = "bcrIpWrKc1M+Hq4ds3aN1lTaKE26f2rvXhd+93QrzR8=";
allowedIPs = [
"10.7.6.7/32"
"fd00:fae:fae:fae:fae:7::/96"
];
#endpoint = "194.13.83.205:51820";
endpoint = "[2a03:4000:43:24e::1]:51820";
persistentKeepalive = 15;
}
];
};
};


@@ -1,36 +0,0 @@
# Backups
We use [Restic](https://restic.readthedocs.io/en/stable/) to create backups and push them to two repositories.
Check `./modules/backups.nix` and `./hosts/nachtigall/backups.nix` for working examples.
### Hetzner Storagebox
- Uses SFTP for transfer of backups
Adding a new host SSH public key to the storagebox:
First, [SSH to nachtigall](./administrative-access.md#ssh-access), then become root and add the new SSH public key
```
sudo -i
echo '<ssh-public-key>' | ssh -p23 u377325@u377325.your-storagebox.de install-ssh-key
```
[Link to Hetzner storagebox docs](https://docs.hetzner.com/robot/storage-box/backup-space-ssh-keys).
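To verify that backups are arriving, restic can list the snapshots in a repository directly over SFTP. A minimal sketch, assuming the repository lives in a `<hostname>-backups` directory on the storagebox (check `./modules/backups.nix` for the actual repository path and password file):
```
# repository path and password file location are assumptions
restic -r sftp://u377325@u377325.your-storagebox.de:23/<hostname>-backups \
  --password-file <path-to-restic-password-file> snapshots
```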
### Garage S3 buckets
- Uses S3 for transfer of backups
- One bucket per host, e.g. `nachtigall-backups`, `metronom-backups`
To start transferring backups from a new host, this is how to create a new bucket:
First, [SSH to trinkgenossin](./administrative-access.md#ssh-access), then use the `garage` CLI to create a new key and bucket:
```
export GARAGE_RPC_SECRET=<secret-in-keepass>
garage bucket create <hostname>-backups
garage key create <hostname>-backups-key
garage bucket allow <hostname>-backups --read --write --key <hostname>-backups-key
```
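To retrieve the credentials of the new key afterwards (e.g. for the new host's backup configuration), `garage key info` can display them; the `--show-secret` flag is an assumption based on recent garage releases:
```
garage key info <hostname>-backups-key --show-secret
```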


@@ -1,55 +0,0 @@
# Cachix usage
URL: https://pub-solar.cachix.org
Requirements:
- [Install cachix](https://docs.cachix.org/installation)
- Optional: To push to the cache, you need to set `CACHIX_AUTH_TOKEN` in your environment. To generate one for you, follow the [Getting Started](https://docs.cachix.org/getting-started#authenticating) docs and login with your GitHub account.
- Add our binary cache [to your nix config](https://docs.cachix.org/faq#cachix-use-effects). To add the pub-solar cache, run:
```
cachix use pub-solar
```
Example to build and push a custom package of a host in this flake (e.g. after creating an overlay):
```
nix build --json -f . '.#nixosConfigurations.nachtigall.pkgs.keycloak^*' \
| jq -r '.[].outputs | to_entries[].value' \
| cachix push pub-solar
```
Example to build and push a package in the `nixpkgs` repo:
```
cd nixpkgs
nix build --json -f . 'pkgs.lix^*' \
| jq -r '.[].outputs | to_entries[].value' \
| cachix push pub-solar
```
Checking if a package has been correctly pushed to the cache:
```
nix build --json '/nix/store/f76xi83z4xk9sn6pbh38rh97yvqhb5m0-noto-fonts-color-emoji-png-2.042.drv^*' | jq -r '.[].outputs | to_entries[].value' | cachix push pub-solar
Pushing 1 paths (0 are already present) using zstd to cache pub-solar ⏳
✓ /nix/store/xpgpi84765dxqja3gd5pldj49xx2v0xl-noto-fonts-color-emoji-png-2.042 (10.30 MiB)
All done.
curl -I https://pub-solar.cachix.org/xpgpi84765dxqja3gd5pldj49xx2v0xl.narinfo
HTTP/2 200
date: Mon, 26 Aug 2024 09:31:10 GMT
content-type: text/x-nix-narinfo
traceparent: 00-b99db37cc9c2581b8d226cdf81e54507-794fc49193659c03-01
tracestate:
cache-control: public, max-age=14400
last-modified: Mon, 26 Aug 2024 09:31:10 GMT
cf-cache-status: EXPIRED
report-to: {"endpoints":[{"url":"https:\/\/a.nel.cloudflare.com\/report\/v4?s=A67KGsCIsYjoFdvndxJ0rkmb7BZ5ztIpm8WUJKAiUPRVWvbYeXU9gU27P7zryiUtArbwrLzHhhMija0yyXk0kwNa3suz8gNzKK6z1CX1FWDZiiP07rnq7zAg8nZbSBiEU%2FZrU9nSrR6mhuL9ihbmW1Hf"}],"group":"cf-nel","max_age":604800}
nel: {"success_fraction":0,"report_to":"cf-nel","max_age":604800}
server: cloudflare
cf-ray: 8b92ceab0d19c80e-DUS
```


@@ -34,27 +34,13 @@ Docs: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server
### Mastodon
```
mkdir /tmp/tootctl
sudo chown mastodon /tmp/tootctl
cd /tmp/tootctl
sudo -u mastodon mastodon-tootctl accounts delete --email <mail-address>
rm -r /tmp/tootctl
```
Docs: https://docs.joinmastodon.org/admin/tootctl/#accounts-delete
### Forgejo
Make sure you have access to the gitea/forgejo command:
```
nix shell nixpkgs#forgejo
```
Then, delete the user:
```
sudo -u gitea gitea admin user delete --config /var/lib/forgejo/custom/conf/app.ini --purge --email <mail-address>
```
@@ -64,37 +50,11 @@ Docs: https://forgejo.org/docs/latest/admin/command-line/#delete
### Matrix
```
curl --header "Authorization: Bearer <admin-access-token>" --request POST http://127.0.0.1:8008/_synapse/admin/v1/deactivate/@<username>:pub.solar --data '{"erase": true}'
curl --header "Authorization: Bearer <admin-access-token>" --request POST http://172.18.0.3:8008/_synapse/admin/v1/deactivate/@<username>:pub.solar --data '{"erase": true}'
```
Docs: https://element-hq.github.io/synapse/latest/admin_api/user_admin_api.html#deactivate-account
The authentication token should be in the keepass. If it is expired, you can get a new one by running the following:
```
# get full path to mas-cli command with current --config flags from
# sudo systemctl cat matrix-authentication-service
sudo -u matrix-authentication-service mas-cli --config nix-store-config --config /run/agenix/matrix-authentication-service-secret-config.yml manage issue-compatibility-token --yes-i-want-to-grant-synapse-admin-privileges crew
```
Docs: https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#deactivate-account
### OpenBikeSensor
Not implemented, see: https://github.com/openbikesensor/portal/issues/95
## Notifying the user
Make sure to send an e-mail to the specified address notifying the user of the account's deletion.
You can use this template:
```
Hello,
Your pub.solar ID has been deactivated. Associated data in pub.solar services has been deleted.
Please note that the username is now blocked to prevent impersonation attempts.
Best,
@<name> for the pub.solar crew
```


@@ -7,29 +7,22 @@ be manually deployed.
To deploy, make sure you have a [working development shell](./development-shell.md).
Then, run `deploy-rs` with the hostname of the server you want to deploy:
### Dry-run
Use `--dry-activate` to show a diff of updated packages and all services that
would be restarted by the update. This will also put all files in place without
switching to the new generation, enabling a quick switch to the new config at a
later moment.
For nachtigall.pub.solar:
```
deploy --targets '.#nachtigall' --ssh-user <unix-username> --magic-rollback false --auto-rollback false --keep-result --result-path ./results --dry-activate
deploy --targets '.#nachtigall' --magic-rollback false --auto-rollback false --keep-result --result-path ./results
```
After reviewing the changes, apply the update with:
For flora-6.pub.solar:
```
deploy --targets '.#nachtigall' --ssh-user <unix-username> --magic-rollback false --auto-rollback false --keep-result --result-path ./results
deploy --targets '.#flora-6' --magic-rollback false --auto-rollback false --keep-result --result-path ./results
```
For metronom.pub.solar (aarch64-linux):
```
deploy --targets '.#metronom' --ssh-user <unix-username> --magic-rollback false --auto-rollback false --keep-result --result-path ./results --remote-build
deploy --targets '.#metronom' --magic-rollback false --auto-rollback false --keep-result --result-path ./results --remote-build
```
Usually we skip all rollback functionality, but if you want to deploy a change
@@ -38,6 +31,9 @@ that might lock you out, e.g. to SSH, it might make sense to set these to `true`.
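In that case, the same deploy command with rollbacks enabled would look like this (deploy-rs defaults both flags to `true`, so simply omitting them should be equivalent):
```
deploy --targets '.#nachtigall' --magic-rollback true --auto-rollback true --keep-result --result-path ./results
```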
To skip flake checks, e.g. because you already ran them manually before
deployment, add the flag `--skip-checks` at the end of the command.
`--dry-activate` can be used to put all files in place without switching, enabling a quick switch to the new config at a later moment.
We use `--keep-result --result-path ./results` to keep the last `result`
symlink of each `deploy` from being garbage collected. That way, we keep builds
cached in the Nix store. This is optional and both flags can be removed if disk space is a concern.


@@ -1,10 +1,18 @@
# Changing DNS entries
Our current DNS provider is [namecheap](https://www.namecheap.com/).
We use [OpenTofu](https://opentofu.org) to declaratively manage our pub.solar DNS records.
We use [Terraform](https://www.terraform.io) to declaratively manage our pub.solar DNS records.
### Initial setup
Skip this step if you already have a `triton` profile setup.
```
triton profile create
```
Please follow https://docs.greenbaum.cloud/en/devops/triton-cli.html for the details.
You will need to set up the following [namecheap API credentials](https://www.namecheap.com/support/api/intro);
look for "namecheap API key" in the pub.solar Keepass database.
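For reference, the upstream namecheap Terraform provider reads its credentials from environment variables; the variable names below come from the provider's documentation and are an assumption, not something this repository documents:
```
# assumed provider environment variables, see the namecheap/namecheap provider docs
export NAMECHEAP_USER_NAME=<namecheap-username>
export NAMECHEAP_API_USER=<namecheap-username>
export NAMECHEAP_API_KEY=<key-from-keepass>
```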
@@ -20,15 +28,13 @@ You will probably also need to add your external IP to the [API allow list](http
dig -4 ip @dns.toys
```
Now, change into the terraform directory and initialize the terraform providers. To decrypt existing state,
search for "terraform state passphrase" in the pub.solar Keepass database.
Now, change into the terraform directory and initialize the terraform providers.
```
cd terraform
export TF_VAR_state_passphrase=$(secret-tool lookup pub.solar terraform-state-passphrase-dns)
export TRITON_KEY_ID=$(cat ~/.config/triton/profiles.d/lev-1-pub_solar.json | jq --raw-output .keyId)
alias tofu="terraform-backend-git --access-logs --tf tofu git terraform"
tofu init
terraform init
```
Make your changes, e.g. in `dns.tf`.
@@ -40,21 +46,20 @@ $EDITOR dns.tf
Plan your changes using:
```
tofu plan -out pub-solar-infra.plan
terraform plan -out pub-solar-infra.plan
```
After verification, apply your changes with:
```
tofu apply "pub-solar-infra.plan"
terraform apply "pub-solar-infra.plan"
```
### Useful links
We use terraform-backend-git remote backend with opentofu state encryption for collaboration.
We use the Manta remote backend to save the terraform state for collaboration.
- https://github.com/plumber-cd/terraform-backend-git
- https://opentofu.org/docs/language/state/encryption
- https://www.terraform.io/language/v1.2.x/settings/backends/manta
Namecheap Terraform provider docs:

docs/drone-ci.md (new file, 19 lines)

@@ -0,0 +1,19 @@
# Drone CI
We currently use two CI systems: [Drone CI](https://drone.io), reachable via
https://ci.pub.solar, and [Forgejo Actions](https://forgejo.org/docs/latest/user/actions/),
whose UI is integrated into https://git.pub.solar, for example
https://git.pub.solar/pub-solar/infra/actions.
### Signing the `.drone.yml` file
Login to https://ci.pub.solar by clicking on the user icon in the bottom left.
After logging in, you can view your personal API token by clicking on the same
icon. If you're using the nix [development-shell](./development-shell.md), the
`drone` command will already be installed.
```
export DRONE_TOKEN=<your-drone-api-token>
drone --token $DRONE_TOKEN sign --save pub-solar/os
```


@@ -1,84 +0,0 @@
# Garage
### How-To create a new bucket + keys
Requirements:
- `garage` RPC credentials, in the shared keepass, search for 'garage rpc secret'.
- [Setup WireGuard](./administrative-access.md#ssh-access) for hosts: `trinkgenossin`, optionally: `delite`, `blue-shell`
```
ssh <unix-username>@trinkgenossin.wg.pub.solar
```
```
# Add a few spaces to avoid leaking the secret to the shell history
export GARAGE_RPC_SECRET=<secret-in-keepass>
```
Now, you can run the following command to check the cluster status:
```
garage status
```
Command to list all existing buckets:
```
garage bucket list
```
Creating a new bucket and access keys:
```
garage bucket create <bucket-name>
garage key create <bucket-name>-key
garage bucket allow <bucket-name> --read --write --key <bucket-name>-key
```
Full example for `mastodon` bucket:
```
garage bucket create mastodon
garage key create mastodon-key
garage bucket allow mastodon --read --write --key mastodon-key
```
Then [setup your favourite S3 client](https://garagehq.deuxfleurs.fr/documentation/connect/cli/)
or use the bucket with any [S3 compatible software](https://garagehq.deuxfleurs.fr/documentation/connect/).
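As a quick check that new credentials work, any S3-compatible client can list the bucket. A sketch using the AWS CLI, where the endpoint URL is an assumption (use the cluster's actual S3 API endpoint):
```
export AWS_ACCESS_KEY_ID=<key-id-from-garage-key-info>
export AWS_SECRET_ACCESS_KEY=<secret-from-garage-key-info>
aws --endpoint-url https://<garage-s3-endpoint> s3 ls s3://mastodon
```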
Further reading:
- https://garagehq.deuxfleurs.fr/documentation/quick-start/
- https://garagehq.deuxfleurs.fr/documentation/connect/
- https://garagehq.deuxfleurs.fr/documentation/connect/apps/#mastodon
### Notes on manual setup steps
```
ssh <unix-username>@trinkgenossin.wg.pub.solar
# Add a few spaces to avoid leaking the secret to the shell history
export GARAGE_RPC_SECRET=<secret-in-keepass>
# Uses the default config /etc/garage.toml
garage node id
garage node connect <node-id2>
garage node connect <node-id3>
garage status
#Zones
#DE-1 DE-2 NL-1
garage layout assign fdaa -z DE-1 -c 800G -t trinkgenossin
garage layout assign 8835 -z DE-2 -c 800G -t blue-shell
garage layout assign 73da -z NL-1 -c 800G -t delite
garage layout show
garage layout apply --version 1
```
Source: https://garagehq.deuxfleurs.fr/documentation/cookbook/real-world/#creating-a-cluster-layout


@@ -12,7 +12,7 @@ Run following after SSH'ing to `nachtigall`.
Credentials for the following command are in keepass. Create a keycloak
config/credentials file at `/tmp/kcadm.config`:
```bash
```
sudo --user keycloak kcadm.sh config credentials \
--config /tmp/kcadm.config \
--server https://auth.pub.solar \
@@ -22,7 +22,7 @@ sudo --user keycloak kcadm.sh config credentials \
Get list of accounts without a verified email address:
```bash
```
sudo --user keycloak kcadm.sh get \
--config /tmp/kcadm.config \
users \
@@ -35,7 +35,7 @@ Review list of accounts, especially check `createdTimestamp` if any accounts
were created in the past 2 days. If so, delete those from the
`/tmp/keycloak-unverified-accounts` file.
```bash
```
createdTimestamps=( $( nix run nixpkgs#jq -- -r '.[].createdTimestamp' < /tmp/keycloak-unverified-accounts ) )
# timestamps are in nanoseconds since epoch, so we need to strip the last three digits
@@ -46,17 +46,17 @@ vim /tmp/keycloak-unverified-accounts
Check how many accounts are going to be deleted:
```bash
```
jq -r '.[].id' < /tmp/keycloak-unverified-accounts | wc -l
```
```bash
```
jq -r '.[].id' < /tmp/keycloak-unverified-accounts > /tmp/keycloak-unverified-account-ids
```
Final check before deletion (dry-run):
```bash
```
for id in $(cat /tmp/keycloak-unverified-account-ids)
do
echo sudo --user keycloak kcadm.sh delete \
@@ -68,7 +68,7 @@ for id in $(cat /tmp/keycloak-unverified-account-ids)
THIS WILL DELETE ACCOUNTS:
```bash
```
for id in $(cat /tmp/keycloak-unverified-account-ids)
do
sudo --user keycloak kcadm.sh delete \
@@ -77,9 +77,3 @@ for id in $(cat /tmp/keycloak-unverified-account-ids)
--realm pub.solar
done
```
Delete the temp files:
```bash
sudo rm /tmp/kcadm.config /tmp/keycloak-unverified-accounts /tmp/keycloak-unverified-account-ids
```


@@ -1,27 +0,0 @@
# Matrix account suspension
> Unlike [account locking](https://spec.matrix.org/v1.12/client-server-api/#account-locking),
> [suspension](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3823-code-for-account-suspension.md)
> allows the user to have a (largely) readonly view of their account.
> Homeserver administrators and moderators may use this functionality to
> temporarily deactivate an account, or place conditions on the account's
> experience. Critically, like locking, account suspension is reversible, unlike
> the deactivation mechanism currently available in Matrix - a destructive,
> irreversible, action.
Required:
- `matrix-synapse admin token`
- [SSH access to host `nachtigall`](./administrative-access.md#ssh-access)
## Suspending an account
```bash
curl --header "Authorization: Bearer <admin-access-token>" --request PUT http://127.0.0.1:8008/_synapse/admin/v1/suspend/@<username>:pub.solar --data '{"suspend": true}'
```
## Unsuspending an account
```bash
curl --header "Authorization: Bearer <admin-access-token>" --request PUT http://127.0.0.1:8008/_synapse/admin/v1/suspend/@<username>:pub.solar --data '{"suspend": false}'
```


@@ -4,7 +4,7 @@ See the [mediawiki-oidc-docker repository](https://git.pub.solar/pub-solar/media
for instructions on updating our customized mediawiki docker image.
To deploy a new docker image to `nachtigall`, first bump the mediawiki version
of the docker image tag in `modules/mediawiki/default.nix` (search for
of the docker image tag in `hosts/nachtigall/apps/mediawiki.nix` (search for
`image`).
Next, push your changes to https://git.pub.solar and get them reviewed and
@@ -19,7 +19,7 @@ exit
```
```
deploy --targets '.#nachtigall' --magic-rollback false --auto-rollback false --keep-result --result-path ./results
deploy --targets '.#nachtigall'
```
Then, finalize the update by running the database migration script (in an [SSH](./administrative-access.md#ssh-access) shell on `nachtigall`):
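As a sketch of that step, MediaWiki's standard updater would be run inside the container roughly like this; the container name and script path are assumptions, not taken from this repository:
```
# hypothetical container name; adjust to the actual mediawiki container on nachtigall
sudo docker exec -it mediawiki php maintenance/update.php --quick
```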


@@ -1,348 +0,0 @@
# Notes on adding two disks to server nachtigall
Status after Hetzner support added two additional 1TB NVMe disks:
```
teutat3s in 🌐 nachtigall in ~
lsblk -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
nvme0n1
├─nvme0n1p1
├─nvme0n1p2 vfat FAT16 5494-BA1E 385M 21% /boot2
└─nvme0n1p3 zfs_member 5000 root_pool 8287701206764130981
nvme1n1
├─nvme1n1p1
├─nvme1n1p2 vfat FAT32 5493-EFF5 1.8G 5% /boot1
└─nvme1n1p3 zfs_member 5000 root_pool 8287701206764130981
nvme2n1
nvme3n1
teutat3s in 🌐 nachtigall in ~
sudo fdisk -l /dev/nvme0n1
Disk /dev/nvme0n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: KXG60ZNV1T02 TOSHIBA
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 28F8681A-4559-4801-BF3F-BFEC8058236B
Device Start End Sectors Size Type
/dev/nvme0n1p1 2048 4095 2048 1M BIOS boot
/dev/nvme0n1p2 4096 999423 995328 486M EFI System
/dev/nvme0n1p3 999424 2000408575 1999409152 953.4G Linux filesystem
teutat3s in 🌐 nachtigall in ~
sudo fdisk -l /dev/nvme1n1
Disk /dev/nvme1n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HCLR-00B00
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: A143A806-69C5-4EFC-8E34-20C35574D990
Device Start End Sectors Size Type
/dev/nvme1n1p1 2048 4095 2048 1M BIOS boot
/dev/nvme1n1p2 4096 3905535 3901440 1.9G EFI System
/dev/nvme1n1p3 3905536 2000408575 1996503040 952G Linux filesystem
teutat3s in 🌐 nachtigall in ~
sudo fdisk -l /dev/nvme2n1
Disk /dev/nvme2n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HDLU-00B07
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
teutat3s in 🌐 nachtigall in ~
sudo fdisk -l /dev/nvme3n1
Disk /dev/nvme3n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HCLR-00B00
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
```
Partitioning and formatting the new disks `/dev/nvme2n1` and `/dev/nvme3n1`:
```
teutat3s in 🌐 nachtigall in ~
sudo fdisk /dev/nvme2n1
Welcome to fdisk (util-linux 2.39.4).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Device does not contain a recognized partition table.
Created a new DOS (MBR) disklabel with disk identifier 0x0852470c.
Command (m for help): p
Disk /dev/nvme2n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HDLU-00B07
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x0852470c
Command (m for help): m
Help:
DOS (MBR)
a toggle a bootable flag
b edit nested BSD disklabel
c toggle the dos compatibility flag
Generic
d delete a partition
F list free unpartitioned space
l list known partition types
n add a new partition
p print the partition table
t change a partition type
v verify the partition table
i print information about a partition
Misc
m print this menu
u change display/entry units
x extra functionality (experts only)
Script
I load disk layout from sfdisk script file
O dump disk layout to sfdisk script file
Save & Exit
w write table to disk and exit
q quit without saving changes
Create a new label
g create a new empty GPT partition table
G create a new empty SGI (IRIX) partition table
o create a new empty MBR (DOS) partition table
s create a new empty Sun partition table
Command (m for help): g
Created a new GPT disklabel (GUID: 8CC98E3F-20A8-4A2D-8D50-9CD769EE4C65).
Command (m for help): p
Disk /dev/nvme2n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HDLU-00B07
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 8CC98E3F-20A8-4A2D-8D50-9CD769EE4C65
Command (m for help): n
Partition number (1-128, default 1):
First sector (2048-2000409230, default 2048):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (2048-2000409230, default 2000408575): 4095
Created a new partition 1 of type 'Linux filesystem' and of size 1 MiB.
Command (m for help): t
Selected partition 1
Partition type or alias (type L to list all): L
1 EFI System C12A7328-F81F-11D2-BA4B-00A0C93EC93B
2 MBR partition scheme 024DEE41-33E7-11D3-9D69-0008C781F39F
3 Intel Fast Flash D3BFE2DE-3DAF-11DF-BA40-E3A556D89593
4 BIOS boot 21686148-6449-6E6F-744E-656564454649
5 Sony boot partition F4019732-066E-4E12-8273-346C5641494F
6 Lenovo boot partition BFBFAFE7-A34F-448A-9A5B-6213EB736C22
7 PowerPC PReP boot 9E1A2D38-C612-4316-AA26-8B49521E5A8B
8 ONIE boot 7412F7D5-A156-4B13-81DC-867174929325
9 ONIE config D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149
10 Microsoft reserved E3C9E316-0B5C-4DB8-817D-F92DF00215AE
11 Microsoft basic data EBD0A0A2-B9E5-4433-87C0-68B6B72699C7
12 Microsoft LDM metadata 5808C8AA-7E8F-42E0-85D2-E1E90434CFB3
13 Microsoft LDM data AF9B60A0-1431-4F62-BC68-3311714A69AD
14 Windows recovery environment DE94BBA4-06D1-4D40-A16A-BFD50179D6AC
15 IBM General Parallel Fs 37AFFC90-EF7D-4E96-91C3-2D7AE055B174
16 Microsoft Storage Spaces E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D
17 HP-UX data 75894C1E-3AEB-11D3-B7C1-7B03A0000000
18 HP-UX service E2A1E728-32E3-11D6-A682-7B03A0000000
19 Linux swap 0657FD6D-A4AB-43C4-84E5-0933C84B4F4F
20 Linux filesystem 0FC63DAF-8483-4772-8E79-3D69D8477DE4
...
Partition type or alias (type L to list all): 4
Changed type of partition 'Linux filesystem' to 'BIOS boot'.
Command (m for help): n
Partition number (2-128, default 2):
First sector (4096-2000409230, default 4096):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (4096-2000409230, default 2000408575): 3901440
Created a new partition 2 of type 'Linux filesystem' and of size 1.9 GiB.
Command (m for help): t
Partition number (1,2, default 2): 2
Partition type or alias (type L to list all): 1
Changed type of partition 'Linux filesystem' to 'EFI System'.
Command (m for help): n
Partition number (3-128, default 3):
First sector (3901441-2000409230, default 3903488):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (3903488-2000409230, default 2000408575):
Created a new partition 3 of type 'Linux filesystem' and of size 952 GiB.
Command (m for help): p
Disk /dev/nvme2n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HDLU-00B07
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 8CC98E3F-20A8-4A2D-8D50-9CD769EE4C65
Device Start End Sectors Size Type
/dev/nvme2n1p1 2048 4095 2048 1M BIOS boot
/dev/nvme2n1p2 4096 3901440 3897345 1.9G EFI System
/dev/nvme2n1p3 3903488 2000408575 1996505088 952G Linux filesystem
Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Syncing disks.
teutat3s in 🌐 nachtigall in ~ took 5m41s
sudo fdisk /dev/nvme3n1
Welcome to fdisk (util-linux 2.39.4).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Device does not contain a recognized partition table.
Created a new DOS (MBR) disklabel with disk identifier 0xa77eb504.
Command (m for help): g
Created a new GPT disklabel (GUID: 56B64CEE-5E0C-4EAA-AE2F-5BF4356183A5).
Command (m for help): n
Partition number (1-128, default 1):
First sector (2048-2000409230, default 2048):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (2048-2000409230, default 2000408575): 4095
Created a new partition 1 of type 'Linux filesystem' and of size 1 MiB.
Command (m for help): t
Selected partition 1
Partition type or alias (type L to list all): 4
Changed type of partition 'Linux filesystem' to 'BIOS boot'.
Command (m for help): n
Partition number (2-128, default 2):
First sector (4096-2000409230, default 4096):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (4096-2000409230, default 2000408575): 3901440
Created a new partition 2 of type 'Linux filesystem' and of size 1.9 GiB.
Command (m for help): t
Partition number (1,2, default 2): 2
Partition type or alias (type L to list all): 1
Changed type of partition 'Linux filesystem' to 'EFI System'.
Command (m for help): n
Partition number (3-128, default 3):
First sector (3901441-2000409230, default 3903488):
Last sector, +/-sectors or +/-size{K,M,G,T,P} (3903488-2000409230, default 2000408575):
Created a new partition 3 of type 'Linux filesystem' and of size 952 GiB.
Command (m for help): p
Disk /dev/nvme3n1: 953.87 GiB, 1024209543168 bytes, 2000409264 sectors
Disk model: SAMSUNG MZVL21T0HCLR-00B00
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: gpt
Disk identifier: 56B64CEE-5E0C-4EAA-AE2F-5BF4356183A5
Device Start End Sectors Size Type
/dev/nvme3n1p1 2048 4095 2048 1M BIOS boot
/dev/nvme3n1p2 4096 3901440 3897345 1.9G EFI System
/dev/nvme3n1p3 3903488 2000408575 1996505088 952G Linux filesystem
Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Syncing disks.
teutat3s in 🌐 nachtigall in ~
sudo mkfs.vfat /dev/nvme2n1p2
mkfs.fat 4.2 (2021-01-31)
teutat3s in 🌐 nachtigall in ~
sudo mkfs.vfat /dev/nvme3n1p2
mkfs.fat 4.2 (2021-01-31)
teutat3s in 🌐 nachtigall in ~
lsblk -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
nvme0n1
├─nvme0n1p1
├─nvme0n1p2 vfat FAT16 5494-BA1E 385M 21% /boot2
└─nvme0n1p3 zfs_member 5000 root_pool 8287701206764130981
nvme1n1
├─nvme1n1p1
├─nvme1n1p2 vfat FAT32 5493-EFF5 1.8G 5% /boot1
└─nvme1n1p3 zfs_member 5000 root_pool 8287701206764130981
nvme2n1
├─nvme2n1p1
├─nvme2n1p2 vfat FAT32 E4E4-88C7
└─nvme2n1p3
nvme3n1
├─nvme3n1p1
├─nvme3n1p2 vfat FAT32 E76C-A8A0
└─nvme3n1p3
```
Finally, adding the new drives to the ZFS zpool `root_pool` to extend available disk space:
```
teutat3s in 🌐 nachtigall in ~
sudo zpool status
pool: root_pool
state: ONLINE
scan: scrub repaired 0B in 00:17:47 with 0 errors on Sat Mar 1 03:35:20 2025
config:
NAME STATE READ WRITE CKSUM
root_pool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371-part3 ONLINE 0 0 0
nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL-part3 ONLINE 0 0 0
errors: No known data errors
teutat3s in 🌐 nachtigall in ~
sudo zpool add root_pool mirror nvme-SAMSUNG_MZVL21T0HDLU-00B07_S77WNF0XA01902-part3 nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NU0W623944-part3
teutat3s in 🌐 nachtigall in ~
sudo zpool status
pool: root_pool
state: ONLINE
scan: scrub repaired 0B in 00:17:47 with 0 errors on Sat Mar 1 03:35:20 2025
config:
NAME STATE READ WRITE CKSUM
root_pool ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NF0R517371-part3 ONLINE 0 0 0
nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL-part3 ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
nvme-SAMSUNG_MZVL21T0HDLU-00B07_S77WNF0XA01902-part3 ONLINE 0 0 0
nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NU0W623944-part3 ONLINE 0 0 0
teutat3s in 🌐 nachtigall in ~
sudo zfs list root_pool
NAME USED AVAIL REFER MOUNTPOINT
root_pool 782G 1.04T 192K none
```


@@ -1,19 +0,0 @@
# Nextcloud debugging
Set loglevel to `0` for debug logs:
```nix
services.nextcloud.settings.loglevel = 0;
```
Then, logs appear in the `phpfpm-nextcloud.service` logs:
```bash
sudo journalctl -fu phpfpm-nextcloud
```
Make sure to set the loglevel back to the default `2` warning after debugging:
```nix
services.nextcloud.settings.loglevel = 2;
```

View file

@@ -41,7 +41,3 @@ wrapped-ruby-mastodon-gems: 4.2.1 → 4.2.3
zfs-kernel: 2.2.1-6.1.64 → 2.2.2-6.1.66
zfs-user: 2.2.1 → 2.2.2
```
### Deploying updates
See [deploying.md](./deploying.md).


@@ -1,29 +0,0 @@
# Deploying with nixos-anywhere
## On Target: Enter NixOS from non-NixOS host
In case you cannot easily boot into a NixOS installer image, you can download the kexec installer image of NixOS and kexec into it:
```
curl -L https://github.com/nix-community/nixos-images/releases/download/nixos-unstable/nixos-kexec-installer-noninteractive-x86_64-linux.tar.gz | tar -xzf- -C /root
/root/kexec/run
```
## Run Disko
```
nix run github:nix-community/nixos-anywhere -- --flake .#<hostname> --target-host root@<host> --phases disko
```
## On Target: Create initial SSH host key used in initrd
```
mkdir -p /mnt/etc/secrets/initrd
ssh-keygen -t ed25519 -f /mnt/etc/secrets/initrd/ssh_host_ed25519_key
```
## Run NixOS Anywhere
```
nix run github:nix-community/nixos-anywhere -- --flake .#<hostname> --target-host root@<host> --phases install,reboot
```


@@ -1,179 +0,0 @@
# pub.solar Systems Overview
Last updated: 2025-03-11
Jump to:
1. [Server nachtigall](#server-nachtigall)
2. [Server metronom](#server-metronom)
3. [Server trinkgenossin](#server-trinkgenossin)
4. [Server blue-shell](#server-blue-shell)
5. [Server delite](#server-delite)
6. [Server tankstelle](#server-tankstelle)
7. [Server underground](#server-underground)
8. [Hetzner 1TB storagebox](#hetzner-1tb-storagebox)
### Server nachtigall
**Specs:**
- AMD Ryzen 7 3700X 8-Core Processor
- 64 GB RAM
- 4x 1TB NVMe disks
**Disk layout:**
- Encrypted ZFS mirror vdevs
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
Main pub.solar server. Hosts the majority of pub.solar services. Non-exhaustive list:
- collabora
- coturn
- forgejo
- keycloak
- mailman
- mastodon
- matrix-synapse (homeserver)
- mediawiki
- nextcloud
- owncast
- searx
- tmate
- tt-rss
- obs-portal
### Server metronom
**Specs:**
- Hetzner VPS type: CAX11
- 2 vCPU
- 4 GB RAM
- 40GB disk
**Disk layout:**
- Encrypted ZFS single disk (stripe)
**Operating System:**
- NixOS 24.11 `linux-aarch64`
**Usage:**
pub.solar mail server. Note this is an ARM server.
### Server trinkgenossin
**Specs:**
- Strato VPS type: VPS Linux VC8-32
- 8 core AMD EPYC-Milan Processor
- 32 GB RAM
- 1TB NVMe disk
**Disk layout:**
- Encrypted LUKS single disk
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
Monitoring and garage cluster node. Services:
- grafana
- loki
- prometheus
- garage
- forgejo-actions-runner (docker)
### Server blue-shell
**Specs:**
- netcup VPS type: VPS 1000 G11
- 4 core AMD EPYC-Rome Processor
- 8 GB RAM
- 256 GB SSD disk
- 850GB mechanical disk
**Disk layout:**
- Encrypted LVM on LUKS single disk and encrypted LUKS garage data disk
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
Garage cluster node.
### Server delite
**Specs:**
- liteserver VPS type: HDD Storage VPS - HDD-2G
- 1 core AMD EPYC 7452
- 2 GB RAM
- 1TB mechanical disk
**Disk layout:**
- Encrypted LVM on LUKS single disk
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
Garage cluster node.
### Server tankstelle
**Specs:**
- 24 core Intel Xeon E5-2670 v2 @ 2.50GHz
- 40 GB RAM
- 80GB SSD disk
**Disk layout:**
- LVM
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
- forgejo-actions-runner (selfhosted, NixOS)
### Server underground
**Specs:**
- 8 core Intel Xeon E5-2670 v2 @ 2.50GHz
- 16 GB RAM
- 40 GB SSD disk
**Disk layout:**
- LVM on LUKS, single disk
**Operating System:**
- NixOS 24.11 `linux-x86_64`
**Usage:**
Testing server.
### Hetzner 1TB storagebox
**Usage:**
Backups get pushed to a Hetzner storagebox every night.


@@ -1,19 +0,0 @@
# ZFS Quick Start
View current status of the ZFS pool (zpool):
```
sudo zpool status
```
View available disk space of the pool; replace `<pool-name>` with the pool name from the output above:
```
sudo zfs list <pool-name>
```
List all snapshots:
```
sudo zfs list -t snapshot
```
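To also see how much space each snapshot occupies, the standard ZFS properties can be selected explicitly:
```
sudo zfs list -t snapshot -o name,used,creation
```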

flake.lock (generated, 235 lines changed)

@@ -14,11 +14,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1736955230,
"narHash": "sha256-uenf8fv2eG5bKM8C/UvFaiJMZ4IpUFaQxk9OH5t/1gA=",
"lastModified": 1718371084,
"narHash": "sha256-abpBi61mg0g+lFFU0zY4C6oP6fBwPzbHPKBGw676xsA=",
"owner": "ryantm",
"repo": "agenix",
"rev": "e600439ec4c273cf11e06fe4d9d906fb98fa097c",
"rev": "3a56735779db467538fb2e577eda28a9daacaca6",
"type": "github"
},
"original": {
@@ -52,11 +52,11 @@
"utils": "utils"
},
"locked": {
"lastModified": 1727447169,
"narHash": "sha256-3KyjMPUKHkiWhwR91J1YchF6zb6gvckCAY1jOE+ne0U=",
"lastModified": 1718194053,
"narHash": "sha256-FaGrf7qwZ99ehPJCAwgvNY5sLCqQ3GDiE/6uLhxxwSY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "aa07eb05537d4cd025e2310397a6adcedfe72c76",
"rev": "3867348fa92bc892eba5d9ddb2d7a97b9e127a8a",
"type": "github"
},
"original": {
@@ -87,26 +87,6 @@
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1741786315,
"narHash": "sha256-VT65AE2syHVj6v/DGB496bqBnu1PXrrzwlw07/Zpllc=",
"owner": "nix-community",
"repo": "disko",
"rev": "0d8c6ad4a43906d14abd5c60e0ffe7b587b213de",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"element-stickers": {
"inputs": {
"maunium-stickerpicker": [
@@ -185,11 +165,11 @@
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1741352980,
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
"lastModified": 1717285511,
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
"type": "github"
},
"original": {
@@ -234,19 +214,18 @@
"type": "github"
}
},
"fork": {
"flake-utils_3": {
"locked": {
"lastModified": 1741180627,
"narHash": "sha256-NM2X42gtXuYT7EG+zYcvNTZ/6+CTc3fSiIwdcQcp0dk=",
"owner": "teutat3s",
"repo": "nixpkgs",
"rev": "8a43eb74ac149c080d57d8c80d647fef74df84d8",
"lastModified": 1653893745,
"narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
"type": "github"
},
"original": {
"owner": "teutat3s",
"ref": "init-matrix-authentication-service-module-0.13.0",
"repo": "nixpkgs",
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
@@ -257,16 +236,16 @@
]
},
"locked": {
"lastModified": 1742655702,
"narHash": "sha256-jbqlw4sPArFtNtA1s3kLg7/A4fzP4GLk9bGbtUJg0JQ=",
"lastModified": 1718530513,
"narHash": "sha256-BmO8d0r+BVlwWtMLQEYnwmngqdXIuyFzMwvmTcLMee8=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "0948aeedc296f964140d9429223c7e4a0702a1ff",
"rev": "a1fddf0967c33754271761d91a3d921772b30d0e",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "release-24.11",
"ref": "release-24.05",
"repo": "home-manager",
"type": "github"
}
@@ -280,11 +259,11 @@
]
},
"locked": {
"lastModified": 1738012343,
"narHash": "sha256-agMgWwVxXII+RtCqok8ROjzpKJung/5N5f2BVDmMC5Q=",
"lastModified": 1707424749,
"narHash": "sha256-eTvts5E3zmD4/DoAI9KedQjRwica0cg36wwIVp1NWbM=",
"ref": "main",
"rev": "4ffd7bc8ea032991756c5e8e8a37b039789045bc",
"revCount": 38,
"rev": "1202a23c205b3c07a5feb5caf6813f21b3c69307",
"revCount": 30,
"type": "git",
"url": "https://git.pub.solar/pub-solar/keycloak-theme"
},
@@ -298,11 +277,11 @@
"flake": false,
"locked": {
"dir": "web",
"lastModified": 1733177811,
"narHash": "sha256-1n7bPSCRw7keTCIu4tJGnUlkoId6H1+dPsTPzKo3Rrk=",
"lastModified": 1718796561,
"narHash": "sha256-RKAAHve17lrJokgAPkM2k/E+f9djencwwg3Xcd70Yfw=",
"owner": "maunium",
"repo": "stickerpicker",
"rev": "89d3aece041c85ebe5a1ad4e620388af5227cbb0",
"rev": "333567f481e60443360aa7199d481e1a45b3a523",
"type": "github"
},
"original": {
@@ -320,11 +299,11 @@
]
},
"locked": {
"lastModified": 1742741935,
"narHash": "sha256-ZCNvPYWkL9hxzgWn1gmYCZItqBU4ujsWjwWNpcwCjfQ=",
"lastModified": 1719128254,
"narHash": "sha256-I7jMpq0CAOZA/i70+HDQO/ulLttyQu/K70cSESiMX7A=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "ebb88c3428dcdd95c06dca4d49b9791a65ab777b",
"rev": "50581970f37f06a4719001735828519925ef8310",
"type": "github"
},
"original": {
@@ -334,52 +313,98 @@
"type": "github"
}
},
"nixos-flake": {
"locked": {
"lastModified": 1719437091,
"narHash": "sha256-UIZasVC36DS5dli1VimK0VgL6JKuxDG9cMxKq1I6OQ0=",
"owner": "srid",
"repo": "nixos-flake",
"rev": "8cefa1e7af06d366f5d3fd7c97e9edbf4d38c476",
"type": "github"
},
"original": {
"owner": "srid",
"repo": "nixos-flake",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1742751704,
"narHash": "sha256-rBfc+H1dDBUQ2mgVITMGBPI1PGuCznf9rcWX/XIULyE=",
"lastModified": 1719426051,
"narHash": "sha256-yJL9VYQhaRM7xs0M867ZFxwaONB9T2Q4LnGo1WovuR4=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "f0946fa5f1fb876a9dc2e1850d9d3a4e3f914092",
"rev": "89c49874fb15f4124bf71ca5f42a04f2ee5825fd",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.11",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-2205": {
"locked": {
"lastModified": 1685573264,
"narHash": "sha256-Zffu01pONhs/pqH07cjlF10NnMDLok8ix5Uk4rhOnZQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "380be19fbd2d9079f677978361792cb25e8a3635",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-22.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-draupnir": {
"locked": {
"lastModified": 1720089221,
"narHash": "sha256-8abrPKFI9eqopZ/ewSeSPen4X9bs6xoyvFsfirvbJmk=",
"owner": "teutat3s",
"repo": "nixpkgs",
"rev": "078583b84242644a668ee29e995bce02192dbd16",
"type": "github"
},
"original": {
"owner": "teutat3s",
"ref": "draupnir-pr",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1740877520,
"narHash": "sha256-oiwv/ZK/2FhGxrCkQkB83i7GnWXPPLzoqFHpDD3uYpk=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "147dee35aab2193b174e4c0868bd80ead5ce755c",
"type": "github"
"lastModified": 1717284937,
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"deploy-rs": "deploy-rs",
"disko": "disko",
"element-stickers": "element-stickers",
"element-themes": "element-themes",
"flake-parts": "flake-parts",
"fork": "fork",
"home-manager": "home-manager",
"keycloak-theme-pub-solar": "keycloak-theme-pub-solar",
"maunium-stickerpicker": "maunium-stickerpicker",
"nix-darwin": "nix-darwin",
"nixos-flake": "nixos-flake",
"nixpkgs": "nixpkgs",
"nixpkgs-2205": "nixpkgs-2205",
"nixpkgs-draupnir": "nixpkgs-draupnir",
"simple-nixos-mailserver": "simple-nixos-mailserver",
"triton-vmtools": "triton-vmtools",
"unstable": "unstable"
}
},
@@ -390,21 +415,22 @@
"nixpkgs": [
"unstable"
],
"nixpkgs-24_11": [
"nixpkgs-24_05": [
"nixpkgs"
]
],
"utils": "utils_2"
},
"locked": {
"lastModified": 1734884447,
"narHash": "sha256-HA9fAmGNGf0cOYrhgoa+B6BxNVqGAYXfLyx8zIS0ZBY=",
"lastModified": 1718084203,
"narHash": "sha256-Cx1xoVfSMv1XDLgKg08CUd1EoTYWB45VmB9XIQzhmzI=",
"owner": "simple-nixos-mailserver",
"repo": "nixos-mailserver",
"rev": "63209b1def2c9fc891ad271f474a3464a5833294",
"rev": "29916981e7b3b5782dc5085ad18490113f8ff63b",
"type": "gitlab"
},
"original": {
"owner": "simple-nixos-mailserver",
"ref": "nixos-24.11",
"ref": "nixos-24.05",
"repo": "nixos-mailserver",
"type": "gitlab"
}
@@ -469,13 +495,52 @@
"type": "github"
}
},
"systems_5": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"triton-vmtools": {
"inputs": {
"flake-utils": "flake-utils_3",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"dir": "vmtools",
"lastModified": 1698443513,
"narHash": "sha256-wX2JIJ3JmJn6MAurdyjwZU+FZjLCwBArMrVSeeCb/ZU=",
"ref": "main",
"rev": "0d039dcf06afb8cbddd7ac54bae4d0d185f3e88e",
"revCount": 85,
"type": "git",
"url": "https://git.pub.solar/pub-solar/infra-vintage?dir=vmtools"
},
"original": {
"dir": "vmtools",
"ref": "main",
"type": "git",
"url": "https://git.pub.solar/pub-solar/infra-vintage?dir=vmtools"
}
},
"unstable": {
"locked": {
"lastModified": 1742669843,
"narHash": "sha256-G5n+FOXLXcRx+3hCJ6Rt6ZQyF1zqQ0DL0sWAMn2Nk0w=",
"lastModified": 1719254875,
"narHash": "sha256-ECni+IkwXjusHsm9Sexdtq8weAq/yUyt1TWIemXt3Ko=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "1e5b653dff12029333a6546c11e108ede13052eb",
"rev": "2893f56de08021cffd9b6b6dfc70fd9ccd51eb60",
"type": "github"
},
"original": {
@@ -502,6 +567,24 @@
"repo": "flake-utils",
"type": "github"
}
},
"utils_2": {
"inputs": {
"systems": "systems_5"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
}
},
"root": "root",

flake.nix (138 lines changed)

@@ -1,24 +1,24 @@
{
inputs = {
# Track channels with commits tested and built by hydra
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.11";
nixpkgs.url = "github:nixos/nixpkgs/nixos-24.05";
unstable.url = "github:nixos/nixpkgs/nixos-unstable";
fork.url = "github:teutat3s/nixpkgs/init-matrix-authentication-service-module-0.13.0";
nixpkgs-2205.url = "github:nixos/nixpkgs/nixos-22.05";
nixpkgs-draupnir.url = "github:teutat3s/nixpkgs/draupnir-pr";
nix-darwin.url = "github:lnl7/nix-darwin/master";
nix-darwin.inputs.nixpkgs.follows = "nixpkgs";
home-manager.url = "github:nix-community/home-manager/release-24.11";
home-manager.url = "github:nix-community/home-manager/release-24.05";
home-manager.inputs.nixpkgs.follows = "nixpkgs";
flake-parts.url = "github:hercules-ci/flake-parts";
nixos-flake.url = "github:srid/nixos-flake";
deploy-rs.url = "github:serokell/deploy-rs";
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
disko.url = "github:nix-community/disko";
disko.inputs.nixpkgs.follows = "nixpkgs";
agenix.url = "github:ryantm/agenix";
agenix.inputs.nixpkgs.follows = "nixpkgs";
agenix.inputs.darwin.follows = "nix-darwin";
@@ -27,6 +27,9 @@
keycloak-theme-pub-solar.url = "git+https://git.pub.solar/pub-solar/keycloak-theme?ref=main";
keycloak-theme-pub-solar.inputs.nixpkgs.follows = "nixpkgs";
triton-vmtools.url = "git+https://git.pub.solar/pub-solar/infra-vintage?ref=main&dir=vmtools";
triton-vmtools.inputs.nixpkgs.follows = "nixpkgs";
element-themes.url = "github:aaronraimist/element-themes/master";
element-themes.flake = false;
@@ -37,8 +40,8 @@
element-stickers.inputs.maunium-stickerpicker.follows = "maunium-stickerpicker";
element-stickers.inputs.nixpkgs.follows = "nixpkgs";
simple-nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-24.11";
simple-nixos-mailserver.inputs.nixpkgs-24_11.follows = "nixpkgs";
simple-nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-24.05";
simple-nixos-mailserver.inputs.nixpkgs-24_05.follows = "nixpkgs";
simple-nixos-mailserver.inputs.nixpkgs.follows = "unstable";
};
@@ -51,6 +54,7 @@
];
imports = [
inputs.nixos-flake.flakeModule
./logins
./lib
./overlays
@@ -62,7 +66,6 @@
system,
pkgs,
config,
lib,
...
}:
{
@@ -73,51 +76,12 @@
overlays = [ inputs.agenix.overlays.default ];
};
unstable = import inputs.unstable { inherit system; };
master = import inputs.master { inherit system; };
};
checks =
let
machinesPerSystem = {
aarch64-linux = [
"metronom"
];
x86_64-linux = [
"blue-shell"
"delite"
"nachtigall"
"tankstelle"
"trinkgenossin"
"underground"
];
};
nixosMachines = inputs.nixpkgs.lib.mapAttrs' (n: inputs.nixpkgs.lib.nameValuePair "nixos-${n}") (
inputs.nixpkgs.lib.genAttrs (machinesPerSystem.${system} or [ ]) (
name: self.nixosConfigurations.${name}.config.system.build.toplevel
)
);
nixos-lib = import (inputs.nixpkgs + "/nixos/lib") { };
testDir = builtins.attrNames (builtins.readDir ./tests);
testFiles = builtins.filter (n: builtins.match "^.*.nix$" n != null) testDir;
in
builtins.listToAttrs (
map (x: {
name = "test-${lib.strings.removeSuffix ".nix" x}";
value = nixos-lib.runTest (
import (./tests + "/${x}") {
inherit self;
inherit pkgs;
inherit lib;
inherit config;
}
);
}) testFiles
)
// nixosMachines;
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
deploy-rs
nix-fast-build
nixpkgs-fmt
agenix
age-plugin-yubikey
cachix
@@ -126,55 +90,53 @@
nvfetcher
shellcheck
shfmt
treefmt2
treefmt
nixos-generators
opentofu
terraform-backend-git
terraform-ls
inputs.nixpkgs-2205.legacyPackages.${system}.terraform
jq
];
};
devShells.ci = pkgs.mkShell { buildInputs = with pkgs; [ nodejs ]; };
};
flake = {
nixosModules = builtins.listToAttrs (
map (x: {
name = x;
value = import (./modules + "/${x}");
}) (builtins.attrNames (builtins.readDir ./modules))
);
flake =
let
username = "barkeeper";
in
{
inherit username;
checks = builtins.mapAttrs (
system: deployLib: deployLib.deployChecks self.deploy
) inputs.deploy-rs.lib;
nixosModules = builtins.listToAttrs (
map (x: {
name = x;
value = import (./modules + "/${x}");
}) (builtins.attrNames (builtins.readDir ./modules))
);
formatter."x86_64-linux" = inputs.nixpkgs.legacyPackages."x86_64-linux".nixfmt-rfc-style;
checks = builtins.mapAttrs (
system: deployLib: deployLib.deployChecks self.deploy
) inputs.deploy-rs.lib;
deploy.nodes = self.lib.deploy.mkDeployNodes self.nixosConfigurations {
nachtigall = {
hostname = "nachtigall.wg.pub.solar";
};
metronom = {
hostname = "metronom.wg.pub.solar";
};
tankstelle = {
hostname = "tankstelle.wg.pub.solar";
};
underground = {
hostname = "80.244.242.3";
};
trinkgenossin = {
hostname = "trinkgenossin.wg.pub.solar";
};
delite = {
hostname = "delite.wg.pub.solar";
};
blue-shell = {
hostname = "blue-shell.wg.pub.solar";
formatter."x86_64-linux" = inputs.unstable.legacyPackages."x86_64-linux".nixfmt-rfc-style;
deploy.nodes = self.lib.deploy.mkDeployNodes self.nixosConfigurations {
nachtigall = {
hostname = "nachtigall.wg.pub.solar";
sshUser = username;
};
flora-6 = {
hostname = "flora-6.wg.pub.solar";
sshUser = username;
};
metronom = {
hostname = "metronom.wg.pub.solar";
sshUser = username;
};
tankstelle = {
hostname = "tankstelle.wg.pub.solar";
sshUser = username;
};
};
};
};
};
}


@@ -1,33 +0,0 @@
{
config,
lib,
pkgs,
...
}:
{
boot.loader.grub.enable = true;
boot.kernelParams = [
"boot.shell_on_fail=1"
"ip=dhcp"
];
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#
# Most users should NEVER change this value after the initial install, for any reason,
# even if you've upgraded your system to a new NixOS release.
#
# This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
# so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
# to actually do that.
#
# This value being lower than the current NixOS release does NOT mean your system is
# out of date, out of support, or vulnerable.
#
# Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
# and migrated your data accordingly.
#
# For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
system.stateVersion = "24.05"; # Did you read the comment?
}


@@ -1,13 +0,0 @@
{ flake, ... }:
{
imports = [
./hardware-configuration.nix
./configuration.nix
./disk-config.nix
./networking.nix
./wireguard.nix
#./backups.nix
];
}


@@ -1,101 +0,0 @@
{
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vdb";
content = {
type = "gpt";
partitions = {
bios = {
size = "1M";
type = "EF02"; # for grub MBR
};
boot = {
size = "1G";
type = "8300";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/boot";
mountOptions = [ "defaults" ];
};
};
luks = {
size = "100%";
content = {
type = "luks";
name = "cryptroot";
extraOpenArgs = [ ];
# if you want to use the key for interactive login be sure there is no trailing newline
# for example use `echo -n "password" > /tmp/secret.key`
passwordFile = "/tmp/luks-password";
content = {
type = "lvm_pv";
vg = "vg0";
};
};
};
};
};
};
data = {
type = "disk";
device = "/dev/vdc";
content = {
type = "gpt";
partitions = {
luks = {
size = "100%";
content = {
type = "luks";
name = "cryptdata";
extraOpenArgs = [ ];
# if you want to use the key for interactive login be sure there is no trailing newline
# for example use `echo -n "password" > /tmp/secret.key`
passwordFile = "/tmp/luks-password";
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/var/lib/garage/data";
mountOptions = [ "defaults" ];
};
};
};
};
};
};
};
lvm_vg = {
vg0 = {
type = "lvm_vg";
lvs = {
root = {
size = "100G";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
swap = {
size = "16G";
content = {
type = "swap";
};
};
metadata = {
size = "50G";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/var/lib/garage/meta";
mountOptions = [ "defaults" ];
};
};
};
};
};
};
}


@@ -1,27 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"sr_mod"
"virtio_blk"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}


@@ -1,26 +0,0 @@
{
config,
pkgs,
flake,
...
}:
{
services.garage.settings.rpc_public_addr = "[2a03:4000:43:24e::1]:3901";
networking.hostName = "blue-shell";
networking.hostId = "00000005";
networking.useDHCP = false;
systemd.network.enable = true;
systemd.network.networks."10-wan" = {
matchConfig.Name = "ens3";
address = [
"194.13.83.205/22"
"2a03:4000:43:24e::1/64"
];
gateway = [
"194.13.80.1"
"fe80::1"
];
};
}


@@ -1,51 +0,0 @@
{
config,
pkgs,
flake,
...
}:
let
wireguardIPv4 = "10.7.6.7";
wireguardIPv6 = "fd00:fae:fae:fae:fae:7::";
in
{
networking.firewall.allowedUDPPorts = [ 51820 ];
age.secrets.wg-private-key.file = "${flake.self}/secrets/blue-shell-wg-private-key.age";
networking.wireguard.interfaces = {
wg-ssh = {
listenPort = 51820;
mtu = 1300;
ips = [
"${wireguardIPv4}/32"
"${wireguardIPv6}/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
];
};
};
services.openssh.listenAddresses = [
{
addr = wireguardIPv4;
port = 22;
}
{
addr = "[${wireguardIPv6}]";
port = 22;
}
];
}
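Each host's peer list starts from flake.self.logins.wireguardDevices, so every admin device declared in logins/admins.nix automatically becomes a peer on every host; only host-to-host peers are spelled out inline. A sketch of the attrset shape one such device entry takes (key and addresses are placeholders, not real devices):

{
# example-laptop (hypothetical admin device)
publicKey = "BASE64-WIREGUARD-PUBLIC-KEY=";
allowedIPs = [
"10.7.6.201/32"
"fd00:fae:fae:fae:fae:201::/96"
];
}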

View file

@ -1,35 +1,9 @@
{
self,
inputs,
config,
...
}:
{ self, ... }:
{
flake = {
nixosModules = {
home-manager = {
imports = [
inputs.home-manager.nixosModules.home-manager
{
home-manager.useGlobalPkgs = true;
home-manager.useUserPackages = true;
home-manager.extraSpecialArgs = {
flake = {
inherit self inputs config;
};
};
}
];
};
};
nixosConfigurations = {
nachtigall = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
nachtigall = self.nixos-flake.lib.mkLinuxSystem {
imports = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./nachtigall
@ -37,7 +11,6 @@
self.nixosModules.unlock-zfs-on-boot
self.nixosModules.core
self.nixosModules.docker
self.nixosModules.backups
self.nixosModules.nginx
self.nixosModules.collabora
@ -60,29 +33,41 @@
self.nixosModules.promtail
self.nixosModules.searx
self.nixosModules.tmate
self.nixosModules.tt-rss
self.nixosModules.obs-portal
self.nixosModules.matrix
self.nixosModules.matrix-draupnir
self.nixosModules.matrix-irc
self.nixosModules.matrix-telegram
self.nixosModules.nginx-matrix
];
};
metronom = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
flora-6 = self.nixos-flake.lib.mkLinuxSystem {
imports = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./flora-6
self.nixosModules.overlays
self.nixosModules.core
self.nixosModules.keycloak
self.nixosModules.caddy
self.nixosModules.drone
self.nixosModules.forgejo-actions-runner
self.nixosModules.grafana
self.nixosModules.prometheus
self.nixosModules.loki
];
};
metronom = self.nixos-flake.lib.mkLinuxSystem {
imports = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./metronom
self.nixosModules.overlays
self.nixosModules.unlock-zfs-on-boot
self.nixosModules.core
self.nixosModules.backups
self.nixosModules.mail
self.nixosModules.prometheus-exporters
self.nixosModules.promtail
@ -91,117 +76,17 @@
];
};
tankstelle = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
tankstelle = self.nixos-flake.lib.mkLinuxSystem {
imports = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./tankstelle
self.nixosModules.overlays
self.nixosModules.core
self.nixosModules.backups
self.nixosModules.prometheus-exporters
self.nixosModules.promtail
];
};
trinkgenossin = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./trinkgenossin
self.nixosModules.backups
self.nixosModules.overlays
self.nixosModules.unlock-luks-on-boot
self.nixosModules.core
self.nixosModules.garage
self.nixosModules.nginx
# This module already uses options, and those options are used by the grafana module
self.nixosModules.keycloak
self.nixosModules.grafana
self.nixosModules.prometheus
self.nixosModules.loki
];
};
delite = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
self.inputs.agenix.nixosModules.default
self.inputs.disko.nixosModules.disko
self.nixosModules.home-manager
./delite
self.nixosModules.overlays
self.nixosModules.unlock-luks-on-boot
self.nixosModules.core
self.nixosModules.prometheus-exporters
self.nixosModules.promtail
self.nixosModules.garage
self.nixosModules.nginx
];
};
blue-shell = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
self.inputs.agenix.nixosModules.default
self.inputs.disko.nixosModules.disko
self.nixosModules.home-manager
./blue-shell
self.nixosModules.overlays
self.nixosModules.unlock-luks-on-boot
self.nixosModules.core
self.nixosModules.prometheus-exporters
self.nixosModules.promtail
self.nixosModules.garage
self.nixosModules.nginx
];
};
underground = self.inputs.nixpkgs.lib.nixosSystem {
specialArgs = {
flake = {
inherit self inputs config;
};
};
modules = [
self.inputs.agenix.nixosModules.default
self.nixosModules.home-manager
./underground
self.nixosModules.overlays
self.nixosModules.unlock-luks-on-boot
self.nixosModules.core
self.nixosModules.backups
self.nixosModules.keycloak
self.nixosModules.postgresql
self.nixosModules.matrix
self.nixosModules.matrix-irc
self.nixosModules.nginx
self.nixosModules.nginx-matrix
];
};
};
};
}
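Both sides of this diff solve the same problem: exposing self, inputs and the flake-parts config to every NixOS module as one flake argument, either via nixos-flake's mkLinuxSystem or via plain nixosSystem with specialArgs. A sketch of how a host module then consumes it (the secret name is hypothetical):

{ flake, ... }:
{
# flake.self points at this flake's own source tree
age.secrets.example.file = "${flake.self}/secrets/example.age";
}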

View file

@ -1,33 +0,0 @@
{
flake,
config,
pkgs,
...
}:
{
boot.loader.grub.enable = true;
boot.kernelParams = [
"boot.shell_on_fail=1"
"ip=5.255.119.132::5.255.119.1:255.255.255.0:delite::off"
];
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#
# Most users should NEVER change this value after the initial install, for any reason,
# even if you've upgraded your system to a new NixOS release.
#
# This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
# so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
# to actually do that.
#
# This value being lower than the current NixOS release does NOT mean your system is
# out of date, out of support, or vulnerable.
#
# Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
# and migrated your data accordingly.
#
# For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
system.stateVersion = "24.05"; # Did you read the comment?
}
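The ip= kernel parameter hands the initrd a static network configuration so the host is reachable before the encrypted root is unlocked (see unlock-luks-on-boot in the host's imports). Its positional format is ip=<client-ip>:<server-ip>:<gateway>:<netmask>:<hostname>:<device>:<autoconf>; broken out for the value above:

boot.kernelParams = [
# ip=<client-ip>:<server-ip>:<gateway>:<netmask>:<hostname>:<device>:<autoconf>
#    5.255.119.132  (none)      5.255.119.1  255.255.255.0  delite  (any)  off
"ip=5.255.119.132::5.255.119.1:255.255.255.0:delite::off"
];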

View file

@ -1,84 +0,0 @@
{
disko.devices = {
disk = {
main = {
type = "disk";
device = "/dev/vda";
content = {
type = "gpt";
partitions = {
bios = {
size = "1M";
type = "EF02"; # for grub MBR
};
boot = {
size = "1G";
type = "8300";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/boot";
mountOptions = [ "defaults" ];
};
};
luks = {
size = "100%";
content = {
type = "luks";
name = "cryptroot";
extraOpenArgs = [ ];
# if you want to use the key for interactive login be sure there is no trailing newline
# for example use `echo -n "password" > /tmp/secret.key`
passwordFile = "/tmp/luks-password";
content = {
type = "lvm_pv";
vg = "vg0";
};
};
};
};
};
};
};
lvm_vg = {
vg0 = {
type = "lvm_vg";
lvs = {
root = {
size = "40G";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
mountOptions = [ "defaults" ];
};
};
swap = {
size = "8G";
content = {
type = "swap";
};
};
data = {
size = "800G";
content = {
type = "filesystem";
format = "xfs";
mountpoint = "/var/lib/garage/data";
mountOptions = [ "defaults" ];
};
};
metadata = {
size = "50G";
content = {
type = "filesystem";
format = "btrfs";
mountpoint = "/var/lib/garage/meta";
mountOptions = [ "defaults" ];
};
};
};
};
};
};
}

View file

@ -1,26 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"virtio_pci"
"virtio_blk"
];
boot.initrd.kernelModules = [ "dm-snapshot" ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@ -1,26 +0,0 @@
{
config,
pkgs,
flake,
...
}:
{
services.garage.settings.rpc_public_addr = "[2a04:52c0:124:9d8c::2]:3901";
networking.hostName = "delite";
networking.hostId = "00000004";
networking.useDHCP = false;
systemd.network.enable = true;
systemd.network.networks."10-wan" = {
matchConfig.Name = "ens3";
address = [
"5.255.119.132/24"
"2a04:52c0:124:9d8c::2/48"
];
gateway = [
"5.255.119.1"
"2a04:52c0:124::1"
];
};
}

View file

@ -1,51 +0,0 @@
{
config,
pkgs,
flake,
...
}:
let
wireguardIPv4 = "10.7.6.6";
wireguardIPv6 = "fd00:fae:fae:fae:fae:6::";
in
{
networking.firewall.allowedUDPPorts = [ 51820 ];
age.secrets.wg-private-key.file = "${flake.self}/secrets/delite-wg-private-key.age";
networking.wireguard.interfaces = {
wg-ssh = {
listenPort = 51820;
mtu = 1300;
ips = [
"${wireguardIPv4}/32"
"${wireguardIPv6}/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
];
};
};
services.openssh.listenAddresses = [
{
addr = wireguardIPv4;
port = 22;
}
{
addr = "[${wireguardIPv6}]";
port = 22;
}
];
}

View file

@ -0,0 +1,72 @@
{
config,
lib,
pkgs,
flake,
...
}:
let
psCfg = config.pub-solar;
in
{
config = {
# Override nix.conf for more aggressive garbage collection
nix.extraOptions = lib.mkForce ''
experimental-features = flakes nix-command
min-free = 536870912
keep-outputs = false
keep-derivations = false
fallback = true
'';
# # #
# # # Triton host specific options
# # # DO NOT ALTER below this line, changes might render system unbootable
# # #
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
# Force getting the hostname from cloud-init
networking.hostName = lib.mkDefault "";
# We use cloud-init to configure networking; this option should fix
# systemd-networkd-wait-online timeouts
#systemd.services."systemd-networkd".environment.SYSTEMD_LOG_LEVEL = "debug";
systemd.network.wait-online.ignoredInterfaces = [
"docker0"
"wg-ssh"
];
# List services that you want to enable:
services.cloud-init.enable = true;
services.cloud-init.ext4.enable = true;
services.cloud-init.network.enable = true;
# use the default NixOS cloud-init config, but add some SmartOS customization to it
environment.etc."cloud/cloud.cfg.d/90_smartos.cfg".text = ''
datasource_list: [ SmartOS ]
# Do not create the centos/ubuntu/debian user
users: [ ]
# mount second disk with label ephemeral0; it gets formatted by cloud-init
# this will fail to get added to /etc/fstab as it's read-only, but should
# mount at boot anyway
mounts:
- [ vdb, /data, auto, "defaults,nofail" ]
'';
# We manage the firewall with nix, too
# although triton can also manage firewall rules via the triton fwrule subcommand
networking.firewall.enable = true;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "22.05"; # Did you read the comment?
};
}

View file

@ -1,13 +1,11 @@
{ flake, ... }:
{ ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix
./disk-config.nix
./networking.nix
./triton-vmtools.nix
./wireguard.nix
#./backups.nix
];
}

View file

@ -8,47 +8,45 @@
modulesPath,
...
}:
{
imports = [ ];
boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"ahci"
"virtio_pci"
"xhci_pci"
"sr_mod"
"virtio_blk"
"virtio_net"
];
boot.initrd.kernelModules = [ "dm-snapshot" ];
boot.kernelModules = [ "kvm-amd" ];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
boot.initrd.luks.devices."cryptroot" = {
device = "/dev/disk/by-uuid/52a1fd17-63d7-4d0a-b7ff-74aceaf6085a";
};
fileSystems."/" = {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-label/boot";
fsType = "vfat";
};
fileSystems."/data" = {
device = "/dev/disk/by-label/ephemeral0";
fsType = "ext4";
options = [
"defaults"
"nofail"
];
};
fileSystems."/var/lib/garage/data" = {
device = "/dev/disk/by-label/data";
fsType = "xfs";
};
swapDevices = [ ];
fileSystems."/var/lib/garage/meta" = {
device = "/dev/disk/by-label/metadata";
fsType = "btrfs";
};
swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
networking.useDHCP = lib.mkDefault false;
networking.networkmanager.enable = lib.mkForce false;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,6 @@
{ pkgs, flake, ... }:
{
environment.systemPackages = with pkgs; [
flake.inputs.triton-vmtools.packages.${pkgs.system}.default
];
}

View file

@ -4,25 +4,21 @@
flake,
...
}:
let
wireguardIPv4 = "10.7.6.5";
wireguardIPv6 = "fd00:fae:fae:fae:fae:5::";
in
{
networking.firewall.allowedUDPPorts = [ 51820 ];
age.secrets.wg-private-key.file = "${flake.self}/secrets/trinkgenossin-wg-private-key.age";
age.secrets.wg-private-key.file = "${flake.self}/secrets/flora6-wg-private-key.age";
networking.wireguard.interfaces = {
wg-ssh = {
listenPort = 51820;
mtu = 1300;
ips = [
"${wireguardIPv4}/32"
"${wireguardIPv6}/96"
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
peers = flake.self.logins.admins.wireguardDevices ++ [
{
# nachtigall.pub.solar
endpoint = "138.201.80.102:51820";
@ -51,35 +47,17 @@ in
"fd00:fae:fae:fae:fae:4::/96"
];
}
{
# delite.pub.solar
endpoint = "5.255.119.132:51820";
publicKey = "ZT2qGWgMPwHRUOZmTQHWCRX4m14YwOsiszjsA5bpc2k=";
allowedIPs = [
"10.7.6.6/32"
"fd00:fae:fae:fae:fae:6::/96"
];
}
{
# blue-shell.pub.solar
endpoint = "194.13.83.205:51820";
publicKey = "bcrIpWrKc1M+Hq4ds3aN1lTaKE26f2rvXhd+93QrzR8=";
allowedIPs = [
"10.7.6.7/32"
"fd00:fae:fae:fae:fae:7::/96"
];
}
];
};
};
services.openssh.listenAddresses = [
{
addr = wireguardIPv4;
addr = "10.7.6.2";
port = 22;
}
{
addr = "[${wireguardIPv6}]";
addr = "[fd00:fae:fae:fae:fae:2::]";
port = 22;
}
];

View file

@ -1,29 +1,13 @@
{ config, flake, ... }:
{ flake, ... }:
{
age.secrets."restic-repo-storagebox-metronom" = {
file = "${flake.self}/secrets/restic-repo-storagebox-metronom.age";
age.secrets."restic-repo-droppie" = {
file = "${flake.self}/secrets/restic-repo-droppie.age";
mode = "400";
owner = "root";
};
age.secrets.restic-repo-garage-metronom = {
file = "${flake.self}/secrets/restic-repo-garage-metronom.age";
age.secrets."restic-repo-storagebox" = {
file = "${flake.self}/secrets/restic-repo-storagebox.age";
mode = "400";
owner = "root";
};
age.secrets.restic-repo-garage-metronom-env = {
file = "${flake.self}/secrets/restic-repo-garage-metronom-env.age";
mode = "400";
owner = "root";
};
pub-solar-os.backups.repos.storagebox = {
passwordFile = config.age.secrets."restic-repo-storagebox-metronom".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/metronom-backups";
};
pub-solar-os.backups.repos.garage = {
passwordFile = config.age.secrets."restic-repo-garage-metronom".path;
environmentFile = config.age.secrets."restic-repo-garage-metronom-env".path;
repository = "s3:https://buckets.pub.solar/metronom-backups";
};
}

View file

@ -23,14 +23,6 @@
pools = [ "root_pool" ];
};
# Declarative SSH private key
age.secrets."metronom-root-ssh-key" = {
file = "${flake.self}/secrets/metronom-root-ssh-key.age";
path = "/root/.ssh/id_ed25519";
mode = "400";
owner = "root";
};
# Declarative SSH private key
#age.secrets."metronom-root-ssh-key" = {
# file = "${flake.self}/secrets/metronom-root-ssh-key.age";

View file

@ -7,6 +7,6 @@
./networking.nix
./wireguard.nix
./backups.nix
#./backups.nix
];
}

View file

@ -18,7 +18,16 @@
"fd00:fae:fae:fae:fae:3::/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
peers = flake.self.logins.admins.wireguardDevices ++ [
{
# flora-6.pub.solar
endpoint = "80.71.153.210:51820";
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
}
{
# nachtigall.pub.solar
endpoint = "138.201.80.102:51820";
@ -28,17 +37,6 @@
"fd00:fae:fae:fae:fae:1::/96"
];
}
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
];
};
};

View file

@ -1,34 +1,13 @@
{ config, flake, ... }:
{ flake, ... }:
{
age.secrets."restic-repo-droppie" = {
file = "${flake.self}/secrets/restic-repo-droppie.age";
mode = "400";
owner = "root";
};
age.secrets."restic-repo-storagebox-nachtigall" = {
file = "${flake.self}/secrets/restic-repo-storagebox-nachtigall.age";
age.secrets."restic-repo-storagebox" = {
file = "${flake.self}/secrets/restic-repo-storagebox.age";
mode = "400";
owner = "root";
};
age.secrets.restic-repo-garage-nachtigall = {
file = "${flake.self}/secrets/restic-repo-garage-nachtigall.age";
mode = "400";
owner = "root";
};
age.secrets.restic-repo-garage-nachtigall-env = {
file = "${flake.self}/secrets/restic-repo-garage-nachtigall-env.age";
mode = "400";
owner = "root";
};
pub-solar-os.backups.repos.storagebox = {
passwordFile = config.age.secrets."restic-repo-storagebox-nachtigall".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
};
pub-solar-os.backups.repos.garage = {
passwordFile = config.age.secrets."restic-repo-garage-nachtigall".path;
environmentFile = config.age.secrets."restic-repo-garage-nachtigall-env".path;
repository = "s3:https://buckets.pub.solar/nachtigall-backups";
};
}

View file

@ -20,14 +20,6 @@
devices = [ "/dev/disk/by-id/nvme-KXG60ZNV1T02_TOSHIBA_Z9NF704ZF9ZL" ];
path = "/boot2";
}
{
devices = [ "/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HDLU-00B07_S77WNF0XA01902" ];
path = "/boot3";
}
{
devices = [ "/dev/disk/by-id/nvme-SAMSUNG_MZVL21T0HCLR-00B00_S676NU0W623944" ];
path = "/boot4";
}
];
copyKernels = true;
};
@ -56,79 +48,9 @@
owner = "root";
};
# keycloak
age.secrets.keycloak-database-password = {
file = "${flake.self}/secrets/keycloak-database-password.age";
mode = "600";
#owner = "keycloak";
};
pub-solar-os.auth.enable = true;
pub-solar-os.auth = {
enable = true;
database-password-file = config.age.secrets.keycloak-database-password.path;
};
# matrix-synapse
age.secrets."matrix-synapse-signing-key" = {
file = "${flake.self}/secrets/matrix-synapse-signing-key.age";
mode = "400";
owner = "matrix-synapse";
};
age.secrets."matrix-synapse-secret-config.yaml" = {
file = "${flake.self}/secrets/matrix-synapse-secret-config.yaml.age";
mode = "400";
owner = "matrix-synapse";
};
age.secrets."matrix-authentication-service-secret-config.yml" = {
file = "${flake.self}/secrets/matrix-authentication-service-secret-config.yml.age";
mode = "400";
owner = "matrix-authentication-service";
};
# matrix-appservice-irc
age.secrets."matrix-appservice-irc-mediaproxy-signing-key" = {
file = "${flake.self}/secrets/matrix-appservice-irc-mediaproxy-signing-key.jwk.age";
mode = "400";
owner = "matrix-appservice-irc";
};
pub-solar-os.matrix = {
enable = true;
appservice-irc.mediaproxy.signingKeyPath =
config.age.secrets."matrix-appservice-irc-mediaproxy-signing-key".path;
synapse = {
signing_key_path = config.age.secrets."matrix-synapse-signing-key".path;
extra-config-files = [
config.age.secrets."matrix-synapse-secret-config.yaml".path
# The registration file is automatically generated after starting the
# appservice for the first time.
# cp /var/lib/mautrix-telegram/telegram-registration.yaml \
# /var/lib/matrix-synapse/
# chown matrix-synapse:matrix-synapse \
# /var/lib/matrix-synapse/telegram-registration.yaml
"/var/lib/matrix-synapse/telegram-registration.yaml"
];
app-service-config-files = [
"/var/lib/matrix-synapse/telegram-registration.yaml"
"/var/lib/matrix-appservice-irc/registration.yml"
# "/matrix-appservice-slack-registration.yaml"
# "/hookshot-registration.yml"
# "/matrix-mautrix-signal-registration.yaml"
# "/matrix-mautrix-telegram-registration.yaml"
];
};
matrix-authentication-service.extra-config-files = [
config.age.secrets."matrix-authentication-service-secret-config.yml".path
];
};
systemd.services.postgresql = {
after = [ "var-lib-postgresql.mount" ];
requisite = [ "var-lib-postgresql.mount" ];
};
nixpkgs.config.permittedInsecurePackages = [ "keycloak-23.0.6" ];
# This value determines the NixOS release with which your system is to be
# compatible, in order to avoid breaking some software such as database

View file

@ -9,10 +9,5 @@
./networking.nix
./wireguard.nix
./backups.nix
"${flake.inputs.fork}/nixos/modules/services/matrix/matrix-authentication-service.nix"
];
disabledModules = [
"services/matrix/matrix-authentication-service.nix"
];
}

View file

@ -50,16 +50,6 @@
fsType = "vfat";
};
fileSystems."/boot3" = {
device = "/dev/disk/by-uuid/E4E4-88C7";
fsType = "vfat";
};
fileSystems."/boot4" = {
device = "/dev/disk/by-uuid/E76C-A8A0";
fsType = "vfat";
};
swapDevices = [ ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View file

@ -18,7 +18,16 @@
"fd00:fae:fae:fae:fae:1::/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
peers = flake.self.logins.admins.wireguardDevices ++ [
{
# flora-6.pub.solar
endpoint = "80.71.153.210:51820";
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
}
{
# tankstelle.pub.solar
endpoint = "80.244.242.5:51820";
@ -28,17 +37,6 @@
"fd00:fae:fae:fae:fae:4::/96"
];
}
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
];
};
};

View file

@ -5,8 +5,8 @@
mode = "400";
owner = "root";
};
age.secrets."restic-repo-storagebox-tankstelle" = {
file = "${flake.self}/secrets/restic-repo-storagebox-tankstelle.age";
age.secrets."restic-repo-storagebox" = {
file = "${flake.self}/secrets/restic-repo-storagebox.age";
mode = "400";
owner = "root";
};

View file

@ -10,9 +10,6 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
# kernel same-page merging
hardware.ksm.enable = true;
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
system.stateVersion = "23.11";

View file

@ -18,7 +18,7 @@
"fd00:fae:fae:fae:fae:4::/96"
];
privateKeyFile = config.age.secrets.wg-private-key.path;
peers = flake.self.logins.wireguardDevices ++ [
peers = flake.self.logins.admins.wireguardDevices ++ [
{
# nachtigall.pub.solar
endpoint = "138.201.80.102:51820";
@ -29,15 +29,13 @@
];
}
{
# trinkgenossin.pub.solar
publicKey = "QWgHovHxtqiQhnHLouSWiT6GIoQDmuvnThYL5c/rvU4=";
# flora-6.pub.solar
endpoint = "80.71.153.210:51820";
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
allowedIPs = [
"10.7.6.5/32"
"fd00:fae:fae:fae:fae:5::/96"
"10.7.6.2/32"
"fd00:fae:fae:fae:fae:2::/96"
];
#endpoint = "85.215.152.22:51820";
endpoint = "[2a01:239:35d:f500::1]:51820";
persistentKeepalive = 15;
}
];
};

View file

@ -1,35 +0,0 @@
{
flake,
config,
lib,
pkgs,
...
}:
{
boot.loader.grub.enable = true;
boot.loader.grub.devices = [ "/dev/vda" ];
boot.kernelParams = [
"boot.shell_on_fail=1"
"ip=dhcp"
];
# This option defines the first version of NixOS you have installed on this particular machine,
# and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
#
# Most users should NEVER change this value after the initial install, for any reason,
# even if you've upgraded your system to a new NixOS release.
#
# This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
# so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
# to actually do that.
#
# This value being lower than the current NixOS release does NOT mean your system is
# out of date, out of support, or vulnerable.
#
# Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
# and migrated your data accordingly.
#
# For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
system.stateVersion = "24.05"; # Did you read the comment?
}

View file

@ -1,13 +0,0 @@
{ flake, ... }:
{
imports = [
./hardware-configuration.nix
./configuration.nix
./networking.nix
./wireguard.nix
./forgejo-actions-runner.nix
#./backups.nix
];
}

View file

@ -1,15 +0,0 @@
{
config,
pkgs,
flake,
...
}:
{
services.garage.settings.rpc_public_addr = "[2a01:239:35d:f500::1]:3901";
networking.hostName = "trinkgenossin";
networking.hostId = "00000003";
networking.enableIPv6 = true;
networking.useDHCP = true;
}

View file

@ -1,81 +0,0 @@
{
flake,
config,
pkgs,
...
}:
{
# Use GRUB2 as the boot loader.
boot.loader.grub = {
enable = true;
devices = [ "/dev/vda" ];
};
pub-solar-os.networking.domain = "test.pub.solar";
systemd.tmpfiles.rules = [ "f /tmp/dbf 1777 root root 10d password" ];
# keycloak
pub-solar-os.auth = {
enable = true;
database-password-file = "/tmp/dbf";
};
services.keycloak.database.createLocally = true;
# matrix-synapse
# test.pub.solar /.well-known is required for federation
services.nginx.virtualHosts."${config.pub-solar-os.networking.domain}" = {
default = true;
enableACME = true;
forceSSL = true;
};
age.secrets."staging-matrix-synapse-secret-config.yaml" = {
file = "${flake.self}/secrets/staging-matrix-synapse-secret-config.yaml.age";
mode = "400";
owner = "matrix-synapse";
};
age.secrets."staging-matrix-authentication-service-secret-config.yml" = {
file = "${flake.self}/secrets/staging-matrix-authentication-service-secret-config.yml.age";
mode = "400";
owner = "matrix-authentication-service";
};
# matrix-appservice-irc
age.secrets."matrix-appservice-irc-mediaproxy-signing-key" = {
file = "${flake.self}/secrets/staging-matrix-appservice-irc-mediaproxy-signing-key.jwk.age";
mode = "400";
owner = "matrix-appservice-irc";
};
pub-solar-os.matrix = {
enable = true;
appservice-irc.mediaproxy.signingKeyPath =
config.age.secrets."matrix-appservice-irc-mediaproxy-signing-key".path;
synapse = {
extra-config-files = [
config.age.secrets."staging-matrix-synapse-secret-config.yaml".path
# The registration file is automatically generated after starting the
# appservice for the first time.
# cp /var/lib/mautrix-telegram/telegram-registration.yaml \
# /var/lib/matrix-synapse/
# chown matrix-synapse:matrix-synapse \
# /var/lib/matrix-synapse/telegram-registration.yaml
#"/var/lib/matrix-synapse/telegram-registration.yaml"
];
app-service-config-files = [
"/var/lib/matrix-appservice-irc/registration.yml"
#"/var/lib/matrix-synapse/telegram-registration.yaml"
];
};
matrix-authentication-service.extra-config-files = [
config.age.secrets."staging-matrix-authentication-service-secret-config.yml".path
];
};
services.openssh.openFirewall = true;
system.stateVersion = "24.05";
}
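The systemd.tmpfiles rule above is what seeds the throwaway database password on this staging host: in tmpfiles.d syntax the fields are <type> <path> <mode> <user> <group> <age> <argument>, and for type f the argument becomes the file's content. Broken out:

# f     /tmp/dbf  1777  root  root   10d  password
# type  path      mode  user  group  age  file content ("password")
systemd.tmpfiles.rules = [ "f /tmp/dbf 1777 root root 10d password" ];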

View file

@ -1,16 +0,0 @@
{ flake, ... }:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
./configuration.nix
./networking.nix
"${flake.inputs.fork}/nixos/modules/services/matrix/matrix-authentication-service.nix"
];
disabledModules = [
"services/matrix/matrix-authentication-service.nix"
];
}

View file

@ -1,47 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];
boot.initrd.availableKernelModules = [
"ahci"
"xhci_pci"
"virtio_pci"
"virtio_scsi"
"sd_mod"
"sr_mod"
];
boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ];
boot.initrd.luks.devices."cryptroot" = {
device = "/dev/disk/by-label/cryptroot";
};
fileSystems."/" = {
device = "/dev/disk/by-label/root";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-label/boot";
fsType = "ext4";
};
swapDevices = [
{ device = "/dev/disk/by-label/swap"; }
];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@ -1,30 +0,0 @@
{
config,
pkgs,
flake,
...
}:
{
networking.hostName = "underground";
networking = {
defaultGateway = {
address = "80.244.242.1";
interface = "enp1s0";
};
nameservers = [
"95.129.51.51"
"80.244.244.244"
];
interfaces.enp1s0 = {
useDHCP = false;
ipv4.addresses = [
{
address = "80.244.242.3";
prefixLength = 29;
}
];
};
};
}

View file

@ -38,22 +38,6 @@
"fd00:fae:fae:fae:fae:200::/96"
];
}
{
# chocolatebar
publicKey = "AS9w0zDUFLcH6IiF6T1vsyZPWPJ3p5fKsjIsM2AoZz8=";
allowedIPs = [
"10.7.6.205/32"
"fd00:fae:fae:fae:fae:205::/96"
];
}
{
# biolimo
publicKey = "gnLq6KikFVVGxLxPW+3ZnreokEKLDoso+cUepPOZsBA=";
allowedIPs = [
"10.7.6.206/32"
"fd00:fae:fae:fae:fae:206::/96"
];
}
];
};
@ -79,7 +63,6 @@
teutat3s = {
sshPubKeys = {
teutat3s-1 = "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFro/k4Mgqyh8yV/7Zwjc0dv60ZM7bROBU9JNd99P/4co6fxPt1pJiU/pEz2Dax/HODxgcO+jFZfvPEuLMCeAl0= YubiKey #10593996 PIV Slot 9a";
teutat3s-2 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHcU6KPy4b1MQXd6EJhcYwbJu7E+0IrBZF/IP6T7gbMf teutat3s@dumpyourvms";
};
secretEncryptionKeys = {

View file

@ -6,16 +6,19 @@ in
{
flake = {
logins = {
admins = admins;
wireguardDevices = lib.lists.foldl (
wireguardDevices: adminConfig:
wireguardDevices ++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else [ ])
) [ ] (lib.attrsets.attrValues admins);
sshPubKeys = lib.lists.foldl (
sshPubKeys: adminConfig:
sshPubKeys
++ (if adminConfig ? "sshPubKeys" then lib.attrsets.attrValues adminConfig.sshPubKeys else [ ])
) [ ] (lib.attrsets.attrValues admins);
admins =
lib.lists.foldl
(logins: adminConfig: {
sshPubKeys = logins.sshPubKeys ++ (lib.attrsets.attrValues adminConfig.sshPubKeys);
wireguardDevices =
logins.wireguardDevices
++ (if adminConfig ? "wireguardDevices" then adminConfig.wireguardDevices else [ ]);
})
{
sshPubKeys = [ ];
wireguardDevices = [ ];
}
(lib.attrsets.attrValues admins);
robots.sshPubKeys = lib.attrsets.attrValues robots;
};
};
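Whichever side of this diff ends up active, the intent is the same: reduce the per-admin attrsets from logins/admins.nix into flat lists that hosts consume as flake.self.logins.wireguardDevices (or logins.admins.wireguardDevices). A sketch of the reduction, with hypothetical admins and placeholder keys:

# Input (logins/admins.nix, hypothetical):
# alice.wireguardDevices = [ { publicKey = "AAAA..."; allowedIPs = [ "10.7.6.201/32" ]; } ];
# bob.wireguardDevices   = [ { publicKey = "BBBB..."; allowedIPs = [ "10.7.6.202/32" ]; } ];
# Folded output, as referenced by the hosts' wireguard peer lists:
# wireguardDevices = [
#   { publicKey = "AAAA..."; allowedIPs = [ "10.7.6.201/32" ]; }
#   { publicKey = "BBBB..."; allowedIPs = [ "10.7.6.202/32" ]; }
# ];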

View file

@ -1,8 +1,7 @@
{
# Used for restic backups to droppie, a server run by @b12f
"root@droppie" =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBZQSephFJU0NMbVbhwvVJ2/m6jcPYo1IsWCsoarqKin root@droppie";
"root@droppie" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBZQSephFJU0NMbVbhwvVJ2/m6jcPYo1IsWCsoarqKin root@droppie";
"hakkonaut" =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGP5MvCwNRtCcP1pSDrn0XZTNlpOqYnjHDm9/OI4hECW hakkonaut";
# robot user on flora-6
"hakkonaut@flora-6" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGP5MvCwNRtCcP1pSDrn0XZTNlpOqYnjHDm9/OI4hECW hakkonaut@flora-6";
}

View file

@ -1,292 +0,0 @@
{
flake,
config,
lib,
pkgs,
...
}:
let
utils = import "${flake.inputs.nixpkgs}/nixos/lib/utils.nix" {
inherit lib;
inherit config;
inherit pkgs;
};
# Type for a valid systemd unit option. Needed for correctly passing "timerConfig" to "systemd.timers"
inherit (utils.systemdUtils.unitOptions) unitOption;
inherit (lib)
literalExpression
mkOption
mkPackageOption
types
;
in
{
options.pub-solar-os.backups = {
repos = mkOption {
description = ''
Configuration of Restic repositories.
'';
type = types.attrsOf (
types.submodule (
{ name, ... }:
{
options = {
passwordFile = mkOption {
type = types.str;
description = ''
Read the repository password from a file.
'';
example = "/etc/nixos/restic-password";
};
environmentFile = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Read repository secrets as environment variables from a file.
'';
example = "/etc/nixos/restic-env";
};
repository = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Repository to back up to.
'';
example = "sftp:backup@192.168.1.100:/backups/${name}";
};
};
}
)
);
default = { };
example = {
remotebackup = {
repository = "sftp:backup@host:/backups/home";
passwordFile = "/etc/nixos/secrets/restic-password";
environmentFile = "/etc/nixos/secrets/restic-env";
};
};
};
restic = mkOption {
description = ''
Periodic backups to create with Restic.
'';
type = types.attrsOf (
types.submodule (
{ name, ... }:
{
options = {
paths = mkOption {
# This is nullable for legacy reasons only. We should consider making it a pure listOf
# after some time has passed since this comment was added.
type = types.nullOr (types.listOf types.str);
default = [ ];
description = ''
Which paths to backup, in addition to ones specified via
`dynamicFilesFrom`. If null or an empty array and
`dynamicFilesFrom` is also null, no backup command will be run.
This can be used to create a prune-only job.
'';
example = [
"/var/lib/postgresql"
"/home/user/backup"
];
};
exclude = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Patterns to exclude when backing up. See
https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files for
details on syntax.
'';
example = [
"/var/cache"
"/home/*/.cache"
".git"
];
};
timerConfig = mkOption {
type = types.nullOr (types.attrsOf unitOption);
default = {
OnCalendar = "daily";
Persistent = true;
};
description = ''
When to run the backup. See {manpage}`systemd.timer(5)` for
details. If null no timer is created and the backup will only
run when explicitly started.
'';
example = {
OnCalendar = "00:05";
RandomizedDelaySec = "5h";
Persistent = true;
};
};
user = mkOption {
type = types.str;
default = "root";
description = ''
As which user the backup should run.
'';
example = "postgresql";
};
extraBackupArgs = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Extra arguments passed to restic backup.
'';
example = [ "--exclude-file=/etc/nixos/restic-ignore" ];
};
extraOptions = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Extra extended options to be passed to the restic --option flag.
'';
example = [ "sftp.command='ssh backup@192.168.1.100 -i /home/user/.ssh/id_rsa -s sftp'" ];
};
initialize = mkOption {
type = types.bool;
default = false;
description = ''
Create the repository if it doesn't exist.
'';
};
pruneOpts = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
A list of options (--keep-\* et al.) for 'restic forget
--prune', to automatically prune old snapshots. The
'forget' command is run *after* the 'backup' command, so
keep that in mind when constructing the --keep-\* options.
'';
example = [
"--keep-daily 7"
"--keep-weekly 5"
"--keep-monthly 12"
"--keep-yearly 75"
];
};
runCheck = mkOption {
type = types.bool;
default = (builtins.length config.pub-solar-os.backups.restic.${name}.checkOpts > 0);
defaultText = literalExpression ''builtins.length config.pub-solar-os.backups.restic.${name}.checkOpts > 0'';
description = "Whether to run the `check` command with the provided `checkOpts` options.";
example = true;
};
checkOpts = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
A list of options for 'restic check'.
'';
example = [ "--with-cache" ];
};
dynamicFilesFrom = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that produces a list of files to back up. The
results of this command are given to the '--files-from'
option. The result is merged with paths specified via `paths`.
'';
example = "find /home/matt/git -type d -name .git";
};
backupPrepareCommand = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that must run before starting the backup process.
'';
};
backupCleanupCommand = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that must run after finishing the backup process.
'';
};
package = mkPackageOption pkgs "restic" { };
createWrapper = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether to generate and add a script to the system path that has the same environment variables set
as the systemd service. This can be used to e.g. mount snapshots or perform other operations, without
having to manually specify most options.
'';
};
};
}
)
);
default = { };
example = {
localbackup = {
paths = [ "/home" ];
exclude = [ "/home/*/.cache" ];
initialize = true;
};
remotebackup = {
paths = [ "/home" ];
extraOptions = [
"sftp.command='ssh backup@host -i /etc/nixos/secrets/backup-private-key -s sftp'"
];
timerConfig = {
OnCalendar = "00:05";
RandomizedDelaySec = "5h";
};
};
};
};
};
config = {
services.restic.backups =
let
repos = config.pub-solar-os.backups.repos;
restic = config.pub-solar-os.backups.restic;
repoNames = builtins.attrNames repos;
backupNames = builtins.attrNames restic;
createBackups =
backupName:
map (repoName: {
name = "${backupName}-${repoName}";
value = repos."${repoName}" // restic."${backupName}";
}) repoNames;
in
builtins.listToAttrs (lib.lists.flatten (map createBackups backupNames));
# Used for pub-solar-os.backups.repos.storagebox
programs.ssh.knownHosts = {
"u377325.your-storagebox.de".publicKey =
"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==";
"[u377325.your-storagebox.de]:23".publicKey =
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs";
};
};
}
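The config block above instantiates the Cartesian product of repos and restic: each backup job is merged with each repository definition and registered as services.restic.backups."<backup>-<repo>". A hedged usage sketch (secret path and repository URL are illustrative, not taken from this repo):

pub-solar-os.backups.repos.storagebox = {
passwordFile = "/run/agenix/restic-repo-storagebox"; # assumption: secret provided via agenix
repository = "sftp:backup@host:/backups";
};
pub-solar-os.backups.restic.mail = {
paths = [ "/var/vmail" ];
timerConfig.OnCalendar = "daily";
};
# Result: one job, services.restic.backups."mail-storagebox",
# combining the repo's credentials with the job's paths and timer.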

View file

@ -54,5 +54,9 @@
};
time.timeZone = "Etc/UTC";
home-manager.users.${config.pub-solar-os.authentication.username} = {
home.stateVersion = "23.05";
};
};
}

View file

@ -31,17 +31,13 @@
networking.hosts = {
"10.7.6.1" = [ "nachtigall.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.2" = [ "flora-6.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.3" = [ "metronom.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.4" = [ "tankstelle.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.5" = [ "trinkgenossin.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.6" = [ "delite.wg.${config.pub-solar-os.networking.domain}" ];
"10.7.6.7" = [ "blue-shell.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:1::" = [ "nachtigall.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:2::" = [ "flora-6.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:3::" = [ "metronom.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:4::" = [ "tankstelle.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:5::" = [ "trinkgenossin.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:6::" = [ "delite.wg.${config.pub-solar-os.networking.domain}" ];
"fd00:fae:fae:fae:fae:7::" = [ "blue-shell.wg.${config.pub-solar-os.networking.domain}" ];
};
services.openssh = {

View file

@ -6,21 +6,7 @@
...
}:
{
nixpkgs.config = lib.mkDefault {
allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ ];
permittedInsecurePackages = [ "olm-3.2.16" ];
};
system.activationScripts.diff-closures = {
text = ''
if [[ -e /run/current-system ]]; then
${config.nix.package}/bin/nix store diff-closures \
/run/current-system "$systemConfig" \
--extra-experimental-features nix-command
fi
'';
supportsDryActivation = true;
};
nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ ];
nix = {
# Use default version alias for nix package

View file

@ -1,33 +1,19 @@
{ flake, lib, ... }:
{ flake, config, ... }:
{
home-manager.users = (
lib.attrsets.foldlAttrs (
acc: name: value:
acc
// {
${name} = {
programs.git.enable = true;
programs.starship.enable = true;
programs.bash = {
enable = true;
historyControl = [
"ignoredups"
"ignorespace"
];
};
programs.neovim = {
enable = true;
vimAlias = true;
viAlias = true;
defaultEditor = true;
# configure = {
# packages.myVimPackages = with pkgs.vimPlugins; {
# start = [vim-nix vim-surround rainbow];
# };
# };
};
};
}
) { } flake.self.logins.admins
);
home-manager.users.${config.pub-solar-os.authentication.username} = {
programs.git.enable = true;
programs.starship.enable = true;
programs.bash.enable = true;
programs.neovim = {
enable = true;
vimAlias = true;
viAlias = true;
defaultEditor = true;
# configure = {
# packages.myVimPackages = with pkgs.vimPlugins; {
# start = [vim-nix vim-surround rainbow];
# };
# };
};
};
}

View file

@ -11,6 +11,18 @@
inherit (lib) mkOption types;
in
{
username = mkOption {
description = "Username for the adminstrative user";
type = types.str;
default = flake.self.username;
};
sshPubKeys = mkOption {
description = "SSH Keys that should have administrative root access";
type = types.listOf types.str;
default = flake.self.logins.admins.sshPubKeys;
};
root.initialHashedPassword = mkOption {
description = "Hashed password of the root account";
type = types.str;
@ -31,60 +43,36 @@
};
config = {
users.users =
(lib.attrsets.foldlAttrs (
acc: name: value:
acc
// {
${name} = {
name = name;
group = name;
extraGroups = [
"wheel"
"docker"
];
isNormalUser = true;
openssh.authorizedKeys.keys = lib.attrsets.attrValues value.sshPubKeys;
};
}
) { } flake.self.logins.admins)
// {
# TODO: Remove when we stop locking ourselves out.
root.openssh.authorizedKeys.keys = flake.self.logins.sshPubKeys;
root.initialHashedPassword = config.pub-solar-os.authentication.root.initialHashedPassword;
users.users.${config.pub-solar-os.authentication.username} = {
name = config.pub-solar-os.authentication.username;
group = config.pub-solar-os.authentication.username;
extraGroups = [
"wheel"
"docker"
];
isNormalUser = true;
openssh.authorizedKeys.keys = config.pub-solar-os.authentication.sshPubKeys;
};
users.groups.${config.pub-solar-os.authentication.username} = { };
${config.pub-solar-os.authentication.robot.username} = {
description = "CI and automation user";
home = "/home/${config.pub-solar-os.authentication.robot.username}";
createHome = true;
useDefaultShell = true;
uid = 998;
group = "${config.pub-solar-os.authentication.robot.username}";
isSystemUser = true;
openssh.authorizedKeys.keys = config.pub-solar-os.authentication.robot.sshPubKeys;
};
};
# TODO: Remove when we stop locking ourselves out.
users.users.root.openssh.authorizedKeys.keys = config.pub-solar-os.authentication.sshPubKeys;
home-manager.users = (
lib.attrsets.foldlAttrs (
acc: name: value:
acc
// {
${name} = {
home.stateVersion = "23.05";
};
}
) { } flake.self.logins.admins
);
users.users.${config.pub-solar-os.authentication.robot.username} = {
description = "CI and automation user";
home = "/home/${config.pub-solar-os.authentication.robot.username}";
createHome = true;
useDefaultShell = true;
uid = 998;
group = "${config.pub-solar-os.authentication.robot.username}";
isSystemUser = true;
openssh.authorizedKeys.keys = config.pub-solar-os.authentication.robot.sshPubKeys;
};
users.groups =
(lib.attrsets.foldlAttrs (
acc: name: value:
acc // { "${name}" = { }; }
) { } flake.self.logins.admins)
// {
${config.pub-solar-os.authentication.robot.username} = { };
};
users.groups.${config.pub-solar-os.authentication.robot.username} = { };
users.users.root.initialHashedPassword =
config.pub-solar-os.authentication.root.initialHashedPassword;
security.sudo.wheelNeedsPassword = false;
};

View file

@ -18,7 +18,7 @@
min-port = 49000;
max-port = 50000;
use-auth-secret = true;
static-auth-secret-file = config.age.secrets."coturn-static-auth-secret".path;
static-auth-secret-file = "/run/agenix/coturn-static-auth-secret";
realm = "turn.${config.pub-solar-os.networking.domain}";
cert = "${config.security.acme.certs.${realm}.directory}/full.pem";
pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";

modules/drone/default.nix (new file, 114 lines)
View file

@ -0,0 +1,114 @@
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets.drone-secrets = {
file = "${flake.self}/secrets/drone-secrets.age";
mode = "600";
owner = "drone";
};
age.secrets.drone-db-secrets = {
file = "${flake.self}/secrets/drone-db-secrets.age";
mode = "600";
owner = "drone";
};
users.users.drone = {
description = "Drone Service";
home = "/var/lib/drone";
useDefaultShell = true;
uid = 994;
group = "drone";
isSystemUser = true;
};
users.groups.drone = { };
systemd.tmpfiles.rules = [ "d '/var/lib/drone-db' 0750 drone drone - -" ];
services.caddy.virtualHosts."ci.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''
output discard
'';
extraConfig = ''
reverse_proxy :4000
'';
};
systemd.services."docker-network-drone" =
let
docker = config.virtualisation.oci-containers.backend;
dockerBin = "${pkgs.${docker}}/bin/${docker}";
in
{
serviceConfig.Type = "oneshot";
before = [ "docker-drone-server.service" ];
script = ''
${dockerBin} network inspect drone-net >/dev/null 2>&1 || ${dockerBin} network create drone-net --subnet 172.20.0.0/24
'';
};
virtualisation = {
docker = {
enable = true; # sadly podman is not supported right now
extraOptions = ''
--data-root /data/docker
'';
};
oci-containers = {
backend = "docker";
containers."drone-db" = {
image = "postgres:14";
autoStart = true;
user = "994";
volumes = [ "/var/lib/drone-db:/var/lib/postgresql/data" ];
extraOptions = [ "--network=drone-net" ];
environmentFiles = [ config.age.secrets.drone-db-secrets.path ];
};
containers."drone-server" = {
image = "drone/drone:2";
autoStart = true;
user = "994";
ports = [ "127.0.0.1:4000:80" ];
dependsOn = [ "drone-db" ];
extraOptions = [
"--network=drone-net"
"--pull=always"
"--add-host=nachtigall.${config.pub-solar-os.networking.domain}:10.7.6.1"
];
environment = {
DRONE_GITEA_SERVER = "https://git.${config.pub-solar-os.networking.domain}";
DRONE_SERVER_HOST = "ci.${config.pub-solar-os.networking.domain}";
DRONE_SERVER_PROTO = "https";
DRONE_DATABASE_DRIVER = "postgres";
};
environmentFiles = [ config.age.secrets.drone-secrets.path ];
};
containers."drone-docker-runner" = {
image = "drone/drone-runner-docker:1";
autoStart = true;
# needs to run as root
#user = "994";
volumes = [ "/var/run/docker.sock:/var/run/docker.sock" ];
dependsOn = [ "drone-db" ];
extraOptions = [
"--network=drone-net"
"--pull=always"
"--add-host=nachtigall.${config.pub-solar-os.networking.domain}:10.7.6.1"
];
environment = {
DRONE_RPC_HOST = "ci.${config.pub-solar-os.networking.domain}";
DRONE_RPC_PROTO = "https";
DRONE_RUNNER_CAPACITY = "2";
DRONE_RUNNER_NAME = "flora-6-docker-runner";
};
environmentFiles = [ config.age.secrets.drone-secrets.path ];
};
};
};
}
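The docker-network-drone oneshot uses an inspect-or-create idiom so re-runs are idempotent, and before = [ "docker-drone-server.service" ] orders it ahead of the unit that oci-containers generates for each container (docker-<name>.service). The pattern generalizes; a sketch with a hypothetical network and container name (the module above resolves the docker binary via pkgs, shortened here):

systemd.services."docker-network-example" = {
serviceConfig.Type = "oneshot";
before = [ "docker-example-app.service" ]; # unit generated for containers."example-app"
script = ''
docker network inspect example-net >/dev/null 2>&1 || docker network create example-net
'';
};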

View file

@ -1,54 +1,57 @@
{
config,
pkgs,
lib,
pkgs,
flake,
...
}:
let
hostname = config.networking.hostName;
in
{
age.secrets."forgejo-actions-runner-token.age" = {
file = "${flake.self}/secrets/trinkgenossin-forgejo-actions-runner-token.age";
owner = "gitea-runner";
age.secrets.forgejo-actions-runner-token = {
file = "${flake.self}/secrets/forgejo-actions-runner-token.age";
mode = "440";
};
# Label configuration on the gitea-actions-runner instance requires either docker or podman
virtualisation.docker.enable = true;
# Trust docker bridge interface traffic
# Needed for the docker runner to communicate with the act_runner cache
networking.firewall.trustedInterfaces = [ "br-+" ];
users.users.gitea-runner = {
home = "/var/lib/gitea-runner/${hostname}";
home = "/var/lib/gitea-runner/flora-6";
useDefaultShell = true;
group = "gitea-runner";
# Required to interact with nix daemon
extraGroups = [ "wheel" ];
isSystemUser = true;
};
users.groups.gitea-runner = { };
systemd.tmpfiles.rules = [ "d '/var/lib/gitea-runner' 0750 gitea-runner gitea-runner - -" ];
systemd.services."gitea-runner-${hostname}" = {
serviceConfig.DynamicUser = lib.mkForce false;
systemd.services."gitea-runner-flora\\x2d6".serviceConfig = {
DynamicUser = lib.mkForce false;
};
systemd.tmpfiles.rules = [
"d '/data/gitea-actions-runner' 0750 gitea-runner gitea-runner - -"
"d '/var/lib/gitea-runner' 0750 gitea-runner gitea-runner - -"
];
# forgejo actions runner
# https://forgejo.org/docs/latest/admin/actions/
# https://docs.gitea.com/usage/actions/quickstart
services.gitea-actions-runner = {
package = pkgs.forgejo-runner;
instances."${hostname}" = {
instances."flora-6" = {
enable = true;
name = hostname;
name = config.networking.hostName;
url = "https://git.pub.solar";
tokenFile = config.age.secrets."forgejo-actions-runner-token.age".path;
tokenFile = config.age.secrets.forgejo-actions-runner-token.path;
settings = {
cache = {
enabled = true;
dir = "/data/gitea-actions-runner/actcache";
host = "";
port = 0;
external_server = "";
};
};
labels = [
# provide a debian 12 bookworm base with Node.js for actions
"debian-latest:docker://git.pub.solar/pub-solar/actions-base-image:20-bookworm"
@ -56,6 +59,8 @@ in
"ubuntu-latest:docker://git.pub.solar/pub-solar/actions-base-image:20-bookworm"
# alpine with Node.js
"alpine-latest:docker://node:20-alpine"
# nix flakes enabled image with Node.js
"nix-flakes:docker://git.pub.solar/pub-solar/nix-flakes-node:latest"
];
};
};

View file

@ -65,7 +65,6 @@
services.forgejo = {
enable = true;
package = pkgs.forgejo;
user = "gitea";
group = "gitea";
database = {
@ -76,7 +75,7 @@
};
stateDir = "/var/lib/forgejo";
lfs.enable = true;
secrets.mailer.PASSWD = config.age.secrets.forgejo-mailer-password.path;
mailerPasswordFile = config.age.secrets.forgejo-mailer-password.path;
settings = {
DEFAULT.APP_NAME = "pub.solar git server";
@ -142,12 +141,6 @@
LOGIN_REMEMBER_DAYS = 365;
};
# See https://docs.gitea.com/administration/config-cheat-sheet#migrations-migrations
migrations = {
# This allows migrations from the same forgejo instance
ALLOW_LOCALNETWORKS = true;
};
# https://forgejo.org/docs/next/admin/config-cheat-sheet/#indexer-indexer
indexer = {
REPO_INDEXER_ENABLED = true;
@ -186,10 +179,10 @@
"/tmp/forgejo-backup.sql"
];
timerConfig = {
OnCalendar = "*-*-* 23:00:00 Etc/UTC";
OnCalendar = "*-*-* 00:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox-nachtigall".path;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d gitea > /tmp/forgejo-backup.sql

View file

@ -1,142 +0,0 @@
{
config,
lib,
pkgs,
flake,
...
}:
{
age.secrets."garage-rpc-secret" = {
file = "${flake.self}/secrets/garage-rpc-secret.age";
mode = "400";
};
age.secrets."garage-admin-token" = {
file = "${flake.self}/secrets/garage-admin-token.age";
mode = "400";
};
age.secrets."acme-namecheap-env" = {
file = "${flake.self}/secrets/acme-namecheap-env.age";
mode = "400";
};
networking.firewall.allowedTCPPorts = [
3900
3901
3902
];
networking.firewall.interfaces.wg-ssh.allowedTCPPorts = [ 3903 ];
security.acme = {
defaults = {
# LEGO_DISABLE_CNAME_SUPPORT=true set here to fix issues with CNAME
# detection, as we use wildcard DNS for garage
environmentFile = config.age.secrets.acme-namecheap-env.path;
};
certs = {
# Wildcard certificate gets created automatically
"buckets.${config.pub-solar-os.networking.domain}" = {
# disable http challenge
webroot = null;
# enable dns challenge
dnsProvider = "namecheap";
};
# Wildcard certificate gets created automatically
"web.${config.pub-solar-os.networking.domain}" = {
# disable http challenge
webroot = null;
# enable dns challenge
dnsProvider = "namecheap";
};
};
};
services.nginx = {
upstreams.s3_backend.servers = {
"[::1]:3900" = { };
};
upstreams.web_backend.servers = {
"[::1]:3902" = { };
};
virtualHosts."buckets.${config.pub-solar-os.networking.domain}" = {
serverAliases = [ "*.buckets.${config.pub-solar-os.networking.domain}" ];
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://s3_backend";
extraConfig = ''
client_max_body_size 64m;
proxy_max_temp_file_size 0;
'';
};
};
virtualHosts."web.${config.pub-solar-os.networking.domain}" = {
serverAliases = [ "*.web.${config.pub-solar-os.networking.domain}" ];
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://web_backend";
};
};
};
services.garage = {
enable = true;
package = pkgs.garage_1_1_0;
settings = {
data_dir = "/var/lib/garage/data";
metadata_dir = "/var/lib/garage/meta";
db_engine = "lmdb";
replication_factor = 3;
compression_level = 2;
rpc_bind_addr = "[::]:3901";
s3_api = {
s3_region = "eu-central";
api_bind_addr = "[::]:3900";
root_domain = ".buckets.${config.pub-solar-os.networking.domain}";
};
s3_web = {
bind_addr = "[::]:3902";
root_domain = ".web.${config.pub-solar-os.networking.domain}";
index = "index.html";
};
admin = {
api_bind_addr = "[::]:3903";
};
};
};
users.users.garage = {
isSystemUser = true;
home = "/var/lib/garage";
group = "garage";
};
users.groups.garage = { };
# Adapted from https://git.clan.lol/clan/clan-core/src/commit/23a9e35c665ff531fe1193dcc47056432fbbeacf/clanModules/garage/default.nix
# DynamicUser disabled, see https://github.com/NixOS/nixpkgs/blob/nixos-24.05/nixos/modules/services/web-servers/garage.nix,
# so that mounts + permissions work
systemd.services.garage = {
serviceConfig = {
user = "garage";
group = "garage";
DynamicUser = false;
LoadCredential = [
"rpc_secret_path:${config.age.secrets.garage-rpc-secret.path}"
"admin_token_path:${config.age.secrets.garage-admin-token.path}"
];
Environment = [
"GARAGE_ALLOW_WORLD_READABLE_SECRETS=true"
"GARAGE_RPC_SECRET_FILE=%d/rpc_secret_path"
"GARAGE_ADMIN_TOKEN_FILE=%d/admin_token_path"
];
};
};
}
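Read together with the host files earlier in this diff, this deleted module described a three-node garage cluster matching replication_factor = 3: trinkgenossin, delite and blue-shell each announce their own rpc_public_addr, so every object is replicated on each node. The per-host glue, copied from the host configs above:

# trinkgenossin
services.garage.settings.rpc_public_addr = "[2a01:239:35d:f500::1]:3901";
# delite
services.garage.settings.rpc_public_addr = "[2a04:52c0:124:9d8c::2]:3901";
# blue-shell
services.garage.settings.rpc_public_addr = "[2a03:4000:43:24e::1]:3901";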

View file

@ -33,18 +33,15 @@
group = "grafana";
user = "grafana";
};
"grafana-dashboards/grafana-garage-dashboard-prometheus.json" = {
source = ./grafana-dashboards/grafana-garage-dashboard-prometheus.json;
group = "grafana";
user = "grafana";
};
};
services.nginx.virtualHosts."grafana.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;
locations."/".proxyPass =
"http://127.0.0.1:${toString config.services.grafana.settings.server.http_port}";
services.caddy.virtualHosts."grafana.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''
output discard
'';
extraConfig = ''
reverse_proxy :${toString config.services.grafana.settings.server.http_port}
'';
};
services.grafana = {
@ -67,7 +64,7 @@
password = "\$__file{${config.age.secrets.grafana-smtp-password.path}}";
from_address = "no-reply@pub.solar";
from_name = "grafana.pub.solar";
ehlo_identity = "grafana.pub.solar";
ehlo_identity = "flora-6.pub.solar";
};
security = {
admin_email = "crew@pub.solar";

View file

@ -6,22 +6,23 @@
...
}:
{
options.pub-solar-os.auth = with lib; {
enable = mkEnableOption "Enable keycloak to run on the node";
options.pub-solar-os.auth = {
enable = lib.mkEnableOption "Enable keycloak to run on the node";
realm = mkOption {
realm = lib.mkOption {
description = "Name of the realm";
type = types.str;
type = lib.types.str;
default = config.pub-solar-os.networking.domain;
};
database-password-file = mkOption {
description = "Database password file path";
type = types.str;
};
};
config = lib.mkIf config.pub-solar-os.auth.enable {
age.secrets.keycloak-database-password = {
file = "${flake.self}/secrets/keycloak-database-password.age";
mode = "600";
#owner = "keycloak";
};
services.nginx.virtualHosts."auth.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;
@ -45,13 +46,12 @@
# keycloak
services.keycloak = {
enable = true;
database.passwordFile = config.pub-solar-os.auth.database-password-file;
database.passwordFile = config.age.secrets.keycloak-database-password.path;
settings = {
hostname = "auth.${config.pub-solar-os.networking.domain}";
http-host = "127.0.0.1";
http-port = 8080;
proxy-headers = "xforwarded";
http-enabled = true;
proxy = "edge";
};
themes = {
"pub.solar" =
@ -59,12 +59,14 @@
};
};
pub-solar-os.backups.restic.keycloak = {
services.restic.backups.keycloak-storagebox = {
paths = [ "/tmp/keycloak-backup.sql" ];
timerConfig = {
OnCalendar = "*-*-* 03:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d keycloak > /tmp/keycloak-backup.sql
'';

View file

@ -108,7 +108,7 @@
};
clients = [
{
url = "http://trinkgenossin.wg.pub.solar:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
url = "http://flora-6.wg.pub.solar:${toString config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
}
];
scrape_configs = [
@ -118,7 +118,7 @@
max_age = "24h";
labels = {
job = "systemd-journal";
host = "trinkgenossin";
host = "flora-6";
};
};
relabel_configs = [

View file

@ -67,20 +67,4 @@
};
security.acme.acceptTerms = true;
security.acme.defaults.email = "security@pub.solar";
pub-solar-os.backups.restic.mail = {
paths = [
"/var/vmail"
"/var/dkim"
];
timerConfig = {
OnCalendar = "*-*-* 02:00:00 Etc/UTC";
};
initialize = true;
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 4"
"--keep-monthly 3"
];
};
}
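
The block removed here used the repo's pub-solar-os.backups.restic wrapper; other files in this diff carry the same backups as plain services.restic.backups entries. A minimal sketch of the direct form, built only from values that appear elsewhere in this diff (the job name is illustrative):

services.restic.backups.mail-storagebox = {
  paths = [
    "/var/vmail"
    "/var/dkim"
  ];
  timerConfig.OnCalendar = "*-*-* 02:00:00 Etc/UTC";
  # Create the repository on first use.
  initialize = true;
  passwordFile = config.age.secrets."restic-repo-storagebox".path;
  repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
  pruneOpts = [
    "--keep-daily 7"
    "--keep-weekly 4"
    "--keep-monthly 3"
  ];
};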

View file

@ -91,7 +91,7 @@
OnCalendar = "*-*-* 02:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox-nachtigall".path;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
pruneOpts = [
"--keep-daily 7"

View file

@ -7,21 +7,6 @@
}:
{
age.secrets."mastodon-active-record-encryption-deterministic-key" = {
file = "${flake.self}/secrets//mastodon-active-record-encryption-deterministic-key.age";
mode = "400";
owner = config.services.mastodon.user;
};
age.secrets."mastodon-active-record-encryption-key-derivation-salt" = {
file = "${flake.self}/secrets//mastodon-active-record-encryption-key-derivation-salt.age";
mode = "400";
owner = config.services.mastodon.user;
};
age.secrets."mastodon-active-record-encryption-primary-key" = {
file = "${flake.self}/secrets//mastodon-active-record-encryption-primary-key.age";
mode = "400";
owner = config.services.mastodon.user;
};
age.secrets."mastodon-secret-key-base" = {
file = "${flake.self}/secrets/mastodon-secret-key-base.age";
mode = "400";
@ -69,9 +54,6 @@
webProcesses = 2;
# Threads per process used by the mastodon-web service
webThreads = 5;
activeRecordEncryptionDeterministicKeyFile = "/run/agenix/mastodon-active-record-encryption-deterministic-key";
activeRecordEncryptionKeyDerivationSaltFile = "/run/agenix/mastodon-active-record-encryption-key-derivation-salt";
activeRecordEncryptionPrimaryKeyFile = "/run/agenix/mastodon-active-record-encryption-primary-key";
secretKeyBaseFile = "/run/agenix/mastodon-secret-key-base";
otpSecretFile = "/run/agenix/mastodon-otp-secret";
vapidPrivateKeyFile = "/run/agenix/mastodon-vapid-private-key";
@ -85,20 +67,20 @@
passwordFile = "/run/agenix/mastodon-smtp-password";
fromAddress = "mastodon-notifications@pub.solar";
};
# Defined in ./opensearch.nix
elasticsearch.host = "127.0.0.1";
mediaAutoRemove = {
olderThanDays = 7;
};
extraEnvFiles = [ "/run/agenix/mastodon-extra-env-secrets" ];
extraConfig = {
WEB_DOMAIN = "mastodon.${config.pub-solar-os.networking.domain}";
# Defined in ./opensearch.nix
ES_HOST = "127.0.0.1";
# S3 File storage (optional)
# -----------------------
S3_ENABLED = "true";
S3_BUCKET = "mastodon";
S3_REGION = "eu-central";
S3_ENDPOINT = "https://buckets.pub.solar";
S3_BUCKET = "pub-solar-mastodon";
S3_REGION = "europe-west-1";
S3_ENDPOINT = "https://gateway.tardigradeshare.io";
S3_ALIAS_HOST = "files.${config.pub-solar-os.networking.domain}";
# Translation (optional)
# -----------------------
@ -124,7 +106,7 @@
OnCalendar = "*-*-* 04:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox-nachtigall".path;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d mastodon > /tmp/mastodon-backup.sql

View file

@ -0,0 +1,24 @@
{ config, flake, ... }:
{
imports = [ "${flake.inputs.nixpkgs-draupnir}/nixos/modules/services/matrix/draupnir.nix" ];
disabledModules = [ "services/matrix/draupnir.nix" ];
age.secrets."matrix-draupnir-access-token" = {
file = "${flake.self}/secrets/matrix-draupnir-access-token.age";
mode = "640";
owner = "root";
group = "draupnir";
};
services.draupnir = {
enable = true;
accessTokenFile = config.age.secrets.matrix-draupnir-access-token.path;
# https://github.com/the-draupnir-project/Draupnir/blob/main/config/default.yaml
settings = {
homeserverUrl = "http://localhost:8008";
managementRoom = "#moderators:pub.solar";
protectAllJoinedRooms = true;
};
};
}
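
The imports/disabledModules pair above is the usual NixOS recipe for swapping an in-tree module for one from a different nixpkgs checkout: disabledModules drops the module shipped with the host's nixpkgs (addressed by its path relative to nixos/modules), and imports pulls in the replacement. A minimal sketch of the pattern, with a hypothetical input and module path:

{
  # Drop the module bundled with the host's nixpkgs...
  disabledModules = [ "services/misc/example.nix" ];
  # ...and import the newer one from a pinned flake input instead.
  imports = [ "${flake.inputs.nixpkgs-unstable}/nixos/modules/services/misc/example.nix" ];
}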

View file

@ -16,128 +16,115 @@ let
synapseClientPort = "${toString listenerWithClient.port}";
in
{
options.pub-solar-os = {
matrix.appservice-irc.mediaproxy = {
signingKeyPath = lib.mkOption {
description = "Path to file containing the IRC appservice mediaproxy signing key";
type = lib.types.str;
default = "/var/lib/matrix-appservice-irc/media-signingkey.jwk";
systemd.services.matrix-appservice-irc.serviceConfig.SystemCallFilter = lib.mkForce [
"@system-service @pkey"
"~@privileged @resources"
"@chown"
];
services.matrix-appservice-irc = {
enable = true;
localpart = "irc_bot";
port = 8010;
registrationUrl = "http://localhost:8010";
settings = {
homeserver = {
domain = "${config.pub-solar-os.networking.domain}";
url = "http://127.0.0.1:${synapseClientPort}";
media_url = "https://matrix.${config.pub-solar-os.networking.domain}";
enablePresence = false;
};
};
};
config = {
services.matrix-appservice-irc = {
enable = true;
localpart = "irc_bot";
port = 8010;
registrationUrl = "http://localhost:8010";
settings = {
homeserver = {
domain = "${config.pub-solar-os.networking.domain}";
url = "http://127.0.0.1:${synapseClientPort}";
enablePresence = false;
ircService = {
ident = {
address = "::";
enabled = false;
port = 1113;
};
ircService = {
ident = {
address = "::";
enabled = false;
port = 1113;
};
logging = {
# set to debug for debugging
level = "warn";
maxFiles = 5;
toConsole = true;
};
matrixHandler = {
eventCacheSize = 4096;
};
mediaProxy = {
signingKeyPath = config.pub-solar-os.matrix.appservice-irc.mediaproxy.signingKeyPath;
# keep media for 2 weeks
ttlSeconds = 1209600;
bindPort = 11111;
publicUrl = "https:///matrix.${config.pub-solar-os.networking.domain}/media";
};
metrics = {
enabled = true;
remoteUserAgeBuckets = [
"1h"
"1d"
"1w"
];
};
provisioning = {
enabled = false;
requestTimeoutSeconds = 300;
};
servers =
let
commonConfig = {
allowExpiredCerts = false;
botConfig = {
enabled = false;
joinChannelsIfNoUsers = false;
nick = "MatrixBot";
};
dynamicChannels = {
createAlias = true;
enabled = true;
federate = true;
joinRule = "public";
published = true;
};
ircClients = {
allowNickChanges = true;
concurrentReconnectLimit = 50;
idleTimeout = 10800;
lineLimit = 3;
maxClients = 30;
nickTemplate = "$DISPLAY[m]";
reconnectIntervalMs = 5000;
};
matrixClients = {
joinAttempts = -1;
};
membershipLists = {
enabled = true;
floodDelayMs = 10000;
global = {
ircToMatrix = {
incremental = true;
initial = true;
};
matrixToIrc = {
incremental = true;
initial = true;
};
logging = {
level = "debug";
maxFiles = 5;
toConsole = true;
};
matrixHandler = {
eventCacheSize = 4096;
};
metrics = {
enabled = true;
remoteUserAgeBuckets = [
"1h"
"1d"
"1w"
];
};
provisioning = {
enabled = false;
requestTimeoutSeconds = 300;
};
servers =
let
commonConfig = {
allowExpiredCerts = false;
botConfig = {
enabled = false;
joinChannelsIfNoUsers = false;
nick = "MatrixBot";
};
dynamicChannels = {
createAlias = true;
enabled = true;
federate = true;
joinRule = "public";
published = true;
};
ircClients = {
allowNickChanges = true;
concurrentReconnectLimit = 50;
idleTimeout = 10800;
lineLimit = 3;
maxClients = 30;
nickTemplate = "$DISPLAY[m]";
reconnectIntervalMs = 5000;
};
matrixClients = {
joinAttempts = -1;
};
membershipLists = {
enabled = true;
floodDelayMs = 10000;
global = {
ircToMatrix = {
incremental = true;
initial = true;
};
matrixToIrc = {
incremental = true;
initial = true;
};
};
port = 6697;
privateMessages = {
enabled = true;
federate = true;
};
sasl = false;
sendConnectionMessages = true;
ssl = true;
};
in
{
"irc.libera.chat" = lib.attrsets.recursiveUpdate commonConfig {
name = "libera";
dynamicChannels.groupId = "+libera.chat:localhost";
dynamicChannels.aliasTemplate = "#_libera_$CHANNEL";
matrixClients.displayName = "$NICK (LIBERA-IRC)";
};
"irc.scratch-network.net" = lib.attrsets.recursiveUpdate commonConfig {
name = "scratch";
matrixClients.displayName = "$NICK (SCRATCH-IRC)";
dynamicChannels.aliasTemplate = "#_scratch_$CHANNEL";
dynamicChannels.groupId = "+scratch-network.net:localhost";
port = 6697;
privateMessages = {
enabled = true;
federate = true;
};
sasl = false;
sendConnectionMessages = true;
ssl = true;
};
};
in
{
"irc.libera.chat" = lib.attrsets.recursiveUpdate commonConfig {
name = "libera";
dynamicChannels.groupId = "+libera.chat:localhost";
dynamicChannels.aliasTemplate = "#_libera_$CHANNEL";
matrixClients.displayName = "$NICK (LIBERA-IRC)";
};
"irc.scratch-network.net" = lib.attrsets.recursiveUpdate commonConfig {
name = "scratch";
matrixClients.displayName = "$NICK (SCRATCH-IRC)";
dynamicChannels.aliasTemplate = "#_scratch_$CHANNEL";
dynamicChannels.groupId = "+scratch-network.net:localhost";
};
};
};
};
};
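
The servers attrset above keeps each network's entry small by merging it over commonConfig with lib.attrsets.recursiveUpdate, which, unlike the // operator, descends into nested attrsets instead of replacing them wholesale. A minimal sketch of the difference:

let
  common = {
    dynamicChannels = {
      enabled = true;
      federate = true;
    };
  };
in
# enabled and federate survive the merge; a plain
# `common // { dynamicChannels.groupId = ...; }` would drop them.
lib.attrsets.recursiveUpdate common {
  dynamicChannels.groupId = "+libera.chat:localhost";
}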

View file

@ -1,7 +1,6 @@
{
flake,
config,
lib,
pkgs,
...
}:
@ -10,355 +9,330 @@ let
serverDomain = "${config.pub-solar-os.networking.domain}";
in
{
options.pub-solar-os = {
matrix = {
enable = lib.mkEnableOption "Enable matrix-synapse and matrix-authentication-service to run on the node";
synapse = {
app-service-config-files = lib.mkOption {
description = "List of app service config files";
type = lib.types.listOf lib.types.str;
default = [ ];
};
extra-config-files = lib.mkOption {
description = "List of extra synapse config files";
type = lib.types.listOf lib.types.str;
default = [ ];
};
signing_key_path = lib.mkOption {
description = "Path to file containing the signing key";
type = lib.types.str;
default = "${config.services.matrix-synapse.dataDir}/homeserver.signing.key";
};
};
matrix-authentication-service = {
extra-config-files = lib.mkOption {
description = "List of extra mas config files";
type = lib.types.listOf lib.types.str;
default = [ ];
};
};
};
age.secrets."matrix-synapse-signing-key" = {
file = "${flake.self}/secrets/matrix-synapse-signing-key.age";
mode = "400";
owner = "matrix-synapse";
};
config = lib.mkIf config.pub-solar-os.matrix.enable {
services.matrix-synapse = {
enable = true;
settings = {
server_name = serverDomain;
public_baseurl = "https://${publicDomain}/";
database = {
name = "psycopg2";
args = {
host = "/run/postgresql";
cp_max = 10;
cp_min = 5;
database = "matrix";
};
allow_unsafe_locale = false;
txn_limit = 0;
age.secrets."matrix-synapse-secret-config.yaml" = {
file = "${flake.self}/secrets/matrix-synapse-secret-config.yaml.age";
mode = "400";
owner = "matrix-synapse";
};
age.secrets."matrix-synapse-sliding-sync-secret" = {
file = "${flake.self}/secrets/matrix-synapse-sliding-sync-secret.age";
mode = "400";
owner = "matrix-synapse";
};
services.matrix-synapse = {
enable = true;
settings = {
server_name = serverDomain;
public_baseurl = "https://${publicDomain}/";
database = {
name = "psycopg2";
args = {
host = "/run/postgresql";
cp_max = 10;
cp_min = 5;
database = "matrix";
};
listeners = [
{
bind_addresses = [ "127.0.0.1" ];
port = 8008;
resources = [
{
compress = true;
names = [ "client" ];
}
{
compress = false;
names = [ "federation" ];
}
];
tls = false;
type = "http";
x_forwarded = true;
}
{
bind_addresses = [ "127.0.0.1" ];
port = 8012;
resources = [ { names = [ "metrics" ]; } ];
tls = false;
type = "metrics";
}
];
allow_unsafe_locale = false;
txn_limit = 0;
};
listeners = [
{
bind_addresses = [ "127.0.0.1" ];
port = 8008;
resources = [
{
compress = true;
names = [ "client" ];
}
{
compress = false;
names = [ "federation" ];
}
];
tls = false;
type = "http";
x_forwarded = true;
}
{
bind_addresses = [ "127.0.0.1" ];
port = 8012;
resources = [ { names = [ "metrics" ]; } ];
tls = false;
type = "metrics";
}
];
account_threepid_delegates.msisdn = "";
alias_creation_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];
allow_guest_access = false;
allow_public_rooms_over_federation = true;
allow_public_rooms_without_auth = false;
auto_join_rooms = [
"#community:${serverDomain}"
"#general:${serverDomain}"
];
account_threepid_delegates.msisdn = "";
alias_creation_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];
allow_guest_access = false;
allow_public_rooms_over_federation = true;
allow_public_rooms_without_auth = false;
auto_join_rooms = [
"#community:${serverDomain}"
"#general:${serverDomain}"
];
autocreate_auto_join_rooms = true;
caches.global_factor = 0.5;
autocreate_auto_join_rooms = true;
caches.global_factor = 0.5;
default_room_version = "10";
disable_msisdn_registration = true;
enable_media_repo = true;
enable_metrics = true;
mau_stats_only = true;
enable_registration = false;
enable_registration_captcha = false;
enable_registration_without_verification = false;
enable_room_list_search = true;
encryption_enabled_by_default_for_room_type = "off";
event_cache_size = "100K";
default_room_version = "10";
disable_msisdn_registration = true;
enable_media_repo = true;
enable_metrics = true;
mau_stats_only = true;
enable_registration = false;
enable_registration_captcha = false;
enable_registration_without_verification = false;
enable_room_list_search = true;
encryption_enabled_by_default_for_room_type = "off";
event_cache_size = "100K";
federation_rr_transactions_per_room_per_second = 50;
federation_client_minimum_tls_version = "1.2";
forget_rooms_on_leave = true;
include_profile_data_on_invite = true;
instance_map = { };
limit_profile_requests_to_users_who_share_rooms = false;
# https://github.com/element-hq/synapse/issues/11203
# No YAML deep-merge, so this needs to be in secret extraConfigFiles
# together with msc3861
#experimental_features = {
# # Room summary API
# msc3266_enabled = true;
# # Rendezvous server for QR Code generation
# msc4108_enabled = true;
#};
max_spider_size = "10M";
max_upload_size = "50M";
media_storage_providers = [ ];
federation_rr_transactions_per_room_per_second = 50;
federation_client_minimum_tls_version = "1.2";
forget_rooms_on_leave = true;
include_profile_data_on_invite = true;
instance_map = { };
limit_profile_requests_to_users_who_share_rooms = false;
password_config = {
enabled = false;
localdb_enabled = false;
pepper = "";
};
max_spider_size = "10M";
max_upload_size = "50M";
media_storage_providers = [ ];
presence.enabled = true;
push.include_content = false;
password_config = {
enabled = false;
localdb_enabled = false;
pepper = "";
};
presence.enabled = true;
push.include_content = false;
rc_admin_redaction = {
burst_count = 50;
per_second = 1;
};
rc_federation = {
concurrent = 3;
reject_limit = 50;
sleep_delay = 500;
sleep_limit = 10;
window_size = 1000;
};
rc_invites = {
per_issuer = {
burst_count = 10;
per_second = 0.3;
};
per_room = {
burst_count = 10;
per_second = 0.3;
};
per_user = {
burst_count = 5;
per_second = 3.0e-3;
};
};
rc_joins = {
local = {
burst_count = 10;
per_second = 0.1;
};
remote = {
burst_count = 10;
per_second = 1.0e-2;
};
};
rc_login = {
account = {
burst_count = 3;
per_second = 0.17;
};
address = {
burst_count = 3;
per_second = 0.17;
};
failed_attempts = {
burst_count = 3;
per_second = 0.17;
};
};
rc_message = {
rc_admin_redaction = {
burst_count = 50;
per_second = 1;
};
rc_federation = {
concurrent = 3;
reject_limit = 50;
sleep_delay = 500;
sleep_limit = 10;
window_size = 1000;
};
rc_invites = {
per_issuer = {
burst_count = 10;
per_second = 0.2;
per_second = 0.3;
};
rc_registration = {
per_room = {
burst_count = 10;
per_second = 0.3;
};
per_user = {
burst_count = 5;
per_second = 3.0e-3;
};
};
rc_joins = {
local = {
burst_count = 10;
per_second = 0.1;
};
remote = {
burst_count = 10;
per_second = 1.0e-2;
};
};
rc_login = {
account = {
burst_count = 3;
per_second = 0.17;
};
redaction_retention_period = "7d";
forgotten_room_retention_period = "7d";
redis.enabled = false;
registration_requires_token = false;
registrations_require_3pid = [ "email" ];
report_stats = false;
require_auth_for_profile_requests = false;
room_list_publication_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];
signing_key_path = config.pub-solar-os.matrix.synapse.signing_key_path;
stream_writers = { };
trusted_key_servers = [ { server_name = "matrix.org"; } ];
suppress_key_server_warning = true;
turn_allow_guests = false;
turn_uris = [
"turn:${config.services.coturn.realm}:3478?transport=udp"
"turn:${config.services.coturn.realm}:3478?transport=tcp"
];
turn_user_lifetime = "1h";
url_preview_accept_language = [
"en-US"
"en"
];
url_preview_enabled = true;
url_preview_ip_range_blacklist = [
"127.0.0.0/8"
"10.0.0.0/8"
"172.16.0.0/12"
"192.168.0.0/16"
"100.64.0.0/10"
"192.0.0.0/24"
"169.254.0.0/16"
"192.88.99.0/24"
"198.18.0.0/15"
"192.0.2.0/24"
"198.51.100.0/24"
"203.0.113.0/24"
"224.0.0.0/4"
"::1/128"
"fe80::/10"
"fc00::/7"
"2001:db8::/32"
"ff00::/8"
"fec0::/10"
];
user_directory = {
prefer_local_users = false;
search_all_users = false;
address = {
burst_count = 3;
per_second = 0.17;
};
failed_attempts = {
burst_count = 3;
per_second = 0.17;
};
user_ips_max_age = "28d";
app_service_config_files = config.pub-solar-os.matrix.synapse.app-service-config-files;
};
withJemalloc = true;
extraConfigFiles = config.pub-solar-os.matrix.synapse.extra-config-files;
extras = [
"oidc"
"redis"
rc_message = {
burst_count = 10;
per_second = 0.2;
};
rc_registration = {
burst_count = 3;
per_second = 0.17;
};
redaction_retention_period = "7d";
forgotten_room_retention_period = "7d";
redis.enabled = false;
registration_requires_token = false;
registrations_require_3pid = [ "email" ];
report_stats = false;
require_auth_for_profile_requests = false;
room_list_publication_rules = [
{
action = "allow";
alias = "*";
room_id = "*";
user_id = "*";
}
];
plugins = [ config.services.matrix-synapse.package.plugins.matrix-synapse-shared-secret-auth ];
};
signing_key_path = "/run/agenix/matrix-synapse-signing-key";
services.matrix-authentication-service = {
enable = true;
createDatabase = true;
extraConfigFiles = config.pub-solar-os.matrix.matrix-authentication-service.extra-config-files;
stream_writers = { };
trusted_key_servers = [ { server_name = "matrix.org"; } ];
suppress_key_server_warning = true;
# https://element-hq.github.io/matrix-authentication-service/reference/configuration.html
settings = {
account.email_change_allowed = false;
http.public_base = "https://mas.${config.pub-solar-os.networking.domain}";
http.issuer = "https://mas.${config.pub-solar-os.networking.domain}";
http.listeners = [
{
name = "web";
resources = [
{ name = "discovery"; }
{ name = "human"; }
{ name = "oauth"; }
{ name = "compat"; }
{ name = "graphql"; }
{
name = "assets";
path = "${config.services.matrix-authentication-service.package}/share/matrix-authentication-service/assets";
}
];
binds = [
{
host = "0.0.0.0";
port = 8090;
}
];
proxy_protocol = false;
}
{
name = "internal";
resources = [
{ name = "health"; }
];
binds = [
{
host = "0.0.0.0";
port = 8081;
}
];
proxy_protocol = false;
}
];
passwords.enabled = false;
};
};
pub-solar-os.backups.restic.matrix-synapse = {
paths = [
"/var/lib/matrix-synapse"
"/var/lib/matrix-appservice-irc"
"/var/lib/mautrix-telegram"
"/tmp/matrix-synapse-backup.sql"
"/tmp/matrix-authentication-service-backup.sql"
turn_allow_guests = false;
turn_uris = [
"turn:${config.services.coturn.realm}:3478?transport=udp"
"turn:${config.services.coturn.realm}:3478?transport=tcp"
];
timerConfig = {
OnCalendar = "*-*-* 05:00:00 Etc/UTC";
turn_user_lifetime = "1h";
url_preview_accept_language = [
"en-US"
"en"
];
url_preview_enabled = true;
url_preview_ip_range_blacklist = [
"127.0.0.0/8"
"10.0.0.0/8"
"172.16.0.0/12"
"192.168.0.0/16"
"100.64.0.0/10"
"192.0.0.0/24"
"169.254.0.0/16"
"192.88.99.0/24"
"198.18.0.0/15"
"192.0.2.0/24"
"198.51.100.0/24"
"203.0.113.0/24"
"224.0.0.0/4"
"::1/128"
"fe80::/10"
"fc00::/7"
"2001:db8::/32"
"ff00::/8"
"fec0::/10"
];
user_directory = {
prefer_local_users = false;
search_all_users = false;
};
initialize = true;
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d matrix > /tmp/matrix-synapse-backup.sql
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d matrix-authentication-service > /tmp/matrix-authentication-service-backup.sql
'';
backupCleanupCommand = ''
rm /tmp/matrix-synapse-backup.sql
rm /tmp/matrix-authentication-service-backup.sql
'';
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 4"
"--keep-monthly 3"
user_ips_max_age = "28d";
app_service_config_files = [
"/var/lib/matrix-synapse/telegram-registration.yaml"
"/var/lib/matrix-appservice-irc/registration.yml"
# "/matrix-appservice-slack-registration.yaml"
# "/hookshot-registration.yml"
# "/matrix-mautrix-signal-registration.yaml"
# "/matrix-mautrix-telegram-registration.yaml"
];
modules = [
{
module = "mjolnir.Module";
config = {
# Prevent servers/users in the ban lists from inviting users on this
# server to rooms. Default true.
block_invites = true;
# Flag messages sent by servers/users in the ban lists as spam. Currently
# this means that spammy messages will appear as empty to users. Default
# false.
block_messages = false;
# Remove users from the user directory search by filtering matrix IDs and
# display names by the entries in the user ban list. Default false.
block_usernames = false;
# The room IDs of the ban lists to honour. Unlike other parts of Mjolnir,
# this list cannot be room aliases or permalinks. This server is expected
# to already be joined to the room - Mjolnir will not automatically join
# these rooms.
ban_lists = [ "!roomid:example.org" ];
};
}
];
};
withJemalloc = true;
extraConfigFiles = [
"/run/agenix/matrix-synapse-secret-config.yaml"
# The registration file is automatically generated after starting the
# appservice for the first time.
# cp /var/lib/mautrix-telegram/telegram-registration.yaml \
# /var/lib/matrix-synapse/
# chown matrix-synapse:matrix-synapse \
# /var/lib/matrix-synapse/telegram-registration.yaml
"/var/lib/matrix-synapse/telegram-registration.yaml"
];
extras = [
"oidc"
"redis"
];
plugins = with config.services.matrix-synapse.package.plugins; [
matrix-synapse-shared-secret-auth
matrix-synapse-mjolnir-antispam
];
};
services.matrix-sliding-sync = {
enable = true;
settings = {
SYNCV3_SERVER = "https://${publicDomain}";
SYNCV3_BINDADDR = "127.0.0.1:8011";
# The bind addr for Prometheus metrics, which will be accessible at
# /metrics at this address
SYNCV3_PROM = "127.0.0.1:9100";
};
environmentFile = config.age.secrets."matrix-synapse-sliding-sync-secret".path;
};
services.restic.backups.matrix-synapse-storagebox = {
paths = [
"/var/lib/matrix-synapse"
"/var/lib/matrix-appservice-irc"
"/var/lib/mautrix-telegram"
"/tmp/matrix-synapse-backup.sql"
];
timerConfig = {
OnCalendar = "*-*-* 05:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d matrix > /tmp/matrix-synapse-backup.sql
'';
backupCleanupCommand = ''
rm /tmp/matrix-synapse-backup.sql
'';
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 4"
"--keep-monthly 3"
];
};
}
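
As the comment next to experimental_features notes, Synapse does not deep-merge YAML across config files, so settings that have to sit beside secret values (msc3861, registration secrets) must live together in one file shipped via extraConfigFiles. A minimal sketch, assuming the secret file is provisioned by agenix; the YAML in the comment is illustrative:

services.matrix-synapse.extraConfigFiles = [
  # Decrypted at activation time; holds the keys that cannot be
  # split from their secrets, e.g.:
  #   experimental_features:
  #     msc3266_enabled: true
  #   registration_shared_secret: "..."
  "/run/agenix/matrix-synapse-secret-config.yaml"
];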

View file

@ -70,8 +70,6 @@ let
$wgUploadDirectory = "/var/www/html/uploads";
$wgUploadPath = $wgScriptPath . "/uploads";
$wgFileExtensions = [ 'png', 'gif', 'jpg', 'jpeg', 'webp', 'svg', 'pdf', ];
$wgUseImageMagick = true;
$wgImageMagickConvertCommand = "/usr/bin/convert";
@ -141,10 +139,6 @@ let
// https://www.mediawiki.org/wiki/Extension:PluggableAuth#Configuration
$wgPluggableAuth_EnableAutoLogin = false;
$wgPluggableAuth_ButtonLabel = 'Login with pub.solar ID';
// Avoid getting logged out after 30 minutes
// https://www.mediawiki.org/wiki/Topic:W4be4h6t63vf3y8p
// https://www.mediawiki.org/wiki/Manual:$wgRememberMe
$wgRememberMe = 'always';
// https://www.mediawiki.org/wiki/Extension:OpenID_Connect#Keycloak
$wgPluggableAuth_Config[] = [
@ -217,7 +211,7 @@ in
backend = "docker";
containers."mediawiki" = {
image = "git.pub.solar/pub-solar/mediawiki-oidc-docker:1.43.0";
image = "git.pub.solar/pub-solar/mediawiki-oidc-docker:1.41.1";
user = "1000:${builtins.toString gid}";
autoStart = true;
@ -238,27 +232,4 @@ in
};
};
};
pub-solar-os.backups.restic.mediawiki = {
paths = [
"/var/lib/mediawiki/images"
"/var/lib/mediawiki/uploads"
"/tmp/mediawiki-backup.sql"
];
timerConfig = {
OnCalendar = "*-*-* 00:00:00 Etc/UTC";
};
initialize = true;
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d mediawiki > /tmp/mediawiki-backup.sql
'';
backupCleanupCommand = ''
rm /tmp/mediawiki-backup.sql
'';
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 4"
"--keep-monthly 3"
];
};
}
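
The containers."mediawiki" block above belongs to virtualisation.oci-containers: NixOS turns each entry into a systemd service that pulls and runs the image through the configured backend. A minimal sketch of such a definition (the port mapping is illustrative):

virtualisation.oci-containers = {
  backend = "docker";
  containers."mediawiki" = {
    image = "git.pub.solar/pub-solar/mediawiki-oidc-docker:1.43.0";
    autoStart = true;
    # host:container port mapping, illustrative
    ports = [ "127.0.0.1:8293:80" ];
  };
};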

View file

@ -2,7 +2,6 @@
config,
pkgs,
flake,
lib,
...
}:
{
@ -23,249 +22,118 @@
forceSSL = true;
};
services.nextcloud =
let
exiftool_1270 = pkgs.perlPackages.buildPerlPackage rec {
# NOTE nextcloud-memories needs this specific version of exiftool
# https://github.com/NixOS/nixpkgs/issues/345267
pname = "Image-ExifTool";
version = "12.70";
src = pkgs.fetchFromGitHub {
owner = "exiftool";
repo = "exiftool";
rev = version;
hash = "sha256-YMWYPI2SDi3s4KCpSNwovemS5MDj5W9ai0sOkvMa8Zg=";
};
nativeBuildInputs = lib.optional pkgs.stdenv.hostPlatform.isDarwin pkgs.shortenPerlShebang;
postInstall = lib.optionalString pkgs.stdenv.hostPlatform.isDarwin ''
shortenPerlShebang $out/bin/exiftool
'';
};
in
{
hostName = "cloud.${config.pub-solar-os.networking.domain}";
home = "/var/lib/nextcloud";
services.nextcloud = {
hostName = "cloud.${config.pub-solar-os.networking.domain}";
home = "/var/lib/nextcloud";
enable = true;
# When updating package, remember to update nextcloud30Packages in
# services.nextcloud.extraApps
package = pkgs.nextcloud30;
https = true;
secretFile = config.age.secrets."nextcloud-secrets".path; # secret
maxUploadSize = "1G";
configureRedis = true;
notify_push = {
enable = true;
bendDomainToLocalhost = true;
};
config = {
adminuser = "admin";
adminpassFile = config.age.secrets."nextcloud-admin-pass".path;
dbuser = "nextcloud";
dbtype = "pgsql";
dbname = "nextcloud";
};
settings = {
overwrite.cli.url = "https://cloud.${config.pub-solar-os.networking.domain}";
overwriteprotocol = "https";
installed = true;
default_phone_region = "+49";
mail_sendmailmode = "smtp";
mail_from_address = "nextcloud";
mail_smtpmode = "smtp";
mail_smtpauthtype = "PLAIN";
mail_domain = "pub.solar";
mail_smtpname = "admins@pub.solar";
mail_smtpsecure = "ssl";
mail_smtpauth = true;
mail_smtphost = "mail.pub.solar";
mail_smtpport = "465";
# This is to allow connections to collabora and keycloak, among other services
# running on the same host
#
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/config_sample_php_parameters.html?highlight=allow_local_remote_servers%20true
# https://github.com/ONLYOFFICE/onlyoffice-nextcloud/issues/293
allow_local_remote_servers = true;
enable_previews = true;
jpeg_quality = 60;
enabledPreviewProviders = [
"OC\\Preview\\PNG"
"OC\\Preview\\JPEG"
"OC\\Preview\\GIF"
"OC\\Preview\\BMP"
"OC\\Preview\\HEIC"
"OC\\Preview\\TIFF"
"OC\\Preview\\XBitmap"
"OC\\Preview\\SVG"
"OC\\Preview\\WebP"
"OC\\Preview\\Font"
"OC\\Preview\\Movie"
"OC\\Preview\\ImaginaryPDF"
"OC\\Preview\\MP3"
"OC\\Preview\\OpenDocument"
"OC\\Preview\\Krita"
"OC\\Preview\\TXT"
"OC\\Preview\\MarkDown"
"OC\\Preview\\Imaginary"
];
preview_imaginary_url = "http://127.0.0.1:${toString config.services.imaginary.port}/";
preview_max_filesize_image = 128; # MB
preview_max_memory = 512; # MB
preview_max_x = 2048; # px
preview_max_y = 2048; # px
preview_max_scale_factor = 1;
"preview_ffmpeg_path" = lib.getExe pkgs.ffmpeg-headless;
"memories.exiftool_no_local" = false;
"memories.exiftool" = "${exiftool_1270}/bin/exiftool";
"memories.vod.ffmpeg" = lib.getExe pkgs.ffmpeg;
"memories.vod.ffprobe" = lib.getExe' pkgs.ffmpeg-headless "ffprobe";
auth.bruteforce.protection.enabled = true;
trashbin_retention_obligation = "auto,7";
skeletondirectory = "${pkgs.nextcloud-skeleton}/{lang}";
defaultapp = "file";
activity_expire_days = "14";
integrity.check.disabled = false;
updater.release.channel = "stable";
loglevel = 2;
debug = false;
maintenance_window_start = "1";
# maintenance = false;
app_install_overwrite = [
"pdfdraw"
"integration_whiteboard"
];
htaccess.RewriteBase = "/";
theme = "";
simpleSignUpLink.shown = false;
};
phpOptions = {
"opcache.interned_strings_buffer" = "32";
"opcache.max_accelerated_files" = "16229";
"opcache.memory_consumption" = "256";
# https://docs.nextcloud.com/server/latest/admin_manual/installation/server_tuning.html#enable-php-opcache
"opcache.revalidate_freq" = "60";
# https://docs.nextcloud.com/server/latest/admin_manual/installation/server_tuning.html#:~:text=opcache.jit%20%3D%201255%20opcache.jit_buffer_size%20%3D%20128m
"opcache.jit" = "1255";
"opcache.jit_buffer_size" = "128M";
};
# Calculated with 4GiB RAM, 80MiB process size available on
# https://spot13.com/pmcalculator/
poolSettings = {
pm = "dynamic";
"pm.max_children" = "52";
"pm.max_requests" = "500";
"pm.max_spare_servers" = "39";
"pm.min_spare_servers" = "13";
"pm.start_servers" = "13";
};
caching.redis = true;
# Don't allow the installation and updating of apps from the Nextcloud appstore,
# because we declaratively install them
appstoreEnable = false;
autoUpdateApps.enable = false;
extraApps = {
inherit (pkgs.nextcloud30Packages.apps)
calendar
contacts
cospend
deck
end_to_end_encryption
groupfolders
integration_deepl
mail
memories
notes
notify_push
previewgenerator
quota_warning
recognize
richdocuments
spreed
tasks
twofactor_webauthn
user_oidc
;
};
database.createLocally = true;
};
# https://docs.nextcloud.com/server/30/admin_manual/installation/server_tuning.html#previews
services.imaginary = {
enable = true;
address = "127.0.0.1";
settings.return-size = true;
};
package = pkgs.nextcloud29;
https = true;
secretFile = config.age.secrets."nextcloud-secrets".path; # secret
maxUploadSize = "1G";
systemd = {
services =
let
occ = "/run/current-system/sw/bin/nextcloud-occ";
in
{
nextcloud-cron-preview-generator = {
environment.NEXTCLOUD_CONFIG_DIR = "${config.services.nextcloud.home}/config";
serviceConfig = {
ExecStart = "${occ} preview:pre-generate";
Type = "oneshot";
User = "nextcloud";
};
};
configureRedis = true;
nextcloud-preview-generator-setup = {
wantedBy = [ "multi-user.target" ];
requires = [ "phpfpm-nextcloud.service" ];
after = [ "phpfpm-nextcloud.service" ];
environment.NEXTCLOUD_CONFIG_DIR = "${config.services.nextcloud.home}/config";
script = # bash
''
# check with:
# for size in squareSizes widthSizes heightSizes; do echo -n "$size: "; nextcloud-occ config:app:get previewgenerator $size; done
# extra commands run for preview generator:
# 32 icon file list
# 64 icon file list android app, photos app
# 96 nextcloud client VFS windows file preview
# 256 file app grid view, many requests
# 512 photos app tags
${occ} config:app:set --value="32 64 96 256 512" previewgenerator squareSizes
# 341 hover in maps app
# 1920 files/photos app when viewing picture
${occ} config:app:set --value="341 1920" previewgenerator widthSizes
# 256 hover in maps app
# 1080 files/photos app when viewing picture
${occ} config:app:set --value="256 1080" previewgenerator heightSizes
'';
serviceConfig = {
Type = "oneshot";
User = "nextcloud";
};
};
};
timers.nextcloud-cron-preview-generator = {
after = [ "nextcloud-setup.service" ];
timerConfig = {
OnCalendar = "*:0/10";
OnUnitActiveSec = "9m";
Persistent = true;
RandomizedDelaySec = 60;
Unit = "nextcloud-cron-preview-generator.service";
};
wantedBy = [ "timers.target" ];
notify_push = {
enable = true;
bendDomainToLocalhost = true;
};
config = {
adminuser = "admin";
adminpassFile = config.age.secrets."nextcloud-admin-pass".path;
dbuser = "nextcloud";
dbtype = "pgsql";
dbname = "nextcloud";
dbtableprefix = "oc_";
};
settings = {
overwrite.cli.url = "http://cloud.${config.pub-solar-os.networking.domain}";
overwriteprotocol = "https";
installed = true;
default_phone_region = "+49";
mail_sendmailmode = "smtp";
mail_from_address = "nextcloud";
mail_smtpmode = "smtp";
mail_smtpauthtype = "PLAIN";
mail_domain = "pub.solar";
mail_smtpname = "admins@pub.solar";
mail_smtpsecure = "ssl";
mail_smtpauth = true;
mail_smtphost = "mail.pub.solar";
mail_smtpport = "465";
# This is to allow connections to collabora and keycloak, among other services
# running on the same host
#
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/config_sample_php_parameters.html?highlight=allow_local_remote_servers%20true
# https://github.com/ONLYOFFICE/onlyoffice-nextcloud/issues/293
allow_local_remote_servers = true;
enable_previews = true;
enabledPreviewProviders = [
"OC\\Preview\\PNG"
"OC\\Preview\\JPEG"
"OC\\Preview\\GIF"
"OC\\Preview\\BMP"
"OC\\Preview\\XBitmap"
"OC\\Preview\\Movie"
"OC\\Preview\\PDF"
"OC\\Preview\\MP3"
"OC\\Preview\\TXT"
"OC\\Preview\\MarkDown"
];
preview_max_x = "1024";
preview_max_y = "768";
preview_max_scale_factor = "1";
auth.bruteforce.protection.enabled = true;
trashbin_retention_obligation = "auto,7";
skeletondirectory = "./nextcloud-skeleton";
defaultapp = "file";
activity_expire_days = "14";
integrity.check.disabled = false;
updater.release.channel = "stable";
loglevel = 2;
debug = false;
maintenance_window_start = "1";
# maintenance = false;
app_install_overwrite = [
"pdfdraw"
"integration_whiteboard"
];
htaccess.RewriteBase = "/";
theme = "";
simpleSignUpLink.shown = false;
};
phpOptions = {
"opcache.interned_strings_buffer" = "32";
"opcache.max_accelerated_files" = "16229";
"opcache.memory_consumption" = "256";
# https://docs.nextcloud.com/server/latest/admin_manual/installation/server_tuning.html#enable-php-opcache
"opcache.revalidate_freq" = "60";
# https://docs.nextcloud.com/server/latest/admin_manual/installation/server_tuning.html#:~:text=opcache.jit%20%3D%201255%20opcache.jit_buffer_size%20%3D%20128m
"opcache.jit" = "1255";
"opcache.jit_buffer_size" = "128M";
};
# Calculated with 4GiB RAM, 80MiB process size available on
# https://spot13.com/pmcalculator/
poolSettings = {
pm = "dynamic";
"pm.max_children" = "52";
"pm.max_requests" = "500";
"pm.max_spare_servers" = "39";
"pm.min_spare_servers" = "13";
"pm.start_servers" = "13";
};
caching.redis = true;
autoUpdateApps.enable = true;
database.createLocally = true;
};
services.restic.backups.nextcloud-storagebox = {
@ -277,7 +145,7 @@
OnCalendar = "*-*-* 01:00:00 Etc/UTC";
};
initialize = true;
passwordFile = config.age.secrets."restic-repo-storagebox-nachtigall".path;
passwordFile = config.age.secrets."restic-repo-storagebox".path;
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
backupPrepareCommand = ''
${pkgs.sudo}/bin/sudo -u postgres ${pkgs.postgresql}/bin/pg_dump -d nextcloud > /tmp/nextcloud-backup.sql

Binary file not shown.

(image changed; new size 29 KiB)

View file

@ -4,7 +4,7 @@
data-name="Layer 3"
viewBox="0 0 275.3 276.37"
version="1.1"
sodipodi:docname="pub.solar.svg"
sodipodi:docname="pubsolar.svg"
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"

(image changed; 8.7 KiB before and after)

View file

@ -2,6 +2,6 @@
By default, the cloud is limited to 10MB of storage. If you want more, just send a short request to [crew@pub.solar](mailto:crew@pub.solar).
To download a Nextcloud desktop client, go to https://nextcloud.com/download. You can find instructions on how to set it up on our [Nextcloud wiki page](https://wiki.pub.solar/index.php/Nextcloud).
To download a desktop client, go to https://nextcloud.com/download.
You can edit this file by clicking here, and put any contents you want as notes for this directory :)

View file

@ -1,7 +1,8 @@
{ config, ... }:
let
objStorHost = "mastodon.web.pub.solar";
objStorHost = "link.tardigradeshare.io";
objStorBucket = "s/jw24ad6l4a6zxsnd32cmf5hp5nsq/pub-solar-mastodon";
in
{
services.nginx.virtualHosts = {
@ -9,12 +10,6 @@ in
enableACME = true;
forceSSL = true;
# Use a variable to force nginx to perform DNS resolution on its value;
# the IP of the object storage provider may not always remain the same.
extraConfig = ''
set $s3_backend 'https://${objStorHost}';
'';
locations = {
"= /" = {
index = "index.html";
@ -30,6 +25,7 @@ in
deny all;
}
resolver 8.8.8.8;
proxy_set_header Host ${objStorHost};
proxy_set_header Connection \'\';
proxy_set_header Authorization \'\';
@ -44,7 +40,7 @@ in
proxy_hide_header x-amz-bucket-region;
proxy_hide_header x-amzn-requestid;
proxy_ignore_headers Set-Cookie;
proxy_pass $s3_backend$request_uri;
proxy_pass https://${objStorHost}/${objStorBucket}$request_uri?download;
proxy_intercept_errors off;
proxy_ssl_protocols TLSv1.2 TLSv1.3;
proxy_ssl_server_name on;
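
The set $s3_backend trick above works because nginx resolves a hostname written directly into proxy_pass only once, at configuration load; routing the URL through a variable forces a fresh lookup via the configured resolver at request time. A minimal sketch of the pattern inside a virtual host (host names and resolver are illustrative):

services.nginx.virtualHosts."files.example.org".locations."/" = {
  extraConfig = ''
    resolver 9.9.9.9;
    # A variable target is re-resolved per request instead of once at startup.
    set $s3_backend 'https://mastodon.web.pub.solar';
    proxy_pass $s3_backend$request_uri;
  '';
};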

View file

@ -10,20 +10,25 @@ let
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-XSS-Protection "1; mode=block";
'';
clientConfig = import ./element-client-config.nix { inherit config lib pkgs; };
clientConfig = import ./element-client-config.nix { inherit lib pkgs; };
wellKnownClient = domain: {
"m.homeserver".base_url = "https://matrix.${domain}";
"m.identity_server".base_url = "https://matrix.${domain}";
"org.matrix.msc2965.authentication" = {
issuer = "https://mas.${domain}/";
account = "https://mas.${domain}/account";
};
"org.matrix.msc3575.proxy".url = "https://matrix.${domain}";
"im.vector.riot.e2ee".default = true;
"io.element.e2ee" = {
default = true;
secure_backup_required = false;
secure_backup_setup_methods = [ ];
};
"m.integrations" = {
managers = [
{
api_url = "https://dimension.${domain}/api/v1/scalar";
ui_url = "https://dimension.${domain}/element";
}
];
};
};
wellKnownServer = domain: { "m.server" = "matrix.${domain}:8448"; };
wellKnownSupport = {
@ -80,27 +85,6 @@ in
root = pkgs.element-stickerpicker;
};
"mas.${config.pub-solar-os.networking.domain}" = {
root = "/dev/null";
forceSSL = lib.mkDefault true;
enableACME = lib.mkDefault true;
locations = {
"/" = {
proxyPass = "http://127.0.0.1:8090";
extraConfig = ''
${commonHeaders}
proxy_http_version 1.1;
# Forward the client IP address
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
'';
};
};
};
"matrix.${config.pub-solar-os.networking.domain}" = {
root = "/dev/null";
@ -115,48 +99,28 @@ in
locations = {
# For telegram
"/c3c3f34b-29fb-5feb-86e5-98c75ec8214b" = {
priority = 100;
proxyPass = "http://127.0.0.1:8009";
extraConfig = commonHeaders;
};
# For IRC appservice media proxy
"/media" = {
priority = 100;
proxyPass = "http://127.0.0.1:${toString (config.services.matrix-appservice-irc.settings.ircService.mediaProxy.bindPort)}";
# sliding-sync
"~ ^/(client/|_matrix/client/unstable/org.matrix.msc3575/sync)" = {
proxyPass = "http://127.0.0.1:8011";
extraConfig = commonHeaders;
};
# Forward to the auth service
"~ ^/_matrix/client/(.*)/(login|logout|refresh)" = {
priority = 100;
proxyPass = "http://127.0.0.1:8090";
extraConfig = ''
${commonHeaders}
proxy_http_version 1.1;
# Forward the client IP address
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
'';
};
# Forward to Synapse
# as per https://element-hq.github.io/synapse/latest/reverse_proxy.html#nginx
"~ ^(/_matrix|/_synapse/client)" = {
priority = 200;
"~* ^(/_matrix|/_synapse/client|/_synapse/oidc)" = {
proxyPass = "http://127.0.0.1:8008";
extraConfig = ''
${commonHeaders}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
client_body_buffer_size 25M;
client_max_body_size 50M;
proxy_max_temp_file_size 0;
proxy_http_version 1.1;
'';
};
};
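
The wellKnownClient helper at the top of this file only builds the attrset; one plausible way to serve it as /.well-known/matrix/client from the same nginx config is to render it with builtins.toJSON and return it inline. A sketch under that assumption (the actual serving location is outside this hunk):

services.nginx.virtualHosts."${config.pub-solar-os.networking.domain}" = {
  locations."=/.well-known/matrix/client" = {
    extraConfig = ''
      default_type application/json;
      add_header Access-Control-Allow-Origin *;
      return 200 '${builtins.toJSON (wellKnownClient config.pub-solar-os.networking.domain)}';
    '';
  };
};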

View file

@ -1,14 +1,9 @@
{
config,
pkgs,
lib,
...
}:
{ pkgs, lib, ... }:
{
default_server_config = {
"m.homeserver" = {
base_url = "https://matrix.${config.pub-solar-os.networking.domain}";
server_name = "${config.pub-solar-os.networking.domain}";
base_url = "https://matrix.pub.solar";
server_name = "pub.solar";
};
"m.identity_server" = {
base_url = "";
@ -50,15 +45,4 @@
# FUTUREWORK: Replace with pub.solar logo
auth_header_logo_url = "themes/element/img/logos/element-logo.svg";
};
# Enable Element Call Beta
features = {
feature_video_rooms = true;
feature_group_calls = true;
feature_element_call_video_rooms = true;
};
element_call = {
url = "https://call.element.io";
participant_limit = 50;
brand = "Element Call";
};
}

View file

@ -7,7 +7,7 @@
services.nginx.virtualHosts = {
"www.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;
addSSL = true;
extraConfig = ''
error_log /dev/null;

View file

@ -10,10 +10,9 @@ let
webserverGroup = "hakkonaut";
in
{
users.users.nginx.extraGroups = [ webserverGroup ];
services.nginx = {
enable = true;
group = webserverGroup;
enableReload = true;
proxyCachePath.cache = {
enable = true;
@ -22,13 +21,6 @@ in
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
resolver.addresses = [
# quad9.net
"9.9.9.9"
"149.112.112.112"
"[2620:fe::fe]"
"[2620:fe::9]"
];
appendHttpConfig = ''
# https://my.f5.com/manage/s/article/K51798430
proxy_headers_hash_bucket_size 128;

View file

@ -147,26 +147,4 @@ in
};
};
};
pub-solar-os.backups.restic.obs-portal = {
paths = [
"/var/lib/obs-portal/data"
"/tmp/obs-portal-backup.sql"
];
timerConfig = {
OnCalendar = "*-*-* 06:00:00 Etc/UTC";
};
initialize = true;
backupPrepareCommand = ''
${pkgs.docker}/bin/docker exec -i --user postgres obs-portal-db pg_dump obs > /tmp/obs-portal-backup.sql
'';
backupCleanupCommand = ''
rm /tmp/obs-portal-backup.sql
'';
pruneOpts = [
"--keep-daily 7"
"--keep-weekly 4"
"--keep-monthly 3"
];
};
}

View file

@ -25,4 +25,9 @@
full_page_writes = false;
};
};
systemd.services.postgresql = {
after = [ "var-lib-postgresql.mount" ];
requisite = [ "var-lib-postgresql.mount" ];
};
}
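
The after/requisite pair above ties PostgreSQL to the mount unit systemd derives from the mount point (slashes become dashes, so /var/lib/postgresql yields var-lib-postgresql.mount); requisite makes the service fail outright if the mount is not active, rather than trying to start it. A minimal sketch of the fileSystems entry that produces that unit (the device path is illustrative):

fileSystems."/var/lib/postgresql" = {
  # systemd names the generated unit after the escaped mount point:
  # /var/lib/postgresql -> var-lib-postgresql.mount
  device = "/dev/disk/by-label/postgres";
  fsType = "ext4";
};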

View file

@ -142,8 +142,8 @@ lib.mapAttrsToList
cpu_using_90percent = {
condition = ''100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) >= 90'';
time = "20m";
description = "{{$labels.instance}} is running with cpu usage > 90% for at least 20 minutes: {{$value}}";
time = "10m";
description = "{{$labels.instance}} is running with cpu usage > 90% for at least 10 minutes: {{$value}}";
};
reboot = {
@ -234,10 +234,10 @@ lib.mapAttrsToList
};
*/
#host_memory_under_memory_pressure = {
# condition = "rate(node_vmstat_pgmajfault[1m]) > 1000";
# description = "{{$labels.instance}}: The node is under heavy memory pressure. High rate of major page faults: {{$value}}";
#};
host_memory_under_memory_pressure = {
condition = "rate(node_vmstat_pgmajfault[1m]) > 1000";
description = "{{$labels.instance}}: The node is under heavy memory pressure. High rate of major page faults: {{$value}}";
};
# ext4_errors = {
# condition = "ext4_errors_value > 0";
@ -250,10 +250,4 @@ lib.mapAttrsToList
# description =
# "alertmanager: number of active silences has changed: {{$value}}";
# };
garage_cluster_healthy = {
condition = "cluster_healthy == 0";
time = "15m";
description = "garage cluster on {{$labels.instance}} is not healthy: {{$labels.result}}!";
};
})
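
The attrsets in this file (condition, time, description) are not raw Prometheus syntax; the lib.mapAttrsToList call at the top of the hunk converts each entry into an alerting rule, roughly mapping condition to expr, time to for, and description to an annotation. A sketch of such a conversion, assuming this shape for the helper (the repo's actual function may differ):

lib.mapAttrsToList (name: opts: {
  alert = name;
  expr = opts.condition;
  "for" = opts.time or "2m";
  annotations.description = opts.description;
}) {
  garage_cluster_healthy = {
    condition = "cluster_healthy == 0";
    time = "15m";
    description = "garage cluster on {{$labels.instance}} is not healthy!";
  };
}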

View file

@ -12,27 +12,15 @@
owner = "alertmanager";
};
security.acme.certs = {
"alerts.${config.pub-solar-os.networking.domain}" = {
# disable http challenge
webroot = null;
# enable dns challenge
dnsProvider = "namecheap";
};
};
services.nginx.virtualHosts."alerts.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;
listenAddresses = [
"10.7.6.5"
"[fd00:fae:fae:fae:fae:5::]"
];
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.prometheus.alertmanager.port}";
};
services.caddy.virtualHosts."alerts.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''
output discard
'';
extraConfig = ''
bind 10.7.6.2 fd00:fae:fae:fae:fae:2::
tls internal
reverse_proxy :${toString config.services.prometheus.alertmanager.port}
'';
};
services.prometheus = {
@ -53,6 +41,12 @@
{
job_name = "node-exporter";
static_configs = [
{
targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
labels = {
instance = "flora-6";
};
}
{
targets = [ "nachtigall.wg.${config.pub-solar-os.networking.domain}" ];
labels = {
@ -75,30 +69,6 @@
instance = "tankstelle";
};
}
{
targets = [
"trinkgenossin.wg.${config.pub-solar-os.networking.domain}:${toString config.services.prometheus.exporters.node.port}"
];
labels = {
instance = "trinkgenossin";
};
}
{
targets = [
"delite.wg.${config.pub-solar-os.networking.domain}:${toString config.services.prometheus.exporters.node.port}"
];
labels = {
instance = "delite";
};
}
{
targets = [
"blue-shell.wg.${config.pub-solar-os.networking.domain}:${toString config.services.prometheus.exporters.node.port}"
];
labels = {
instance = "blue-shell";
};
}
];
}
{
@ -113,29 +83,6 @@
}
];
}
{
job_name = "garage";
static_configs = [
{
targets = [ "trinkgenossin.wg.${config.pub-solar-os.networking.domain}:3903" ];
labels = {
instance = "trinkgenossin";
};
}
{
targets = [ "delite.wg.${config.pub-solar-os.networking.domain}:3903" ];
labels = {
instance = "delite";
};
}
{
targets = [ "blue-shell.wg.${config.pub-solar-os.networking.domain}:3903" ];
labels = {
instance = "blue-shell";
};
}
];
}
];
ruleFiles = [

View file

@ -18,7 +18,7 @@
};
clients = [
{
url = "http://trinkgenossin.wg.pub.solar:${toString flake.self.nixosConfigurations.trinkgenossin.config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
url = "http://flora-6.wg.pub.solar:${toString flake.self.nixosConfigurations.flora-6.config.services.loki.configuration.server.http_listen_port}/loki/api/v1/push";
}
];
scrape_configs = [

View file

@ -1,82 +0,0 @@
{
flake,
config,
pkgs,
...
}:
let
ttrss-auth-oidc = pkgs.stdenv.mkDerivation {
name = "ttrss-auth-oidc";
version = "7ebfbc91e92bb133beb907c6bde79279ee5156df";
src = pkgs.fetchgit {
url = "https://git.tt-rss.org/fox/ttrss-auth-oidc.git";
rev = "7ebfbc91e92bb133beb907c6bde79279ee5156df";
hash = "sha256-G6vZBvSWms6s6nHZWsxJjMGuubt/imiBvbp6ykwrZbg=";
};
installPhase = ''
mkdir -p $out/auth_oidc
cp -r * $out/auth_oidc
'';
};
in
{
age.secrets.tt-rss-database-password = {
file = "${flake.self}/secrets/tt-rss-database-password.age";
owner = "tt_rss";
mode = "600";
};
age.secrets.tt-rss-keycloak-client-secret = {
file = "${flake.self}/secrets/tt-rss-keycloak-client-secret.age";
owner = "tt_rss";
mode = "600";
};
age.secrets.tt-rss-smtp-password = {
file = "${flake.self}/secrets/tt-rss-smtp-password.age";
owner = "tt_rss";
mode = "600";
};
age.secrets.tt-rss-feed-crypt-key = {
file = "${flake.self}/secrets/tt-rss-feed-crypt-key.age";
owner = "tt_rss";
mode = "600";
};
services.nginx.virtualHosts."rss.${config.pub-solar-os.networking.domain}" = {
enableACME = true;
forceSSL = true;
};
services.tt-rss = {
enable = true;
virtualHost = "rss.${config.pub-solar-os.networking.domain}";
selfUrlPath = "https://rss.${config.pub-solar-os.networking.domain}";
root = "/var/lib/tt-rss";
logDestination = "";
plugins = [
"auth_internal"
"note"
"auth_oidc"
];
pluginPackages = [ ttrss-auth-oidc ];
email = {
server = "mail.pub.solar";
security = "tls";
login = "admins@pub.solar";
fromName = "pub.solar RSS server";
fromAddress = "rss@pub.solar";
digestSubject = "[RSS] New headlines for last 24 hours";
};
database = {
passwordFile = config.age.secrets.tt-rss-database-password.path;
createLocally = true;
};
extraConfig = ''
putenv('TTRSS_SMTP_PASSWORD=' . file_get_contents('${config.age.secrets.tt-rss-smtp-password.path}'));
putenv('TTRSS_AUTH_OIDC_NAME=pub.solar ID');
putenv('TTRSS_AUTH_OIDC_URL=https://auth.${config.pub-solar-os.networking.domain}/realms/${config.pub-solar-os.auth.realm}/');
putenv('TTRSS_AUTH_OIDC_CLIENT_ID=tt-rss');
putenv('TTRSS_AUTH_OIDC_CLIENT_SECRET=' . file_get_contents('${config.age.secrets.tt-rss-keycloak-client-secret.path}'));
putenv('TTRSS_FEED_CRYPT_KEY=' . file_get_contents('${config.age.secrets.tt-rss-feed-crypt-key.path}'));
'';
};
}

View file

@ -1,20 +0,0 @@
{ flake, config, ... }:
{
boot.initrd.network = {
enable = true;
ssh = {
enable = true;
# To prevent ssh clients from freaking out because a different host key is used,
# a different port for ssh is useful (assuming the same host also runs a regular sshd)
port = 2222;
# Please create this manually the first time.
hostKeys = [ "/etc/secrets/initrd/ssh_host_ed25519_key" ];
authorizedKeys = flake.self.logins.sshPubKeys;
};
postCommands = ''
# Automatically ask for the password on SSH login
echo 'cryptsetup-askpass || echo "Unlock was successful; exiting SSH session" && exit 1' >> /root/.profile
'';
};
}

Some files were not shown because too many files have changed in this diff.