forked from pub-solar/infra
Compare commits
60 commits
fix/forgej
...
main
d58209ef93
a98cfc82e5
a66c6ada59
8e66bea9c8
505d0f34ea
ddc5c65bf7
a11255b433
d62b6cda92
c580fe0fbb
60aef1d038
fa9ce9d435
9541e5029e
c4d0d34807
d5fe65b60d
0e7dc95250
c86e22b292
4992819742
a9411d05a8
e8530caf1d
7c492e7391
a0c6f0dc08
46c7c9ecb1
fb4004e9f0
3030b0f84d
c07d24f6a7
0f297c4711
679d9b236f
78d5e5a4f0
c768203bed
b0c466869e
b6a54efd9a
7e145040cc
9d94b888ae
8a9fe3b8fe
8743ea7b0c
8743b50f7f
316ba9ef53
afca75441c
9698c47530
ccb029dde3
41e4d3427c
16e9d476cb
3caf085d0b
c5159dd66d
b27f8c1380
76ca43142a
16c6aa3b61
315cbf5813
9191729f5c
b6b8d69852
4380c3b0ab
e618b9f9c2
ae0c90e4f8
d7c9333ff4
18a62b8d35
9ec77e2a30
1bcb8bb7e0
cf1e6f8134
83e293016f
91a2b66134
29 changed files with 668 additions and 117 deletions
@@ -10,7 +10,7 @@ jobs:
      - name: Check out repository code
        uses: https://code.forgejo.org/actions/checkout@v4

      - uses: https://github.com/nixbuild/nix-quick-install-action@v26
      - uses: https://github.com/nixbuild/nix-quick-install-action@v27
        with:
          load_nixConfig: false
          nix_conf: |
@@ -24,7 +24,7 @@ jobs:
          echo "hash=$(md5sum flake.lock | awk '{print $1}')" >> $GITHUB_OUTPUT

      - name: Restore and cache Nix store
        uses: https://github.com/nix-community/cache-nix-action@v4
        uses: https://github.com/nix-community/cache-nix-action@v4.0.3
        id: nix-store-cache
        with:
          key: cache-${{ runner.os }}-nix-store-${{ steps.flake-lock-hash.outputs.hash }}
@@ -35,16 +35,37 @@ jobs:
          gc-max-store-size-linux: 10000000000

          purge-caches: true
          purge-keys: cache-${{ runner.os }}-nix-store-
          purge-key: cache-${{ runner.os }}-nix-store-
          purge-created: true
          purge-created-max-age: 42

      - name: Prepare cachix
        uses: https://github.com/cachix/cachix-action@v12
        uses: https://github.com/cachix/cachix-action@v14
        with:
          name: pub-solar
          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
          useDaemon: false

      - name: Run flake checks
        run: |
          # Prevent cache garbage collection by creating GC roots
          for target in $(nix flake show --json --all-systems | jq '
            .["nixosConfigurations"] |
            to_entries[] |
            .key
            ' | tr -d '"'
          ); do
            nix --print-build-logs --verbose --accept-flake-config --access-tokens '' \
              build --out-link ./result-$target ".#nixosConfigurations.${target}.config.system.build.toplevel"
          done

          nix --print-build-logs --verbose --accept-flake-config --access-tokens '' flake check

          # Add GC roots for flake inputs, too
          # https://github.com/NixOS/nix/issues/4250#issuecomment-1146878407
          mkdir --parents "$NIX_USER_PROFILE_DIR"
          gc_root_prefix="$NIX_USER_PROFILE_DIR"/infra-flake-
          echo "Adding gcroots flake inputs with prefix $gc_root_prefix ..."
          nix flake archive --json 2>/dev/null | jq --raw-output '.inputs | to_entries[] | "ln --force --symbolic --no-target-directory "+.value.path+" \"'"$gc_root_prefix"'"+.key+"\""' | while read -r line; do
            eval "$line"
          done
37
docs/administrative-access.md
Normal file

@@ -0,0 +1,37 @@
# Administrative access

People with admin access to the infrastructure are added to [`logins/admins.nix`](../logins/admins.nix). This is an attrset with the following structure:

```
{
  <username> = {
    sshPubKeys = {
      <name> = <pubkey-string>;
    };

    wireguardDevices = [
      {
        publicKey = <pubkey-string>;
        allowedIPs = [ "10.7.6.<ip-address>/32" "fd00:fae:fae:fae:fae:<ip-address>::/96" ];
      }
    ];

    secretEncryptionKeys = {
      <name> = <encryption-key-string>;
    };
  };
}
```

# SSH Access

SSH is not reachable from the open internet. Instead, SSH port 22 is protected by a WireGuard VPN network. Thus, to get root access on the servers, at least two pieces of information have to be added to the admins config:

1. **SSH public key**: self-explanatory. Add your public key to your user attrset under `sshPubKeys`.
2. **Wireguard device**: each wireguard device has two parts: the public key and the IP addresses it should have in the wireguard network. The pub.solar wireguard network is spaced under `10.7.6.0/24` and `fd00:fae:fae:fae:fae::/80`. To add your device, it's best to choose a free number between 200 and 255 and use that in both the IPv4 and IPv6 ranges: `10.7.6.<ip-address>/32` `fd00:fae:fae:fae:fae:<ip-address>::/96`. For more information on how to generate keypairs, see [the NixOS Wireguard docs](https://nixos.wiki/wiki/WireGuard#Generate_keypair).

# Secret encryption

Deployment secrets are added to the repository in encrypted files. To be able to work with these encrypted files, your public key(s) will have to be added to your user attrset under `secretEncryptionKeys`.

See also the docs on [working with secrets](./secrets.md).
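For orientation, a filled-in entry could look like the following sketch. The username, the key strings and the address suffix `212` are placeholders rather than values from this repository; reusing `sshPubKeys` for `secretEncryptionKeys` via `rec` mirrors what existing entries in `logins/admins.nix` do.

```nix
{
  exampleadmin = rec {
    sshPubKeys = {
      exampleadmin-laptop = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... exampleadmin@laptop";
    };

    # Reuse the SSH keys as secret encryption keys
    secretEncryptionKeys = sshPubKeys;

    wireguardDevices = [
      {
        # exampleadmin-laptop
        publicKey = "placeholder-wireguard-public-key=";
        allowedIPs = [ "10.7.6.212/32" "fd00:fae:fae:fae:fae:212::/96" ];
      }
    ];
  };
}
```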
@@ -1,20 +1,32 @@
# Deploying new versions

We use [deploy-rs](https://github.com/serokell/deploy-rs) to deploy changes. Currently this process is not automated, so configuration changes will have to be manually deployed.
We use [deploy-rs](https://github.com/serokell/deploy-rs) to deploy changes.
Currently this process is not automated, so configuration changes will have to
be manually deployed.

To deploy, make sure you have a [working development shell](./development-shell.md). Then, run `deploy-rs` with the hostname of the server you want to deploy:
To deploy, make sure you have a [working development shell](./development-shell.md).
Then, run `deploy-rs` with the hostname of the server you want to deploy:

For nachtigall.pub.solar:
```
deploy '.#nachtigall'
deploy --targets '.#nachtigall' --magic-rollback false --auto-rollback false
```

For flora-6.pub.solar:
```
deploy '.#flora-6'
deploy --targets '.#flora-6' --magic-rollback false --auto-rollback false
```

You'll need to have SSH access to the boxes to be able to do this.
Usually we skip all rollback functionality, but if you want to deploy a change
that might lock you out, e.g. to SSH, it might make sense to set these to `true`.

### SSH access
Ensure your SSH public key is in place [here](./public-keys/admins.nix) and was deployed by someone with access.
To skip flake checks, e.g. because you already ran them manually before
deployment, add the flag `--skip-checks` at the end of the command.

`--dry-activate` can be used to only put all files in place without switching,
to enable switching to the new config quickly at a later moment.

You'll need to have SSH access to the boxes to be able to run `deploy`.

### Getting SSH access
See [administrative-access.md](./administrative-access.md).
@@ -1 +1,5 @@
# Working with secrets

Secrets are handled with [agenix](https://github.com/ryantm/agenix). To be able to view secrets, your public key will have to be added to the admins config. See [Administrative Access](./administrative-access.md) on how to do this.

For a comprehensive tutorial, see [the agenix repository](https://github.com/ryantm/agenix?tab=readme-ov-file#tutorial).
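To make the moving parts concrete, here is a minimal sketch of the agenix pattern used in this repository. The secret name `example-secret.age` and the owning user are illustrative placeholders; the key lists and option names mirror those appearing elsewhere in this change.

```nix
# secrets/secrets.nix: declare which public keys may decrypt the file
{
  "example-secret.age".publicKeys = nachtigallKeys ++ adminKeys;
}
```

On a host, agenix then decrypts the file at activation time and exposes the decrypted path as `config.age.secrets.<name>.path`:

```nix
# host module (sketch)
{
  age.secrets.example-secret = {
    file = "${flake.self}/secrets/example-secret.age";
    mode = "600";
    owner = "gitea";
  };
}
```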
@@ -1,3 +0,0 @@
# SSH Access

SSH Access is granted by adding a public key to [`public-keys/admins.nix`](../public-keys/admins.nix). This change will then have to be deployed to all hosts by an existing key. The keys will also grant access to the initrd SSH Server to enable remote unlock.
24
flake.lock
generated
|
@ -180,11 +180,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1710888565,
|
||||
"narHash": "sha256-s9Hi4RHhc6yut4EcYD50sZWRDKsugBJHSbON8KFwoTw=",
|
||||
"lastModified": 1714043624,
|
||||
"narHash": "sha256-Xn2r0Jv95TswvPlvamCC46wwNo8ALjRCMBJbGykdhcM=",
|
||||
"owner": "nix-community",
|
||||
"repo": "home-manager",
|
||||
"rev": "f33900124c23c4eca5831b9b5eb32ea5894375ce",
|
||||
"rev": "86853e31dc1b62c6eeed11c667e8cdd0285d4411",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -224,11 +224,11 @@
|
|||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1711763326,
|
||||
"narHash": "sha256-sXcesZWKXFlEQ8oyGHnfk4xc9f2Ip0X/+YZOq3sKviI=",
|
||||
"lastModified": 1713946171,
|
||||
"narHash": "sha256-lc75rgRQLdp4Dzogv5cfqOg6qYc5Rp83oedF2t0kDp8=",
|
||||
"owner": "lnl7",
|
||||
"repo": "nix-darwin",
|
||||
"rev": "36524adc31566655f2f4d55ad6b875fb5c1a4083",
|
||||
"rev": "230a197063de9287128e2c68a7a4b0cd7d0b50a7",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -255,11 +255,11 @@
|
|||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1712168706,
|
||||
"narHash": "sha256-XP24tOobf6GGElMd0ux90FEBalUtw6NkBSVh/RlA6ik=",
|
||||
"lastModified": 1713995372,
|
||||
"narHash": "sha256-fFE3M0vCoiSwCX02z8VF58jXFRj9enYUSTqjyHAjrds=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "1487bdea619e4a7a53a4590c475deabb5a9d1bfb",
|
||||
"rev": "dd37924974b9202f8226ed5d74a252a9785aedf8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -405,11 +405,11 @@
|
|||
},
|
||||
"unstable": {
|
||||
"locked": {
|
||||
"lastModified": 1712163089,
|
||||
"narHash": "sha256-Um+8kTIrC19vD4/lUCN9/cU9kcOsD1O1m+axJqQPyMM=",
|
||||
"lastModified": 1713895582,
|
||||
"narHash": "sha256-cfh1hi+6muQMbi9acOlju3V1gl8BEaZBXBR9jQfQi4U=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "fd281bd6b7d3e32ddfa399853946f782553163b5",
|
||||
"rev": "572af610f6151fd41c212f897c71f7056e3fb518",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@@ -89,14 +89,12 @@

      deploy.nodes = self.lib.deploy.mkDeployNodes self.nixosConfigurations {
        nachtigall = {
          # hostname is set in hosts/nachtigall/networking.nix
          hostname = "10.7.6.1";
          sshUser = username;
        };
        flora-6 = {
          hostname = "flora-6.pub.solar";
          hostname = "10.7.6.2";
          sshUser = username;
          # Example
          #sshOpts = [ "-p" "19999" ];
        };
      };
    };
260
hosts/flora-6/apps/alert-rules.nix
Normal file
|
@ -0,0 +1,260 @@
|
|||
{ lib }:
|
||||
|
||||
let
|
||||
# docker's filesystems disappear quickly, leading to false positives
|
||||
deviceFilter = ''path!~"^(/var/lib/docker|/nix/store).*"'';
|
||||
in
|
||||
lib.mapAttrsToList
|
||||
(name: opts: {
|
||||
alert = name;
|
||||
expr = opts.condition;
|
||||
for = opts.time or "2m";
|
||||
labels = { };
|
||||
annotations.description = opts.description;
|
||||
})
|
||||
({
|
||||
|
||||
# prometheus_too_many_restarts = {
|
||||
# condition = ''changes(process_start_time_seconds{job=~"prometheus|alertmanager"}[15m]) > 2'';
|
||||
# description = "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.";
|
||||
# };
|
||||
|
||||
# alert_manager_config_not_synced = {
|
||||
# condition = ''count(count_values("config_hash", alertmanager_config_hash)) > 1'';
|
||||
# description = "Configurations of AlertManager cluster instances are out of sync.";
|
||||
# };
|
||||
|
||||
#alert_manager_e2e_dead_man_switch = {
|
||||
# condition = "vector(1)";
|
||||
# description = "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.";
|
||||
#};
|
||||
|
||||
# prometheus_not_connected_to_alertmanager = {
|
||||
# condition = "prometheus_notifications_alertmanagers_discovered < 1";
|
||||
# description = "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
|
||||
# };
|
||||
|
||||
# prometheus_rule_evaluation_failures = {
|
||||
# condition = "increase(prometheus_rule_evaluation_failures_total[3m]) > 0";
|
||||
# description = "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
|
||||
# };
|
||||
|
||||
# prometheus_template_expansion_failures = {
|
||||
# condition = "increase(prometheus_template_text_expansion_failures_total[3m]) > 0";
|
||||
# time = "0m";
|
||||
# description = "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
|
||||
# };
|
||||
|
||||
# promtail_file_lagging = {
|
||||
# condition = ''abs(promtail_file_bytes_total - promtail_read_bytes_total) > 1e6'';
|
||||
# time = "15m";
|
||||
# description = ''{{ $labels.instance }} {{ $labels.job }} {{ $labels.path }} has been lagging by more than 1MB for more than 15m.'';
|
||||
# };
|
||||
|
||||
filesystem_full_80percent = {
|
||||
condition = ''
|
||||
100 - ((node_filesystem_avail_bytes{fstype!="rootfs",mountpoint="/"} * 100) / node_filesystem_size_bytes{fstype!="rootfs",mountpoint="/"}) > 80'';
|
||||
time = "10m";
|
||||
description =
|
||||
"{{$labels.instance}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem.";
|
||||
};
|
||||
|
||||
# filesystem_inodes_full = {
|
||||
# condition = ''disk_inodes_free / disk_inodes_total < 0.10'';
|
||||
# time = "10m";
|
||||
# description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 10% inodes left on its filesystem.";
|
||||
# };
|
||||
|
||||
# daily_task_not_run = {
|
||||
# # give 6 hours grace period
|
||||
# condition = ''time() - task_last_run{state="ok",frequency="daily"} > (24 + 6) * 60 * 60'';
|
||||
# description = "{{$labels.instance}}: {{$labels.name}} was not run in the last 24h";
|
||||
# };
|
||||
|
||||
# daily_task_failed = {
|
||||
# condition = ''task_last_run{state="fail"}'';
|
||||
# description = "{{$labels.instance}}: {{$labels.name}} failed to run";
|
||||
# };
|
||||
# } // (lib.genAttrs [
|
||||
# "borgbackup-turingmachine"
|
||||
# "borgbackup-eve"
|
||||
# "borgbackup-datastore"
|
||||
# ]
|
||||
# (name: {
|
||||
# condition = ''absent_over_time(task_last_run{name="${name}"}[1d])'';
|
||||
# description = "status of ${name} is unknown: no data for a day";
|
||||
# }))
|
||||
# // {
|
||||
|
||||
# borgbackup_matchbox_not_run = {
|
||||
# # give 6 hours grace period
|
||||
# condition = ''time() - task_last_run{state="ok",frequency="daily",name="borgbackup-matchbox"} > 7 * 24 * 60 * 60'';
|
||||
# description = "{{$labels.instance}}: {{$labels.name}} was not run in the last week";
|
||||
# };
|
||||
|
||||
# borgbackup_matchbox = {
|
||||
# condition = ''absent_over_time(task_last_run{name="borgbackup-matchbox"}[7d])'';
|
||||
# description = "status of borgbackup-matchbox is unknown: no data for a week";
|
||||
# };
|
||||
|
||||
# homeassistant = {
|
||||
# condition = ''
|
||||
# homeassistant_entity_available{domain="persistent_notification", entity!="persistent_notification.http_login"} >= 0'';
|
||||
# description =
|
||||
# "homeassistant notification {{$labels.entity}} ({{$labels.friendly_name}}): {{$value}}";
|
||||
# };
|
||||
|
||||
swap_using_20percent = {
|
||||
condition =
|
||||
"node_memory_SwapTotal_bytes - (node_memory_SwapCached_bytes + node_memory_SwapFree_bytes) > node_memory_SwapTotal_bytes * 0.2";
|
||||
time = "30m";
|
||||
description =
|
||||
"{{$labels.instance}} is using 20% of its swap space for at least 30 minutes.";
|
||||
};
|
||||
|
||||
systemd_service_failed = {
|
||||
condition = ''node_systemd_unit_state{state="failed"} == 1'';
|
||||
description =
|
||||
"{{$labels.instance}} failed to (re)start service {{$labels.name}}.";
|
||||
};
|
||||
|
||||
restic_backup_too_old = {
|
||||
condition = ''(time() - restic_snapshots_latest_time)/(60*60) > 24'';
|
||||
description = "{{$labels.instance}} not backed up for more than 24 hours. ({{$value}})";
|
||||
};
|
||||
|
||||
host_down = {
|
||||
condition = ''up{job="node-stats", instance!~"ahorn.wireguard:9100|kartoffel.wireguard:9100|mega.wireguard:9100"} == 0'';
|
||||
description = "{{$labels.instance}} is down!";
|
||||
};
|
||||
|
||||
# service_not_running = {
|
||||
# condition = ''systemd_units_active_code{name=~"teamspeak3-server.service|tt-rss.service", sub!="running"}'';
|
||||
# description = "{{$labels.instance}} should have a running {{$labels.name}}.";
|
||||
# };
|
||||
|
||||
ram_using_90percent = {
|
||||
condition =
|
||||
"node_memory_Buffers_bytes + node_memory_MemFree_bytes + node_memory_Cached_bytes < node_memory_MemTotal_bytes * 0.1";
|
||||
time = "1h";
|
||||
description =
|
||||
"{{$labels.instance}} is using at least 90% of its RAM for at least 1 hour.";
|
||||
};
|
||||
|
||||
cpu_using_90percent = {
|
||||
condition = ''
|
||||
100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) >= 90'';
|
||||
time = "10m";
|
||||
description =
|
||||
"{{$labels.instance}} is running with cpu usage > 90% for at least 10 minutes: {{$value}}";
|
||||
};
|
||||
|
||||
reboot = {
|
||||
condition = "node_boot_time_seconds < 300";
|
||||
description = "{{$labels.instance}} just rebooted.";
|
||||
};
|
||||
|
||||
uptime = {
|
||||
condition = "(time() - node_boot_time_seconds ) / (60*60*24) > 30";
|
||||
description =
|
||||
"Uptime monster: {{$labels.instance}} has been up for more than 30 days.";
|
||||
};
|
||||
|
||||
flake_nixpkgs_outdated = {
|
||||
condition = ''
|
||||
(time() - flake_input_last_modified{input="nixpkgs"}) / (60*60*24) > 30'';
|
||||
description =
|
||||
"Nixpkgs outdated: Nixpkgs on {{$labels.instance}} has not been updated in 30 days";
|
||||
};
|
||||
|
||||
/* ping = {
|
||||
condition = "ping_result_code{type!='mobile'} != 0";
|
||||
description = "{{$labels.url}}: ping from {{$labels.instance}} has failed!";
|
||||
};
|
||||
|
||||
ping_high_latency = {
|
||||
condition = "ping_average_response_ms{type!='mobile'} > 5000";
|
||||
description = "{{$labels.instance}}: ping probe from {{$labels.source}} is encountering high latency!";
|
||||
};
|
||||
*/
|
||||
http_status = {
|
||||
condition = ''
|
||||
probe_http_status_code{instance!~"https://megaclan3000.de"} != 200'';
|
||||
description =
|
||||
"http request failed from {{$labels.instance}}: {{$labels.result}}!";
|
||||
};
|
||||
/* http_match_failed = {
|
||||
condition = "http_response_response_string_match == 0";
|
||||
description = "{{$labels.server}} : http body not as expected; status code: {{$labels.status_code}}!";
|
||||
};
|
||||
dns_query = {
|
||||
condition = "dns_query_result_code != 0";
|
||||
description = "{{$labels.domain}} : could retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}}!";
|
||||
};
|
||||
secure_dns_query = {
|
||||
condition = "secure_dns_state != 0";
|
||||
description = "{{$labels.domain}} : could retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}} for protocol {{$labels.protocol}}!";
|
||||
};
|
||||
connection_failed = {
|
||||
condition = "net_response_result_code != 0";
|
||||
description = "{{$labels.server}}: connection to {{$labels.port}}({{$labels.protocol}}) failed from {{$labels.instance}}";
|
||||
};
|
||||
healthchecks = {
|
||||
condition = "hc_check_up == 0";
|
||||
description = "{{$labels.instance}}: healtcheck {{$labels.job}} fails!";
|
||||
};
|
||||
*/
|
||||
cert_expiry = {
|
||||
condition = "(probe_ssl_earliest_cert_expiry - time())/(3600*24) < 30";
|
||||
description =
|
||||
"{{$labels.instance}}: The TLS certificate will expire in less than 30 days: {{$value}}s";
|
||||
};
|
||||
|
||||
# ignore devices that disabled S.M.A.R.T (example if attached via USB)
|
||||
|
||||
# smart_errors = {
|
||||
# condition = ''smart_device_health_ok{enabled!="Disabled"} != 1'';
|
||||
# description =
|
||||
# "{{$labels.instance}}: S.M.A.R.T reports: {{$labels.device}} ({{$labels.model}}) has errors.";
|
||||
# };
|
||||
|
||||
oom_kills = {
|
||||
condition = "increase(node_vmstat_oom_kill[5m]) > 0";
|
||||
description = "{{$labels.instance}}: OOM kill detected";
|
||||
};
|
||||
|
||||
/* unusual_disk_read_latency = {
|
||||
condition =
|
||||
"rate(diskio_read_time[1m]) / rate(diskio_reads[1m]) > 0.1 and rate(diskio_reads[1m]) > 0";
|
||||
description = ''
|
||||
{{$labels.instance}}: Disk latency is growing (read operations > 100ms)
|
||||
'';
|
||||
};
|
||||
|
||||
unusual_disk_write_latency = {
|
||||
condition =
|
||||
"rate(diskio_write_time[1m]) / rate(diskio_write[1m]) > 0.1 and rate(diskio_write[1m]) > 0";
|
||||
description = ''
|
||||
{{$labels.instance}}: Disk latency is growing (write operations > 100ms)
|
||||
'';
|
||||
};
|
||||
*/
|
||||
|
||||
host_memory_under_memory_pressure = {
|
||||
condition = "rate(node_vmstat_pgmajfault[1m]) > 1000";
|
||||
description =
|
||||
"{{$labels.instance}}: The node is under heavy memory pressure. High rate of major page faults: {{$value}}";
|
||||
};
|
||||
|
||||
# ext4_errors = {
|
||||
# condition = "ext4_errors_value > 0";
|
||||
# description =
|
||||
# "{{$labels.instance}}: ext4 has reported {{$value}} I/O errors: check /sys/fs/ext4/*/errors_count";
|
||||
# };
|
||||
|
||||
# alerts_silences_changed = {
|
||||
# condition = ''abs(delta(alertmanager_silences{state="active"}[1h])) >= 1'';
|
||||
# description =
|
||||
# "alertmanager: number of active silences has changed: {{$value}}";
|
||||
# };
|
||||
})
@@ -37,6 +37,14 @@
        reverse_proxy :${toString config.services.loki.configuration.server.http_listen_port}
      '';
    };
    "alerts.pub.solar" = {
      logFormat = lib.mkForce ''
        output discard
      '';
      extraConfig = ''
        reverse_proxy 10.7.6.2:${toString config.services.prometheus.alertmanager.port}
      '';
    };
    "grafana.pub.solar" = {
      logFormat = lib.mkForce ''
        output discard
@@ -78,6 +78,7 @@
      extraOptions = [
        "--network=drone-net"
        "--pull=always"
        "--add-host=nachtigall.pub.solar:10.7.6.1"
      ];
      environment = {
        DRONE_GITEA_SERVER = "https://git.pub.solar";
@@ -101,6 +102,7 @@
      extraOptions = [
        "--network=drone-net"
        "--pull=always"
        "--add-host=nachtigall.pub.solar:10.7.6.1"
      ];
      environment = {
        DRONE_RPC_HOST = "ci.pub.solar";
|
|
@ -13,16 +13,43 @@
|
|||
# Needed for the docker runner to communicate with the act_runner cache
|
||||
networking.firewall.trustedInterfaces = [ "br-+" ];
|
||||
|
||||
users.users.gitea-runner = {
|
||||
home = "/var/lib/gitea-runner/flora-6";
|
||||
useDefaultShell = true;
|
||||
group = "gitea-runner";
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
users.groups.gitea-runner = {};
|
||||
|
||||
systemd.services."gitea-runner-flora\\x2d6".serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '/data/gitea-actions-runner' 0750 gitea-runner gitea-runner - -"
|
||||
"d '/var/lib/gitea-runner' 0750 gitea-runner gitea-runner - -"
|
||||
];
|
||||
|
||||
# forgejo actions runner
|
||||
# https://forgejo.org/docs/latest/admin/actions/
|
||||
# https://docs.gitea.com/usage/actions/quickstart
|
||||
services.gitea-actions-runner = {
|
||||
package = pkgs.forgejo-actions-runner;
|
||||
package = pkgs.forgejo-runner;
|
||||
instances."flora-6" = {
|
||||
enable = true;
|
||||
name = config.networking.hostName;
|
||||
url = "https://git.pub.solar";
|
||||
tokenFile = config.age.secrets.forgejo-actions-runner-token.path;
|
||||
settings = {
|
||||
cache = {
|
||||
enabled = true;
|
||||
dir = "/data/gitea-actions-runner/actcache";
|
||||
host = "";
|
||||
port = 0;
|
||||
external_server = "";
|
||||
};
|
||||
};
|
||||
labels = [
|
||||
# provide a debian 12 bookworm base with Node.js for actions
|
||||
"debian-latest:docker://git.pub.solar/pub-solar/actions-base-image:20-bookworm"
|
||||
|
|
|
@ -65,5 +65,50 @@
|
|||
}];
|
||||
}
|
||||
];
|
||||
|
||||
ruleFiles = [
|
||||
(pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
|
||||
groups = [{
|
||||
name = "alerting-rules";
|
||||
rules = import ./alert-rules.nix { inherit lib; };
|
||||
}];
|
||||
}))
|
||||
];
|
||||
|
||||
alertmanagers = [{ static_configs = [{ targets = [ "localhost:9093" ]; }]; }];
|
||||
|
||||
alertmanager = {
|
||||
enable = true;
|
||||
# port = 9093; # Default
|
||||
webExternalUrl = "https://alerts.pub.solar"; # TODO use a proper url?
|
||||
# environmentFile = "${config.age.secrets.nachtigall-alertmanager-envfile.path}";
|
||||
configuration = {
|
||||
|
||||
route = {
|
||||
receiver = "all";
|
||||
group_by = [ "instance" ];
|
||||
group_wait = "30s";
|
||||
group_interval = "2m";
|
||||
repeat_interval = "24h";
|
||||
};
|
||||
|
||||
receivers = [{
|
||||
name = "all";
|
||||
# Email config documentation: https://prometheus.io/docs/alerting/latest/configuration/#email_config
|
||||
email_configs = [{
|
||||
send_resolved = true;
|
||||
to = "TODO";
|
||||
from = "alerts@pub.solar";
|
||||
smarthost = "TODO";
|
||||
auth_username = "TODO";
|
||||
auth_password_file = "${config.age.secrets.nachtigall-alertmanager-smtp-password.path}";
|
||||
require_tls = true;
|
||||
}];
|
||||
# TODO:
|
||||
# For matrix notifications, look into: https://github.com/pinpox/matrix-hook and add a webhook
|
||||
# webhook_configs = [ { url = "http://127.0.0.1:11000/alert"; } ];
|
||||
}];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@@ -35,6 +35,7 @@ in
  #systemd.services."systemd-networkd".environment.SYSTEMD_LOG_LEVEL = "debug";
  systemd.network.wait-online.ignoredInterfaces = [
    "docker0"
    "wg-ssh"
  ];

  # List services that you want to enable:
@@ -18,12 +18,23 @@
      ];
      privateKeyFile = config.age.secrets.wg-private-key.path;
      peers = flake.self.logins.admins.wireguardDevices ++ [
        {
          endpoint = "nachtigall.pub.solar:51820";
        { # nachtigall.pub.solar
          endpoint = "138.201.80.102:51820";
          publicKey = "qzNywKY9RvqTnDO8eLik75/SHveaSk9OObilDzv+xkk=";
          allowedIPs = [ "10.7.6.1/32" "fd00:fae:fae:fae:fae:1::/96" ];
        }
      ];
    };
  };

  services.openssh.listenAddresses = [
    {
      addr = "10.7.6.2";
      port = 22;
    }
    {
      addr = "[fd00:fae:fae:fae:fae:2::]";
      port = 22;
    }
  ];
}
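Since SSH only listens on the wg-ssh addresses, an admin device needs a matching client-side interface to reach the hosts at all. Below is a minimal NixOS sketch; the interface name, private key path and the address suffix `212` are placeholders, while the endpoints and public keys are the ones visible in this change.

```nix
{
  # Client-side WireGuard interface for reaching the hosts' wg-ssh network
  networking.wg-quick.interfaces.wg-pub-solar = {
    address = [ "10.7.6.212/32" "fd00:fae:fae:fae:fae:212::/96" ];
    privateKeyFile = "/etc/wireguard/wg-pub-solar.key";

    peers = [
      {
        # nachtigall.pub.solar
        endpoint = "138.201.80.102:51820";
        publicKey = "qzNywKY9RvqTnDO8eLik75/SHveaSk9OObilDzv+xkk=";
        allowedIPs = [ "10.7.6.1/32" "fd00:fae:fae:fae:fae:1::/96" ];
        persistentKeepalive = 25;
      }
      {
        # flora-6.pub.solar
        endpoint = "80.71.153.210:51820";
        publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
        allowedIPs = [ "10.7.6.2/32" "fd00:fae:fae:fae:fae:2::/96" ];
        persistentKeepalive = 25;
      }
    ];
  };
}
```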
|
@ -16,6 +16,19 @@
|
|||
owner = "gitea";
|
||||
};
|
||||
|
||||
age.secrets.forgejo-ssh-private-key = {
|
||||
file = "${flake.self}/secrets/forgejo-ssh-private-key.age";
|
||||
mode = "600";
|
||||
owner = "gitea";
|
||||
path = "/etc/forgejo/ssh/id_forgejo";
|
||||
};
|
||||
|
||||
environment.etc."forgejo/ssh/id_forgejo.pub" = {
|
||||
text = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCkPjvF2tZ2lZtkXed6lBvaPUpsNrI5kHlCNEf4LyFtgFXHoUL8UD3Bz9Fn1S+SDkdBMw/SumjvUf7TEGqQqzmFbG7+nWdWg2L00VdN8Kp8W+kKPBByJrzjDUIGhIMt7obaZnlSAVO5Cdqc1Q6bA9POLjSHIBxSD3QUs2pjUCkciNcEtL93easuXnlMwoYa217n5sA8n+BZmOJAcmA/UxYvKsqYlpJxa44m8JgMTy+5L08i/zkx9/FwniOcKcLedxmjZfV8raitDy34LslT2nBNG4I+em7qhKhSScn/cfyPvARiK71pk/rTx9mxBEjcGAkp3+hiA3Nyms0h/qTUh8yGyhbOn8hiro34HEKswXDN1HRfseyyZ4TqOoIC07F53x4OliYA0B+QbvwOemTX2XAWHfU4xEYrIhR46o3Eu5ooOM9HZLLYzIzKjsj/rpuKalFZ+9IeT/PJ/DrbgOEBlJGTu4XucEYXSiIvWB7G9WXij7TXKYbsRAFho9jw+9UZWklFAh9dcUKlX9YxafxOrw9DhJK620hblHLY9wPPFCbZVXDGfqdtn+ncRReMAw6N3VYqxMgnxd+OC52SMsSUi9VaL26i2UvEBwNYuim8GDnVabu/ciQLHMgifBONuF9sKD58ee5nnKgtYLDy9zU86aHBU78Ijew+WhYitO7qejMHMQ==";
|
||||
mode = "600";
|
||||
user = "gitea";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."git.pub.solar" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
@ -41,11 +54,17 @@
|
|||
|
||||
users.groups.gitea = {};
|
||||
|
||||
# Expose SSH port only for forgejo SSH
|
||||
networking.firewall.interfaces.enp35s0.allowedTCPPorts = [ 2223 ];
|
||||
networking.firewall.extraCommands = ''
|
||||
iptables -t nat -i enp35s0 -I PREROUTING -p tcp --dport 22 -j REDIRECT --to-ports 2223
|
||||
ip6tables -t nat -i enp35s0 -I PREROUTING -p tcp --dport 22 -j REDIRECT --to-ports 2223
|
||||
'';
|
||||
|
||||
services.forgejo = {
|
||||
enable = true;
|
||||
user = "gitea";
|
||||
group = "gitea";
|
||||
package = pkgs.forgejo;
|
||||
database = {
|
||||
type = "postgres";
|
||||
passwordFile = config.age.secrets.forgejo-database-password.path;
|
||||
|
@ -63,6 +82,9 @@
|
|||
DOMAIN = "git.pub.solar";
|
||||
HTTP_ADDR = "127.0.0.1";
|
||||
HTTP_PORT = 3000;
|
||||
START_SSH_SERVER = true;
|
||||
SSH_LISTEN_PORT = 2223;
|
||||
SSH_SERVER_HOST_KEYS = "${config.age.secrets."forgejo-ssh-private-key".path}";
|
||||
};
|
||||
|
||||
log.LEVEL = "Warn";
|
||||
|
@ -111,6 +133,19 @@
|
|||
# the value of DEFAULT_ACTIONS_URL is prepended to it.
|
||||
DEFAULT_ACTIONS_URL = "https://code.forgejo.org";
|
||||
};
|
||||
|
||||
# https://forgejo.org/docs/next/admin/recommendations/#securitylogin_remember_days
|
||||
security = {
|
||||
LOGIN_REMEMBER_DAYS = 365;
|
||||
};
|
||||
|
||||
# https://forgejo.org/docs/next/admin/config-cheat-sheet/#indexer-indexer
|
||||
indexer = {
|
||||
REPO_INDEXER_ENABLED = true;
|
||||
REPO_INDEXER_PATH = "indexers/repos.bleve";
|
||||
MAX_FILE_SIZE = 1048576;
|
||||
REPO_INDEXER_EXCLUDE = "resources/bin/**";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -155,6 +190,11 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/forgejo-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.forgejo-storagebox = {
|
||||
|
@ -174,5 +214,10 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/forgejo-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -64,6 +64,11 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/keycloak-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.keycloak-storagebox = {
|
||||
|
@ -82,5 +87,10 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/keycloak-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -94,6 +94,11 @@
|
|||
initialize = true;
|
||||
passwordFile = config.age.secrets."restic-repo-droppie".path;
|
||||
repository = "sftp:yule@droppie.b12f.io:/media/internal/pub.solar";
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.mailman-storagebox = {
|
||||
|
@ -109,5 +114,10 @@
|
|||
initialize = true;
|
||||
passwordFile = config.age.secrets."restic-repo-storagebox".path;
|
||||
repository = "sftp:u377325@u377325.your-storagebox.de:/backups";
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -61,6 +61,9 @@
|
|||
passwordFile = "/run/agenix/mastodon-smtp-password";
|
||||
fromAddress = "mastodon-notifications@pub.solar";
|
||||
};
|
||||
mediaAutoRemove = {
|
||||
olderThanDays = 7;
|
||||
};
|
||||
extraEnvFiles = [
|
||||
"/run/agenix/mastodon-extra-env-secrets"
|
||||
];
|
||||
|
@ -111,6 +114,11 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/mastodon-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.mastodon-storagebox = {
|
||||
|
@ -129,5 +137,10 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/mastodon-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -13,11 +13,6 @@ let
|
|||
synapseClientPort = "${toString listenerWithClient.port}";
|
||||
in
|
||||
{
|
||||
systemd.services.matrix-appservice-irc.serviceConfig.SystemCallFilter = lib.mkForce [
|
||||
"@system-service @pkey"
|
||||
"~@privileged @resources"
|
||||
"@chown"
|
||||
];
|
||||
services.matrix-appservice-irc = {
|
||||
enable = true;
|
||||
localpart = "irc_bot";
|
||||
|
|
|
@ -312,5 +312,10 @@ in
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/matrix-synapse-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -97,6 +97,7 @@
|
|||
integrity.check.disabled = false;
|
||||
updater.release.channel = "stable";
|
||||
loglevel = 0;
|
||||
maintenance_window_start = "1";
|
||||
# maintenance = false;
|
||||
app_install_overwrite = [
|
||||
"pdfdraw"
|
||||
|
@ -149,6 +150,11 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/nextcloud-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
|
||||
services.restic.backups.nextcloud-storagebox = {
|
||||
|
@ -168,5 +174,10 @@
|
|||
backupCleanupCommand = ''
|
||||
rm /tmp/nextcloud-backup.sql
|
||||
'';
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
};
|
||||
}
|
||||
|
|
|
@ -24,6 +24,13 @@ in
|
|||
# https://my.f5.com/manage/s/article/K51798430
|
||||
proxy_headers_hash_bucket_size 128;
|
||||
'';
|
||||
appendConfig = ''
|
||||
# Number of CPU cores
|
||||
worker_processes 8;
|
||||
'';
|
||||
eventsConfig = ''
|
||||
worker_connections 1024;
|
||||
'';
|
||||
};
|
||||
|
||||
security.acme = {
|
||||
|
|
|
@ -18,12 +18,23 @@
|
|||
];
|
||||
privateKeyFile = config.age.secrets.wg-private-key.path;
|
||||
peers = flake.self.logins.admins.wireguardDevices ++ [
|
||||
{
|
||||
endpoint = "flora-6.pub.solar:51820";
|
||||
{ # flora-6.pub.solar
|
||||
endpoint = "80.71.153.210:51820";
|
||||
publicKey = "jtSR5G2P/nm9s8WrVc26Xc/SQLupRxyXE+5eIeqlsTU=";
|
||||
allowedIPs = [ "10.7.6.2/32" "fd00:fae:fae:fae:fae:2::/96" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
services.openssh.listenAddresses = [
|
||||
{
|
||||
addr = "10.7.6.1";
|
||||
port = 22;
|
||||
}
|
||||
{
|
||||
addr = "[fd00:fae:fae:fae:fae:1::]";
|
||||
port = 22;
|
||||
}
|
||||
];
|
||||
}
|
||||
|
|
|
@ -5,6 +5,14 @@
|
|||
};
|
||||
|
||||
secretEncryptionKeys = sshPubKeys;
|
||||
|
||||
wireguardDevices = [
|
||||
{
|
||||
# tuxnix
|
||||
publicKey = "fTvULvdsc92binFaBV+uWwFi33bi8InShcaPnoxUZEA=";
|
||||
allowedIPs = [ "10.7.6.203/32" "fd00:fae:fae:fae:fae:203::/96" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
b12f = rec {
|
||||
|
@ -55,6 +63,10 @@
|
|||
publicKey = "3UrVLQrwXnPAVXPiTAd7eM3fZYxnFSYgKAGpNMUwnUk=";
|
||||
allowedIPs = [ "10.7.6.201/32" "fd00:fae:fae:fae:fae:201::/96" ];
|
||||
}
|
||||
{ # ryzensun
|
||||
publicKey = "oVF2/s7eIxyVjtG0MhKPx5SZ1JllZg+ZFVF2eVYtPGo=";
|
||||
allowedIPs = [ "10.7.6.204/32" "fd00:fae:fae:fae:fae:204::/96" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
|
@@ -2,6 +2,11 @@
  # Don't expose SSH via public interfaces
  networking.firewall.interfaces.wg-ssh.allowedTCPPorts = [ 22 ];

  networking.hosts = {
    "10.7.6.1" = ["nachtigall.pub.solar"];
    "10.7.6.2" = ["flora-6.pub.solar"];
  };

  services.openssh = {
    enable = true;
    openFirewall = lib.mkDefault false;
@@ -31,14 +36,11 @@

  services.resolved = {
    enable = true;
    # DNSSEC=false because of random SERVFAIL responses with Greenbaum DNS
    # when using allow-downgrade, see https://github.com/systemd/systemd/issues/10579
    extraConfig = ''
      DNS=193.110.81.0#dns0.eu 185.253.5.0#dns0.eu 2a0f:fc80::#dns0.eu 2a0f:fc81::#dns0.eu 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net
      FallbackDNS=5.1.66.255#dot.ffmuc.net 185.150.99.255#dot.ffmuc.net 2001:678:e68:f000::#dot.ffmuc.net 2001:678:ed0:f000::#dot.ffmuc.net
      Domains=~.
      DNSOverTLS=yes
      DNSSEC=false
    '';
  };
}
@@ -13,6 +13,7 @@
      };
    in
    {
      forgejo-runner = unstable.forgejo-runner;
      element-themes = prev.callPackage ./pkgs/element-themes { inherit (inputs) element-themes; };
    })
  ];
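For context, the added line `forgejo-runner = unstable.forgejo-runner;` takes the runner package from the `unstable` nixpkgs input instead of the pinned stable one, matching the `pkgs.forgejo-runner` reference in the actions-runner module. A rough sketch of that overlay pattern, assuming `inputs` is in scope (the import arguments shown here are illustrative, not copied from this repository):

```nix
# overlay sketch: substitute selected packages from the `unstable` input
final: prev:
let
  unstable = import inputs.unstable { inherit (prev) system; };
in
{
  forgejo-runner = unstable.forgejo-runner;
}
```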
BIN
secrets/forgejo-ssh-private-key.age
Normal file
Binary file not shown.
@@ -33,6 +33,7 @@ in
  "forgejo-actions-runner-token.age".publicKeys = flora6Keys ++ adminKeys;
  "forgejo-database-password.age".publicKeys = nachtigallKeys ++ adminKeys;
  "forgejo-mailer-password.age".publicKeys = nachtigallKeys ++ adminKeys;
  "forgejo-ssh-private-key.age".publicKeys = nachtigallKeys ++ adminKeys;

  "matrix-mautrix-telegram-env-file.age".publicKeys = nachtigallKeys ++ adminKeys;
  "matrix-synapse-signing-key.age".publicKeys = nachtigallKeys ++ adminKeys;
151
terraform/dns.tf
|
@ -1,181 +1,186 @@
|
|||
# https://registry.terraform.io/providers/namecheap/namecheap/latest/docs
|
||||
resource "namecheap_domain_records" "pub-solar" {
|
||||
domain = "pub.solar"
|
||||
mode = "OVERWRITE"
|
||||
domain = "pub.solar"
|
||||
mode = "OVERWRITE"
|
||||
email_type = "MX"
|
||||
|
||||
record {
|
||||
hostname = "flora-6"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
}
|
||||
record {
|
||||
hostname = "auth"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "ci"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
}
|
||||
record {
|
||||
hostname = "alerts"
|
||||
type = "CNAME"
|
||||
address = "flora-6.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "git"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "stream"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "list"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "obs-portal"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
}
|
||||
record {
|
||||
hostname = "vpn"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
}
|
||||
record {
|
||||
hostname = "cache"
|
||||
type = "A"
|
||||
address = "95.217.225.160"
|
||||
type = "A"
|
||||
address = "95.217.225.160"
|
||||
}
|
||||
record {
|
||||
hostname = "factorio"
|
||||
type = "A"
|
||||
address = "80.244.242.2"
|
||||
type = "A"
|
||||
address = "80.244.242.2"
|
||||
}
|
||||
record {
|
||||
hostname = "collabora"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "@"
|
||||
type = "ALIAS"
|
||||
address = "nachtigall.pub.solar."
|
||||
ttl = 300
|
||||
type = "ALIAS"
|
||||
address = "nachtigall.pub.solar."
|
||||
ttl = 300
|
||||
}
|
||||
record {
|
||||
hostname = "chat"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "cloud"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "turn"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "grafana"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
type = "A"
|
||||
address = "80.71.153.210"
|
||||
}
|
||||
record {
|
||||
hostname = "hpb"
|
||||
type = "A"
|
||||
address = "80.71.153.239"
|
||||
type = "A"
|
||||
address = "80.71.153.239"
|
||||
}
|
||||
record {
|
||||
hostname = "files"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "search"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "wiki"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "mastodon"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "matrix"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "tmate"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "www"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
record {
|
||||
hostname = "@"
|
||||
type = "TXT"
|
||||
address = "v=spf1 include:spf.greenbaum.zone a:list.pub.solar ~all"
|
||||
type = "TXT"
|
||||
address = "v=spf1 include:spf.greenbaum.zone a:list.pub.solar ~all"
|
||||
}
|
||||
record {
|
||||
hostname = "list"
|
||||
type = "TXT"
|
||||
address = "v=spf1 a:list.pub.solar ?all"
|
||||
type = "TXT"
|
||||
address = "v=spf1 a:list.pub.solar ?all"
|
||||
}
|
||||
record {
|
||||
hostname = "_dmarc"
|
||||
type = "TXT"
|
||||
address = "v=DMARC1; p=reject;"
|
||||
type = "TXT"
|
||||
address = "v=DMARC1; p=reject;"
|
||||
}
|
||||
record {
|
||||
hostname = "_dmarc.list"
|
||||
type = "TXT"
|
||||
address = "v=DMARC1; p=reject;"
|
||||
type = "TXT"
|
||||
address = "v=DMARC1; p=reject;"
|
||||
}
|
||||
record {
|
||||
hostname = "modoboa._domainkey"
|
||||
type = "TXT"
|
||||
address = "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx/EqLMpk0MyL1aQ0JVG44ypTRbZBVA13MFjEntxAvowaWtq1smRbnEwTTKgqUOrUyaM4dVmli1dedne4mk/ncqRAm02KuhtTY+5wXfhTKK53EhqehbKwH+Qvzb12983Qwdau/QTHiFHwXHufMaSsCvd9CRWCp9q68Q7noQqndJeLHT6L0eECd2Zk3ZxJuh+Fxdb7+Kw68Tf6z13Rs+MU01qLM7x0jmSQHa4cv2pk+7NTGMBRp6fVskfbqev5nFkZWJ7rhXEbP9Eukd/L3ro/ubs1quWJotG02gPRKE8fgkm1Ytlws1/pnqpuvKXQS1HzBEP1X2ExezJMzQ1SnZCigQIDAQAB"
|
||||
type = "TXT"
|
||||
address = "v=DKIM1;k=rsa;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx/EqLMpk0MyL1aQ0JVG44ypTRbZBVA13MFjEntxAvowaWtq1smRbnEwTTKgqUOrUyaM4dVmli1dedne4mk/ncqRAm02KuhtTY+5wXfhTKK53EhqehbKwH+Qvzb12983Qwdau/QTHiFHwXHufMaSsCvd9CRWCp9q68Q7noQqndJeLHT6L0eECd2Zk3ZxJuh+Fxdb7+Kw68Tf6z13Rs+MU01qLM7x0jmSQHa4cv2pk+7NTGMBRp6fVskfbqev5nFkZWJ7rhXEbP9Eukd/L3ro/ubs1quWJotG02gPRKE8fgkm1Ytlws1/pnqpuvKXQS1HzBEP1X2ExezJMzQ1SnZCigQIDAQAB"
|
||||
}
|
||||
record {
|
||||
hostname = "@"
|
||||
type = "MX"
|
||||
address = "mail.greenbaum.zone."
|
||||
mx_pref = "0"
|
||||
type = "MX"
|
||||
address = "mail.greenbaum.zone."
|
||||
mx_pref = "0"
|
||||
}
|
||||
record {
|
||||
hostname = "list"
|
||||
type = "MX"
|
||||
address = "list.pub.solar."
|
||||
mx_pref = "0"
|
||||
type = "MX"
|
||||
address = "list.pub.solar."
|
||||
mx_pref = "0"
|
||||
}
|
||||
record {
|
||||
hostname = "nachtigall"
|
||||
type = "A"
|
||||
address = "138.201.80.102"
|
||||
type = "A"
|
||||
address = "138.201.80.102"
|
||||
}
|
||||
record {
|
||||
hostname = "nachtigall"
|
||||
type = "AAAA"
|
||||
address = "2a01:4f8:172:1c25::1"
|
||||
type = "AAAA"
|
||||
address = "2a01:4f8:172:1c25::1"
|
||||
}
|
||||
record {
|
||||
hostname = "matrix.test"
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
type = "CNAME"
|
||||
address = "nachtigall.pub.solar."
|
||||
}
|
||||
# SRV records can only be changed via NameCheap Web UI
|
||||
# add comment
|
||||
|
|