Merge pull request 'Add alertmanager config - part 2' (#189) from alertmanager into main

Reviewed-on: #189
Reviewed-by: Hendrik Sokolowski <hensoko@noreply.git.pub.solar>
commit 47c9424459 by teutat3s, 2024-05-15 15:23:59 +00:00
Signed by: pub.solar gitea (GPG key ID: F0332B04B7054873)
7 changed files with 448 additions and 77 deletions

View file

@@ -8,7 +8,7 @@
{
age.secrets.forgejo-actions-runner-token = {
file = "${flake.self}/secrets/forgejo-actions-runner-token.age";
mode = "644";
mode = "440";
};
# Trust docker bridge interface traffic

View file

@@ -8,17 +8,17 @@
{
age.secrets.grafana-admin-password = {
file = "${flake.self}/secrets/grafana-admin-password.age";
mode = "644";
mode = "440";
owner = "grafana";
};
age.secrets.grafana-smtp-password = {
file = "${flake.self}/secrets/grafana-smtp-password.age";
mode = "644";
mode = "440";
owner = "grafana";
};
age.secrets.grafana-keycloak-client-secret = {
file = "${flake.self}/secrets/grafana-keycloak-client-secret.age";
mode = "644";
mode = "440";
owner = "grafana";
};

View file

@@ -0,0 +1,253 @@
{ lib }:
let
# docker's filesystems disappear quickly, leading to false positives
deviceFilter = ''path!~"^(/var/lib/docker|/nix/store).*"'';
in
lib.mapAttrsToList
(name: opts: {
alert = name;
expr = opts.condition;
for = opts.time or "2m";
labels = { };
annotations.description = opts.description;
})
({
# prometheus_too_many_restarts = {
# condition = ''changes(process_start_time_seconds{job=~"prometheus|alertmanager"}[15m]) > 2'';
# description = "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.";
# };
# alert_manager_config_not_synced = {
# condition = ''count(count_values("config_hash", alertmanager_config_hash)) > 1'';
# description = "Configurations of AlertManager cluster instances are out of sync.";
# };
#alert_manager_e2e_dead_man_switch = {
# condition = "vector(1)";
# description = "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.";
#};
# prometheus_not_connected_to_alertmanager = {
# condition = "prometheus_notifications_alertmanagers_discovered < 1";
# description = "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
# };
# prometheus_rule_evaluation_failures = {
# condition = "increase(prometheus_rule_evaluation_failures_total[3m]) > 0";
# description = "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
# };
# prometheus_template_expansion_failures = {
# condition = "increase(prometheus_template_text_expansion_failures_total[3m]) > 0";
# time = "0m";
# description = "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}";
# };
# promtail_file_lagging = {
# condition = ''abs(promtail_file_bytes_total - promtail_read_bytes_total) > 1e6'';
# time = "15m";
# description = ''{{ $labels.instance }} {{ $labels.job }} {{ $labels.path }} has been lagging by more than 1MB for more than 15m.'';
# };
filesystem_root_full_80percent = {
condition = ''100 - ((node_filesystem_avail_bytes{fstype!="rootfs",mountpoint="/"} * 100) / node_filesystem_size_bytes{fstype!="rootfs",mountpoint="/"}) > 80'';
time = "10m";
description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem.";
};
filesystem_data_full_80percent = {
condition = ''100 - ((node_filesystem_avail_bytes{fstype!="rootfs",mountpoint="/var/lib"} * 100) / node_filesystem_size_bytes{fstype!="rootfs",mountpoint="/var/lib"}) > 80'';
time = "10m";
description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem.";
};
# filesystem_inodes_full = {
# condition = ''disk_inodes_free / disk_inodes_total < 0.10'';
# time = "10m";
# description = "{{$labels.instance}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 10% inodes left on its filesystem.";
# };
# daily_task_not_run = {
# # give 6 hours grace period
# condition = ''time() - task_last_run{state="ok",frequency="daily"} > (24 + 6) * 60 * 60'';
# description = "{{$labels.instance}}: {{$labels.name}} was not run in the last 24h";
# };
# daily_task_failed = {
# condition = ''task_last_run{state="fail"}'';
# description = "{{$labels.instance}}: {{$labels.name}} failed to run";
# };
# } // (lib.genAttrs [
# "borgbackup-turingmachine"
# "borgbackup-eve"
# "borgbackup-datastore"
# ]
# (name: {
# condition = ''absent_over_time(task_last_run{name="${name}"}[1d])'';
# description = "status of ${name} is unknown: no data for a day";
# }))
# // {
# borgbackup_matchbox_not_run = {
# # give 6 hours grace period
# condition = ''time() - task_last_run{state="ok",frequency="daily",name="borgbackup-matchbox"} > 7 * 24 * 60 * 60'';
# description = "{{$labels.instance}}: {{$labels.name}} was not run in the last week";
# };
# borgbackup_matchbox = {
# condition = ''absent_over_time(task_last_run{name="borgbackup-matchbox"}[7d])'';
# description = "status of borgbackup-matchbox is unknown: no data for a week";
# };
# homeassistant = {
# condition = ''
# homeassistant_entity_available{domain="persistent_notification", entity!="persistent_notification.http_login"} >= 0'';
# description =
# "homeassistant notification {{$labels.entity}} ({{$labels.friendly_name}}): {{$value}}";
# };
#swap_using_20percent = {
# condition = "node_memory_SwapTotal_bytes - (node_memory_SwapCached_bytes + node_memory_SwapFree_bytes) > node_memory_SwapTotal_bytes * 0.2";
# time = "30m";
# description = "{{$labels.instance}} is using 20% of its swap space for at least 30 minutes.";
#};
systemd_service_failed = {
condition = ''node_systemd_unit_state{state="failed"} == 1'';
description = "{{$labels.instance}} failed to (re)start service {{$labels.name}}.";
};
restic_backup_too_old = {
condition = ''(time() - restic_snapshots_latest_time)/(60*60) > 24'';
description = "{{$labels.instance}} not backed up for more than 24 hours. ({{$value}})";
};
#host_down = {
# condition = ''up{job="node-stats", instance!~"ahorn.wireguard:9100|kartoffel.wireguard:9100|mega.wireguard:9100"} == 0'';
# description = "{{$labels.instance}} is down!";
#};
# service_not_running = {
# condition = ''systemd_units_active_code{name=~"teamspeak3-server.service|tt-rss.service", sub!="running"}'';
# description = "{{$labels.instance}} should have a running {{$labels.name}}.";
# };
ram_using_90percent = {
condition = "node_memory_Buffers_bytes + node_memory_MemFree_bytes + node_memory_Cached_bytes < node_memory_MemTotal_bytes * 0.1";
time = "1h";
description = "{{$labels.instance}} is using at least 90% of its RAM for at least 1 hour.";
};
cpu_using_90percent = {
condition = ''100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) >= 90'';
time = "10m";
description = "{{$labels.instance}} is running with cpu usage > 90% for at least 10 minutes: {{$value}}";
};
reboot = {
condition = "node_boot_time_seconds < 300";
description = "{{$labels.instance}} just rebooted.";
};
uptime = {
condition = "(time() - node_boot_time_seconds ) / (60*60*24) > 30";
description = "Uptime monster: {{$labels.instance}} has been up for more than 30 days.";
};
flake_nixpkgs_outdated = {
condition = ''(time() - flake_input_last_modified{input="nixpkgs"}) / (60*60*24) > 30'';
description = "Nixpkgs outdated: Nixpkgs on {{$labels.instance}} has not been updated in 30 days";
};
/*
ping = {
condition = "ping_result_code{type!='mobile'} != 0";
description = "{{$labels.url}}: ping from {{$labels.instance}} has failed!";
};
ping_high_latency = {
condition = "ping_average_response_ms{type!='mobile'} > 5000";
description = "{{$labels.instance}}: ping probe from {{$labels.source}} is encountering high latency!";
};
*/
#http_status = {
# condition = ''probe_http_status_code{instance!~"https://pub.solar"} != 200'';
# description = "http request failed from {{$labels.instance}}: {{$labels.result}}!";
#};
/*
http_match_failed = {
condition = "http_response_response_string_match == 0";
description = "{{$labels.server}} : http body not as expected; status code: {{$labels.status_code}}!";
};
dns_query = {
condition = "dns_query_result_code != 0";
description = "{{$labels.domain}} : could retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}}!";
};
secure_dns_query = {
condition = "secure_dns_state != 0";
description = "{{$labels.domain}} : could retrieve A record {{$labels.instance}} from server {{$labels.server}}: {{$labels.result}} for protocol {{$labels.protocol}}!";
};
connection_failed = {
condition = "net_response_result_code != 0";
description = "{{$labels.server}}: connection to {{$labels.port}}({{$labels.protocol}}) failed from {{$labels.instance}}";
};
healthchecks = {
condition = "hc_check_up == 0";
description = "{{$labels.instance}}: healtcheck {{$labels.job}} fails!";
};
*/
#cert_expiry = {
# condition = "(probe_ssl_earliest_cert_expiry - time())/(3600*24) < 30";
# description = "{{$labels.instance}}: The TLS certificate will expire in less than 30 days: {{$value}}s";
#};
# ignore devices that have S.M.A.R.T. disabled (for example, if attached via USB)
# smart_errors = {
# condition = ''smart_device_health_ok{enabled!="Disabled"} != 1'';
# description =
# "{{$labels.instance}}: S.M.A.R.T reports: {{$labels.device}} ({{$labels.model}}) has errors.";
# };
oom_kills = {
condition = "increase(node_vmstat_oom_kill[5m]) > 0";
description = "{{$labels.instance}}: OOM kill detected";
};
/*
unusual_disk_read_latency = {
condition =
"rate(diskio_read_time[1m]) / rate(diskio_reads[1m]) > 0.1 and rate(diskio_reads[1m]) > 0";
description = ''
{{$labels.instance}}: Disk latency is growing (read operations > 100ms)
'';
};
unusual_disk_write_latency = {
condition =
"rate(diskio_write_time[1m]) / rate(diskio_write[1m]) > 0.1 and rate(diskio_write[1m]) > 0";
description = ''
{{$labels.instance}}: Disk latency is growing (write operations > 100ms)
'';
};
*/
host_memory_under_memory_pressure = {
condition = "rate(node_vmstat_pgmajfault[1m]) > 1000";
description = "{{$labels.instance}}: The node is under heavy memory pressure. High rate of major page faults: {{$value}}";
};
# ext4_errors = {
# condition = "ext4_errors_value > 0";
# description =
# "{{$labels.instance}}: ext4 has reported {{$value}} I/O errors: check /sys/fs/ext4/*/errors_count";
# };
# alerts_silences_changed = {
# condition = ''abs(delta(alertmanager_silences{state="active"}[1h])) >= 1'';
# description =
# "alertmanager: number of active silences has changed: {{$value}}";
# };
})
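
For illustration: the helper at the top of this file turns each attribute above into one Prometheus alerting rule, with "for" falling back to "2m" whenever a rule sets no "time". As a rough sketch, the systemd_service_failed entry becomes the following attrset before being serialised to JSON:

{
  alert = "systemd_service_failed";
  expr = ''node_systemd_unit_state{state="failed"} == 1'';
  for = "2m"; # default, since the rule sets no "time"
  labels = { };
  annotations.description = "{{$labels.instance}} failed to (re)start service {{$labels.name}}.";
}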

View file

@@ -11,6 +11,22 @@
mode = "600";
owner = "prometheus";
};
age.secrets.alertmanager-envfile = {
file = "${flake.self}/secrets/alertmanager-envfile.age";
mode = "600";
owner = "alertmanager";
};
services.caddy.virtualHosts."alerts.${config.pub-solar-os.networking.domain}" = {
logFormat = lib.mkForce ''
output discard
'';
extraConfig = ''
bind 10.7.6.2 fd00:fae:fae:fae:fae:2::
tls internal
reverse_proxy :${toString config.services.prometheus.alertmanager.port}
'';
};
services.prometheus = {
enable = true;
@@ -73,5 +89,58 @@
];
}
];
ruleFiles = [
(pkgs.writeText "prometheus-rules.yml" (
builtins.toJSON {
groups = [
{
name = "alerting-rules";
rules = import ./alert-rules.nix { inherit lib; };
}
];
}
))
];
alertmanagers = [ { static_configs = [ { targets = [ "localhost:9093" ]; } ]; } ];
alertmanager = {
enable = true;
# port = 9093; # Default
webExternalUrl = "https://alerts.pub.solar";
environmentFile = "${config.age.secrets.alertmanager-envfile.path}";
configuration = {
route = {
receiver = "all";
group_by = [ "instance" ];
group_wait = "30s";
group_interval = "2m";
repeat_interval = "24h";
};
receivers = [
{
name = "all";
# Email config documentation: https://prometheus.io/docs/alerting/latest/configuration/#email_config
email_configs = [
{
send_resolved = true;
to = "admins@pub.solar";
from = "alerts@pub.solar";
smarthost = "mail.greenbaum.zone:465";
auth_username = "admins@pub.solar";
auth_password = "$SMTP_AUTH_PASSWORD";
require_tls = false;
}
];
# TODO:
# For matrix notifications, look into: https://github.com/pinpox/matrix-hook and add a webhook
# webhook_configs = [ { url = "http://127.0.0.1:11000/alert"; } ];
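# A hypothetical shape for that follow-up, not part of this change:
# an extra receiver pointing at a matrix-hook instance listening on
# 127.0.0.1:11000 (the URL used in the commented example above). It
# would also need to be referenced from a route to receive alerts.
# {
#   name = "matrix";
#   webhook_configs = [
#     {
#       url = "http://127.0.0.1:11000/alert";
#       send_resolved = true;
#     }
#   ];
# }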
}
];
};
};
};
}

View file

@@ -0,0 +1,43 @@
age-encryption.org/v1
-> ssh-ed25519 Y0ZZaw TsTaRLA+9WtN9+FJWpXeP12Af5EXMbo+ANTaLC9YlC8
Yols084RY1C9gfOrDMwJcFRuGZ/5dgGuJey7RXqm7g0
-> ssh-ed25519 uYcDNw ZLAINtv10PGMtK5TL5Tf0NyK/r1iww+vTC09ElMGoX0
EgBB3aiHHdaDue9+Zdxg6mTV2VHeLoDN9wT+hlAzVMk
-> ssh-rsa f5THog
aiJqMs3/u06tzs8lx2ISlQm87TDatqEn47v3LB3HehPanRpZx9O1HUIRTeiWkMU9
XroGe27HQCCPd63QunBHUH7WStA10IS4rHVpMcULB5IM4jwcbOhSYSiGyY2sbv8+
Nn/04ZOwrfzTabC7moV1DqAw6hnlDqKWp/q5N6xMb780w5vn6Poni3OJfuLaBWaT
r6WhE5evVt3F4jyYI64fB2hFw4AR2N/zIMOMvBncLFwJf9lbIFdbsENZf94cYceF
Tj150xdMPuErBsSJQOlfDYSmyioNN3UJUWiYsDeM3nbPEVPHhfTk6b2/lMhSQkcY
KcuMj/mN/7w7i4HSxW6mUcK2sUMV1BcSSGYRH9ZFf7kq++KpyiP7vB8vaZkcKbfJ
qqrIcXTuXhR+/bWZWqf/GQOVwRwe1TnqN5MoZHipg3a/UCe0gMM617VwZcfhBzjA
eW6VUdjSewwA8YHEuDrAeoQ4CMs7y56EaIlr2IlQy6uzJPX9eeO0auO9RZ5AR40a
7un0FrlTJX9uorpCD/zi3tvd22W5qVoMGZ8vXJShZmT9he9K3Bv6XbzG4DJQ9/nv
xZ676HUYhWeyYZFBvt6DnEBneiDJFeaV2AeuQY+juHBOfBrbYmlE0S4Pd8uRSJ7w
u5UJTT+RV5TkZhpCqqYm7DphYocnrv7Ic+QKmvKE4ls
-> ssh-rsa kFDS0A
HhilpvIiUps80SXYUXg5vqNmcy8SACvxpC5dTVBU2n+4OVXQY/35Il5ZOrUX3U7a
arfVp/KaQF7Oncu3x8F6Tp1ibUwmoyAV6OYqqs128nEPwkNbJvwrLY3aEBm+NIzm
gMlLRjj6EP84TVWgOsenQCS4l957f0QoNVxQ3f+GWdOiZZJFsv//ndsflng8zPlF
bGZy8c1TxDZfOD0/kW3Nx05c9X0EHKOEoDUc0p4qntrWlflxcvLONCgv1gZuPMF+
jMsPFP81eu3rkEUxefJ1qbvvGuW0cbzfwiStv7iGQ+Skh/vcoM0qw6p+csNKyHVO
8nYFcs9kD8067zMnyuqiUHASfZ4rPqTji0iiPC5kZn6N0YSgz2bybkXcoqmy3m6y
qs0S+RD99o2vCLhW46hZyKAgUyTU1DW42EmnZkPrLoqV7uin8fAwPO/98Q/b3Rkr
zBRtyTEbooHvOCL8limiRtDl+5LMcjRFNWk8AN+9vHMsYurXPNOCnd8n2Z4MbT2U
AhpoAD/+8HXp0InBJ/sclITVAc6tPb2CbJW6mrFezH8Ri+/6u+zSF84JDd9ZrCOz
oIshiGZmhP5mIuspVrxgKlm78a56vQrygpqzvuSSYk3zIJxmhEkZhw09/ga+rhyB
pkKn7GRyZTfKjwt5nnvW5/bmQndTa13j+7RhkRgBSvU
-> piv-p256 vRzPNw Awpc8paUfKnP6r0bYsaoeDE9GVSnads4/a3jCVScgS4V
YydKOS09kyZDYN843SHIsYUimtSQKvGhIuycPWOFojc
-> piv-p256 zqq/iw A54xbcufPkLpTD+N47AiIe/xZ/0vA5kDJ4p3rIZw0a4A
1WFP2K3tfUxtdKDBEmT3cx/u1i5nCzFR7cK4kN3WjC4
-> ssh-ed25519 YFSOsg L0lPSkoPVRKGlJ9MzkJx+cQvnZw/5m/j/JO4aRzd52Q
o/N7zQkvbGGoadiJSvL6lfuP63uqzxEIxDtIg4tgKIo
-> ssh-ed25519 iHV63A qfLWZhbDisCSJ4vFFTR+XpRUR0WViuAqarf56M0ekT4
ZSWW34pFRr0M2jFhnphIPJ5ch37ASM6OgTzyHSo0KAs
-> ssh-ed25519 BVsyTA JcFezSIfTF+AP8LYfFqz+wIpUrE0aoc1usiLtWxAPQE
F9uhFyCPK46kIy+ud4V5/ESacQgc9R0JV+JTEZO6nBI
-> ssh-ed25519 +3V2lQ G4yT1e7B5O2Gy6tusRMxuWOFScynWfFY5AjrJvxMK1o
n1OVFRqzijWlc+B93cBNdFPz+8CBYOsI5hpF1wz7xr0
--- 61u55uUc7z59iHF1IeyBLmcR6u7STUhpOPb/ODf75Vc

View file

@@ -60,6 +60,7 @@ in
"grafana-keycloak-client-secret.age".publicKeys = flora6Keys ++ adminKeys;
"grafana-smtp-password.age".publicKeys = flora6Keys ++ adminKeys;
"alertmanager-envfile.age".publicKeys = flora6Keys ++ adminKeys;
"nachtigall-metrics-nginx-basic-auth.age".publicKeys = nachtigallKeys ++ adminKeys;
"nachtigall-metrics-prometheus-basic-auth-password.age".publicKeys =
flora6Keys ++ nachtigallKeys ++ adminKeys;

View file

@@ -19,6 +19,11 @@ resource "namecheap_domain_records" "pub-solar" {
type = "A"
address = "80.71.153.210"
}
record {
hostname = "alerts"
type = "A"
address = "10.7.6.2"
}
record {
hostname = "git"
type = "CNAME"