chore: remove nougat-2
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
Benjamin Bädorf 2023-09-10 21:44:38 +02:00
parent be42eb3a86
commit 8ef898f575
No known key found for this signature in database
GPG key ID: 4406E80E13CD656C
12 changed files with 1 additions and 818 deletions

View file

@ -147,7 +147,7 @@
yule = pubsolaros ++ [users.yule];
droppie = yule ++ [];
nougat-2 = yule ++ [];
pie = yule ++ [];
};
};
};

View file

@ -1,79 +0,0 @@
{
  config,
  lib,
  pkgs,
  self,
  ...
}: let
  exDomain = (import ./ex-domain.nix) lib;
  pubsolarDomain = import ./pubsolar-domain.nix;
  # DNS-01 challenge configuration shared by every certificate below.
  # The API key is only referenced by path; the file itself is an
  # agenix secret decrypted on the host.
  dnsChallenge = {
    dnsProvider = "hostingde";
    credentialsFile = "${pkgs.writeText "hostingde-creds" ''
      HOSTINGDE_API_KEY_FILE=${config.age.secrets."hosting.de-api.key".path}
    ''}";
  };
  # All domains a certificate is requested for. The first group are
  # literal names, the second group are subdomains of pubsolarDomain.
  certDomains =
    [
      "b12f.io"
      "mail.b12f.io"
      "transmission.b12f.io"
      exDomain
      "mail.${exDomain}"
      pubsolarDomain
    ]
    ++ map (sub: "${sub}.${pubsolarDomain}") [
      "www"
      "auth"
      "git"
      "ci"
      "list"
      "obs-portal"
    ];
in {
  age.secrets."hosting.de-api.key" = {
    file = "${self}/secrets/hosting.de-api.key";
    mode = "440";
    group = "acme";
  };

  # Shared state directory for issued certificates, readable by the
  # acme group.
  systemd.tmpfiles.rules = [
    "d '/data/acme' 0750 root acme - -"
  ];
  users.groups.acme = {};
  ids.uids.acme = 997;
  ids.gids.acme = 997;

  containers.acme = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.101.0";
    localAddress = "192.168.106.0";
    hostAddress6 = "fc00::1";
    localAddress6 = "fc00::6";
    bindMounts = {
      "/var/lib/acme" = {
        hostPath = "/data/acme";
        isReadOnly = false;
      };
      # Expose the decrypted API key read-only inside the container,
      # at the same path the host uses.
      "${config.age.secrets."hosting.de-api.key".path}" = {
        hostPath = "${config.age.secrets."hosting.de-api.key".path}";
        isReadOnly = true;
      };
    };
    config = {
      networking.nameservers = ["1.1.1.1"];
      users.groups.acme = config.users.groups.acme;
      security.acme = {
        acceptTerms = true;
        defaults.email = "acme@benjaminbaedorf.eu";
        defaults.server = "https://acme-staging-v02.api.letsencrypt.org/directory";
        defaults.group = "acme";
        # Every domain gets the identical hosting.de DNS-01 settings.
        certs = lib.genAttrs certDomains (_: dnsChallenge);
      };
    };
  };
}

View file

@ -1,181 +0,0 @@
{
  config,
  lib,
  pkgs,
  self,
  ...
}: let
  pubsolarDomain = import ./pubsolar-domain.nix;
  # Machine user for CI pipelines
in {
  networking.firewall.allowedTCPPorts = [80 443];
  # Keep NetworkManager away from the container's virtual ethernet pair.
  networking.networkmanager.unmanaged = ["interface-name:ve-caddy"];
  # NAT the container's traffic out through the physical uplink.
  networking.nat = {
    enable = true;
    internalInterfaces = ["ve-caddy"];
    externalInterface = "enp0s31f6";
    # Lazy IPv6 connectivity for the container
    enableIPv6 = true;
  };

  systemd.tmpfiles.rules = [
    "d '/data/www' 0750 root www - -"
    "d '/data/caddy' 0750 root caddy - -"
  ];
  users.groups.caddy = {};
  users.groups.www = {};
  users.users.hakkonaut.extraGroups = ["www"];
  ids.uids.www = 996;
  ids.gids.www = 996;

  # Bind-mount persistent data into the paths the services expect.
  fileSystems."/var/lib/caddy" = {
    device = "/data/caddy";
    options = ["bind"];
  };
  fileSystems."/srv/www" = {
    device = "/data/www";
    options = ["bind"];
  };

  containers.caddy = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.101.0";
    localAddress = "192.168.103.0";
    hostAddress6 = "fc00::1";
    localAddress6 = "fc00::3";
    # Expose HTTP/HTTPS from the container on the host.
    forwardPorts = [
      {
        containerPort = 443;
        hostPort = 443;
        protocol = "tcp";
      }
      {
        containerPort = 80;
        hostPort = 80;
        protocol = "tcp";
      }
    ];
    bindMounts = {
      "/srv/www/" = {
        hostPath = "/data/www";
        isReadOnly = false;
      };
      "/var/lib/caddy/" = {
        hostPath = "/data/caddy";
        isReadOnly = false;
      };
      # Certificates live in /data/acme on the host; mount them where
      # Caddy keeps its Let's Encrypt certificate store (auto_https is
      # disabled below, so Caddy only reads certs, it never issues them).
      "/var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory" = {
        hostPath = "/data/acme";
        isReadOnly = false;
      };
    };
    config = {
      users.groups.caddy = {};
      users.groups.www = {};
      users.groups.acme = {};
      users.users.caddy.extraGroups = ["www" "acme"];
      networking.firewall.allowedTCPPorts = [80 443];
      # FIX: was "nameserver 1.1.1.0", which is not a resolver address;
      # every other container on this host resolves via 1.1.1.1.
      environment.etc."resolv.conf".text = "nameserver 1.1.1.1";
      services.caddy = {
        enable = lib.mkForce true;
        # Certificates are managed externally (see bindMounts above);
        # stop Caddy from provisioning its own.
        globalConfig = lib.mkForce ''
          auto_https disable_certs
        '';
        virtualHosts = {
          # Caddy admin API dashboard.
          "dashboard.nougat-2.b12f.io" = {
            extraConfig = ''
              reverse_proxy :2019
            '';
          };
          "www.b12f.io" = {
            extraConfig = ''
              redir https://pub.solar{uri}
            '';
          };
          "mail.b12f.io" = {
            extraConfig = ''
              redir / /realms/pub.solar/account temporary
              reverse_proxy :8080
            '';
          };
          "${pubsolarDomain}" = {
            logFormat = lib.mkForce ''
              output discard
            '';
            extraConfig = ''
              # PubSolarOS images
              handle /os/download/* {
                root * /srv/www
                file_server /os/download/* browse
              }

              # serve base domain pub.solar for mastodon.pub.solar
              # https://masto.host/mastodon-usernames-different-from-the-domain-used-for-installation/
              handle /.well-known/host-meta {
                redir https://mastodon.${pubsolarDomain}{uri}
              }

              # pub.solar website
              handle {
                root * /srv/www/pub.solar
                try_files {path}.html {path}
                file_server
              }

              # minimal error handling, respond with status code and text
              handle_errors {
                respond "{http.error.status_code} {http.error.status_text}"
              }
            '';
          };
          "www.${pubsolarDomain}" = {
            logFormat = lib.mkForce ''
              output discard
            '';
            extraConfig = ''
              redir https://${pubsolarDomain}{uri}
            '';
          };
          # Keycloak container, see ./keycloak.nix.
          "auth.${pubsolarDomain}" = {
            logFormat = lib.mkForce ''
              output discard
            '';
            extraConfig = ''
              redir / /realms/${pubsolarDomain}/account temporary
              reverse_proxy 192.168.104.0:8080
            '';
          };
          # Gitea/Forgejo container, see ./gitea.nix.
          "git.${pubsolarDomain}" = {
            logFormat = lib.mkForce ''
              output discard
            '';
            extraConfig = ''
              redir /user/login /user/oauth2/keycloak temporary
              reverse_proxy 192.168.105.0:3000
            '';
          };
          "ci.${pubsolarDomain}" = {
            logFormat = lib.mkForce ''
              output discard
            '';
            extraConfig = ''
              reverse_proxy 192.168.101.0:8080
            '';
          };
        };
      };
    };
  };
}

View file

@ -1,137 +0,0 @@
{
config,
lib,
pkgs,
self,
...
}: let
pubsolarDomain = import ./pubsolar-domain.nix;
getSecret = name:
lib.attrsets.setAttrByPath [name] {
file = "${self}/secrets/${name}.age";
mode = "600";
owner = "concourse";
};
keys = [
"concourse-session-signing-key"
"concourse-worker-key"
"concourse-tsa-host-key"
];
secrets =
[
"concourse-secrets"
"concourse-db-secrets"
]
++ keys;
in {
age.secrets = lib.lists.foldl (a: b: a // getSecret b) {} secrets;
users.users.concourse = {
description = "Concourse Service";
home = "/var/lib/concourse";
useDefaultShell = true;
group = "concourse";
isSystemUser = true;
};
users.groups.concourse = {};
users.groups.postgres = {};
ids.uids.concourse = 995;
ids.gids.concourse = 995;
systemd.tmpfiles.rules = [
"d '/data/concourse/db' 0770 root postgres - -"
];
system.activationScripts.mkConcourseNet = let
docker = config.virtualisation.oci-containers.backend;
dockerBin = "${pkgs.${docker}}/bin/${docker}";
in ''
${dockerBin} network inspect concourse-net >/dev/null 2>&1 || ${dockerBin} network create concourse-net --subnet 172.20.0.0/24
'';
containers.concourse = {
autoStart = true;
privateNetwork = true;
hostAddress = "192.168.101.0";
localAddress = "192.168.107.0";
hostAddress6 = "fc00::1";
localAddress6 = "fc00::7";
bindMounts = {
"/var/lib/postgresql/14" = {
hostPath = "/data/concourse/db";
isReadOnly = false;
};
"${config.age.secrets.keycloak-database-password.path}" = {
hostPath = "${config.age.secrets.keycloak-database-password.path}";
isReadOnly = true;
};
};
config = {
networking.nameservers = ["1.1.1.1"];
virtualisation.oci-containers = {
containers."concourse-db" = {
image = "postgres:14";
autoStart = true;
user = builtins.toString config.ids.uids.postgres;
volumes = [
"/data/concourse/db:/var/lib/postgresql/data"
];
extraOptions = [
"--network=concourse-net"
];
environmentFiles = [
config.age.secrets.concourse-db-secrets.path
];
};
containers."concourse" = {
image = "concourse/concourse:7.9.1";
autoStart = true;
user = builtins.toString config.ids.uids.concourse;
ports = [
"8080:8080"
];
dependsOn = ["concourse-db"];
extraOptions = [
"--network=concourse-net"
];
volumes = [
"${config.age.secrets.concourse-session-signing-key.path}:/keys/session_signing_key"
"${config.age.secrets.concourse-worker-key.path}:/keys/worker_key"
"${config.age.secrets.concourse-tsa-host-key.path}:/keys/tsa_host_key"
];
environment = {
CONCOURSE_EXTERNAL_URL = "https://ci.${pubsolarDomain}";
CONCOURSE_ADD_LOCAL_USER = "crew:changeme";
CONCOURSE_MAIN_TEAM_LOCAL_USER = "crew";
# instead of relying on the default "detect"
CONCOURSE_WORKER_BAGGAGECLAIM_DRIVER = "overlay";
CONCOURSE_X_FRAME_OPTIONS = "allow";
CONCOURSE_CONTENT_SECURITY_POLICY = "*";
CONCOURSE_CLUSTER_NAME = "pub.solar";
CONCOURSE_WORKER_CONTAINERD_DNS_SERVER = "8.8.8.8";
CONCOURSE_SESSION_SIGNING_KEY = "/keys/session_signing_key";
CONCOURSE_TSA_HOST_KEY = "/keys/tsa_host_key";
CONCOURSE_TSA_AUTHORIZED_KEYS = "/keys/worker_key";
# For ARM-based machine, change the Concourse runtime to "houdini"
CONCOURSE_WORKER_RUNTIME = "containerd";
};
environmentFiles = [
config.age.secrets.concourse-secrets.path
];
};
};
}

View file

@ -1,136 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
{
config,
pkgs,
lib,
...
}: let
psCfg = config.pub-solar;
in {
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
];
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
boot.kernelParams = [
"boot.shell_on_fail=1"
"ip=135.181.179.123::135.181.179.65:255.255.255.192:nougat-2.b12f.io::off"
];
networking.hostName = "nougat-2";
# The mdadm RAID1s were created with 'mdadm --create ... --homehost=hetzner',
# but the hostname for each machine may be different, and mdadm's HOMEHOST
# setting defaults to '<system>' (using the system hostname).
# This results mdadm considering such disks as "foreign" as opposed to
# "local", and showing them as e.g. '/dev/md/hetzner:root0'
# instead of '/dev/md/root0'.
# This is mdadm's protection against accidentally putting a RAID disk
# into the wrong machine and corrupting data by accidental sync, see
# https://bugzilla.redhat.com/show_bug.cgi?id=606481#c14 and onward.
# We do not worry about plugging disks into the wrong machine because
# we will never exchange disks between machines, so we tell mdadm to
# ignore the homehost entirely.
environment.etc."mdadm.conf".text = ''
HOMEHOST <ignore>
ARRAY /dev/md/SSD metadata=1.2 name=nixos:SSD UUID=f8189c09:cb247cc7:22b79b5f:df888705
ARRAY /dev/md/HDD metadata=1.2 name=nixos:HDD UUID=85ed8a8e:9ddc5f09:c6ef6110:c00728fa
'';
# The RAIDs are assembled in stage1, so we need to make the config
# available there.
boot.initrd.services.swraid.enable = true;
boot.initrd.services.swraid.mdadmConf = config.environment.etc."mdadm.conf".text;
boot.initrd.network.enable = true;
boot.initrd.network.ssh = {
enable = true;
port = 22;
authorizedKeys =
if psCfg.user.publicKeys != null
then psCfg.user.publicKeys
else [];
hostKeys = ["/etc/secrets/initrd/ssh_host_ed25519_key"];
};
# Network (Hetzner uses static IP assignments, and we don't use DHCP here)
networking.useDHCP = false;
networking.interfaces."enp0s31f6".ipv4.addresses = [
{
address = "135.181.179.123";
prefixLength = 26;
}
];
networking.defaultGateway = "135.181.179.65";
networking.interfaces."enp0s31f6".ipv6.addresses = [
{
address = "2a01:4f9:3a:2170::1";
prefixLength = 64;
}
];
networking.defaultGateway6 = {
address = "fe80::1";
interface = "enp0s31f6";
};
networking.nameservers = ["1.1.1.1"];
# Initial empty root password for easy login:
users.users.root.initialHashedPassword = "";
users.users.root.openssh.authorizedKeys.keys =
if psCfg.user.publicKeys != null
then psCfg.user.publicKeys
else [];
users.users.hakkonaut = {
home = "/home/hakkonaut";
description = "CI and automation user";
useDefaultShell = true;
group = "hakkonaut";
isSystemUser = true;
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGP5MvCwNRtCcP1pSDrn0XZTNlpOqYnjHDm9/OI4hECW hakkonaut@flora-6"
];
};
users.groups.hakkonaut = {};
ids.uids.hakkonaut = 998;
ids.gids.hakkonaut = 998;
services.openssh.enable = true;
services.openssh.settings.PermitRootLogin = "prohibit-password";
pub-solar.core.disk-encryption-active = false;
pub-solar.core.lite = true;
virtualisation = {
docker = {
enable = true;
};
oci-containers = {
backend = "docker";
};
};
security.sudo.extraRules = [
{
users = ["${psCfg.user.name}"];
commands = [
{
command = "ALL";
options = ["NOPASSWD"];
}
];
}
];
# This value determines the NixOS release with which your system is to be
# compatible, in order to avoid breaking some software such as database
# servers. You should change this only after NixOS release notes say you
# should.
system.stateVersion = "23.05"; # Did you read the comment?
}

View file

@ -1,7 +0,0 @@
{suites, ...}: {
  # Host entry point: the machine-specific configuration plus every
  # profile assigned to this host via the flake's suites.
  imports = [./nougat-2.nix] ++ suites.nougat-2;
}

View file

@ -1 +0,0 @@
# Assembles the external domain from shuffled fragments so the plain
# string never appears verbatim in the repository. Folding each piece
# onto the front of the accumulator reverses the list while joining.
lib: lib.foldl (acc: piece: piece + acc) "" ["et" ".n" "zz" "wd" "h"]

View file

@ -1,124 +0,0 @@
# Gitea (served by the Forgejo package) running in a NixOS container,
# with its PostgreSQL data and repository state bind-mounted from /data.
{
  config,
  lib,
  pkgs,
  self,
  ...
}: let
  pubsolarDomain = import ./pubsolar-domain.nix;
in {
  # Database password is an agenix secret; the gitea group gets read
  # access so the service inside the container can use it.
  age.secrets.gitea-database-password = {
    file = "${self}/secrets/gitea-database-password.age";
    mode = "600";
    group = "gitea";
  };
  # Mailer credentials, currently unused (mailer.ENABLED = false below).
  # age.secrets.gitea-mailer-password = {
  #   file = "${self}/secrets/gitea-mailer-password.age";
  #   mode = "600";
  #   owner = "gitea";
  # };

  # Persistent state directories on the host, owned by the groups the
  # container maps them to.
  systemd.tmpfiles.rules = [
    "d '/data/gitea/db' 0770 root postgres - -"
    "d '/data/gitea/gitea' 0770 root gitea - -"
  ];
  users.groups.postgres = {};
  users.groups.gitea = {};
  ids.uids.gitea = 994;
  ids.gids.gitea = 994;

  containers.gitea = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.101.0";
    localAddress = "192.168.105.0";
    hostAddress6 = "fc00::1";
    localAddress6 = "fc00::5";
    bindMounts = {
      "/var/lib/postgresql/14" = {
        hostPath = "/data/gitea/db";
        isReadOnly = false;
      };
      "/var/lib/gitea" = {
        hostPath = "/data/gitea/gitea";
        isReadOnly = false;
      };
      # The decrypted secret is exposed read-only inside the container
      # at the same path it has on the host.
      "${config.age.secrets.gitea-database-password.path}" = {
        hostPath = "${config.age.secrets.gitea-database-password.path}";
        isReadOnly = true;
      };
    };
    config = {
      networking.nameservers = ["1.1.1.1"];
      services.gitea = {
        enable = true;
        # Run Forgejo through the gitea module.
        package = pkgs.forgejo;
        appName = "pub.solar git server";
        database = {
          type = "postgres";
          passwordFile = config.age.secrets.gitea-database-password.path;
        };
        lfs.enable = true;
        # mailerPasswordFile = config.age.secrets.gitea-mailer-password.path;
        settings = {
          server = {
            DOMAIN = "git.${pubsolarDomain}";
            HTTP_ADDR = "127.0.0.1";
            HTTP_PORT = 3000;
            ROOT_URL = "https://git.${pubsolarDomain}";
          };
          # Mailer settings are kept but disabled; flip ENABLED once the
          # mailer password secret above is wired up.
          mailer = {
            ENABLED = false;
            PROTOCOL = "smtps";
            SMTP_ADDR = "mx2.greenbaum.cloud";
            SMTP_PORT = 465;
            FROM = ''"pub.solar git server" <gitea@pub.solar>'';
            USER = "admins@pub.solar";
          };
          "repository.signing" = {
            SIGNING_KEY = "default";
            MERGES = "always";
          };
          openid = {
            ENABLE_OPENID_SIGNIN = true;
            ENABLE_OPENID_SIGNUP = true;
          };
          # uncomment after initial deployment, first user is admin user
          # required to setup SSO (oauth openid-connect, keycloak auth provider)
          service.ALLOW_ONLY_EXTERNAL_REGISTRATION = true;
          service.ENABLE_NOTIFY_MAIL = true;
          session.COOKIE_SECURE = lib.mkForce true;
        };
      };
      # See: https://docs.gitea.io/en-us/signing/#installing-and-generating-a-gpg-key-for-gitea
      # Required for gitea server side gpg signatures
      # configured/setup manually in:
      # /var/lib/gitea/data/home/.gitconfig
      # /var/lib/gitea/data/home/.gnupg/
      # sudo su gitea
      # export GNUPGHOME=/var/lib/gitea/data/home/.gnupg
      # gpg --quick-gen-key 'pub.solar gitea <gitea@pub.solar>' ed25519
      # TODO: implement declarative GPG key generation and
      # gitea gitconfig
      programs.gnupg.agent = {
        enable = true;
        pinentryFlavor = "curses";
      };
      # Required to make gpg work without a graphical environment?
      # otherwise generating a new gpg key fails with this error:
      # gpg: agent_genkey failed: No pinentry
      # see: https://github.com/NixOS/nixpkgs/issues/97861#issuecomment-827951675
      environment.variables = {
        GPG_TTY = "$(tty)";
      };
    };
  };
}

View file

@ -1,64 +0,0 @@
# Hardware description for nougat-2: two LUKS-encrypted mdadm RAID1
# arrays (SSD for root/boot, HDD for /data) on an Intel machine.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}: {
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot = {
    initrd = {
      availableKernelModules = [
        "dm-snapshot"
        "xhci_pci"
        "ahci"
        "nvme"
        "usbhid"
        "usb_storage"
        "sd_mod"
        "dm-raid"
        "e1000e"
      ];
      kernelModules = [];
      # Both RAID arrays are LUKS-encrypted and unlocked in stage 1.
      luks.devices = {
        "ssd".device = "/dev/disk/by-id/md-uuid-f8189c09:cb247cc7:22b79b5f:df888705";
        "hdd".device = "/dev/disk/by-id/md-uuid-85ed8a8e:9ddc5f09:c6ef6110:c00728fa";
      };
    };
    kernelModules = ["kvm-intel"];
    extraModulePackages = [];
  };

  fileSystems = {
    "/" = {
      device = "/dev/disk/by-uuid/cb88e8b9-be51-43eb-a51a-cd021c90771c";
      fsType = "ext4";
    };
    "/boot" = {
      device = "/dev/disk/by-uuid/3F6D-065E";
      fsType = "vfat";
    };
    "/data" = {
      device = "/dev/disk/by-uuid/824341f0-fd56-4db7-bb7e-4f161d94144b";
      fsType = "ext4";
    };
  };

  swapDevices = [
    {device = "/dev/disk/by-uuid/f37e9f96-0174-4cac-a0bb-b63b2a67a4ad";}
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
  # still possible to use this option, but it's recommended to use it in conjunction
  # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
  networking.useDHCP = lib.mkDefault true;
  # networking.interfaces.eno1.useDHCP = lib.mkDefault true;

  nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
  powerManagement.cpuFreqGovernor = lib.mkDefault "powersave";
  hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -1,64 +0,0 @@
# Keycloak in a NixOS container, backed by PostgreSQL whose data lives
# on the host under /data/keycloak/db.
{
  config,
  lib,
  inputs,
  pkgs,
  self,
  ...
}: let
  domain = import ./pubsolar-domain.nix;
in {
  # Database password is an agenix secret shared with the container.
  age.secrets.keycloak-database-password = {
    file = "${self}/secrets/keycloak-database-password.age";
    mode = "770";
    group = "keycloak";
  };

  systemd.tmpfiles.rules = [
    "d '/data/keycloak/db' 0770 root postgres - -"
  ];
  users.groups = {
    postgres = {};
    keycloak = {};
  };
  ids.uids.keycloak = 993;
  ids.gids.keycloak = 993;

  containers.keycloak = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.101.0";
    localAddress = "192.168.104.0";
    hostAddress6 = "fc00::1";
    localAddress6 = "fc00::4";
    bindMounts = {
      "/var/lib/postgresql/14" = {
        hostPath = "/data/keycloak/db";
        isReadOnly = false;
      };
      # Expose the decrypted secret read-only at the host's path.
      "${config.age.secrets.keycloak-database-password.path}" = {
        hostPath = "${config.age.secrets.keycloak-database-password.path}";
        isReadOnly = true;
      };
    };
    config = {
      networking.nameservers = ["1.1.1.1"];
      services.keycloak = {
        enable = true;
        database.passwordFile = config.age.secrets.keycloak-database-password.path;
        settings = {
          hostname = "auth.${domain}";
          http-host = "0.0.0.0";
          http-port = 8080;
          # TLS is terminated in front of this container.
          proxy = "edge";
        };
        # Custom theme provided by a flake input.
        themes."pub.solar" =
          inputs.keycloak-theme-pub-solar.legacyPackages.${pkgs.system}.keycloak-theme-pub-solar;
      };
    };
  };
}

View file

@ -1,23 +0,0 @@
# Module list for the nougat-2 host.
#
# NOTE(review): the previous version bound config/pkgs/lib/self plus a
# `fix-atomic-container-restartsModulesPath` argument and let-bound
# psCfg/xdg, none of which were used by live code (the special
# modulesPath was only referenced by the commented-out import below).
# The dead bindings have been dropped; re-add the argument if that
# import is restored.
{...}: {
  imports = [
    ./configuration.nix
    ./acme.nix
    ./caddy.nix
    ./keycloak.nix
    ./gitea.nix
    # ./concourse.nix
    # "${fix-atomic-container-restartsModulesPath}/virtualisation/nixos-containers.nix"
  ];
}

View file

@ -1 +0,0 @@
# Primary domain all pub.solar services on this host are served under.
# NOTE(review): this is a b12f.io subdomain rather than the bare
# "pub.solar" zone — presumably a staging/migration setup; confirm
# before pointing production DNS at it.
"pub.solar.b12f.io"