matrix-docker-ansible-deploy/roles/matrix-postgres/tasks/import_postgres.yml
Slavi Pantaleev e3dca2f66f Try to avoid Docker logs growing too much for one-off containers
We recently had a report of the Postgres backup container's log file
growing until /var/lib/docker ran out of disk space.

Trying to prevent similar problems in the future.
2020-09-01 09:03:48 +03:00


---

# Pre-checks

- name: Fail if Postgres not enabled
  fail:
    msg: "Postgres via the matrix-postgres role is not enabled (`matrix_postgres_enabled`). Cannot import."
  when: "not matrix_postgres_enabled|bool"

- name: Fail if playbook called incorrectly
  fail:
    msg: "The `server_path_postgres_dump` variable needs to be provided to this playbook, via --extra-vars"
  when: "server_path_postgres_dump is not defined or server_path_postgres_dump.startswith('<')"
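
# Example invocation (a sketch; the exact playbook file, inventory path and tag name
# depend on this playbook's documentation and your own setup):
#
#   ansible-playbook -i inventory/hosts setup.yml \
#     --tags=import-postgres \
#     --extra-vars='server_path_postgres_dump=/path/on/the/server/postgres.sql.gz'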

- name: Check if the provided Postgres dump file exists
  stat:
    path: "{{ server_path_postgres_dump }}"
  register: result_server_path_postgres_dump_stat

- name: Fail if provided Postgres dump file doesn't exist
  fail:
    msg: "File cannot be found on the server at {{ server_path_postgres_dump }}"
  when: "not result_server_path_postgres_dump_stat.stat.exists"

# Defaults

- name: Set postgres_start_wait_time, if not provided
  set_fact:
    postgres_start_wait_time: 15
  when: "postgres_start_wait_time|default('') == ''"

- name: Set postgres_import_wait_time, if not provided
  set_fact:
    postgres_import_wait_time: "{{ 7 * 86400 }}"
  when: "postgres_import_wait_time|default('') == ''"
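
# Both wait times above can be overridden via --extra-vars.
# `postgres_start_wait_time` (15 seconds by default) is how long we pause for Postgres to come up.
# `postgres_import_wait_time` defaults to 7 * 86400 = 604800 seconds (7 days) and is used
# as the `async` timeout for the import task at the end of this file.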

# Actual import work

- name: Ensure matrix-postgres is started
  service:
    name: matrix-postgres
    state: started
    daemon_reload: yes
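# `daemon_reload: yes` above makes systemd re-read its unit files before starting,
# so a freshly (re)generated matrix-postgres.service definition is picked up.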

- name: Wait a bit, so that Postgres can start
  wait_for:
    timeout: "{{ postgres_start_wait_time }}"
  delegate_to: 127.0.0.1
  become: false
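
# `wait_for` with only a `timeout` simply pauses for that many seconds.
# The pause is delegated to 127.0.0.1 (the machine running Ansible) and runs without
# privilege escalation, since sleeping does not need to happen on the remote host.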

- import_tasks: tasks/util/detect_existing_postgres_version.yml

- name: Abort, if no existing Postgres version detected
  fail:
    msg: "Could not find existing Postgres installation"
  when: "not matrix_postgres_detected_existing|bool"
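
# `detect_existing_postgres_version.yml` inspects the existing Postgres data directory
# and sets `matrix_postgres_detected_existing` (checked by the task above), among other
# facts describing the currently-installed Postgres version.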

# Starting the database container had automatically created the default
# role (`matrix_postgres_connection_username`) and database (`matrix_postgres_db_name`).
# The dump most likely contains those same entries and would try to re-create them, leading to errors.
# We need to skip over those lines.
- name: Generate Postgres database import command
  set_fact:
    matrix_postgres_import_command: >-
      {{ matrix_host_command_docker }} run --rm --name matrix-postgres-import
      --log-driver=none
      --user={{ matrix_user_uid }}:{{ matrix_user_gid }}
      --cap-drop=ALL
      --network={{ matrix_docker_network }}
      --env-file={{ matrix_postgres_base_path }}/env-postgres-psql
      -v {{ server_path_postgres_dump }}:/{{ server_path_postgres_dump|basename }}:ro
      --entrypoint=/bin/sh
      {{ matrix_postgres_docker_image_latest }}
      -c "cat /{{ server_path_postgres_dump|basename }} |
      {{ 'gunzip |' if server_path_postgres_dump.endswith('.gz') else '' }}
      grep -vE '^CREATE ROLE {{ matrix_postgres_connection_username }}' |
      grep -vE '^CREATE DATABASE {{ matrix_postgres_db_name }}' |
      psql -v ON_ERROR_STOP=1 -h matrix-postgres"
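
# Notes on the import command above:
# - `--log-driver=none` keeps this one-off container from writing its (potentially huge)
#   output into Docker's logs under /var/lib/docker.
# - The dump file is mounted read-only into the container and streamed through `gunzip`
#   (only when the file name ends in `.gz`) and `grep` before being piped into `psql`.
# - `psql -v ON_ERROR_STOP=1` aborts on the first error instead of continuing with a
#   partially-imported database.
# - The container joins the `matrix_docker_network` network, so `psql` can reach the
#   database at the `matrix-postgres` hostname.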

# This is a hack.
# See: https://ansibledaily.com/print-to-standard-output-without-escaping/
#
# We want to run `debug: msg=".."`, but that dumps it as JSON and escapes double quotes within it,
# which ruins the command (`matrix_postgres_import_command`)
- name: Note about Postgres importing alternative
  set_fact:
    dummy: true
  with_items:
    - >-
        Importing Postgres database using the following command: `{{ matrix_postgres_import_command }}`.
        If this crashes, you can stop Postgres (`systemctl stop matrix-postgres`),
        delete its existing data (`rm -rf {{ matrix_postgres_data_path }}/*`), start it again (`systemctl start matrix-postgres`)
        and manually run the above import command directly on the server.
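
# The import may take a very long time for large databases.
# Running it with `async` (up to `postgres_import_wait_time` seconds) and `poll: 10`
# makes Ansible background the command and re-check it every 10 seconds,
# instead of holding a single long-running task open until the connection times out.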
- name: Perform Postgres database import
  command: "{{ matrix_postgres_import_command }}"
  async: "{{ postgres_import_wait_time }}"
  poll: 10
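
# If the import seems to hang, one way (a suggestion, not part of this role) to confirm
# the one-off import container is still running on the server is:
#   docker ps --filter name=matrix-postgres-import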