diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index ea5986b7..99cd7019 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -36,9 +36,19 @@ jobs:
       - name: git diff
         run: git diff --exit-code

+  shellcheck:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      # ShellCheck should already be installed on GitHub runners, but install it explicitly to be sure
+      - name: install shellcheck
+        run: sudo apt install --yes shellcheck
+      - name: shellcheck
+        run: shellcheck scripts/*.sh
+
   nix-flake-check:
     # Run after pre checks
-    needs: [license-check, flake-checker, nix-fmt]
+    needs: [license-check, flake-checker, nix-fmt, shellcheck]
     runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v4
diff --git a/scripts/backup.sh b/scripts/backup.sh
new file mode 100755
index 00000000..69f6602e
--- /dev/null
+++ b/scripts/backup.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: MPL-2.0
+
+set -e
+
+#
+# backup
+#
+
+# Check that rsync exists
+if [ ! -x "$(command -v rsync)" ]; then
+  echo "rsync command not found, cannot backup"
+  exit 1
+fi
+RSYNC_ARGS="-avhP"
+
+HEADLESS_SYSTEM=false
+USER_HOST=$2
+
+# Check that the machine name is known
+case $1 in
+  vps)
+    HEADLESS_SYSTEM=true
+    ;;
+  *)
+    echo "Unknown machine name"
+    exit 1
+    ;;
+esac
+
+# Check that the target folder exists
+USER_DEST=$3
+if [ ! -d "$USER_DEST" ]; then
+  echo "Failed to find backup target"
+  exit 1
+fi
+BACKUP_DEST="$USER_DEST"
+
+# This is a normal headless system
+if [ "$HEADLESS_SYSTEM" = true ]; then
+  export DOCKER_COMPOSE_RUNNER_DEST="$BACKUP_DEST/docker-compose-runner"
+  mkdir -p "$DOCKER_COMPOSE_RUNNER_DEST"
+
+  # Backup all of the docker data
+  #
+  # TODO: can we have a userns mapped folder for rsync to read/write from?
+  # and even then potentially a user?
+  "$(command -v rsync)" $RSYNC_ARGS --rsync-path="sudo rsync" "$USER_HOST:/var/lib/docker-compose-runner/" "$DOCKER_COMPOSE_RUNNER_DEST"
+fi
+
+# Ensure the filesystem is synced
+sync
+
+echo "Backup complete!"
+date
diff --git a/scripts/restore.sh b/scripts/restore.sh
new file mode 100644
index 00000000..c3ce9e07
--- /dev/null
+++ b/scripts/restore.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: MPL-2.0
+
+set -e
+
+#
+# restore
+#
+
+# Check that rsync exists
+if [ ! -x "$(command -v rsync)" ]; then
+  echo "rsync command not found, cannot restore"
+  exit 1
+fi
+RSYNC_ARGS="-avhP"
+
+HEADLESS_SYSTEM=false
+USER_HOST=$2
+
+# Check that the machine name is known
+case $1 in
+  vps)
+    HEADLESS_SYSTEM=true
+    ;;
+  *)
+    echo "Unknown machine name"
+    exit 1
+    ;;
+esac
+
+# Check that the source folder exists
+USER_SRC=$3
+if [ ! -d "$USER_SRC" ]; then
+  echo "Failed to find restore source"
+  exit 1
+fi
+RESTORE_SRC="$USER_SRC"
+
+# This is a normal headless system
+if [ "$HEADLESS_SYSTEM" = true ]; then
+  export DOCKER_COMPOSE_RUNNER_SRC="$RESTORE_SRC/docker-compose-runner"
+  if [ ! -d "$DOCKER_COMPOSE_RUNNER_SRC" ]; then
+    echo "Failed to find docker-compose-runner data to restore"
+    exit 1
+  fi
+
+  # Stop services as we are about to mutate data
+  ssh "$USER_HOST" sudo systemctl stop docker-compose-runner.service
+
+  # Restore all of the docker data
+  "$(command -v rsync)" $RSYNC_ARGS "$DOCKER_COMPOSE_RUNNER_SRC" "$USER_HOST:/var/lib/docker-compose-runner/"
+
+  # Restart services
+  ssh "$USER_HOST" sudo systemctl start docker-compose-runner.service
+fi
+
+echo "Restore complete!"
+date
diff --git a/tests/files/ssh_config b/tests/files/ssh_config
new file mode 100644
index 00000000..2923e527
--- /dev/null
+++ b/tests/files/ssh_config
@@ -0,0 +1,2 @@
+Host vps
+  IdentityFile /etc/ssh/test_ssh_id_ed25519
diff --git a/tests/files/test_ssh_id_ed25519 b/tests/files/test_ssh_id_ed25519
new file mode 100644
index 00000000..d0ed7f9e
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBwClEXAWm/c9/42a0VdvVowOxAkdiJUFyeqUwkNP5ioQAAAIj3ZErF92RK
+xQAAAAtzc2gtZWQyNTUxOQAAACBwClEXAWm/c9/42a0VdvVowOxAkdiJUFyeqUwkNP5ioQ
+AAAEAwbllncNKWZCOFyHkejkA4GZNBl9O6IKQf3pKpMj22SHAKURcBab9z3/jZrRV29WjA
+7ECR2IlQXJ6pTCQ0/mKhAAAABHRlc3QB
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/files/test_ssh_id_ed25519.pub b/tests/files/test_ssh_id_ed25519.pub
new file mode 100644
index 00000000..61f2903c
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHAKURcBab9z3/jZrRV29WjA7ECR2IlQXJ6pTCQ0/mKh test
diff --git a/tests/vps.nix b/tests/vps.nix
index b17fcf71..f050f635 100644
--- a/tests/vps.nix
+++ b/tests/vps.nix
@@ -5,7 +5,7 @@
 (import ./lib.nix) {
   name = "vps test";
   nodes = {
-    machine = { self, pkgs, ... }: {
+    vps = { self, pkgs, ... }: {
       imports = [
         self.nixosModules.headlessSystem
@@ -21,26 +21,83 @@
       networking.hosts = {
         "127.0.0.1" = [ "ahayzen.com" ];
       };
+
+      # Allow test ssh authentication
+      users.users.headless.openssh.authorizedKeys.keyFiles = [
+        ./files/test_ssh_id_ed25519.pub
+      ];
+    };
+
+    backup = { self, pkgs, ... }: {
+      environment = {
+        etc = {
+          "ahayzen.com/backup.sh".source = ../scripts/backup.sh;
+          "ssh/test_ssh_id_ed25519" = {
+            mode = "0400";
+            source = ./files/test_ssh_id_ed25519;
+          };
+          "ssh/test_ssh_id_ed25519.pub".source = ./files/test_ssh_id_ed25519.pub;
+        };
+
+        # Extra packages for the test
+        systemPackages = with pkgs; [
+          python3 rsync
+        ];
+      };
+
+      services.openssh.enable = true;
+
+      # Setup IdentityFile for vps
+      programs.ssh.extraConfig = builtins.readFile ./files/ssh_config;
+    };
   };

   testScript = ''
     start_all()

-    # Wait for docker runner
-    machine.wait_for_unit("docker-compose-runner", timeout=90)
+    #exit()
+
+    #
+    # Test that the VPS boots and shows wagtail admin
+    #
+
+    with subtest("Ensure docker starts and wagtail admin works"):
+        # Wait for docker runner
+        vps.wait_for_unit("docker-compose-runner", timeout=90)
+
+        # Wait for caddy to start
+        vps.wait_for_open_port(80, timeout=60)
+
+        # Wait for wagtail to start
+        vps.wait_until_succeeds('journalctl --boot --no-pager --quiet --unit docker.service --grep "\[INFO\] Listening at: http:\/\/0\.0\.0\.0:8080"', timeout=60)
+
+        # Test that admin page exists
+        output = vps.succeed("curl --silent ahayzen.com:80/admin/login/?next=/admin/")
+        assert "Sign in" in output, f"'{output}' does not contain 'Sign in'"
+
+        # Test that wagtail port is not open externally
+        vps.fail("curl --silent ahayzen.com:8080")
+
+    #
+    # Test that we can backup the VPS
+    #

-    # Wait for caddy to start
-    machine.wait_for_open_port(80, timeout=60)
+    with subtest("Access hostkey"):
+        vps.wait_for_open_port(22, timeout=30)
+        backup.succeed("ssh -vvv -o StrictHostKeyChecking=accept-new headless@vps exit")

-    # Wait for wagtail to start
-    machine.wait_until_succeeds('journalctl --boot --no-pager --quiet --unit docker.service --grep "\[INFO\] Listening at: http:\/\/0\.0\.0\.0:8080"', timeout=60)
+    with subtest("Attempt to run a backup"):
+        backup.succeed("mkdir -p /tmp/backup-root")
-p /tmp/backup-root") + backup.succeed("/etc/ahayzen.com/backup.sh vps headless@vps /tmp/backup-root") - # Test that admin page exists - output = machine.succeed("curl --silent ahayzen.com:80/admin/login/?next=/admin/") - assert "Sign in" in output, f"'{output}' does not contain 'Sign in'" + # Check volumes are appearing + backup.succeed("test -d /tmp/backup-root/docker-compose-runner/caddy/persistent") + backup.succeed("test -d /tmp/backup-root/docker-compose-runner/caddy/config") + backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/db") + backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/media") + backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/static") - # Test that wagtail port is not open externally - machine.fail("curl --silent ahayzen.com:8080") + # Check that known files exist + backup.succeed("test -e /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3") ''; }
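
Both scripts take the same three positional arguments: a known machine name, the SSH user@host to copy from or to, and an existing local directory that holds (or will receive) the docker-compose-runner data. A minimal sketch of a manual run outside the NixOS test, assuming the same headless@vps SSH target that the test configures; the local ~/backups/vps path is only a placeholder:

  # Pull /var/lib/docker-compose-runner from the VPS into the local backup folder
  bash scripts/backup.sh vps headless@vps ~/backups/vps

  # Push a previously taken backup back to the VPS
  # (stops docker-compose-runner.service, rsyncs the data, then restarts the service)
  bash scripts/restore.sh vps headless@vps ~/backups/vps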