diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index ea5986b7..99cd7019 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -36,9 +36,19 @@ jobs:
       - name: git diff
         run: git diff --exit-code
 
+  shellcheck:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      # Shellcheck should already be installed on GitHub runners, but install it explicitly to be safe
+      - name: install shellcheck
+        run: sudo apt install --yes shellcheck
+      - name: shellcheck
+        run: shellcheck scripts/*.sh
+
   nix-flake-check:
     # Run after pre checks
-    needs: [license-check, flake-checker, nix-fmt]
+    needs: [license-check, flake-checker, nix-fmt, shellcheck]
     runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v4
diff --git a/docs/commands/testing.md b/docs/commands/testing.md
index ab99b0e2..104f2439 100644
--- a/docs/commands/testing.md
+++ b/docs/commands/testing.md
@@ -4,8 +4,27 @@ SPDX-FileCopyrightText: Andrew Hayzen
 SPDX-License-Identifier: MPL-2.0
 -->
 
+# Test with flake check
+
+We need the sandbox disabled as the checks require network access.
+
+```console
+$ nix flake --option sandbox false check -L --show-trace
+```
+
 # Test in a VM
 
+Ensure that you add the following snippet to the configuration of the machine you want to test in a VM.
+
+```nix
+{
+  ahayzen.testing = true;
+}
+```
+
+> Note that if you are testing HTTP, update any `Caddyfile.vm` to use `http://localhost`
+> rather than `http://mydomain.com` so that the site can be accessed locally.
+
 ## `nixos-build`
 
 ```console
@@ -31,11 +50,3 @@ $ result/bin/run--vm
 ```console
 $ ssh -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no admin@localhost -p 2221
 ```
-
-## Flake check
-
-We need the sandbox disabled as we need network access
-
-```console
-$ nix flake --option sandbox false check -L --show-trace
-```
diff --git a/nixos/modules/headless/docker.nix b/nixos/modules/headless/docker.nix
index d781ee57..cc12306e 100755
--- a/nixos/modules/headless/docker.nix
+++ b/nixos/modules/headless/docker.nix
@@ -58,10 +58,40 @@
   };
 
   # Define the unpriv user for docker
-  users.users.unpriv = {
-    isNormalUser = true;
+  #
+  # Set this to a high id so that it remains stable
+  users = {
+    groups.unpriv = {
+      gid = 2000;
+    };
+    users.unpriv = {
+      isNormalUser = true;
+      group = "unpriv";
+      uid = 2000;
 
-    openssh.authorizedKeys.keys = config.ahayzen.publicKeys.group.user.developers;
+      # Map the root sub id to the same as the user (as it is unprivileged),
+      # then map the remaining uids high
+      subGidRanges = [
+        {
+          count = 1;
+          startGid = 2000;
+        }
+        {
+          count = 65535;
+          startGid = 200001;
+        }
+      ];
+      subUidRanges = [
+        {
+          count = 1;
+          startUid = 2000;
+        }
+        {
+          count = 65535;
+          startUid = 200001;
+        }
+      ];
+    };
   };
 
   virtualisation.docker = {
@@ -75,7 +105,7 @@
     daemon.settings = {
      dns = [ "9.9.9.9" ];
       no-new-privileges = true;
-      userns-remap = "unpriv:users";
+      userns-remap = "unpriv:unpriv";
     };
 
     # rootless is too problematic as it requires services to run as user services
diff --git a/scripts/backup.sh b/scripts/backup.sh
new file mode 100755
index 00000000..33843295
--- /dev/null
+++ b/scripts/backup.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: MPL-2.0
+
+set -e
+
+#
+# backup
+#
+
+# Check that rsync exists
+if [ ! -x "$(command -v rsync)" ]; then
+  echo "rsync command not found, cannot backup"
+  exit 1
+fi
+RSYNC_ARGS=(--archive --human-readable --partial --progress --rsync-path="sudo rsync")
+
+HEADLESS_SYSTEM=false
+USER_HOST=$2
+
+# Check that the machine name is known
+case $1 in
+  vps)
+    HEADLESS_SYSTEM=true
+    ;;
+  *)
+    echo "Unknown machine name"
+    exit 1
+    ;;
+esac
+
+# Check that the target folder exists
+USER_DEST=$3
+if [ ! -d "$USER_DEST" ]; then
+  echo "Failed to find backup target"
+  exit 1
+fi
+BACKUP_DEST="$USER_DEST"
+
+# This is a normal headless system
+if [ "$HEADLESS_SYSTEM" = true ]; then
+  export DOCKER_COMPOSE_RUNNER_DEST="$BACKUP_DEST/docker-compose-runner/"
+  mkdir -p "$DOCKER_COMPOSE_RUNNER_DEST"
+
+  # Backup all of the docker data
+  "$(command -v rsync)" "${RSYNC_ARGS[@]}" "$USER_HOST:/var/lib/docker-compose-runner/" "$DOCKER_COMPOSE_RUNNER_DEST"
+fi
+
+# Ensure the filesystem is synced
+sync
+
+echo "Backup complete!"
+date
diff --git a/scripts/restore.sh b/scripts/restore.sh
new file mode 100755
index 00000000..5d2f165e
--- /dev/null
+++ b/scripts/restore.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: MPL-2.0
+
+set -e
+
+#
+# restore
+#
+
+# Check that rsync exists
+if [ ! -x "$(command -v rsync)" ]; then
+  echo "rsync command not found, cannot restore"
+  exit 1
+fi
+RSYNC_ARGS=(--archive --human-readable --partial --progress --rsync-path="sudo rsync")
+
+HEADLESS_SYSTEM=false
+USER_HOST=$2
+
+# Check that the machine name is known
+case $1 in
+  vps)
+    HEADLESS_SYSTEM=true
+    ;;
+  *)
+    echo "Unknown machine name"
+    exit 1
+    ;;
+esac
+
+# Check that the source folder exists
+USER_SRC=$3
+if [ ! -d "$USER_SRC" ]; then
+  echo "Failed to find restore source"
+  exit 1
+fi
+RESTORE_SRC="$USER_SRC"
+
+# This is a normal headless system
+if [ "$HEADLESS_SYSTEM" = true ]; then
+  export DOCKER_COMPOSE_RUNNER_SRC="$RESTORE_SRC/docker-compose-runner/"
+  if [ ! -d "$DOCKER_COMPOSE_RUNNER_SRC" ]; then
+    echo "Failed to find docker-compose-runner data to restore"
+    exit 1
+  fi
+
+  # Stop services as we are about to mutate data
+  ssh "$USER_HOST" sudo systemctl stop docker-compose-runner.service
+
+  # Restore all of the docker data
+  "$(command -v rsync)" "${RSYNC_ARGS[@]}" "$DOCKER_COMPOSE_RUNNER_SRC" "$USER_HOST:/var/lib/docker-compose-runner/"
+
+  # Restart services
+  ssh "$USER_HOST" sudo systemctl start docker-compose-runner.service
+fi
+
+echo "Restore complete!"
+date
diff --git a/tests/files/ssh_config b/tests/files/ssh_config
new file mode 100644
index 00000000..364688c9
--- /dev/null
+++ b/tests/files/ssh_config
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: MPL-2.0
+
+Host vps
+    IdentityFile /etc/ssh/test_ssh_id_ed25519
diff --git a/tests/files/test_ssh_id_ed25519 b/tests/files/test_ssh_id_ed25519
new file mode 100644
index 00000000..d0ed7f9e
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBwClEXAWm/c9/42a0VdvVowOxAkdiJUFyeqUwkNP5ioQAAAIj3ZErF92RK
+xQAAAAtzc2gtZWQyNTUxOQAAACBwClEXAWm/c9/42a0VdvVowOxAkdiJUFyeqUwkNP5ioQ
+AAAEAwbllncNKWZCOFyHkejkA4GZNBl9O6IKQf3pKpMj22SHAKURcBab9z3/jZrRV29WjA
+7ECR2IlQXJ6pTCQ0/mKhAAAABHRlc3QB
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/files/test_ssh_id_ed25519.license b/tests/files/test_ssh_id_ed25519.license
new file mode 100644
index 00000000..7a058e16
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519.license
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: CC0-1.0
diff --git a/tests/files/test_ssh_id_ed25519.pub b/tests/files/test_ssh_id_ed25519.pub
new file mode 100644
index 00000000..61f2903c
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHAKURcBab9z3/jZrRV29WjA7ECR2IlQXJ6pTCQ0/mKh test
diff --git a/tests/files/test_ssh_id_ed25519.pub.license b/tests/files/test_ssh_id_ed25519.pub.license
new file mode 100644
index 00000000..7a058e16
--- /dev/null
+++ b/tests/files/test_ssh_id_ed25519.pub.license
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: CC0-1.0
diff --git a/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3 b/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3
new file mode 100644
index 00000000..23f580ab
Binary files /dev/null and b/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3 differ
diff --git a/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3.license b/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3.license
new file mode 100644
index 00000000..7a058e16
--- /dev/null
+++ b/tests/fixtures/vps/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3.license
@@ -0,0 +1,3 @@
+# SPDX-FileCopyrightText: Andrew Hayzen
+#
+# SPDX-License-Identifier: CC0-1.0
diff --git a/tests/vps.nix b/tests/vps.nix
index b17fcf71..38a56db6 100644
--- a/tests/vps.nix
+++ b/tests/vps.nix
@@ -5,7 +5,7 @@
 (import ./lib.nix) {
   name = "vps test";
   nodes = {
-    machine = { self, pkgs, ... }: {
+    vps = { self, pkgs, ... }: {
       imports = [
         self.nixosModules.headlessSystem
 
@@ -21,26 +21,121 @@
       networking.hosts = {
         "127.0.0.1" = [ "ahayzen.com" ];
       };
+
+      # Allow test ssh authentication
+      users.users.headless.openssh.authorizedKeys.keyFiles = [
+        ./files/test_ssh_id_ed25519.pub
+      ];
+    };
+
+    backup = { self, pkgs, ... }: {
+      environment = {
+        etc = {
+          # Map backup and restore scripts
+          "ahayzen.com/backup.sh".source = ../scripts/backup.sh;
+          "ahayzen.com/restore.sh".source = ../scripts/restore.sh;
+
+          # Map restore fixtures
+          "ahayzen.com/restore/fixtures".source = ./fixtures/vps;
+
+          # Map the test SSH key for backups
+          "ssh/test_ssh_id_ed25519" = {
+            mode = "0400";
+            source = ./files/test_ssh_id_ed25519;
+          };
+          "ssh/test_ssh_id_ed25519.pub".source = ./files/test_ssh_id_ed25519.pub;
+        };
+
+        # Extra packages for the test
+        systemPackages = with pkgs; [
+          python3
+          rsync
+        ];
+      };
+
+      services.openssh.enable = true;
+
+      # Set up the IdentityFile for vps
+      programs.ssh.extraConfig = builtins.readFile ./files/ssh_config;
     };
   };
 
   testScript = ''
     start_all()
 
-    # Wait for docker runner
-    machine.wait_for_unit("docker-compose-runner", timeout=90)
+    wait_for_wagtail_cmd = 'journalctl --boot --no-pager --quiet --unit docker.service --grep "\[INFO\] Listening at: http:\/\/0\.0\.0\.0:8080"'
+
+    #
+    # Test that the VPS boots and shows the wagtail admin
+    #
+
+    with subtest("Ensure docker starts and wagtail admin works"):
+        # Wait for docker runner
+        vps.wait_for_unit("docker-compose-runner", timeout=90)
+
+        # Wait for caddy to start
+        vps.wait_for_open_port(80, timeout=60)
+
+        # Wait for wagtail to start
+        vps.wait_until_succeeds(wait_for_wagtail_cmd, timeout=60)
+
+        # Test that admin page exists
+        output = vps.succeed("curl --silent ahayzen.com:80/admin/login/?next=/admin/")
+        assert "Sign in" in output, f"'{output}' does not contain 'Sign in'"
+
+        # Test that wagtail port is not open externally
+        vps.fail("curl --silent ahayzen.com:8080")
+
+    #
+    # Test that we can back up and restore the VPS
+    #
+
+    with subtest("Access hostkey"):
+        vps.wait_for_open_port(22, timeout=30)
+        # Ensure we allow the host key
+        backup.succeed("ssh -vvv -o StrictHostKeyChecking=accept-new headless@vps exit")
+
+    with subtest("Attempt to run a backup"):
+        backup.succeed("mkdir -p /tmp/backup-root")
+
+        # Run the backup
+        backup.succeed("/etc/ahayzen.com/backup.sh vps headless@vps /tmp/backup-root")
+
+        # Check that volumes are appearing
+        backup.succeed("test -d /tmp/backup-root/docker-compose-runner/caddy/persistent")
+        backup.succeed("test -d /tmp/backup-root/docker-compose-runner/caddy/config")
+        backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/db")
+        backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/media")
+        backup.succeed("test -d /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/static")
+
+        # Check that known files exist
+        backup.succeed("test -e /tmp/backup-root/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3")
+
+    with subtest("Attempt to run a restore"):
+        # Check the home page does not contain the restore key
+        output = vps.succeed("curl --silent ahayzen.com:80/")
+        assert "Restore Unit Test" not in output, f"'{output}' does contain 'Restore Unit Test'"
+
+        # Copy fixtures to a /tmp folder so that we can fix permissions,
+        # as environment.etc.<name>.user only affects files
+        backup.succeed("mkdir -p /tmp/restore-root")
+        backup.succeed("cp -R /etc/ahayzen.com/restore/fixtures/* /tmp/restore-root/")
+        backup.succeed("chown -R 2000:2000 /tmp/restore-root/")
 
-    # Wait for caddy to start
-    machine.wait_for_open_port(80, timeout=60)
+        # Check files exist
+        backup.succeed("test -d /tmp/restore-root/test-page/docker-compose-runner/wagtail-ahayzen/db")
+        backup.succeed("test -e /tmp/restore-root/test-page/docker-compose-runner/wagtail-ahayzen/db/db.sqlite3")
 
-    # Wait for wagtail to start
-    machine.wait_until_succeeds('journalctl --boot --no-pager --quiet --unit docker.service --grep "\[INFO\] Listening at: http:\/\/0\.0\.0\.0:8080"', timeout=60)
+        # Run the restore
+        backup.succeed("/etc/ahayzen.com/restore.sh vps headless@vps /tmp/restore-root/test-page")
 
-    # Test that admin page exists
-    output = machine.succeed("curl --silent ahayzen.com:80/admin/login/?next=/admin/")
-    assert "Sign in" in output, f"'{output}' does not contain 'Sign in'"
+        # Wait for services to restart and for the second occurrence of 'Listening at'
+        vps.wait_for_unit("docker-compose-runner", timeout=90)
+        vps.wait_for_open_port(80, timeout=60)
+        vps.wait_until_succeeds(wait_for_wagtail_cmd + " | wc -l | awk '{if ($1 > 1) {exit 0} else {exit 1}}'", timeout=60)
 
-    # Test that wagtail port is not open externally
-    machine.fail("curl --silent ahayzen.com:8080")
+        # Check the home page does contain the restore key
+        output = vps.succeed("curl --silent ahayzen.com:80/")
+        assert "Restore Unit Test" in output, f"'{output}' does not contain 'Restore Unit Test'"
   '';
 }
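
For reference, both new scripts take the machine name, the SSH target, and a local directory, in that order, mirroring what the VM test runs from `/etc/ahayzen.com/`. A minimal sketch of a manual run (the host `headless@ahayzen.com` and the local path are illustrative, not part of this change):

```console
$ # Pull /var/lib/docker-compose-runner from the vps machine into a local backup directory
$ ./scripts/backup.sh vps headless@ahayzen.com ~/backups/vps
$ # Push a previous backup back; this stops docker-compose-runner.service, rsyncs the data and restarts the service
$ ./scripts/restore.sh vps headless@ahayzen.com ~/backups/vps
```

Note that `--rsync-path="sudo rsync"` and the remote `systemctl` calls assume the SSH user can run sudo non-interactively on the target.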