Fixed config file for cicd #4

Workflow file for this run

# ---------------------------------------------------------------------------------------------------------------------
# This workflow handles all CI/CD related tasks and can be re-used in custom projects
# (https://github.com/openremote/custom-project i.e. repos with an openremote submodule) or forks of this repo. It handles:
#
# - Running tests on push/release
# - Distributing openremote/manager images to docker hub (DOCKERHUB_USER and DOCKERHUB_PASSWORD must be set) on push/release
# - Deploying to hosts on push/release and/or manual trigger and/or when certain openremote/manager docker tags are updated
#
# By default this workflow will just run tests on push/release; distribution (pushing of the openremote/manager docker
# image) and deployment behaviour are configured via the .ci_cd/ci_cd.json file (see .ci_cd/README.md for more details).
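#
# As an illustration only (branch names, tags and environments below are hypothetical; see .ci_cd/README.md for the
# authoritative schema), a ci_cd.json might look something like:
#
# {
#   "push": {
#     "master": {
#       "distribute": { "docker": "develop" },
#       "deploy": { "environment": "staging", "managerTag": "develop" }
#     }
#   },
#   "managerDockerPush": {
#     "develop": {
#       "deploy": { "environment": "demo" }
#     }
#   }
# }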
#
# DEPLOYMENTS
#
# When a deployment is requested the repo is checked for a 'deployment/Dockerfile' and if that exists then also
# 'deployment/build.gradle' must exist with an installDist task that prepares the deployment image in the
# 'deployment/build' directory; if this condition is not met the deployment will fail.
#
# Secrets, inputs and environment variables are combined for consumption by the '.ci_cd/deploy.sh' bash script; they are
# combined in the following order of priority:
#
# - '.ci_cd/env/.env'
# - '.ci_cd/env/{ENVIRONMENT}.env'
# - secrets
# - inputs (from manual trigger using workflow_dispatch)
#
# The above variables are output to the 'temp/env' file for each requested deployment. If a secret called 'SSH_PASSWORD'
# is found it is output to the 'ssh.env' file, and if a secret called 'SSH_KEY' is found it is output to the 'ssh.key'
# file (so neither is copied to the host). Sensitive credentials should always be stored in GitHub secrets so they are
# encrypted; do not store them within repo files (even in private repos, as these are not encrypted).
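#
# As a hypothetical illustration, after combining the above a single deployment might end up with:
#
#   temp/env  ->  OR_HOSTNAME='staging.example.com'
#                 SSH_USER='deployer'
#                 OR_ADMIN_PASSWORD='********'
#   ssh.key   ->  the private key taken from the SSH_KEY secret (kept out of temp/env)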
#
# SEE https://github.com/openremote/openremote/tree/master/.ci_cd for details of standard variables and handling.
#
# MANUAL TRIGGER
#
# This workflow can be triggered by push, release, manual trigger or schedule (schedule is only supported on custom
# project repos, i.e. not openremote/openremote). If triggered manually then the following inputs are available and
# these override any values set in env files and/or secrets:
#
# Inputs:
#
# - ENVIRONMENT - Which environment to deploy (equivalent to deploy/environment in '.ci_cd/ci_cd.json')
# - MANAGER_TAG - Which manager docker tag to deploy (equivalent to deploy/managerTags in '.ci_cd/ci_cd.json')
# leave empty to build a manager image from the repo (must be an openremote repo or have
# an openremote submodule).
# - CLEAN_INSTALL - Should the .ci_cd/host_init/clean.sh script be run during deployment (warning this will delete
# assets, rules, etc.)
# - COMMIT - Which branch/SHA should be checked out for the deployment (defaults to trigger commit)
# - OR_HOSTNAME - FQDN of host to deploy to (e.g. demo.openremote.app)
# - SSH_USER - Set/override the SSH user to use for SSH/SCP commands
# - SSH_PASSWORD - Set/override the SSH password to use for SSH/SCP commands (SSH key should be preferred)
# - SSH_PORT - Set/override the SSH port to use for SSH/SCP commands
# - OR_ADMIN_PASSWORD - The admin password to set for clean installs
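#
# As an example, a manual run can also be started with the GitHub CLI (workflow file name and input values below
# are illustrative):
#
#   gh workflow run ci_cd.yml \
#     -f ENVIRONMENT=staging \
#     -f MANAGER_TAG=1.0.0 \
#     -f OR_HOSTNAME=staging.example.com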
# ---------------------------------------------------------------------------------------------------------------------
name: CI/CD
on:
# Push on the feature/cicd_pipeline branch excluding tags
push:
branches:
- 'feature/cicd_pipeline'
tags-ignore:
- '*.*'
# PR
pull_request:
# When a release is published
release:
types: [published]
# Manual trigger
workflow_dispatch:
inputs:
ENVIRONMENT:
description: 'Environment to use (if any)'
MANAGER_TAG:
description: 'Manager docker tag to pull'
CLEAN_INSTALL:
description: 'Delete data before starting'
type: boolean
COMMIT:
description: 'Repo branch or commit SHA to checkout'
OR_HOSTNAME:
description: 'Host to deploy to (e.g. demo.openremote.app)'
OR_ADMIN_PASSWORD:
description: 'Admin password override'
workflow_call:
inputs:
INPUTS:
type: string
secrets:
SECRETS:
required: false
# needed for aws auth
permissions:
id-token: write # This is required for requesting the JWT
contents: read # This is required for actions/checkout
jobs:
build:
name: CI/CD
runs-on: ubuntu-latest
steps:
- name: Cancel previous runs
uses: styfle/[email protected]
with:
access_token: ${{ github.token }}
- name: Get inputs and secrets
id: inputs-and-secrets
shell: python
run: |
import os
import json
# Overlay all inputs and secrets onto this jobs outputs
callerInputs = os.getenv("CALLER_INPUTS")
inputs = os.getenv("INPUTS")
secrets = os.getenv("SECRETS")
eventName = os.getenv("EVENT_NAME")
if inputs is not None and inputs != '':
inputs = json.loads(inputs)
if secrets is not None and secrets != '':
secrets = json.loads(secrets)
if eventName == 'workflow_call' and callerInputs is not None and callerInputs != 'null':
os.system(f"echo 'Processing caller inputs'")
inputs = json.loads(callerInputs)
if inputs is not None and 'INPUTS' in inputs:
os.system("echo 'Processing inputs from caller'")
inputs = json.loads(inputs['INPUTS'])
if 'SECRETS' in secrets:
os.system("echo 'Processing secrets from caller'")
secrets = json.loads(secrets['SECRETS'])
# Iterate over secrets then inputs and assign them as outputs on this step
if secrets is not None and secrets != 'null':
for key, value in secrets.items():
os.system(f"echo 'Outputting secret: {key}'")
lines = len(value.split("\n"))
if lines > 1:
os.system(f"echo '{key}<<EEOOFF\n{value}\nEEOOFF' >> $GITHUB_OUTPUT")
else:
os.system(f"echo '{key}={value}' >> $GITHUB_OUTPUT")
if inputs is not None and inputs != 'null':
for key, value in inputs.items():
os.system(f"echo 'Outputting input: {key}'")
lines = len(value.split("\n"))
if lines > 1:
os.system(f"echo '{key}<<EEOOFF\n{value}\nEEOOFF' >> $GITHUB_OUTPUT")
else:
os.system(f"echo '{key}={value}' >> $GITHUB_OUTPUT")
env:
CALLER_INPUTS: ${{ toJSON(inputs) }}
SECRETS: ${{ toJSON(secrets) }}
INPUTS: ${{ toJSON(github.event.inputs) }}
EVENT_NAME: ${{ github.event_name }}
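# The step above writes each input/secret to $GITHUB_OUTPUT so later steps can read it as
# steps.inputs-and-secrets.outputs.<KEY>. Single-line values use 'KEY=value'; multi-line values (e.g. an SSH key)
# use the delimiter syntax from the script above, for example (key name and value are illustrative):
#
#   SSH_KEY<<EEOOFF
#   -----BEGIN OPENSSH PRIVATE KEY-----
#   ...
#   -----END OPENSSH PRIVATE KEY-----
#   EEOOFF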
- name: Public IP
id: ip-address
uses: haythem/[email protected]
- name: Checkout
uses: actions/checkout@v3
with:
# This will only be available when run by workflow_dispatch otherwise will checkout branch/commit that triggered the workflow
ref: ${{ steps.inputs-and-secrets.outputs.COMMIT }}
submodules: recursive
fetch-depth: 200
# Check which files have changed to only run appropriate tests and checks
- name: Backend files changed
id: backend-files-changed
# if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@v35
with:
files_ignore: |
.ci_cd/**
console/**
deployment/**
setup/**
ui/**
**/*.md
**/*.yml
manager/src/web/**
# Check which files have changed to only run appropriate tests and checks
- name: UI files changed
id: ui-files-changed
# if: github.event_name == 'pull_request'
uses: tj-actions/changed-files@v35
with:
files: |
ui/app/manager/**
ui/component/**
files_ignore: |
**/*.md
**/*.yml
- name: Set skip backend manager tests
id: skip-backend-tests
if: steps.backend-files-changed.outputs.any_modified != 'true'
run: echo "value=true" >> $GITHUB_OUTPUT
- name: Set skip UI tests
id: skip-ui-tests
if: steps.ui-files-changed.outputs.any_modified != 'true'
run: echo "value=true" >> $GITHUB_OUTPUT
- name: Make cache dirs and set file permissions
run: |
chmod +x gradlew
mkdir -p ~/manager-tags-old
mkdir -p ~/manager-tags-new
- name: Check if main repo
id: is_main_repo
run: |
if [ $REPO_NAME == 'openremote/openremote' ]; then
echo "value=true" >> $GITHUB_OUTPUT
fi
env:
REPO_NAME: ${{ github.repository }}
- name: Check deployment build.gradle
id: check_deployment_gradle
uses: andstor/file-existence-action@v2
with:
files: "deployment/build.gradle"
- name: Check deployment dockerfile
id: check_deployment_dockerfile
uses: andstor/file-existence-action@v2
with:
files: "deployment/Dockerfile"
- name: Check custom project
id: check_custom_project
uses: andstor/file-existence-action@v2
with:
files: "openremote,.gitmodules"
- name: Check ci_cd existence
id: check_cicd_json
uses: andstor/file-existence-action@v2
with:
files: ".ci_cd/ci_cd.json"
- name: Check is custom project when scheduled trigger
if: ${{ github.event_name == 'schedule' }}
run: |
if [ "$IS_CUSTOM_PROJECT" != 'true' ]; then
echo "Schedule can only be used on a custom project repo"
exit 1
fi
env:
IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
# For scheduled execution from custom projects we need to be able to determine if the manager docker image has
# changed since the last execution; we do this using a cache
- name: Get docker tags to monitor
if: ${{ steps.check_cicd_json.outputs.files_exists == 'true' && github.event_name == 'schedule' }}
id: monitor-tags
shell: python
run: |
import json
import os
eventName = os.getenv('EVENT_NAME')
refName = os.getenv('REF_NAME')
pushTag = None
deployTag = None
deployEnvironment = None
f = open(".ci_cd/ci_cd.json")
data = json.load(f)
f.close()
if "managerDockerPush" in data:
managerPushConfig = data["managerDockerPush"]
if managerPushConfig is not None:
if len(managerPushConfig.keys()) > 0:
for key in managerPushConfig.keys():
# Output new tag cache files (for generating hash)
os.system(f"echo 'Outputting manifest info for tag: {key}'")
os.system(f"docker buildx imagetools inspect --raw openremote/manager:{key} > ~/manager-tags-new/{key}")
os.system("echo 'Tag manifests generated:'")
os.system("find ~/manager-tags-new")
env:
EVENT_NAME: ${{ github.event_name }}
REF_NAME: ${{ github.ref_name }}
- name: Load manager tag cache
if: ${{ steps.check_cicd_json.outputs.files_exists == 'true' && github.event_name == 'schedule' }}
uses: actions/cache@v3
id: manager-tag-cache
with:
path: |
~/manager-tags-old
key: ${{ runner.os }}-manager-tags---${{ hashFiles('~/manager-tags-new/**') }}
restore-keys: |
${{ runner.os }}-manager-tags---
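# The cache key above hashes the manifests fetched into ~/manager-tags-new, so an unchanged tag restores an exact
# match while a changed manifest misses and falls back to the 'restore-keys' prefix, restoring the previous run's
# manifests into ~/manager-tags-old for comparison. A resolved key looks something like (hash value illustrative):
#
#   Linux-manager-tags---8a1f0c9e...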
- name: Process ci_cd.json file
if: ${{ steps.check_cicd_json.outputs.files_exists == 'true' && github.event_name != 'workflow_dispatch' && github.event_name != 'pull_request' }}
id: ci-cd-output
shell: python
run: |
import json
import os
eventName = os.getenv('EVENT_NAME')
refName = os.getenv('REF_NAME')
isMainRepo = os.getenv('IS_MAIN_REPO')
deploys = None
dockerPublishTags = None
mavenPublishTag = None
deployEnvironment = None
f = open(".ci_cd/ci_cd.json")
data = json.load(f)
f.close()
if eventName == "schedule":
eventName = "managerDockerPush"
if data is not None and eventName in data:
eventConfig = data[eventName]
if eventName == "managerDockerPush":
# Iterate through keys and check if that manager docker image has changed since last execution
deploys=[]
for key in eventConfig.keys():
os.system(f"echo 'Checking manager docker tag: {key}'")
newManifestStr = None
oldManifestStr = None
if os.path.exists(os.path.expanduser(f"~/manager-tags-new/{key}")):
f = open(os.path.expanduser(f"~/manager-tags-new/{key}"))
newManifestStr = f.read()
f.close()
if os.path.exists(os.path.expanduser(f"~/manager-tags-old/{key}")):
f = open(os.path.expanduser(f"~/manager-tags-old/{key}"))
oldManifestStr = f.read()
f.close()
if newManifestStr != oldManifestStr: # Not a great way to compare but we'll assume manifest output is consistent
os.system(f"echo 'Manager docker tag has been updated: {key}'")
if 'deploy' in eventConfig[key]:
deployConfig = eventConfig[key]['deploy']
if deployConfig is not None:
# Inject manager tag into deploy config (no point reacting to the manager image updating otherwise)
deployConfig['managerTag'] = key
deploys.append(deployConfig)
else:
os.system(f"echo 'No deploy config in ci_cd.json for manager tag {key}'")
else:
os.system(f"echo 'Manager docker tag has not changed: {key}'")
elif eventName == "push" and refName in eventConfig:
eventConfig = eventConfig[refName]
if eventConfig is not None:
deploys = eventConfig['deploy'] if 'deploy' in eventConfig else None
if 'distribute' in eventConfig and 'docker' in eventConfig['distribute']:
dockerPublishTags = eventConfig['distribute']['docker']
elif eventName == "release" and refName in eventConfig:
eventConfig = eventConfig[refName]
if eventConfig is not None:
deploys = eventConfig['deploy'] if 'deploy' in eventConfig else {}
if 'distribute' in eventConfig:
if 'docker' in eventConfig['distribute']:
dockerPublishTags = eventConfig['distribute']['docker']
if 'maven' in eventConfig['distribute']:
mavenPublishTag = eventConfig['distribute']['maven']
if dockerPublishTags is not None and isMainRepo == 'true':
dockerPublishTags = dockerPublishTags.replace("$version", refName)
firstDockerTag = dockerPublishTags.split(",")[0]
os.system(f"echo 'firstDockerTag={firstDockerTag}' >> $GITHUB_OUTPUT")
os.system(f" echo 'Manager tags to push to docker: {dockerPublishTags}'")
dockerPublishTags = " ".join(map(lambda t: f"-t openremote/manager:{t.strip()}", dockerPublishTags.split(",")))
os.system(f"echo 'dockerTags={dockerPublishTags}' >> $GITHUB_OUTPUT")
if mavenPublishTag is not None and isMainRepo == 'true':
mavenPublishTag = mavenPublishTag.replace("$version", refName)
os.system(f" echo 'Maven publish version: {mavenPublishTag}'")
os.system(f"echo 'mavenTag={mavenPublishTag}' >> $GITHUB_OUTPUT")
deployStr = None
if deploys is not None:
if not isinstance(deploys, list):
deploys = [deploys]
deployStr = ""
for deploy in deploys:
if 'environment' in deploy:
deployStr += deploy['environment']
deployStr += ":"
if 'managerTag' in deploy:
deployStr += deploy['managerTag']
else:
os.system("echo 'Manager tag not specified so using commit SHA'")
deployStr += '#ref'
deployStr += ";"
deployStr = deployStr.rstrip(";")
if deployStr is not None and len(deployStr) > 0:
print(f"Deployments to deploy: {deployStr}")
os.system(f"echo 'deploys={deployStr}' >> $GITHUB_OUTPUT")
env:
IS_MAIN_REPO: ${{ steps.is_main_repo.outputs.value }}
EVENT_NAME: ${{ github.event_name }}
REF_NAME: ${{ github.ref_name }}
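# With hypothetical config values, the step above might emit outputs such as:
#
#   firstDockerTag=develop
#   dockerTags=-t openremote/manager:develop -t openremote/manager:latest
#   mavenTag=1.0.0
#   deploys=staging:develop;demo:#ref
#
# where '#ref' means the manager image is built from the checked out commit rather than pulled from docker hub.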
- name: Copy new tag cache to old
if: ${{ steps.check_cicd_json.outputs.files_exists == 'true' }}
run: |
cd ~/manager-tags-new
cp -r . ~/manager-tags-old/
- name: Sanitize deployments value
id: deployments
run: |
deployments=$DEPLOYMENTS
if [ "$EVENT_NAME" == 'workflow_dispatch' ]; then
tag="$INPUT_MANAGER_TAG"
if [ -z "$INPUT_MANAGER_TAG" ]; then
tag='#ref'
fi
deployments="$INPUT_ENVIRONMENT:$tag"
fi
echo "value=$deployments" >> $GITHUB_OUTPUT
env:
EVENT_NAME: ${{ github.event_name }}
DEPLOYMENTS: ${{ steps.ci-cd-output.outputs.deploys }}
INPUT_ENVIRONMENT: ${{ steps.inputs-and-secrets.outputs.ENVIRONMENT }}
INPUT_MANAGER_TAG: ${{steps.inputs-and-secrets.outputs.MANAGER_TAG }}
- name: Define backend test command
id: test-backend-command
if: ${{ steps.skip-backend-tests.outputs.value != 'true' }}
run: echo "value=./gradlew -p test test" >> $GITHUB_OUTPUT
- name: Define UI test command
id: test-ui-command
if: ${{ steps.skip-ui-tests.outputs.value != 'true' }}
run: echo ""
- name: Define manager docker build command
id: manager-docker-command
shell: bash
run: |
if [ $IS_CUSTOM_PROJECT == 'true' ]; then
buildPath="openremote/manager/build/install/manager"
commitSha=$(cd openremote; git rev-parse HEAD; cd ..)
commitShaShort=$(cd openremote; git rev-parse --short HEAD; cd ..)
else
buildPath="manager/build/install/manager"
commitSha=$(git rev-parse HEAD)
commitShaShort=$(git rev-parse --short HEAD)
fi
if [ -n "$MANAGER_TAGS" ] || [[ "$DEPLOYMENTS" == *"#ref"* ]] || [ -n "$TEST_UI_CMD" ]; then
if [ -n "$MANAGER_TAGS" ]; then
command="docker build --push --build-arg GIT_COMMIT=$commitSha --platform linux/amd64,linux/aarch64 $MANAGER_TAGS $buildPath"
echo "pushRequired=true" >> $GITHUB_OUTPUT
else
command="docker build --build-arg GIT_COMMIT=$commitSha --platform linux/amd64,linux/aarch64 -t openremote/manager:$commitShaShort $buildPath"
fi
echo "value=$command" >> $GITHUB_OUTPUT
fi
echo "buildPath=$buildPath" >> $GITHUB_OUTPUT
echo "refTag=$commitShaShort" >> $GITHUB_OUTPUT
env:
FIRST_MANAGER_TAG: ${{ steps.ci-cd-output.outputs.firstDockerTag }}
MANAGER_TAGS: ${{ steps.ci-cd-output.outputs.dockerTags }}
DEPLOYMENTS: ${{ steps.deployments.outputs.value }}
IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
TEST_UI_CMD: ${{ steps.test-ui-command.outputs.value }}
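# For illustration, with hypothetical tags the resolved build command looks like:
#
#   docker build --push --build-arg GIT_COMMIT=<sha> --platform linux/amd64,linux/aarch64 \
#     -t openremote/manager:develop -t openremote/manager:latest manager/build/install/manager
#
# or, when nothing is being pushed, a local image tagged openremote/manager:<short sha> is built instead.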
- name: Define maven publish command
id: maven-publish-command
if: ${{ steps.ci-cd-output.outputs.mavenTag != '' }}
shell: bash
run: |
command="./gradlew publish -PopenremoteVersion=$MAVEN_TAG -PsigningKey=$SIGNING_KEY -PsigningPassword=$SIGNING_PASSWORD -PpublishUsername=$MAVEN_USERNAME -P$MAVEN_PASSWORD"
echo "value=$command" >> $GITHUB_OUTPUT
env:
MAVEN_TAG: ${{ steps.ci-cd-output.outputs.mavenTag }}
SIGNING_PASSWORD: ${{ steps.inputs-and-secrets.outputs._TEMP_SIGNING_PASSWORD }}
SIGNING_KEY: ${{ steps.inputs-and-secrets.outputs._TEMP_SIGNING_KEY }}
MAVEN_USERNAME: ${{ steps.inputs-and-secrets.outputs._TEMP_MAVEN_USERNAME }}
MAVEN_PASSWORD: ${{ steps.inputs-and-secrets.outputs._TEMP_MAVEN_PASSWORD }}
- name: Define deployment docker build command
id: deployment-docker-command
shell: bash
run: |
if [ "$DEPLOYMENT_DOCKERFILE_EXISTS" == 'true' ]; then
if [ "$DEPLOYMENT_GRADLE_EXISTS" != 'true' ]; then
echo "Deployment must have a build.gradle file to prepare the deployment files in the deployment/build dir"
exit 1
fi
buildPath="deployment/build"
commitSha=$(git rev-parse HEAD)
commitShaShort=$(git rev-parse --short HEAD)
echo "buildPath=$buildPath" >> $GITHUB_OUTPUT
echo "refTag=$commitShaShort" >> $GITHUB_OUTPUT
if [ -n "$DEPLOYMENTS" ]; then
command="docker build --build-arg GIT_COMMIT=$commitSha --platform linux/amd64,linux/aarch64 -t openremote/deployment:$commitShaShort $buildPath"
echo "value=$command" >> $GITHUB_OUTPUT
fi
fi
env:
DEPLOYMENTS: ${{ steps.deployments.outputs.value }}
DEPLOYMENT_DOCKERFILE_EXISTS: ${{ steps.check_deployment_dockerfile.outputs.files_exists }}
DEPLOYMENT_GRADLE_EXISTS: ${{ steps.check_deployment_gradle.outputs.files_exists }}
- name: Define installDist command
id: install-command
shell: bash
run: |
REF_NAME=${REF_NAME//\//_}
if [ -n "$MANAGER_DOCKER_CMD" ]; then
echo "value=./gradlew installDist -PopenremoteVersion=$REF_NAME" >> $GITHUB_OUTPUT
elif [ -n "$DEPLOYMENT_DOCKER_CMD" ]; then
echo "value=./gradlew -p deployment installDist" >> $GITHUB_OUTPUT
fi
env:
MANAGER_DOCKER_CMD: ${{ steps.manager-docker-command.outputs.value }}
DEPLOYMENT_DOCKER_CMD: ${{ steps.deployment-docker-command.outputs.value }}
REF_NAME: ${{ github.ref_name }}
- name: Login to DockerHub
if: ${{ steps.manager-docker-command.outputs.pushRequired == 'true' }}
uses: docker/login-action@v2
with:
username: ${{ steps.inputs-and-secrets.outputs._TEMP_DOCKERHUB_USER || steps.inputs-and-secrets.outputs.DOCKERHUB_USER }}
password: ${{ steps.inputs-and-secrets.outputs._TEMP_DOCKERHUB_PASSWORD || steps.inputs-and-secrets.outputs.DOCKERHUB_PASSWORD }}
- name: set up QEMU
if: ${{ steps.manager-docker-command.outputs.value != '' || steps.deployment-docker-command.outputs.value != '' }}
uses: docker/setup-qemu-action@v2
with:
platforms: linux/amd64,linux/aarch64
- name: install buildx
if: ${{ steps.manager-docker-command.outputs.value != '' || steps.deployment-docker-command.outputs.value != '' }}
id: buildx
uses: docker/setup-buildx-action@v2
with:
version: latest
install: true # set docker buildx build as synonym for docker build
- name: Set up JDK 17 and gradle cache
id: java
if: ${{ steps.install-command.outputs.value != '' || steps.test-backend-command.outputs.value != '' }}
uses: actions/setup-java@v3
with:
distribution: 'temurin'
java-version: '17'
cache: 'gradle'
- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- name: Yarn cache
uses: actions/cache@v3
if: steps.skip-cicd.outputs.value != 'true'
id: yarn-cache # use this to check for `cache-hit` (`steps.yarn-cache.outputs.cache-hit != 'true'`)
with:
path: ${{ steps.yarn-cache-dir-path.outputs.dir }}
key: ${{ runner.os }}-yarn---${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-yarn---
- name: Output info
if: steps.skip-cicd.outputs.value != 'true'
run: |
echo "************************************************************"
echo "************** INFO *******************"
echo "************************************************************"
echo 'Trigger event: ${{ github.event_name }}'
echo 'Is main repo: ${{ steps.is_main_repo.outputs.value == 'true' }}'
echo 'Is custom project repo: ${{ steps.check_custom_project.outputs.files_exists == 'true' }}'
echo 'Has deployment dockerfile: ${{ steps.check_deployment_dockerfile.outputs.files_exists == 'true' }}'
echo 'Manager commit SHA: ${{ steps.manager-docker-command.outputs.refTag }}'
echo 'Deployment commit SHA: ${{ steps.deployment-docker-command.outputs.refTag }}'
echo 'Deployments: ${{ steps.deployments.outputs.value }}'
echo 'Test backend command: ${{ steps.test-backend-command.outputs.value }}'
echo 'Test UI command: ${{ steps.test-ui-command.outputs.value }}'
echo 'Manager docker build command: ${{ steps.manager-docker-command.outputs.value }}'
echo 'Maven publish command: ${{ steps.maven-publish-command.outputs.value }}'
echo 'Deployment docker build command: ${{ steps.deployment-docker-command.outputs.value }}'
echo 'InstallDist command: ${{ steps.install-command.outputs.value }}'
echo "Java version: $(java --version)"
echo "Yarn version: $(yarn -v)"
echo "Node version: $(node -v)"
echo 'Gradle cache: ${{ steps.java.outputs.cache-hit == 'true' }}'
echo 'Yarn cache: ${{ steps.yarn-cache.outputs.cache-hit == 'true' }}'
echo 'Manager tag cache: ${{ steps.manager-tag-cache.outputs.cache-hit == 'true' }}'
echo "************************************************************"
echo "************************************************************"
- name: Pull docker images
if: ${{ steps.test-backend-command.outputs.value != '' || steps.test-ui-command.outputs.value != '' }}
run: |
# Only need keycloak and postgres services for backend testing
if [ -z $TEST_UI_COMMAND ]; then
composeProfile='profile/dev-testing.yml'
else
composeProfile='profile/dev-ui.yml'
fi
if [ $IS_CUSTOM_PROJECT == 'true' ]; then
composeProfile="openremote/$composeProfile"
fi
# Pull the images
docker-compose -f $composeProfile pull
env:
IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
TEST_UI_COMMAND: ${{ steps.test-ui-command.outputs.value }}
# Use docker layer caching to speed up image building
- uses: satackey/[email protected]
if: ${{ steps.manager-docker-command.outputs.value != '' || steps.deployment-docker-command.outputs.value != '' }}
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Run backend tests
id: run-backend-tests
if: ${{ steps.test-backend-command.outputs.value != '' }}
run: |
composeProfile='profile/dev-testing.yml'
if [ $IS_CUSTOM_PROJECT == 'true' ]; then
composeProfile="openremote/$composeProfile"
fi
# Make temp dir and set permissions to 777 as docker seems to run as root
mkdir tmp
chmod 777 tmp
# Define cleanup command
echo "cleanup=docker-compose -f $composeProfile down" >> $GITHUB_OUTPUT
# Start the stack
echo "docker-compose -f ${composeProfile} up -d --no-build"
docker-compose -f ${composeProfile} up -d --no-build
# Run the tests
${{ steps.test-backend-command.outputs.value }}
env:
IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
timeout-minutes: 20
#continue-on-error: true
- name: Archive backend test results
if: always()
uses: actions/upload-artifact@v3
with:
name: backend-test-results
path: test/build/reports/tests
- name: Cleanup backend tests
if: ${{ steps.test-backend-command.outputs.value != '' && steps.test-ui-command.outputs.value != '' }}
run: ${{ steps.run-backend-tests.outputs.cleanup }}
- name: Run install dist
if: steps.install-command.outputs.value != ''
shell: python
run: |
import json
import os
import sys
import subprocess
inputsAndSecrets = json.loads(os.getenv("INPUTS_AND_SECRETS"))
# Output inputs and secrets as environment variables for build
for key, value in inputsAndSecrets.items():
if "." in key:
continue
# Look for temp and env prefixed keys
if key.startswith("_"):
if key.startswith("_TEMP_"):
key = key.replace("_TEMP_", "")
else:
continue
os.system(f"echo 'Setting environment variable {key}...'")
os.putenv(key, value)
buildCmd = os.getenv("CMD")
result = subprocess.run(f"{buildCmd}", shell=True)
if result.returncode != 0:
os.system("echo 'installDist failed'")
sys.exit(result.returncode)
env:
CMD: ${{ steps.install-command.outputs.value }}
INPUTS_AND_SECRETS: ${{ toJSON(steps.inputs-and-secrets.outputs) }}
timeout-minutes: 20
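# As an illustration (hypothetical secret names), the step above maps the combined inputs/secrets to build-time
# environment variables like so:
#
#   _TEMP_DOCKERHUB_USER  ->  DOCKERHUB_USER   ('_TEMP_' prefix stripped)
#   OR_HOSTNAME           ->  OR_HOSTNAME      (passed through unchanged)
#   _STAGING_OR_HOSTNAME  ->  skipped          (other '_' prefixed keys are ignored here)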
- name: Run manager docker build command
if: steps.manager-docker-command.outputs.value != ''
run: |
${{ steps.manager-docker-command.outputs.value }}
- name: Run maven publish command
if: steps.maven-publish-command.outputs.value != ''
run: |
${{ steps.maven-publish-command.outputs.value }}
- name: Run frontend tests
if: steps.test-ui-command.outputs.value != ''
run: |
composeProfile='profile/dev-ui.yml'
if [ $IS_CUSTOM_PROJECT == 'true' ]; then
composeProfile="openremote/$composeProfile"
fi
# Start the stack
MANAGER_VERSION=$MANAGER_TAG docker-compose -f $composeProfile up -d --no-build
# Run the tests
${{ steps.test-ui-command.outputs.value }}
env:
IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
MANAGER_TAG: ${{ steps.manager-docker-command.outputs.refTag }}
timeout-minutes: 20
continue-on-error: true
- name: Run deployment docker command
if: steps.deployment-docker-command.outputs.value != ''
run: |
${{ steps.deployment-docker-command.outputs.value }}
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::205672091018:role/GitHubAction-AssumeRoleWithAction
role-session-name: GitHub_to_AWS_via_FederatedOIDC_meterverse
aws-region: ${{ env.AWS_REGION }}
env:
AWS_REGION: eu-central-1
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v1
- name: Build, Tag, and Push image to Amazon ECR
id: build-image
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
ECR_REPOSITORY: meterverse_manager
ECR_IMAGE_TAG:
IMAGE_TAG: ${{ github.sha }}
run: |
docker build --provenance=false --platform linux/amd64 --build-arg GIT_COMMIT=$IMAGE_TAG -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -t $ECR_REGISTRY/$ECR_REPOSITORY:latest manager/build/install/manager -o type=registry
echo "image=$ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG" >> $GITHUB_OUTPUT
# delete image from github actions after push:
# docker rmi $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
docker image prune -f
# echo "Docker images:"
# docker images
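# If needed the pushed image can be checked afterwards with the AWS CLI, e.g. (tag value illustrative):
#   aws ecr describe-images --repository-name $ECR_REPOSITORY --image-ids imageTag=$IMAGE_TAG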
# - name: Do deployments
# if: steps.deployments.outputs.value != ''
# shell: python
# run: |
# import json
# import os
# import sys
# import subprocess
# deployments = os.getenv("DEPLOYMENTS")
# deployments = deployments.split(";")
# managerRef = os.getenv("MANAGER_REF")
# deploymentRef = os.getenv("DEPLOYMENT_REF")
# isCustomProject = os.getenv("IS_CUSTOM_PROJECT")
# inputsAndSecrets = json.loads(os.getenv("INPUTS_AND_SECRETS"))
# ipv4 = os.getenv("IPV4")
# ipv6 = os.getenv("IPV6")
# failure = False
# # Determine deploy script to use
# deployScript = ".ci_cd/deploy.sh"
# if not os.path.exists(deployScript) and isCustomProject == 'true':
# deployScript = "openremote/.ci_cd/deploy.sh"
# if not os.path.exists(deployScript):
# os.system(f"Deploy script not found '{deployScript}'")
# sys.exit(1)
# for deployment in deployments:
# dep = deployment.split(":")
# env = dep[0]
# managerTag = dep[1]
# managerTagFound = True
# os.putenv("MANAGER_TAG", managerTag)
# os.putenv("ENVIRONMENT", env)
# # Clean stale ssh credentials and temp files
# os.system("rm temp.env 2>/dev/null")
# os.system("rm ssh.key 2>/dev/null")
# os.system("rm -r temp 2>/dev/null")
# os.system("mkdir temp")
# # ------------------------------------------------------
# # Output env variables to temp env file for POSIX shell
# # ------------------------------------------------------
# # Output inputs and secrets (special handling for SSH_KEY and some other variables)
# # _$ENV_ prefixed keys are output last (to override any non env specific keys)
# environment = (env if env else "").upper()
# prefix = "_" + environment + "_"
# for key, value in inputsAndSecrets.items():
# if "." in key:
# continue
# envFile = "temp/env"
# # Look for temp and env prefixed keys
# if key.startswith("_"):
# if key.startswith("_TEMP_"):
# key = key.replace("_TEMP_", "")
# envFile = "temp.env"
# elif key.startswith(prefix):
# key = key.replace(prefix, "")
# else:
# continue
# if key == "github_token":
# continue
# else:
# os.system(f"echo 'Secret found {key}...'")
# if key == "SSH_KEY":
# os.system(f"echo \"{value}\" > ssh.key")
# else:
# lines = len(value.split("\n"))
# if lines > 1:
# os.system(f"echo '{key}='\"'\"'' >> {envFile}")
# os.system(f"echo '{value}'\"'\"'' >> {envFile}")
# else:
# os.system(f"echo '{key}='\"'\"'{value}'\"'\"'' >> {envFile}")
# # Output env file if exists
# if os.path.exists(".ci_cd/env/.env"):
# os.system(f"echo 'Outputting .ci_cd/env/.env to temp/env'")
# os.system("cat .ci_cd/env/.env >> temp/env")
# # Output environment specific env file if exists
# if env is not None and env != '' and os.path.exists(f".ci_cd/env/{env}.env"):
# os.system(f"echo 'Outputting .ci_cd/env/{env}.env to temp/env'")
# os.system(f"cat .ci_cd/env/{env}.env >> temp/env")
# # Set CIDR environment variable
# if ipv4 is not None and ipv4 != '':
# os.putenv("CIDR", ipv4 + '/32')
# elif ipv6 is not None and ipv6 != '':
# os.putenv("CIDR", ipv6 + '/64')
# # Execute deploy script
# os.system(f"echo 'Executing deploy script for deployment: managerTag={managerTag} deploymentTag={deploymentRef} environment={env}'")
# # Uncomment this in combination with the SSH debug step afterwards to debug deployment script
# #sys.exit(0)
# result = subprocess.run(f"bash {deployScript}", shell=True)
# if result.returncode != 0:
# os.system(f"echo 'Deployment failed: managerTag={managerTag} deploymentTag={deploymentRef} environment={env}'")
# failure = True
# continue
# if failure == True:
# os.system("echo 'One or more deployments failed'")
# sys.exit(1)
# env:
# IS_CUSTOM_PROJECT: ${{ steps.check_custom_project.outputs.files_exists }}
# REPO_NAME: ${{ github.repository }}
# DEPLOYMENTS: ${{ steps.deployments.outputs.value }}
# MANAGER_DOCKER_BUILD_PATH: ${{ steps.manager-docker-command.outputs.buildPath }}
# DEPLOYMENT_DOCKER_BUILD_PATH: ${{ steps.deployment-docker-command.outputs.buildPath }}
# MANAGER_REF: ${{ steps.manager-docker-command.outputs.refTag }}
# DEPLOYMENT_REF: ${{ steps.deployment-docker-command.outputs.refTag }}
# INPUTS_AND_SECRETS: ${{ toJSON(steps.inputs-and-secrets.outputs) }}
# IPV4: ${{ steps.ip-address.outputs.ipv4 }}
# IPV6: ${{ steps.ip-address.outputs.ipv6 }}
# - name: Setup upterm session
# uses: lhotari/action-upterm@v1
# with:
# ## limits ssh access and adds the ssh public keys of the listed GitHub users
# limit-access-to-actor: true