diff --git a/.github/workflows/kvm-test.yaml b/.github/workflows/kvm-test.yaml index 444abc53..0effe9e5 100644 --- a/.github/workflows/kvm-test.yaml +++ b/.github/workflows/kvm-test.yaml @@ -1,4 +1,5 @@ -name: "Vagrant (KVM) Tests" +--- +name: Vagrant (KVM) Tests on: pull_request: @@ -9,7 +10,7 @@ on: jobs: # https://github.com/jonashackt/vagrant-github-actions test-kvm: - name: "KVM Test" + name: KVM Test runs-on: macos-latest steps: - uses: actions/checkout@v2 diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 5fa50718..8ad0e24e 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -1,9 +1,9 @@ +--- name: Ansible Lint on: push: pull_request: - jobs: build: name: Ansible Lint @@ -11,4 +11,4 @@ jobs: steps: - uses: actions/checkout@v4 - name: Run ansible-lint - uses: ansible/ansible-lint@main \ No newline at end of file + uses: ansible/ansible-lint@main diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 68bd0f7e..4233fa79 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,3 +1,4 @@ +--- # .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details @@ -9,7 +10,6 @@ version: 2 sphinx: configuration: docs/conf.py - # Optionally set the version of Python and requirements required to build your docs python: version: 3.8 diff --git a/inventory.template/group_vars/all.yaml b/inventory.template/group_vars/all.yaml index 977f8a44..5048c2fd 100644 --- a/inventory.template/group_vars/all.yaml +++ b/inventory.template/group_vars/all.yaml @@ -1,3 +1,4 @@ +--- # Any additional users and groups you would like added to all nodes enabled_users: - username: example-user @@ -6,7 +7,7 @@ enabled_users: email: example-user@example.com # mkpasswd --method=sha-512 # password: example-user - password: "$6$3aaf4gr8D$2T31r9/GtXM6rVY8oHOejn.sThwhBZehbPZC.ZkN0XJOZUuguR9VnRQRYmqYAt9eW3LgLR21q1kbqSYSEDm5U." + password: $6$3aaf4gr8D$2T31r9/GtXM6rVY8oHOejn.sThwhBZehbPZC.ZkN0XJOZUuguR9VnRQRYmqYAt9eW3LgLR21q1kbqSYSEDm5U. 
primary_group: example-user groups: - users @@ -24,9 +25,9 @@ munge_key: eC36WeTj1JKUGyQEcfqkaRO0fDBoyTVHnkn7kE5sOZ1YUYyqWMSp3qeRZEmtEX9B openldap_bind_password: EsicntiZOhQaGomPiJZLWJEJ -jupyterhub_client_secret: "SUPERSECRETPASSWORDJUPYTERHUB" -conda_store_client_secret: "SUPERSECRETPASSWORDCONDASTORE" -grafana_client_secret: "SUPERSECRETPASSWORDGRAFANA" +jupyterhub_client_secret: SUPERSECRETPASSWORDJUPYTERHUB +conda_store_client_secret: SUPERSECRETPASSWORDCONDASTORE +grafana_client_secret: SUPERSECRETPASSWORDGRAFANA minio_password: mWdaGyPmNOApU93Vxk6sNTac keycloak_admin_password: XLWUMUu8OG0XqlMREZK9id9o @@ -45,4 +46,4 @@ mysql_users: postgres_users: - username: conda-store password: eIbmUditL4RbQm0YPeLozRme - role: 'CREATEDB,CREATEROLE' + role: CREATEDB,CREATEROLE diff --git a/inventory.template/group_vars/hpc_master.yaml b/inventory.template/group_vars/hpc_master.yaml index 82b69366..cc78bf3a 100644 --- a/inventory.template/group_vars/hpc_master.yaml +++ b/inventory.template/group_vars/hpc_master.yaml @@ -1,3 +1,4 @@ +--- dask_gateway_enabled: true firewall_enabled: true grafana_enabled: true @@ -21,6 +22,6 @@ openldap_server_enabled: true openldap_client_enabled: true nfs_server_exports: - - "/home" - - "/opt/conda" - - "/opt/conda-store" + - /home + - /opt/conda + - /opt/conda-store diff --git a/inventory.template/group_vars/hpc_worker.yaml b/inventory.template/group_vars/hpc_worker.yaml index fb4797d9..356aa62d 100644 --- a/inventory.template/group_vars/hpc_worker.yaml +++ b/inventory.template/group_vars/hpc_worker.yaml @@ -1,3 +1,4 @@ +--- dask_gateway_client_enabled: true firewall_enabled: true ipyparallel_enabled: true diff --git a/inventory.template/host_vars/hpc01-test.yaml b/inventory.template/host_vars/hpc01-test.yaml index b91454e8..5223b739 100644 --- a/inventory.template/host_vars/hpc01-test.yaml +++ b/inventory.template/host_vars/hpc01-test.yaml @@ -1,3 +1,4 @@ +--- # or set these variables in group_vars/hpc_worker.yaml if all # workers have the same resources slurm_memory: 5900 diff --git a/playbook.yaml b/playbook.yaml index 0274dff3..4401452e 100644 --- a/playbook.yaml +++ b/playbook.yaml @@ -1,56 +1,57 @@ --- - - hosts: all - pre_tasks: - - name: Gather facts from ALL hosts (regardless of limit or tags) - setup: - delegate_to: "{{ item }}" - delegate_facts: True - when: hostvars[item]['ansible_default_ipv4'] is not defined - with_items: "{{ groups['all'] }}" - - name: Copy files - include_tasks: tasks/copy_files.yaml - loop: '{{ ["all"] + group_names + [inventory_hostname_short] }}' - loop_control: - loop_var: myhost +- hosts: all + pre_tasks: + - name: Gather facts from ALL hosts (regardless of limit or tags) + ansible.builtin.setup: + delegate_to: "{{ item }}" + delegate_facts: true + when: hostvars[item]['ansible_default_ipv4'] is not defined + with_items: "{{ groups['all'] }}" + - name: Copy files + ansible.builtin.include_tasks: tasks/copy_files.yaml + loop: '{{ ["all"] + group_names + [inventory_hostname_short] }}' + loop_control: + loop_var: myhost - roles: - # core services - - hosts - - firewall - - accounts - - apt_packages - - miniforge - - cifs - - nfs - - mysql - - postgresql - - minio - - backups - - traefik - - openldap - - keycloak - # workflows - - slurm - # monitoring - - prometheus - - grafana - # data-science services - - conda_store - - jupyterhub - - dask_gateway - - ipyparallel - # plugins - - bodo + roles: + # core services + - hosts + - firewall + - accounts + - apt_packages + - miniforge + - cifs + - nfs + - mysql + - postgresql + - minio + 
- backups + - traefik + - openldap + - keycloak + # workflows + - slurm + # monitoring + - prometheus + - grafana + # data-science services + - conda_store + - jupyterhub + - dask_gateway + - ipyparallel + # plugins + - bodo - tasks: - - name: Additional ansible tasks - include_tasks: "{{ item }}" - with_items: "{{ additional_tasks | default([]) }}" + tasks: + - name: Additional ansible tasks + ansible.builtin.include_tasks: "{{ item }}" + with_items: "{{ additional_tasks | default([]) }}" - - name: Keycloak administration credentials - ansible.builtin.debug: - msg: "Keycloak administration username={{ keycloak_admin_username }} via https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/admin/" + - name: Keycloak administration credentials + ansible.builtin.debug: + msg: Keycloak administration username={{ keycloak_admin_username }} via https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) + }}/auth/admin/ - - name: Accessing cluster - ansible.builtin.debug: - msg: "Access cluster via following url: https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}" + - name: Accessing cluster + ansible.builtin.debug: + msg: "Access cluster via following url: https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}" diff --git a/roles/accounts/defaults/main.yml b/roles/accounts/defaults/main.yml index a85cfe80..e7eb21aa 100644 --- a/roles/accounts/defaults/main.yml +++ b/roles/accounts/defaults/main.yml @@ -1,8 +1,5 @@ --- enabled_users: [] - enabled_groups: [] - disabled_users: [] - disabled_groups: [] diff --git a/roles/accounts/tasks/main.yml b/roles/accounts/tasks/main.yml index e4ddf301..32918cbf 100644 --- a/roles/accounts/tasks/main.yml +++ b/roles/accounts/tasks/main.yml @@ -1,38 +1,38 @@ --- - - name: Ensure groups are present - become: true - group: - name: "{{ item.name }}" - gid: "{{ item.gid }}" - state: present - with_items: "{{ enabled_groups }}" +- name: Ensure groups are present + become: true + ansible.builtin.group: + name: "{{ item.name }}" + gid: "{{ item.gid }}" + state: present + with_items: "{{ enabled_groups }}" - - name: Ensure users are present - become: true - user: - name: "{{ item.username }}" - uid: "{{ item.uid }}" - shell: /bin/bash - createhome: true - generate_ssh_key: false - comment: "{{ item.fullname }},,,,{{ item.email }}" - group: "{{ item.primary_group | default(omit) }}" - groups: "{{ item.groups | default(omit) }}" - password: "{{ item.password | default(omit) }}" - home: /home/{{ item.username }} - state: present - with_items: "{{ enabled_users }}" +- name: Ensure users are present + become: true + ansible.builtin.user: + name: "{{ item.username }}" + uid: "{{ item.uid }}" + shell: /bin/bash + createhome: true + generate_ssh_key: false + comment: "{{ item.fullname }},,,,{{ item.email }}" + group: "{{ item.primary_group | default(omit) }}" + groups: "{{ item.groups | default(omit) }}" + password: "{{ item.password | default(omit) }}" + home: /home/{{ item.username }} + state: present + with_items: "{{ enabled_users }}" - - name: Ensure users are disabled - become: true - user: - name: "{{ item }}" - state: absent - with_items: "{{ disabled_users }}" +- name: Ensure users are disabled + become: true + ansible.builtin.user: + name: "{{ item }}" + state: absent + with_items: "{{ disabled_users }}" - - name: Ensure groups are disabled - become: true - group: - name: "{{ item }}" - state: absent - with_items: "{{ 
disabled_groups }}" +- name: Ensure groups are disabled + become: true + ansible.builtin.group: + name: "{{ item }}" + state: absent + with_items: "{{ disabled_groups }}" diff --git a/roles/apt_packages/defaults/main.yml b/roles/apt_packages/defaults/main.yml index 7b192121..5d0577f7 100644 --- a/roles/apt_packages/defaults/main.yml +++ b/roles/apt_packages/defaults/main.yml @@ -1 +1,2 @@ +--- installed_packages: [] diff --git a/roles/apt_packages/tasks/main.yml b/roles/apt_packages/tasks/main.yml index 18976bba..74722cc9 100644 --- a/roles/apt_packages/tasks/main.yml +++ b/roles/apt_packages/tasks/main.yml @@ -1,8 +1,8 @@ --- - - name: Ensure apt packages are installed - become: true - apt: - name: "{{ installed_packages }}" - state: latest - update_cache: yes - cache_valid_time: 3600 +- name: Ensure apt packages are installed + become: true + ansible.builtin.apt: + name: "{{ installed_packages }}" + state: latest + update_cache: true + cache_valid_time: 3600 diff --git a/roles/backups/defaults/main.yml b/roles/backups/defaults/main.yml index 3d9df2df..e15061c9 100644 --- a/roles/backups/defaults/main.yml +++ b/roles/backups/defaults/main.yml @@ -1,7 +1,7 @@ --- backup_enabled: false -backup_on_calendar: "daily" +backup_on_calendar: daily backup_randomized_delay: "3600" backup_environment: - RESTIC_REPOSITORY: ... - RESTIC_PASSWORD: ... + RESTIC_REPOSITORY: "..." + RESTIC_PASSWORD: "..." diff --git a/roles/backups/tasks/backup.yaml b/roles/backups/tasks/backup.yaml index 51ec6bbe..49aab111 100644 --- a/roles/backups/tasks/backup.yaml +++ b/roles/backups/tasks/backup.yaml @@ -1,86 +1,86 @@ --- - - name: Ensure restic installed - become: true - apt: - name: restic - state: latest - update_cache: yes - cache_valid_time: 3600 +- name: Ensure restic installed + become: true + ansible.builtin.apt: + name: restic + state: latest + update_cache: true + cache_valid_time: 3600 - - name: Ensure that restic backup configuration directory exists - become: true - file: - path: /etc/restic - state: directory - mode: '0700' - owner: root - group: root +- name: Ensure that restic backup configuration directory exists + become: true + ansible.builtin.file: + path: /etc/restic + state: directory + mode: "0700" + owner: root + group: root - - name: Restic Backup Service - become: true - copy: - content: | - {% for key, value in backup_environment.items() %} - {{ key }}={{ value }} - {% endfor %} - dest: /etc/restic/credentials - owner: root - group: root - mode: 0600 - register: _restic_backup_configuration +- name: Restic Backup Service + become: true + ansible.builtin.copy: + content: | + {% for key, value in backup_environment.items() %} + {{ key }}={{ value }} + {% endfor %} + dest: /etc/restic/credentials + owner: root + group: root + mode: "0600" + register: _restic_backup_configuration - - name: Restic Backup Service - become: true - copy: - content: | - [Unit] - Description=Restic Backup +- name: Restic Backup Service + become: true + ansible.builtin.copy: + content: | + [Unit] + Description=Restic Backup - [Service] - X-RestartIfChanged=false - EnvironmentFile=/etc/restic/credentials - ExecStart=/usr/bin/restic backup --limit-upload=50000 /home - ExecStartPre=/bin/bash -c "/usr/bin/restic snapshots || /usr/bin/restic init" - RuntimeDirectory=restic-backups - Type=oneshot - User=root - dest: /etc/systemd/system/restic-backup.service - owner: root - group: root - mode: 0644 - register: _restic_backup_service + [Service] + X-RestartIfChanged=false + EnvironmentFile=/etc/restic/credentials + 
ExecStart=/usr/bin/restic backup --limit-upload=50000 /home + ExecStartPre=/bin/bash -c "/usr/bin/restic snapshots || /usr/bin/restic init" + RuntimeDirectory=restic-backups + Type=oneshot + User=root + dest: /etc/systemd/system/restic-backup.service + owner: root + group: root + mode: "0644" + register: _restic_backup_service - - name: Restic Backup Timer - become: true - copy: - content: | - [Unit] - Description=Restic Backup Timer +- name: Restic Backup Timer + become: true + ansible.builtin.copy: + content: | + [Unit] + Description=Restic Backup Timer - [Timer] - OnCalendar={{ backup_on_calendar }} - RandomizedDelaySec={{ backup_randomized_delay }} + [Timer] + OnCalendar={{ backup_on_calendar }} + RandomizedDelaySec={{ backup_randomized_delay }} - [Install] - WantedBy=timers.target - dest: /etc/systemd/system/restic-backup.timer - owner: root - group: root - mode: 0644 - register: _restic_backup_timer + [Install] + WantedBy=timers.target + dest: /etc/systemd/system/restic-backup.timer + owner: root + group: root + mode: "0644" + register: _restic_backup_timer - - name: Ensure restic backup service is enabled on boot - become: true - systemd: - daemon_reload: true - name: restic-backup.service - enabled: true - state: started +- name: Ensure restic backup service is enabled on boot + become: true + ansible.builtin.systemd: + daemon_reload: true + name: restic-backup.service + enabled: true + state: started - - name: Ensure restic backup timer is enabled on boot - become: true - systemd: - daemon_reload: true - name: restic-backup.timer - enabled: true - state: started +- name: Ensure restic backup timer is enabled on boot + become: true + ansible.builtin.systemd: + daemon_reload: true + name: restic-backup.timer + enabled: true + state: started diff --git a/roles/backups/tasks/main.yml b/roles/backups/tasks/main.yml index 625868fc..f96b44b8 100644 --- a/roles/backups/tasks/main.yml +++ b/roles/backups/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Backup configuration - include_tasks: backup.yaml - when: backup_enabled +- name: Backup configuration + ansible.builtin.include_tasks: backup.yaml + when: backup_enabled diff --git a/roles/bodo/defaults/main.yml b/roles/bodo/defaults/main.yml index da7ddd5e..3791d6aa 100644 --- a/roles/bodo/defaults/main.yml +++ b/roles/bodo/defaults/main.yml @@ -1,6 +1,7 @@ +--- bodo_enabled: false -bodo_environment_path: "environments/bodo.yaml" -bodo_license: ... +bodo_environment_path: environments/bodo.yaml +bodo_license: "..." 
# role: miniforge -miniforge_home: "/opt/conda" +miniforge_home: /opt/conda diff --git a/roles/bodo/tasks/bodo.yaml b/roles/bodo/tasks/bodo.yaml index 178038e3..ad57fcd6 100644 --- a/roles/bodo/tasks/bodo.yaml +++ b/roles/bodo/tasks/bodo.yaml @@ -1,28 +1,28 @@ --- - - name: Install Bodo Environment - include_role: - name: conda_environment - vars: - environment_path: "{{ bodo_environment_path }}" +- name: Install Bodo Environment + ansible.builtin.include_role: + name: conda_environment + vars: + environment_path: "{{ bodo_environment_path }}" - - name: Ensure bodo license directory exists - become: true - file: - path: "{{ miniforge_home }}/envs/bodo/share/bodo" - state: directory - mode: '0755' +- name: Ensure bodo license directory exists + become: true + ansible.builtin.file: + path: "{{ miniforge_home }}/envs/bodo/share/bodo" + state: directory + mode: "0755" - - name: Write bodo license - become: true - copy: - content: "{{ bodo_license }}" - dest: "{{ miniforge_home }}/envs/bodo/share/bodo/bodo.license" - mode: '644' +- name: Write bodo license + become: true + ansible.builtin.copy: + content: "{{ bodo_license }}" + dest: "{{ miniforge_home }}/envs/bodo/share/bodo/bodo.license" + mode: "0644" - - name: Ensure bodo.sh activated in shell - become: true - copy: - content: | - export BODO_LICENSE={{ miniforge_home }}/envs/bodo/share/bodo/bodo.license - dest: "/etc/profile.d/bodo.sh" - mode: '0755' +- name: Ensure bodo.sh activated in shell + become: true + ansible.builtin.copy: + content: | + export BODO_LICENSE={{ miniforge_home }}/envs/bodo/share/bodo/bodo.license + dest: /etc/profile.d/bodo.sh + mode: "0755" diff --git a/roles/bodo/tasks/main.yml b/roles/bodo/tasks/main.yml index 389c5c65..94de6690 100644 --- a/roles/bodo/tasks/main.yml +++ b/roles/bodo/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Install bodo - include_tasks: bodo.yaml - when: bodo_enabled +- name: Install bodo + ansible.builtin.include_tasks: bodo.yaml + when: bodo_enabled diff --git a/roles/cifs/defaults/main.yaml b/roles/cifs/defaults/main.yaml index a534821a..79793314 100644 --- a/roles/cifs/defaults/main.yaml +++ b/roles/cifs/defaults/main.yaml @@ -1,3 +1,4 @@ +--- samba_server_enabled: false samba_server_shares: [] # - name: example_mout diff --git a/roles/cifs/handlers/main.yaml b/roles/cifs/handlers/main.yaml index ce21b8e7..ad42c1b8 100644 --- a/roles/cifs/handlers/main.yaml +++ b/roles/cifs/handlers/main.yaml @@ -1,9 +1,9 @@ --- - - name: "restart services samba" - become: true - service: - name={{item}} - enabled=yes - state=restarted - with_items: - - "smbd" +- name: Restart services samba + become: true + ansible.builtin.service: + name: "{{ item }}" + enabled: true + state: restarted + with_items: + - smbd diff --git a/roles/cifs/tasks/client.yaml b/roles/cifs/tasks/client.yaml index 110b42f4..5524ad08 100644 --- a/roles/cifs/tasks/client.yaml +++ b/roles/cifs/tasks/client.yaml @@ -1,64 +1,64 @@ --- - - name: Install cifs - become: true - apt: - state: latest - cache_valid_time: 3600 - name: - - cifs-utils +- name: Install cifs + become: true + ansible.builtin.apt: + state: latest + cache_valid_time: 3600 + name: + - cifs-utils - - name: Wait for samba server at host to be available - wait_for: - host: "{{ item.host }}" - port: 445 - timeout: 600 - with_items: "{{ samba_client_mounts }}" - no_log: True # Avoid logging user creds +- name: Wait for samba server at host to be available + ansible.builtin.wait_for: + host: "{{ item.host }}" + port: 445 + timeout: 600 + with_items: "{{ 
samba_client_mounts }}" + no_log: true # Avoid logging user creds - - name: Ensure samba mounted directories exist - become: true - file: - path: "{{ item.path }}" - state: directory - with_items: "{{ samba_client_mounts }}" - no_log: True # Avoid logging user creds +- name: Ensure samba mounted directories exist + become: true + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + with_items: "{{ samba_client_mounts }}" + no_log: true # Avoid logging user creds - - name: Ensure samba credentials directory exists - become: true - file: - path: "/etc/samba/credentials" - state: directory - owner: root - group: root - mode: 0700 +- name: Ensure samba credentials directory exists + become: true + ansible.builtin.file: + path: /etc/samba/credentials + state: directory + owner: root + group: root + mode: "0700" - - name: Add credentials for particular mount - become: true - copy: - content: | - {% if item.username is defined %} - username={{ item.username }} - {% endif %} - {% if item.password is defined %} - password={{ item.password }} - {% endif %} - {% if item.domain is defined %} - domain={{ item.domain }} - {% endif %} - dest: "/etc/samba/credentials/{{ item.name }}" - owner: root - group: root - mode: 0600 - with_items: "{{ samba_client_mounts }}" - no_log: True # Avoid logging user creds +- name: Add credentials for particular mount + become: true + ansible.builtin.copy: + content: | + {% if item.username is defined %} + username={{ item.username }} + {% endif %} + {% if item.password is defined %} + password={{ item.password }} + {% endif %} + {% if item.domain is defined %} + domain={{ item.domain }} + {% endif %} + dest: /etc/samba/credentials/{{ item.name }} + owner: root + group: root + mode: "0600" + with_items: "{{ samba_client_mounts }}" + no_log: true # Avoid logging user creds - - name: Add fstab entries for nfs mounts - become: true - ansible.posix.mount: - src: "//{{ item.host }}/{{ item.name }}" - path: "{{ item.path }}" - opts: 'credentials=/etc/samba/credentials/{{ item.name }},{{ item.options | default("rw") }}' - state: mounted - fstype: cifs - with_items: "{{ samba_client_mounts }}" - no_log: True # Avoid logging user creds +- name: Add fstab entries for nfs mounts + become: true + ansible.posix.mount: + src: //{{ item.host }}/{{ item.name }} + path: "{{ item.path }}" + opts: credentials=/etc/samba/credentials/{{ item.name }},{{ item.options | default("rw") }} + state: mounted + fstype: cifs + with_items: "{{ samba_client_mounts }}" + no_log: true # Avoid logging user creds diff --git a/roles/cifs/tasks/main.yaml b/roles/cifs/tasks/main.yaml index cacdee73..fe9b9a5a 100644 --- a/roles/cifs/tasks/main.yaml +++ b/roles/cifs/tasks/main.yaml @@ -1,8 +1,8 @@ --- - - name: Install samba server - include_tasks: server.yaml - when: samba_server_enabled +- name: Install samba server + ansible.builtin.include_tasks: server.yaml + when: samba_server_enabled - - name: Install samba client - include_tasks: client.yaml - when: samba_client_enabled +- name: Install samba client + ansible.builtin.include_tasks: client.yaml + when: samba_client_enabled diff --git a/roles/cifs/tasks/server.yaml b/roles/cifs/tasks/server.yaml index aad2885c..8427455c 100644 --- a/roles/cifs/tasks/server.yaml +++ b/roles/cifs/tasks/server.yaml @@ -1,25 +1,25 @@ --- - - name: Install samba - become: true - apt: - state: latest - cache_valid_time: 3600 - name: - - samba +- name: Install samba + become: true + ansible.builtin.apt: + state: latest + cache_valid_time: 3600 + name: + - samba - - 
name: Ensure samba server share directories exist - become: true - file: - path: "{{ item.path }}" - state: directory - with_items: "{{ samba_server_shares }}" +- name: Ensure samba server share directories exist + become: true + ansible.builtin.file: + path: "{{ item.path }}" + state: directory + with_items: "{{ samba_server_shares }}" - - name: Copy the samba configuration - become: true - template: - src: templates/smb.conf.j2 - dest: /etc/samba/smb.conf - owner: root - group: root - mode: 0644 - notify: restart services samba +- name: Copy the samba configuration + become: true + ansible.builtin.template: + src: templates/smb.conf.j2 + dest: /etc/samba/smb.conf + owner: root + group: root + mode: "0644" + notify: Restart services samba diff --git a/roles/conda_environment/defaults/main.yaml b/roles/conda_environment/defaults/main.yaml index 15e742ab..f9416897 100644 --- a/roles/conda_environment/defaults/main.yaml +++ b/roles/conda_environment/defaults/main.yaml @@ -1,4 +1,5 @@ -environment_path: null +--- +environment_path: # role: miniforge -miniforge_home: "/opt/conda" +miniforge_home: /opt/conda diff --git a/roles/conda_environment/tasks/main.yaml b/roles/conda_environment/tasks/main.yaml index 95573bcd..312d4c63 100644 --- a/roles/conda_environment/tasks/main.yaml +++ b/roles/conda_environment/tasks/main.yaml @@ -1,20 +1,21 @@ --- - - name: create environments directory - become: yes - file: - path: /opt/conda-environments - state: directory +- name: Create environments directory + become: true + ansible.builtin.file: + path: /opt/conda-environments + state: directory - - name: copy environments files - become: yes - template: - src: "{{ environment_path }}" - dest: "/opt/conda-environments/{{ environment_path | basename }}" - mode: 0644 - register: _environment +- name: Copy environments files + become: true + ansible.builtin.template: + src: "{{ environment_path }}" + dest: /opt/conda-environments/{{ environment_path | basename }} + mode: "0644" + register: _environment - - name: "install conda environment {{ environment_path | basename }}" - become: yes - shell: - cmd: "{{ miniforge_home }}/bin/mamba env update -f /opt/conda-environments/{{ environment_path | basename }} --prefix {{ miniforge_home }}/envs/{{ environment_path | basename | splitext | first }}" - when: _environment.changed +- name: Install conda environment {{ environment_path | basename }} + become: true + ansible.builtin.command: + cmd: "{{ miniforge_home }}/bin/mamba env update -f /opt/conda-environments/{{ environment_path | basename }} --prefix {{ miniforge_home }}/envs/{{ environment_path + | basename | splitext | first }}" + when: _environment.changed diff --git a/roles/conda_store/defaults/main.yaml b/roles/conda_store/defaults/main.yaml index 21ce8e10..41212006 100644 --- a/roles/conda_store/defaults/main.yaml +++ b/roles/conda_store/defaults/main.yaml @@ -1,8 +1,9 @@ +--- conda_store_enabled: false -conda_store_version: "2024.1.1" +conda_store_version: 2024.1.1 conda_store_port: "5000" -conda_store_environment: "environments/conda-store.yaml" -conda_store_prefix: "/conda-store" +conda_store_environment: environments/conda-store.yaml +conda_store_prefix: /conda-store conda_store_client_id: conda_store conda_store_client_secret: SUPERSECRETPASSWORDCONDASTORE @@ -31,10 +32,10 @@ mysql_users: postgres_users: - username: conda-store password: eIbmUditL4RbQm0YPeLozRme - role: 'CREATEDB,CREATEROLE' + role: CREATEDB,CREATEROLE # role: keycloak keycloak_port: "30020" -keycloak_admin_username: "admin" 
+keycloak_admin_username: admin keycloak_admin_password: XLWUMUu8OG0XqlMREZK9id9o keycloak_realm: qhub-hpc diff --git a/roles/conda_store/tasks/conda_store.yaml b/roles/conda_store/tasks/conda_store.yaml index 340cfc16..5d4ac7bf 100644 --- a/roles/conda_store/tasks/conda_store.yaml +++ b/roles/conda_store/tasks/conda_store.yaml @@ -1,177 +1,177 @@ --- - - name: Check that the conda-store-server exists - stat: - path: "/opt/conda/envs/conda-store/bin/conda-store-server" - register: _conda_store_stat +- name: Check that the conda-store-server exists + ansible.builtin.stat: + path: /opt/conda/envs/conda-store/bin/conda-store-server + register: _conda_store_stat - - name: Install conda-store environment - include_role: - name: conda_environment - vars: - environment_path: "{{ conda_store_environment }}" +- name: Install conda-store environment + ansible.builtin.include_role: + name: conda_environment + vars: + environment_path: "{{ conda_store_environment }}" - - name: Create conda-store config directory - become: true - file: - path: /etc/conda-store - state: directory - mode: '0755' +- name: Create conda-store config directory + become: true + ansible.builtin.file: + path: /etc/conda-store + state: directory + mode: "0755" - - name: Create conda-store state directory - become: true - file: - path: /opt/conda-store - state: directory - mode: '0755' +- name: Create conda-store state directory + become: true + ansible.builtin.file: + path: /opt/conda-store + state: directory + mode: "0755" - - name: Create keycloak client for conda_store - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - state: present - client_id: "{{ conda_store_client_id }}" - client_authenticator_type: client-secret - secret: "{{ conda_store_client_secret }}" - protocol_mappers: - - config: - access.token.claim: True - userinfo.token.claim: True - id.token.claim: True - claim.name: "roles" - jsonType.label: String - multivalued: True - name: clientroles - protocol: openid-connect - protocolMapper: oidc-usermodel-client-role-mapper - - config: - access.token.claim: True - userinfo.token.claim: True - id.token.claim: True - claim.name: "groups" - name: clientgroups - protocol: openid-connect - protocolMapper: oidc-group-membership-mapper - redirect_uris: - - "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/conda-store/oauth_callback" - register: conda_store_client +- name: Create keycloak client for conda_store + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + state: present + client_id: "{{ conda_store_client_id }}" + client_authenticator_type: client-secret + secret: "{{ conda_store_client_secret }}" + protocol_mappers: + - config: + access.token.claim: true + userinfo.token.claim: true + id.token.claim: true + claim.name: roles + jsonType.label: String + multivalued: true + name: clientroles + protocol: openid-connect + protocolMapper: oidc-usermodel-client-role-mapper + - config: + access.token.claim: true + userinfo.token.claim: true + id.token.claim: true + claim.name: groups + name: clientgroups + protocol: openid-connect 
+ protocolMapper: oidc-group-membership-mapper + redirect_uris: + - https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/conda-store/oauth_callback + register: conda_store_client - - name: Create conda-store keycloak roles - community.general.keycloak_role: - auth_client_id: admin-cli - auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - client_id: "{{ conda_store_client_id }}" - name: "{{ item.name }}" - description: "{{ item.description }}" - state: present - with_items: - - name: conda_store_admin - description: Conda-Store Administrator - - name: conda_store_developer - description: Conda-Store Developer - - name: conda_store_viewer - description: Conda-Store Viewer +- name: Create conda-store keycloak roles + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + client_id: "{{ conda_store_client_id }}" + name: "{{ item.name }}" + description: "{{ item.description }}" + state: present + with_items: + - name: conda_store_admin + description: Conda-Store Administrator + - name: conda_store_developer + description: Conda-Store Developer + - name: conda_store_viewer + description: Conda-Store Viewer - - name: Create conda-store keycloak role mappings to groups - community.general.keycloak_client_rolemapping: - auth_client_id: admin-cli - auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - client_id: "{{ conda_store_client_id }}" - group_name: "{{ item.group }}" - roles: - - name: "{{ item.role }}" - state: present - with_items: - - group: admin - role: conda_store_admin - - group: developer - role: conda_store_developer - - group: viewer - role: conda_store_viewer +- name: Create conda-store keycloak role mappings to groups + community.general.keycloak_client_rolemapping: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + client_id: "{{ conda_store_client_id }}" + group_name: "{{ item.group }}" + roles: + - name: "{{ item.role }}" + state: present + with_items: + - group: admin + role: conda_store_admin + - group: developer + role: conda_store_developer + - group: viewer + role: conda_store_viewer - - name: Copy conda_store_config.py file - become: true - template: - src: conda_store_config.py - dest: /etc/conda-store/conda_store_config.py - register: _conda_store_config +- name: Copy conda_store_config.py file + become: true + ansible.builtin.template: + src: conda_store_config.py + dest: /etc/conda-store/conda_store_config.py + register: _conda_store_config - - name: Copy the conda-store server systemd service file - become: true - copy: - content: | - [Unit] - Description=Conda-Store Server - Wants=network-online.target - After=network-online.target - AssertFileIsExecutable=/opt/conda/envs/conda-store/bin/conda-store-server +- name: Copy the conda-store server systemd service file + become: true + ansible.builtin.copy: 
+ content: | + [Unit] + Description=Conda-Store Server + Wants=network-online.target + After=network-online.target + AssertFileIsExecutable=/opt/conda/envs/conda-store/bin/conda-store-server - [Service] - WorkingDirectory=/opt/conda-store/ - User=root - Group=root - ExecStart=/opt/conda/envs/conda-store/bin/conda-store-server --config /etc/conda-store/conda_store_config.py - Restart=always + [Service] + WorkingDirectory=/opt/conda-store/ + User=root + Group=root + ExecStart=/opt/conda/envs/conda-store/bin/conda-store-server --config /etc/conda-store/conda_store_config.py + Restart=always - [Install] - WantedBy=multi-user.target - dest: /etc/systemd/system/conda-store-server.service - owner: root - group: root - mode: 0644 - register: _conda_store_server_service + [Install] + WantedBy=multi-user.target + dest: /etc/systemd/system/conda-store-server.service + owner: root + group: root + mode: "0644" + register: _conda_store_server_service - - name: Copy the conda-store worker systemd service file - become: true - copy: - content: | - [Unit] - Description=Conda-Store Worker - Wants=network-online.target - After=network-online.target - AssertFileIsExecutable=/opt/conda/envs/conda-store/bin/conda-store-worker +- name: Copy the conda-store worker systemd service file + become: true + ansible.builtin.copy: + content: | + [Unit] + Description=Conda-Store Worker + Wants=network-online.target + After=network-online.target + AssertFileIsExecutable=/opt/conda/envs/conda-store/bin/conda-store-worker - [Service] - WorkingDirectory=/opt/conda-store/ - User=root - Group=root - Environment=PATH=/opt/conda/bin:/opt/conda/envs/conda-store/bin:PATH:/bin:/usr/bin:/usr/local/bin - ExecStart=/opt/conda/envs/conda-store/bin/conda-store-worker --config /etc/conda-store/conda_store_config.py - Restart=always + [Service] + WorkingDirectory=/opt/conda-store/ + User=root + Group=root + Environment=PATH=/opt/conda/bin:/opt/conda/envs/conda-store/bin:PATH:/bin:/usr/bin:/usr/local/bin + ExecStart=/opt/conda/envs/conda-store/bin/conda-store-worker --config /etc/conda-store/conda_store_config.py + Restart=always - [Install] - WantedBy=multi-user.target - dest: /etc/systemd/system/conda-store-worker.service - owner: root - group: root - mode: 0644 - register: _conda_store_worker_service + [Install] + WantedBy=multi-user.target + dest: /etc/systemd/system/conda-store-worker.service + owner: root + group: root + mode: "0644" + register: _conda_store_worker_service - - name: Ensure Conda-Store Server is enabled on boot - become: true - systemd: - daemon_reload: true - name: conda-store-server - enabled: true - state: restarted - when: _conda_store_server_service.changed or _conda_store_config.changed +- name: Ensure Conda-Store Server is enabled on boot + become: true + ansible.builtin.systemd: + daemon_reload: true + name: conda-store-server + enabled: true + state: restarted + when: _conda_store_server_service.changed or _conda_store_config.changed - - name: Ensure Conda-Store Worker is enabled on boot - become: true - systemd: - daemon_reload: true - name: conda-store-worker - enabled: true - state: restarted - when: _conda_store_worker_service.changed or _conda_store_config.changed +- name: Ensure Conda-Store Worker is enabled on boot + become: true + ansible.builtin.systemd: + daemon_reload: true + name: conda-store-worker + enabled: true + state: restarted + when: _conda_store_worker_service.changed or _conda_store_config.changed diff --git a/roles/conda_store/tasks/main.yaml b/roles/conda_store/tasks/main.yaml index 
89a55996..13c77389 100644 --- a/roles/conda_store/tasks/main.yaml +++ b/roles/conda_store/tasks/main.yaml @@ -1,4 +1,4 @@ --- - - name: Install conda_store - include_tasks: conda_store.yaml - when: conda_store_enabled +- name: Install conda_store + ansible.builtin.include_tasks: conda_store.yaml + when: conda_store_enabled diff --git a/roles/dask_gateway/defaults/main.yml b/roles/dask_gateway/defaults/main.yml index 660e290d..57414ff7 100644 --- a/roles/dask_gateway/defaults/main.yml +++ b/roles/dask_gateway/defaults/main.yml @@ -1,18 +1,19 @@ +--- dask_gateway_enabled: false dask_gateway_api_port: "8010" dask_gateway_scheduler_internal_port: "8785" dask_gateway_scheduler_external_port: "8786" dask_gateway_client_enabled: true -dask_gateway_environment: "environments/dask-gateway.yaml" +dask_gateway_environment: environments/dask-gateway.yaml # role: miniforge -miniforge_home: "/opt/conda" +miniforge_home: /opt/conda # role: firewall firewall_enabled: true # role: jupyterhub jupyterhub_proxy_port: "15002" -jupyterhub_lab_environment: "environments/jupyterlab.yaml" +jupyterhub_lab_environment: environments/jupyterlab.yaml jupyterhub_services: dask_gateway: CStgn1NN8DogQR1KajuoQfye1qNRqx6zsh diff --git a/roles/dask_gateway/handlers/main.yaml b/roles/dask_gateway/handlers/main.yaml index 7fad6ff8..b0d90946 100644 --- a/roles/dask_gateway/handlers/main.yaml +++ b/roles/dask_gateway/handlers/main.yaml @@ -1,9 +1,9 @@ --- - - name: "restart services dask-gateway" - become: true - service: - name={{item}} - enabled=yes - state=restarted - with_items: - - "dask-gateway" +- name: Restart services dask-gateway + become: true + ansible.builtin.service: + name: "{{ item }}" + enabled: true + state: restarted + with_items: + - dask-gateway diff --git a/roles/dask_gateway/tasks/client.yaml b/roles/dask_gateway/tasks/client.yaml index 0c81940b..70a196f2 100644 --- a/roles/dask_gateway/tasks/client.yaml +++ b/roles/dask_gateway/tasks/client.yaml @@ -1,20 +1,20 @@ --- - - name: Ensure that dask configuration directory exists - become: true - file: - path: /etc/dask - state: directory - mode: '0755' +- name: Ensure that dask configuration directory exists + become: true + ansible.builtin.file: + path: /etc/dask + state: directory + mode: "0755" - - name: Copy the dask-gateway client configuration - become: true - copy: - content: | - gateway: - address: "http://{{ groups['hpc_master'][0] }}:{{ dask_gateway_api_port }}" - public-address: "https://{{ hostvars[groups['hpc_master'][0]].ansible_ssh_host }}" - proxy-address: "tls://{{ groups['hpc_master'][0] }}:{{ dask_gateway_scheduler_external_port }}" - auth: - type: jupyterhub - dest: "/etc/dask/gateway.yaml" - mode: '644' +- name: Copy the dask-gateway client configuration + become: true + ansible.builtin.copy: + content: | + gateway: + address: "http://{{ groups['hpc_master'][0] }}:{{ dask_gateway_api_port }}" + public-address: "https://{{ hostvars[groups['hpc_master'][0]].ansible_ssh_host }}" + proxy-address: "tls://{{ groups['hpc_master'][0] }}:{{ dask_gateway_scheduler_external_port }}" + auth: + type: jupyterhub + dest: /etc/dask/gateway.yaml + mode: "0644" diff --git a/roles/dask_gateway/tasks/dask_gateway.yaml b/roles/dask_gateway/tasks/dask_gateway.yaml index 1f80625c..fb364c30 100644 --- a/roles/dask_gateway/tasks/dask_gateway.yaml +++ b/roles/dask_gateway/tasks/dask_gateway.yaml @@ -1,88 +1,88 @@ --- - - name: Install Dask Gateway - include_role: - name: conda_environment - vars: - environment_path: "{{ dask_gateway_environment }}" +- 
name: Install Dask Gateway + ansible.builtin.include_role: + name: conda_environment + vars: + environment_path: "{{ dask_gateway_environment }}" - - name: Create dask group - become: true - group: +- name: Create dask group + become: true + ansible.builtin.group: name: dask state: present system: true - - name: Create the dask user - become: true - user: - name: dask - groups: dask - append: true - shell: /usr/sbin/nologin - system: true - create_home: false - home: / +- name: Create the dask user + become: true + ansible.builtin.user: + name: dask + groups: dask + append: true + shell: /usr/sbin/nologin + system: true + create_home: false + home: / - - name: Ensure that dask-gateway configuration directory exists - become: true - file: - path: /etc/dask-gateway - state: directory - mode: '0700' - owner: dask - group: dask +- name: Ensure that dask-gateway configuration directory exists + become: true + ansible.builtin.file: + path: /etc/dask-gateway + state: directory + mode: "0700" + owner: dask + group: dask - - name: Ensure that dask-gateway runtime directory exists - become: true - file: - path: /var/lib/dask-gateway - state: directory - mode: '0700' - owner: dask - group: dask +- name: Ensure that dask-gateway runtime directory exists + become: true + ansible.builtin.file: + path: /var/lib/dask-gateway + state: directory + mode: "0700" + owner: dask + group: dask - - name: Copy the dask-gateway configuration - become: true - template: - src: templates/dask_gateway_config.py - dest: /etc/dask-gateway/dask_gateway_config.py - owner: dask - group: dask - mode: 0644 - notify: restart services dask-gateway +- name: Copy the dask-gateway configuration + become: true + ansible.builtin.template: + src: templates/dask_gateway_config.py + dest: /etc/dask-gateway/dask_gateway_config.py + owner: dask + group: dask + mode: "0644" + notify: Restart services dask-gateway - - name: Copy the dask-gateway systemd service file - become: true - copy: - content: | - [Unit] - Description=dask-gateway - After=network-online.target +- name: Copy the dask-gateway systemd service file + become: true + ansible.builtin.copy: + content: | + [Unit] + Description=dask-gateway + After=network-online.target - [Service] - Type=simple - User=root - Group=root - # By default, $PATH is pretty bare (thanks, systemd) - # We add the conda dir & /bin - # batchspawner uses sudo, which is in /bin (at least on CentOS) - Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ dask_gateway_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin - ExecStart={{ miniforge_home }}/envs/{{ dask_gateway_environment | basename | splitext | first }}/bin/dask-gateway-server -f /etc/dask-gateway/dask_gateway_config.py - Restart=always - WorkingDirectory=/var/lib/dask-gateway + [Service] + Type=simple + User=root + Group=root + # By default, $PATH is pretty bare (thanks, systemd) + # We add the conda dir & /bin + # batchspawner uses sudo, which is in /bin (at least on CentOS) + Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ dask_gateway_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin + ExecStart={{ miniforge_home }}/envs/{{ dask_gateway_environment | basename | splitext | first }}/bin/dask-gateway-server -f /etc/dask-gateway/dask_gateway_config.py + Restart=always + WorkingDirectory=/var/lib/dask-gateway - [Install] - WantedBy=multi-user.target - dest: /etc/systemd/system/dask-gateway.service - owner: root - group: root - mode: 
0644 - notify: restart services dask-gateway + [Install] + WantedBy=multi-user.target + dest: /etc/systemd/system/dask-gateway.service + owner: root + group: root + mode: "0644" + notify: Restart services dask-gateway - - name: Ensure dask-gateway is enabled on boot - become: true - systemd: - daemon_reload: true - name: dask-gateway - enabled: true - state: started +- name: Ensure dask-gateway is enabled on boot + become: true + ansible.builtin.systemd: + daemon_reload: true + name: dask-gateway + enabled: true + state: started diff --git a/roles/dask_gateway/tasks/main.yml b/roles/dask_gateway/tasks/main.yml index 9b05d59c..6585340b 100644 --- a/roles/dask_gateway/tasks/main.yml +++ b/roles/dask_gateway/tasks/main.yml @@ -1,8 +1,8 @@ --- - - name: Install dask-gateway server - include_tasks: dask_gateway.yaml - when: dask_gateway_enabled +- name: Install dask-gateway server + ansible.builtin.include_tasks: dask_gateway.yaml + when: dask_gateway_enabled - - name: Install dask-gateway client - include_tasks: client.yaml - when: dask_gateway_client_enabled +- name: Install dask-gateway client + ansible.builtin.include_tasks: client.yaml + when: dask_gateway_client_enabled diff --git a/roles/firewall/defaults/main.yml b/roles/firewall/defaults/main.yml index 6410cf83..3667fd21 100644 --- a/roles/firewall/defaults/main.yml +++ b/roles/firewall/defaults/main.yml @@ -1,2 +1,3 @@ +--- firewall_enabled: true -firewall_internal_ip_range: "192.168.0.0/16" +firewall_internal_ip_range: 192.168.0.0/16 diff --git a/roles/firewall/tasks/firewall.yaml b/roles/firewall/tasks/firewall.yaml index e31808ac..3c23e85a 100644 --- a/roles/firewall/tasks/firewall.yaml +++ b/roles/firewall/tasks/firewall.yaml @@ -1,20 +1,20 @@ --- - - name: Always allow ssh traffic - become: true - community.general.ufw: - rule: allow - name: OpenSSH +- name: Always allow ssh traffic + become: true + community.general.ufw: + rule: allow + name: OpenSSH - - name: By default deny all incoming network requests - become: true - community.general.ufw: - state: enabled - policy: deny - proto: any +- name: By default deny all incoming network requests + become: true + community.general.ufw: + state: enabled + policy: deny + proto: any - - name: Allow any network requests witin internal ip range - become: true - community.general.ufw: - rule: allow - src: "{{ firewall_internal_ip_range }}" - proto: any +- name: Allow any network requests within internal IP range + become: true + community.general.ufw: + rule: allow + src: "{{ firewall_internal_ip_range }}" + proto: any diff --git a/roles/firewall/tasks/main.yml b/roles/firewall/tasks/main.yml index 7aa7fa0f..07ae1298 100644 --- a/roles/firewall/tasks/main.yml +++ b/roles/firewall/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Firewall configuration - include_tasks: firewall.yaml - when: firewall_enabled +- name: Firewall configuration + ansible.builtin.include_tasks: firewall.yaml + when: firewall_enabled diff --git a/roles/grafana/defaults/main.yml b/roles/grafana/defaults/main.yml index 7bf71ff0..4fa81e65 100644 --- a/roles/grafana/defaults/main.yml +++ b/roles/grafana/defaults/main.yml @@ -1,6 +1,7 @@ +--- grafana_enabled: false grafana_port: "3000" -grafana_base_url: "/monitoring" +grafana_base_url: /monitoring grafana_client_id: grafana grafana_client_secret: SUPERSECRETPASSWORDGRAFANA grafana_dashboards: @@ -19,6 +20,6 @@ prometheus_port: "9090" # role: keycloak keycloak_port: "30020" -keycloak_admin_username: "admin" +keycloak_admin_username: admin keycloak_admin_password: 
XLWUMUu8OG0XqlMREZK9id9o keycloak_realm: qhub-hpc diff --git a/roles/grafana/tasks/grafana.yaml b/roles/grafana/tasks/grafana.yaml index d9ce12bc..00214a52 100644 --- a/roles/grafana/tasks/grafana.yaml +++ b/roles/grafana/tasks/grafana.yaml @@ -1,174 +1,174 @@ --- - - name: Add apt keys for grafana - become: true - apt_key: - url: "https://packages.grafana.com/gpg.key" - state: present +- name: Add apt keys for grafana + become: true + ansible.builtin.apt_key: + url: https://packages.grafana.com/gpg.key + state: present - - name: Add apt repository for grafana - become: true - apt_repository: - repo: "deb https://apt.grafana.com stable main" +- name: Add apt repository for grafana + become: true + ansible.builtin.apt_repository: + repo: deb https://apt.grafana.com stable main - - name: Install grafana - become: true - apt: - name: grafana{{ grafana_version }} - state: "{% if grafana_version %}present{% else %}latest{% endif %}" - cache_valid_time: 3600 +- name: Install grafana + become: true + ansible.builtin.apt: + name: grafana{{ grafana_version }} + state: "{% if grafana_version %}present{% else %}latest{% endif %}" + cache_valid_time: 3600 - - name: Create keycloak client for grafana - community.general.keycloak_client: - auth_client_id: admin-cli - auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - state: present - client_id: "{{ grafana_client_id }}" - client_authenticator_type: client-secret - secret: "{{ grafana_client_secret }}" - protocol_mappers: - - config: - access.token.claim: True - userinfo.token.claim: True - id.token.claim: True - claim.name: "roles" - jsonType.label: String - multivalued: True - name: clientroles - protocol: openid-connect - protocolMapper: oidc-usermodel-client-role-mapper - - config: - access.token.claim: True - userinfo.token.claim: True - id.token.claim: True - claim.name: "groups" - name: clientgroups - protocol: openid-connect - protocolMapper: oidc-group-membership-mapper - redirect_uris: - - "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}{{ grafana_base_url }}/login/generic_oauth" - register: grafana_client +- name: Create keycloak client for grafana + community.general.keycloak_client: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + state: present + client_id: "{{ grafana_client_id }}" + client_authenticator_type: client-secret + secret: "{{ grafana_client_secret }}" + protocol_mappers: + - config: + access.token.claim: true + userinfo.token.claim: true + id.token.claim: true + claim.name: roles + jsonType.label: String + multivalued: true + name: clientroles + protocol: openid-connect + protocolMapper: oidc-usermodel-client-role-mapper + - config: + access.token.claim: true + userinfo.token.claim: true + id.token.claim: true + claim.name: groups + name: clientgroups + protocol: openid-connect + protocolMapper: oidc-group-membership-mapper + redirect_uris: + - https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}{{ grafana_base_url }}/login/generic_oauth + register: grafana_client - - name: Create grafana keycloak roles - community.general.keycloak_role: - auth_client_id: admin-cli - auth_keycloak_url: 
"http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - client_id: "{{ grafana_client_id }}" - name: "{{ item.name }}" - description: "{{ item.description }}" - state: present - with_items: - - name: grafana_admin - description: Grafana Administrator - - name: grafana_developer - description: Grafana Developer - - name: grafana_viewer - description: Grafana Viewer +- name: Create grafana keycloak roles + community.general.keycloak_role: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + client_id: "{{ grafana_client_id }}" + name: "{{ item.name }}" + description: "{{ item.description }}" + state: present + with_items: + - name: grafana_admin + description: Grafana Administrator + - name: grafana_developer + description: Grafana Developer + - name: grafana_viewer + description: Grafana Viewer - - name: Create grafana keycloak role mappings to groups - community.general.keycloak_client_rolemapping: - auth_client_id: admin-cli - auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth" - auth_realm: master - auth_username: "{{ keycloak_admin_username }}" - auth_password: "{{ keycloak_admin_password }}" - realm: "{{ keycloak_realm }}" - client_id: "{{ grafana_client_id }}" - group_name: "{{ item.group }}" - roles: - - name: "{{ item.role }}" - state: present - with_items: - - group: admin - role: grafana_admin - - group: developer - role: grafana_developer - - group: viewer - role: grafana_viewer +- name: Create grafana keycloak role mappings to groups + community.general.keycloak_client_rolemapping: + auth_client_id: admin-cli + auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth + auth_realm: master + auth_username: "{{ keycloak_admin_username }}" + auth_password: "{{ keycloak_admin_password }}" + realm: "{{ keycloak_realm }}" + client_id: "{{ grafana_client_id }}" + group_name: "{{ item.group }}" + roles: + - name: "{{ item.role }}" + state: present + with_items: + - group: admin + role: grafana_admin + - group: developer + role: grafana_developer + - group: viewer + role: grafana_viewer - - name: Copy grafana datasource provision file - become: true - template: - src: grafana-datasources.yaml - dest: /etc/grafana/provisioning/datasources/grafana-datasources.yaml - owner: root - group: grafana - mode: 0440 - register: _grafana_provision_datasource +- name: Copy grafana datasource provision file + become: true + ansible.builtin.template: + src: grafana-datasources.yaml + dest: /etc/grafana/provisioning/datasources/grafana-datasources.yaml + owner: root + group: grafana + mode: "0440" + register: _grafana_provision_datasource - - name: Copy grafana dashboard provision file - become: true - template: - src: grafana-dashboards.yaml - dest: /etc/grafana/provisioning/dashboards/dashboard.yaml - owner: root - group: grafana - mode: 0440 - register: _grafana_provision_dashboard +- name: Copy grafana dashboard provision file + become: true + ansible.builtin.template: + src: grafana-dashboards.yaml + dest: /etc/grafana/provisioning/dashboards/dashboard.yaml + owner: root + group: grafana + mode: "0440" + register: _grafana_provision_dashboard - - name: Copy grafana dashboards - become: true - copy: - src: "dashboards/{{ item }}.json" - dest: 
"/etc/grafana/provisioning/dashboards/{{ item }}.json" - owner: root - group: grafana - mode: 0440 - with_items: "{{ grafana_dashboards }}" - register: _grafana_dashboards +- name: Copy grafana dashboards + become: true + ansible.builtin.copy: + src: dashboards/{{ item }}.json + dest: /etc/grafana/provisioning/dashboards/{{ item }}.json + owner: root + group: grafana + mode: "0440" + with_items: "{{ grafana_dashboards }}" + register: _grafana_dashboards - - name: Copy Grafana Configuration - become: true - copy: - content: | - [server] - protocol = http - http_port = {{ grafana_port }} - domain = {{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }} - root_url = https://%(domain)s{{ grafana_base_url }} - serve_from_sub_path = true +- name: Copy Grafana Configuration + become: true + ansible.builtin.copy: + content: | + [server] + protocol = http + http_port = {{ grafana_port }} + domain = {{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }} + root_url = https://%(domain)s{{ grafana_base_url }} + serve_from_sub_path = true - [auth] - oauth_auto_login = true + [auth] + oauth_auto_login = true - [auth.basic] - enabled = false + [auth.basic] + enabled = false - [auth.generic_oauth] - enabled = true - name = Login Keycloak - allow_sign_up = true - client_id = {{ grafana_client_id }} - client_secret = {{ grafana_client_secret }} - scopes = profile - auth_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/auth" - token_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/token" - api_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/userinfo" - tls_skip_verify_insecure = true - login_attribute_path: preferred_username - role_attribute_path: "contains(roles[*], 'grafana_admin') && 'Admin' || contains(roles[*], 'grafana_developer') && 'Editor' || contains(roles[*], 'grafana_viewer') || 'Viewer'" + [auth.generic_oauth] + enabled = true + name = Login Keycloak + allow_sign_up = true + client_id = {{ grafana_client_id }} + client_secret = {{ grafana_client_secret }} + scopes = profile + auth_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/auth" + token_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/token" + api_url = "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/auth/realms/{{ keycloak_realm }}/protocol/openid-connect/userinfo" + tls_skip_verify_insecure = true + login_attribute_path: preferred_username + role_attribute_path: "contains(roles[*], 'grafana_admin') && 'Admin' || contains(roles[*], 'grafana_developer') && 'Editor' || contains(roles[*], 'grafana_viewer') || 'Viewer'" - [dashboards] - min_refresh_interval = 1s + [dashboards] + min_refresh_interval = 1s - {{ grafana_additional_config }} - dest: /etc/grafana/grafana.ini - owner: root - group: grafana - mode: 0440 - register: _grafana_configuration + {{ grafana_additional_config }} + dest: /etc/grafana/grafana.ini + owner: root + group: grafana + mode: "0440" + register: _grafana_configuration - - name: Ensure granfana is 
started - become: true - service: - name: grafana-server - enabled: true - state: restarted - when: _grafana_configuration.changed or _grafana_provision_dashboard.changed or _grafana_provision_datasource.changed or _grafana_dashboards.changed +- name: Ensure granfana is started + become: true + ansible.builtin.service: + name: grafana-server + enabled: true + state: restarted + when: _grafana_configuration.changed or _grafana_provision_dashboard.changed or _grafana_provision_datasource.changed or _grafana_dashboards.changed diff --git a/roles/grafana/tasks/main.yml b/roles/grafana/tasks/main.yml index 739b7786..f0acedda 100644 --- a/roles/grafana/tasks/main.yml +++ b/roles/grafana/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Install grafana - include_tasks: grafana.yaml - when: grafana_enabled +- name: Install grafana + ansible.builtin.include_tasks: grafana.yaml + when: grafana_enabled diff --git a/roles/hosts/defaults/main.yaml b/roles/hosts/defaults/main.yaml index 194720c9..a0118037 100644 --- a/roles/hosts/defaults/main.yaml +++ b/roles/hosts/defaults/main.yaml @@ -1 +1,2 @@ +--- internal_interface: eth0 diff --git a/roles/hosts/tasks/main.yaml b/roles/hosts/tasks/main.yaml index ec32122f..1b40d851 100644 --- a/roles/hosts/tasks/main.yaml +++ b/roles/hosts/tasks/main.yaml @@ -1,9 +1,9 @@ --- - - name: Ensure hosts exist within /etc/hosts - become: true - lineinfile: - dest: /etc/hosts - regexp: '.*{{ item }}$' - line: '{{ hostvars[item]["ansible_" + hostvars[item].get("internal_interface", internal_interface)].ipv4.address }} {{item}}' - state: present - with_items: '{{ groups["all"] }}' +- name: Ensure hosts exist within /etc/hosts + become: true + ansible.builtin.lineinfile: + dest: /etc/hosts + regexp: .*{{ item }}$ + line: '{{ hostvars[item]["ansible_" + hostvars[item].get("internal_interface", internal_interface)].ipv4.address }} {{ item }}' + state: present + with_items: '{{ groups["all"] }}' diff --git a/roles/ipyparallel/defaults/main.yml b/roles/ipyparallel/defaults/main.yml index e7289f48..b867c543 100644 --- a/roles/ipyparallel/defaults/main.yml +++ b/roles/ipyparallel/defaults/main.yml @@ -1,11 +1,12 @@ +--- ipyparallel_enabled: false ipyparallel_enable_mpi: false -ipyparallel_environment: "environments/jupyterlab.yaml" +ipyparallel_environment: environments/jupyterlab.yaml ipyparallel_config: - controller_memory: "1G" + controller_memory: 1G controller_cpus: 1 - engine_worker_memory: "1G" + engine_worker_memory: 1G engine_worker_cpus: 2 # role: miniforge -miniforge_home: "/opt/conda" +miniforge_home: /opt/conda diff --git a/roles/ipyparallel/tasks/ipyparallel.yaml b/roles/ipyparallel/tasks/ipyparallel.yaml index c6ddba0f..f32a99e1 100644 --- a/roles/ipyparallel/tasks/ipyparallel.yaml +++ b/roles/ipyparallel/tasks/ipyparallel.yaml @@ -1,62 +1,62 @@ --- - - name: Ensure that jupyter configuration directory exists - become: true - file: - path: /etc/jupyter - state: directory - mode: '0755' +- name: Ensure that jupyter configuration directory exists + become: true + ansible.builtin.file: + path: /etc/jupyter + state: directory + mode: "0755" - - name: Ensure that ipyparallel default profile configuration directory exists - become: true - file: - path: /etc/jupyter/profile_default - state: directory - mode: '0755' +- name: Ensure that ipyparallel default profile configuration directory exists + become: true + ansible.builtin.file: + path: /etc/jupyter/profile_default + state: directory + mode: "0755" - - name: Configuration ipcontroller_config.py - become: true - copy: - 
content: | - c.HubFactory.ip = u'*' - c.HubFactory.registration_timeout = 600 - dest: /etc/jupyter/profile_default/ipcontroller_config.py - mode: '0744' +- name: Configuration ipcontroller_config.py + become: true + ansible.builtin.copy: + content: | + c.HubFactory.ip = u'*' + c.HubFactory.registration_timeout = 600 + dest: /etc/jupyter/profile_default/ipcontroller_config.py + mode: "0744" - - name: Configuration ipengine_config.py - become: true - copy: - content: | - c.IPEngineApp.wait_for_url_file = 300 - c.EngineFactory.timeout = 300 - dest: /etc/jupyter/profile_default/ipengine_config.py - mode: '0744' +- name: Configuration ipengine_config.py + become: true + ansible.builtin.copy: + content: | + c.IPEngineApp.wait_for_url_file = 300 + c.EngineFactory.timeout = 300 + dest: /etc/jupyter/profile_default/ipengine_config.py + mode: "0744" - - name: Configuration ipcluster_config.py - become: true - copy: - content: | - c.IPClusterStart.controller_launcher_class = 'SlurmControllerLauncher' - c.IPClusterEngines.engine_launcher_class = 'SlurmEngineSetLauncher' +- name: Configuration ipcluster_config.py + become: true + ansible.builtin.copy: + content: | + c.IPClusterStart.controller_launcher_class = 'SlurmControllerLauncher' + c.IPClusterEngines.engine_launcher_class = 'SlurmEngineSetLauncher' - c.SlurmEngineSetLauncher.batch_template = """#!/bin/sh - #SBATCH --job-name=ipy-engine-{cluster_id} - #SBATCH --output=.ipyparallel_engine_%j.log - #SBATCH --error=.ipyparallel_engine_%j.log - #SBATCH --ntasks={n} - #SBATCH --mem={{ ipyparallel_config.engine_worker_memory }} - #SBATCH --cpus-per-task={{ ipyparallel_config.engine_worker_cpus }} + c.SlurmEngineSetLauncher.batch_template = """#!/bin/sh + #SBATCH --job-name=ipy-engine-{cluster_id} + #SBATCH --output=.ipyparallel_engine_%j.log + #SBATCH --error=.ipyparallel_engine_%j.log + #SBATCH --ntasks={n} + #SBATCH --mem={{ ipyparallel_config.engine_worker_memory }} + #SBATCH --cpus-per-task={{ ipyparallel_config.engine_worker_cpus }} - srun {{ miniforge_home }}/envs/{{ ipyparallel_environment | basename | splitext | first }}/bin/python -m ipyparallel.engine {% if ipyparallel_enable_mpi %}--mpi{% endif %} --profile-dir="{profile_dir}" --cluster-id="{cluster_id}" - """ + srun {{ miniforge_home }}/envs/{{ ipyparallel_environment | basename | splitext | first }}/bin/python -m ipyparallel.engine {% if ipyparallel_enable_mpi %}--mpi{% endif %} --profile-dir="{profile_dir}" --cluster-id="{cluster_id}" + """ - c.SlurmControllerLauncher.batch_template = """#!/bin/sh - #SBATCH --job-name=ipy-controller-{cluster_id} - #SBATCH --output=.ipyparallel_controller_%j.log - #SBATCH --error=.ipyparallel_controller_%j.log - #SBATCH --mem={{ ipyparallel_config.controller_memory }} - #SBATCH --cpus-per-task={{ ipyparallel_config.controller_cpus }} - #SBATCH --ntasks=1 - {{ miniforge_home }}/envs/{{ ipyparallel_environment | basename | splitext | first }}/bin/python -m ipyparallel.controller --profile-dir="{profile_dir}" --cluster-id="{cluster_id}" - """ - dest: /etc/jupyter/profile_default/ipcluster_config.py - mode: '0744' + c.SlurmControllerLauncher.batch_template = """#!/bin/sh + #SBATCH --job-name=ipy-controller-{cluster_id} + #SBATCH --output=.ipyparallel_controller_%j.log + #SBATCH --error=.ipyparallel_controller_%j.log + #SBATCH --mem={{ ipyparallel_config.controller_memory }} + #SBATCH --cpus-per-task={{ ipyparallel_config.controller_cpus }} + #SBATCH --ntasks=1 + {{ miniforge_home }}/envs/{{ ipyparallel_environment | basename | splitext | first }}/bin/python -m 
ipyparallel.controller --profile-dir="{profile_dir}" --cluster-id="{cluster_id}" + """ + dest: /etc/jupyter/profile_default/ipcluster_config.py + mode: "0744" diff --git a/roles/ipyparallel/tasks/main.yml b/roles/ipyparallel/tasks/main.yml index 473fd9af..17e1c5e9 100644 --- a/roles/ipyparallel/tasks/main.yml +++ b/roles/ipyparallel/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Install ipyparallel - include_tasks: ipyparallel.yaml - when: ipyparallel_enabled +- name: Install ipyparallel + ansible.builtin.include_tasks: ipyparallel.yaml + when: ipyparallel_enabled diff --git a/roles/jupyterhub/defaults/main.yml b/roles/jupyterhub/defaults/main.yml index e6597eff..d21dee55 100644 --- a/roles/jupyterhub/defaults/main.yml +++ b/roles/jupyterhub/defaults/main.yml @@ -1,22 +1,23 @@ +--- jupyterhub_ssh_enabled: false jupyterhub_ssh_internal_port: "8021" jupyterhub_ssh_external_port: "8022" jupyterhub_proxy_port: "15002" jupyterhub_proxy_api_port: "15003" -jupyterhub_proxy_auth_token: "m8dfdKvyVJ0bWTNpbVCQyoCt" +jupyterhub_proxy_auth_token: m8dfdKvyVJ0bWTNpbVCQyoCt cdsdashboards_enabled: true jupyterhub_enabled: false -jupyterhub_version: "==4.0.2" +jupyterhub_version: ==4.0.2 jupyterhub_port: "15001" -jupyterhub_base_url: "/" -jupyterhub_hub_environment: "environments/jupyterhub.yaml" -jupyterhub_lab_environment: "environments/jupyterlab.yaml" -jupyterhub_dashboard_environment: "environments/dashboards.yaml" -jupyterhub_client_id: "jupyterhub" -jupyterhub_client_secret: "SUPERSECRETPASSWORDJUPYTERHUB" +jupyterhub_base_url: / +jupyterhub_hub_environment: environments/jupyterhub.yaml +jupyterhub_lab_environment: environments/jupyterlab.yaml +jupyterhub_dashboard_environment: environments/dashboards.yaml +jupyterhub_client_id: jupyterhub +jupyterhub_client_secret: SUPERSECRETPASSWORDJUPYTERHUB jupyterhub_qhub_options_form: true jupyterhub_config: @@ -28,13 +29,13 @@ jupyterhub_services: jupyterhub_theme: template_vars: - hub_title: "This is Nebari Slurm" - hub_subtitle: "Your scalable open source data science laboratory." - welcome: "have fun." - logo: "/hub/custom/images/jupyter_qhub_logo.svg" - primary_color: '#4f4173' - secondary_color: '#957da6' - accent_color: '#32C574' + hub_title: This is Nebari Slurm + hub_subtitle: Your scalable open source data science laboratory. + welcome: have fun. + logo: /hub/custom/images/jupyter_qhub_logo.svg + primary_color: "#4f4173" + secondary_color: "#957da6" + accent_color: "#32C574" text_color: "#111111" h1_color: "#652e8e" h2_color: "#652e8e" @@ -42,24 +43,22 @@ jupyterhub_theme: # for each key # c.{classname}.{attribute} = {value} jupyterhub_custom: {} - # "name": "" # use variable: inventory_dir to point to use inventory directory jupyterhub_additional_config: {} - idle_culler: enabled: true timeout: 86400 # 1 day cull_every: 3600 # 1 hour # role: miniforge -miniforge_home: "/opt/conda" +miniforge_home: /opt/conda # role: traefik # traefik_domain: ... 
 # role: keycloak
 keycloak_port: "30020"
-keycloak_admin_username: "admin"
+keycloak_admin_username: admin
 keycloak_admin_password: XLWUMUu8OG0XqlMREZK9id9o
 keycloak_realm: qhub-hpc
diff --git a/roles/jupyterhub/handlers/main.yaml b/roles/jupyterhub/handlers/main.yaml
index 4a890b3b..fa66fb22 100644
--- a/roles/jupyterhub/handlers/main.yaml
+++ b/roles/jupyterhub/handlers/main.yaml
@@ -1,27 +1,27 @@
 ---
-  - name: "restart services jupyterhub"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "jupyterhub"
+- name: Restart services jupyterhub
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - jupyterhub

-  - name: "restart services jupyterhub-proxy"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "jupyterhub-proxy"
+- name: Restart services jupyterhub-proxy
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - jupyterhub-proxy

-  - name: "restart services jupyterhub-ssh"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "jupyterhub-ssh"
+- name: Restart services jupyterhub-ssh
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - jupyterhub-ssh
diff --git a/roles/jupyterhub/tasks/jupyterhub.yaml b/roles/jupyterhub/tasks/jupyterhub.yaml
index 3fa2553f..6ea6d964 100644
--- a/roles/jupyterhub/tasks/jupyterhub.yaml
+++ b/roles/jupyterhub/tasks/jupyterhub.yaml
@@ -1,204 +1,204 @@
 ---
-  - name: Install Environments
-    include_role:
-      name: conda_environment
-    vars:
-      environment_path: "{{ item }}"
-    with_items:
-    - "{{ jupyterhub_hub_environment }}"
-    - "{{ jupyterhub_lab_environment }}"
-    - "{{ jupyterhub_dashboard_environment }}"
-
-  - name: Create hub config directory
-    become: true
-    file:
-      path: /etc/jupyterhub
-      state: directory
-      mode: '0755'
-
-  - name: Create hub user config directory
-    become: true
-    file:
-      path: /etc/jupyterhub/additional
-      state: directory
-      mode: '0755'
-
-  - name: Create hub state directory
-    become: true
-    file:
-      path: /var/lib/jupyterhub
-      state: directory
-      mode: '0755'
-
-  - name: Create hub proxy state directory
-    become: true
-    file:
-      path: /var/lib/jupyterhub-proxy
-      state: directory
-      mode: '0755'
-
-  - name: Create keycloak client for jupyterhub
-    community.general.keycloak_client:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      realm: "{{ keycloak_realm }}"
-      state: present
-      client_id: "{{ jupyterhub_client_id }}"
-      client_authenticator_type: client-secret
-      secret: "{{ jupyterhub_client_secret }}"
-      protocol_mappers:
-      - config:
-          access.token.claim: True
-          userinfo.token.claim: True
-          id.token.claim: True
-          claim.name: "roles"
-          jsonType.label: String
-          multivalued: True
-        name: clientroles
-        protocol: openid-connect
-        protocolMapper: oidc-usermodel-client-role-mapper
-      - config:
-          access.token.claim: True
-          userinfo.token.claim: True
-          id.token.claim: True
-          claim.name: "groups"
-        name: clientgroups
-        protocol: openid-connect
-        protocolMapper: oidc-group-membership-mapper
-      redirect_uris:
-      - "https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/hub/oauth_callback"
-    register: jupyterhub_client
-
-  - name: Create JupyterHub Keycloak roles
-    community.general.keycloak_role:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      realm: "{{ keycloak_realm }}"
-      client_id: "{{ jupyterhub_client_id }}"
-      name: "{{ item.name }}"
-      description: "{{ item.description }}"
-      state: present
-    with_items:
-    - name: jupyterhub_admin
-      description: JupyterHub Administrator
-    - name: jupyterhub_developer
-      description: JupyterHub Developer
-
-  - name: Create JupyterHub Keycloak role mappings to groups
-    community.general.keycloak_client_rolemapping:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      realm: "{{ keycloak_realm }}"
-      client_id: "{{ jupyterhub_client_id }}"
-      group_name: "{{ item.group }}"
-      roles:
-      - name: "{{ item.role }}"
-      state: present
-    with_items:
-    - group: admin
-      role: jupyterhub_admin
-    - group: developer
-      role: jupyterhub_developer
-
-  - name: Copy jupyterhub_config.py file
-    become: true
-    template:
-      src: templates/jupyterhub_config.py
-      dest: /etc/jupyterhub/jupyterhub_config.py
-    notify: restart services jupyterhub
-
-  - name: Copy jupyterhub_config.py files
-    become: true
-    template:
-      src: "{{ item.value }}"
-      dest: "/etc/jupyterhub/additional/{{ item.key }}.py"
-    with_dict: "{{ jupyterhub_additional_config }}"
-    notify: restart services jupyterhub
-
-  - name: Setup External Proxy for JupyterHub systemd unit
-    become: true
-    copy:
-      content: |
-        [Service]
-        User=root
-        Restart=always
-        WorkingDirectory=/var/lib/jupyterhub-proxy
-        # Protect bits that are normally shared across the system
-        ProtectHome=tmpfs
-        ProtectSystem=strict
-        PrivateTmp=yes
-        PrivateDevices=yes
-        ProtectKernelTunables=yes
-        ProtectKernelModules=yes
-
-        Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
-        Environment=CONFIGPROXY_AUTH_TOKEN={{ jupyterhub_proxy_auth_token }}
-        ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/configurable-http-proxy \
-          --ip=127.0.0.1 \
-          --port={{ jupyterhub_proxy_port }} \
-          --api-ip=127.0.0.1 \
-          --api-port={{ jupyterhub_proxy_api_port }} \
-          --default-target=http://localhost:{{ jupyterhub_port }}/ \
-          --error-target=http://localhost:{{ jupyterhub_port }}/hub/error
-
-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/jupyterhub-proxy.service
-      mode: '0544'
-    notify: restart services jupyterhub-proxy
-
-  - name: Setup JupyterHub systemd unit
-    become: true
-    copy:
-      content: |
-        [Unit]
-        # CHP must have successfully started *before* we launch JupyterHub
-        Requires=jupyterhub-proxy.service
-        After=jupyterhub-proxy.service
-
-        [Service]
-        User=root
-        Restart=always
-        WorkingDirectory=/var/lib/jupyterhub
-        # Protect bits that are normally shared across the system
-        PrivateTmp=yes
-        PrivateDevices=yes
-        ProtectKernelTunables=yes
-        ProtectKernelModules=yes
-        # By default, $PATH is pretty bare (thanks, systemd)
-        # We add the conda dir & /bin
-        # batchspawner uses sudo, which is in /bin (at least on CentOS)
-        Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
-        ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/jupyterhub --config /etc/jupyterhub/jupyterhub_config.py --debug
-        KillMode=process
-
-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/jupyterhub.service
-      mode: '0544'
-    notify: restart services jupyterhub
-
-  - name: Ensure JupyterHub Started
-    become: true
-    systemd:
-      name: jupyterhub
-      enabled: true
-      state: started
-      daemon_reload: true
-
-  - name: Ensure JupyterHub Proxy Started
-    become: true
-    systemd:
-      name: jupyterhub-proxy
-      enabled: true
-      state: started
-      daemon_reload: true
+- name: Install Environments
+  ansible.builtin.include_role:
+    name: conda_environment
+  vars:
+    environment_path: "{{ item }}"
+  with_items:
+    - "{{ jupyterhub_hub_environment }}"
+    - "{{ jupyterhub_lab_environment }}"
+    - "{{ jupyterhub_dashboard_environment }}"
+
+- name: Create hub config directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/jupyterhub
+    state: directory
+    mode: "0755"
+
+- name: Create hub user config directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/jupyterhub/additional
+    state: directory
+    mode: "0755"
+
+- name: Create hub state directory
+  become: true
+  ansible.builtin.file:
+    path: /var/lib/jupyterhub
+    state: directory
+    mode: "0755"
+
+- name: Create hub proxy state directory
+  become: true
+  ansible.builtin.file:
+    path: /var/lib/jupyterhub-proxy
+    state: directory
+    mode: "0755"
+
+- name: Create keycloak client for jupyterhub
+  community.general.keycloak_client:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    realm: "{{ keycloak_realm }}"
+    state: present
+    client_id: "{{ jupyterhub_client_id }}"
+    client_authenticator_type: client-secret
+    secret: "{{ jupyterhub_client_secret }}"
+    protocol_mappers:
+      - config:
+          access.token.claim: true
+          userinfo.token.claim: true
+          id.token.claim: true
+          claim.name: roles
+          jsonType.label: String
+          multivalued: true
+        name: clientroles
+        protocol: openid-connect
+        protocolMapper: oidc-usermodel-client-role-mapper
+      - config:
+          access.token.claim: true
+          userinfo.token.claim: true
+          id.token.claim: true
+          claim.name: groups
+        name: clientgroups
+        protocol: openid-connect
+        protocolMapper: oidc-group-membership-mapper
+    redirect_uris:
+      - https://{{ traefik_domain | default(hostvars[groups['hpc_master'][0]].ansible_ssh_host) }}/hub/oauth_callback
+  register: jupyterhub_client
+
+- name: Create JupyterHub Keycloak roles
+  community.general.keycloak_role:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    realm: "{{ keycloak_realm }}"
+    client_id: "{{ jupyterhub_client_id }}"
+    name: "{{ item.name }}"
+    description: "{{ item.description }}"
+    state: present
+  with_items:
+    - name: jupyterhub_admin
+      description: JupyterHub Administrator
+    - name: jupyterhub_developer
+      description: JupyterHub Developer
+
+- name: Create JupyterHub Keycloak role mappings to groups
+  community.general.keycloak_client_rolemapping:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    realm: "{{ keycloak_realm }}"
+    client_id: "{{ jupyterhub_client_id }}"
+    group_name: "{{ item.group }}"
+    roles:
+      - name: "{{ item.role }}"
+    state: present
+  with_items:
+    - group: admin
+      role: jupyterhub_admin
+    - group: developer
+      role: jupyterhub_developer
+
+- name: Copy jupyterhub_config.py file
+  become: true
+  ansible.builtin.template:
+    src: templates/jupyterhub_config.py
+    dest: /etc/jupyterhub/jupyterhub_config.py
+  notify: Restart services jupyterhub
+
+- name: Copy jupyterhub_config.py files
+  become: true
+  ansible.builtin.template:
+    src: "{{ item.value }}"
+    dest: /etc/jupyterhub/additional/{{ item.key }}.py
+  with_dict: "{{ jupyterhub_additional_config }}"
+  notify: Restart services jupyterhub
+
+- name: Setup External Proxy for JupyterHub systemd unit
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Service]
+      User=root
+      Restart=always
+      WorkingDirectory=/var/lib/jupyterhub-proxy
+      # Protect bits that are normally shared across the system
+      ProtectHome=tmpfs
+      ProtectSystem=strict
+      PrivateTmp=yes
+      PrivateDevices=yes
+      ProtectKernelTunables=yes
+      ProtectKernelModules=yes
+
+      Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
+      Environment=CONFIGPROXY_AUTH_TOKEN={{ jupyterhub_proxy_auth_token }}
+      ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/configurable-http-proxy \
+        --ip=127.0.0.1 \
+        --port={{ jupyterhub_proxy_port }} \
+        --api-ip=127.0.0.1 \
+        --api-port={{ jupyterhub_proxy_api_port }} \
+        --default-target=http://localhost:{{ jupyterhub_port }}/ \
+        --error-target=http://localhost:{{ jupyterhub_port }}/hub/error
+
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/jupyterhub-proxy.service
+    mode: "0544"
+  notify: Restart services jupyterhub-proxy
+
+- name: Setup JupyterHub systemd unit
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      # CHP must have successfully started *before* we launch JupyterHub
+      Requires=jupyterhub-proxy.service
+      After=jupyterhub-proxy.service
+
+      [Service]
+      User=root
+      Restart=always
+      WorkingDirectory=/var/lib/jupyterhub
+      # Protect bits that are normally shared across the system
+      PrivateTmp=yes
+      PrivateDevices=yes
+      ProtectKernelTunables=yes
+      ProtectKernelModules=yes
+      # By default, $PATH is pretty bare (thanks, systemd)
+      # We add the conda dir & /bin
+      # batchspawner uses sudo, which is in /bin (at least on CentOS)
+      Environment=PATH={{ miniforge_home }}/condabin:{{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
+      ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/jupyterhub --config /etc/jupyterhub/jupyterhub_config.py --debug
+      KillMode=process
+
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/jupyterhub.service
+    mode: "0544"
+  notify: Restart services jupyterhub
+
+- name: Ensure JupyterHub Started
+  become: true
+  ansible.builtin.systemd:
+    name: jupyterhub
+    enabled: true
+    state: started
+    daemon_reload: true
+
+- name: Ensure JupyterHub Proxy Started
+  become: true
+  ansible.builtin.systemd:
+    name: jupyterhub-proxy
+    enabled: true
+    state: started
+    daemon_reload: true
diff --git a/roles/jupyterhub/tasks/jupyterhub_ssh.yaml b/roles/jupyterhub/tasks/jupyterhub_ssh.yaml
index 30693481..56d7937f 100644
--- a/roles/jupyterhub/tasks/jupyterhub_ssh.yaml
+++ b/roles/jupyterhub/tasks/jupyterhub_ssh.yaml
@@ -1,43 +1,43 @@
 ---
 # jupyterhub-ssh uses the jupyterhub conda environment
-  - name: Ensure that jupyterhub-ssh configuration directory exists
-    become: true
-    file:
-      path: /etc/jupyterhub-ssh
-      state: directory
-      mode: '0700'
+- name: Ensure that jupyterhub-ssh configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/jupyterhub-ssh
+    state: directory
+    mode: "0700"

-  - name: Copy the jupyterhub_ssh configuration
-    become: true
-    template:
-      src: templates/jupyterhub_ssh_config.py
-      dest: /etc/jupyterhub-ssh/jupyterhub_ssh_config.py
-      owner: root
-      group: root
-      mode: 0644
-    notify: restart services jupyterhub-ssh
+- name: Copy the jupyterhub_ssh configuration
+  become: true
+  ansible.builtin.template:
+    src: templates/jupyterhub_ssh_config.py
+    dest: /etc/jupyterhub-ssh/jupyterhub_ssh_config.py
+    owner: root
+    group: root
+    mode: "0644"
+  notify: Restart services jupyterhub-ssh

-  - name: Setup JupyterHub-SSH systemd unit
-    become: true
-    copy:
-      content: |
-        [Service]
-        User=root
-        Restart=always
-        WorkingDirectory=/etc/jupyterhub-ssh
-        Environment=PATH={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
-        ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/python -m jupyterhub_ssh
-
-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/jupyterhub-ssh.service
-      mode: '0544'
-    notify: restart services jupyterhub-ssh
+- name: Setup JupyterHub-SSH systemd unit
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Service]
+      User=root
+      Restart=always
+      WorkingDirectory=/etc/jupyterhub-ssh
+      Environment=PATH={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin:$PATH:/bin:/usr/bin:/usr/local/bin
+      ExecStart={{ miniforge_home }}/envs/{{ jupyterhub_hub_environment | basename | splitext | first }}/bin/python -m jupyterhub_ssh
+
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/jupyterhub-ssh.service
+    mode: "0544"
+  notify: Restart services jupyterhub-ssh

-  - name: Restart JupyterHub SSH
-    become: true
-    systemd:
-      name: jupyterhub-ssh
-      enabled: true
-      state: started
-      daemon_reload: true
+- name: Ensure JupyterHub SSH started
+  become: true
+  ansible.builtin.systemd:
+    name: jupyterhub-ssh
+    enabled: true
+    state: started
+    daemon_reload: true
diff --git a/roles/jupyterhub/tasks/main.yaml b/roles/jupyterhub/tasks/main.yaml
index ec6f64ab..9803b1cf 100644
--- a/roles/jupyterhub/tasks/main.yaml
+++ b/roles/jupyterhub/tasks/main.yaml
@@ -1,8 +1,8 @@
 ---
-  - name: Install jupyterhub server
-    include_tasks: jupyterhub.yaml
-    when: jupyterhub_enabled
+- name: Install jupyterhub server
+  ansible.builtin.include_tasks: jupyterhub.yaml
+  when: jupyterhub_enabled

-  - name: Install jupyterhub_ssh server
-    include_tasks: jupyterhub_ssh.yaml
-    when: jupyterhub_ssh_enabled
+- name: Install jupyterhub_ssh server
+  ansible.builtin.include_tasks: jupyterhub_ssh.yaml
+  when: jupyterhub_ssh_enabled
diff --git a/roles/keycloak/defaults/main.yaml b/roles/keycloak/defaults/main.yaml
index 5d7c3300..b8823a4b 100644
--- a/roles/keycloak/defaults/main.yaml
+++ b/roles/keycloak/defaults/main.yaml
@@ -1,20 +1,21 @@
+---
 keycloak_enabled: false
-keycloak_version: "16.1.0"
+keycloak_version: 16.1.0
 keycloak_sha256: 41ba611b19c657a18889371e7bb1311dc70887a36206642f8dab60bffd0733cb
 keycloak_port: "30020"
 keycloak_admin_username: admin
 keycloak_admin_password: XLWUMUu8OG0XqlMREZK9id9o
 keycloak_realm: qhub-hpc

-keycloak_metrics_version: "2.5.3"
+keycloak_metrics_version: 2.5.3
 keycloak_metrics_sha256: 9b3f52f842a66dadf5ff3cc3a729b8e49042d32f84510a5d73d41a2e39f29a96
 keycloak_default_uid: "10000"
 keycloak_default_gid: "10000"
-keycloak_default_home_directory: "/home/nobody"
-keycloak_default_shell: "/bin/bash"
+keycloak_default_home_directory: /home/nobody
+keycloak_default_shell: /bin/bash

 # role: openldap
 openldap_server_uri: ldap://localhost:389
-openldap_base_dn: "dc=example,dc=com"
-openldap_bind_dn: "dc=example,dc=com"
+openldap_base_dn: dc=example,dc=com
+openldap_bind_dn: dc=example,dc=com
 openldap_bind_password: EsicntiZOhQaGomPiJZLWJEJ
diff --git a/roles/keycloak/tasks/keycloak.yaml b/roles/keycloak/tasks/keycloak.yaml
index 82e51fe8..917991ae 100644
--- a/roles/keycloak/tasks/keycloak.yaml
+++ b/roles/keycloak/tasks/keycloak.yaml
@@ -1,134 +1,134 @@
 ---
-  - name: Install openjdk and python requirements
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - default-jdk
-        - python3
-        - python3-pip
-        - python3-lxml
+- name: Install openjdk and python requirements
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - default-jdk
+      - python3
+      - python3-pip
+      - python3-lxml

-  - name: Create keycloak group
-    become: true
-    group:
+- name: Create keycloak group
+  become: true
+  ansible.builtin.group:
     name: keycloak
     state: present
     system: true

-  - name: Create the keycloak user
-    become: true
-    user:
-      name: keycloak
-      groups: keycloak
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
-
-  - name: Check that the keycloak binary exists
-    stat:
-      path: "/opt/keycloak-{{ keycloak_version }}/bin/standalone.sh"
-    register: _keycloak_stat
+- name: Create the keycloak user
+  become: true
+  ansible.builtin.user:
+    name: keycloak
+    groups: keycloak
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /

-  - name: Download keycloak to local folder
-    get_url:
-      url: "https://github.com/keycloak/keycloak/releases/download/{{ keycloak_version }}/keycloak-{{ keycloak_version }}.tar.gz"
-      checksum: "sha256:{{ keycloak_sha256 }}"
-      dest: "/tmp/keycloak-{{ keycloak_version }}.linux-amd64.tar.gz"
-      force: false
-      mode: 0755
-    when: not _keycloak_stat.stat.exists
+- name: Check that the keycloak binary exists
+  ansible.builtin.stat:
+    path: /opt/keycloak-{{ keycloak_version }}/bin/standalone.sh
+  register: _keycloak_stat

-  - name: Unpack keycloak
-    become: true
-    unarchive:
-      src: "/tmp/keycloak-{{ keycloak_version }}.linux-amd64.tar.gz"
-      remote_src: true
-      dest: "/opt"
-      owner: keycloak
-      group: keycloak
-      creates: "/opt/keycloak-{{ keycloak_version }}/bin/standalone.sh"
-    when: not _keycloak_stat.stat.exists
+- name: Download keycloak to local folder
+  ansible.builtin.get_url:
+    url: https://github.com/keycloak/keycloak/releases/download/{{ keycloak_version }}/keycloak-{{ keycloak_version }}.tar.gz
+    checksum: sha256:{{ keycloak_sha256 }}
+    dest: /tmp/keycloak-{{ keycloak_version }}.linux-amd64.tar.gz
+    force: false
+    mode: "0755"
+  when: not _keycloak_stat.stat.exists

-  - name: Ensure that keycloak configuration directory exists
-    become: true
-    file:
-      path: /etc/keycloak
-      state: directory
-      mode: '0700'
-      owner: keycloak
-      group: keycloak
+- name: Unpack keycloak
+  become: true
+  ansible.builtin.unarchive:
+    src: /tmp/keycloak-{{ keycloak_version }}.linux-amd64.tar.gz
+    remote_src: true
+    dest: /opt
+    owner: keycloak
+    group: keycloak
+    creates: /opt/keycloak-{{ keycloak_version }}/bin/standalone.sh
+  when: not _keycloak_stat.stat.exists

-  - name: Ensure Keycloak admin user exists
-    become: true
-    ansible.builtin.shell: "/opt/keycloak-{{ keycloak_version }}/bin/add-user-keycloak.sh -r master -u \"{{ keycloak_admin_username }}\" -p \"{{ keycloak_admin_password }}\""
-    args:
-      creates: "/opt/keycloak-{{ keycloak_version }}/standalone/configuration/keycloak-add-user.json"
+- name: Ensure that keycloak configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/keycloak
+    state: directory
+    mode: "0700"
+    owner: keycloak
+    group: keycloak

-  - name: Install keycloak metrics server plugin
-    include_tasks: metrics.yaml
+- name: Ensure Keycloak admin user exists
+  become: true
+  ansible.builtin.command: >-
+    /opt/keycloak-{{ keycloak_version }}/bin/add-user-keycloak.sh -r master -u "{{ keycloak_admin_username }}" -p "{{ keycloak_admin_password }}"
+  args:
+    creates: /opt/keycloak-{{ keycloak_version }}/standalone/configuration/keycloak-add-user.json

-  - name: Ensure https and proxy is enabled
-    become: true
-    community.general.xml:
-      path: "/opt/keycloak-{{ keycloak_version }}/standalone/configuration/standalone.xml"
-      xpath: "{{ item.xpath }}"
-      attribute: "{{ item.attribute }}"
-      value: "{{ item.value }}"
-      namespaces:
-        ns1: "urn:jboss:domain:19.0"
-        ns2: "urn:jboss:domain:undertow:12.0"
-    with_items:
-    - xpath: "/ns1:server/ns1:profile/ns2:subsystem/ns2:server/ns2:http-listener"
-      attribute: "proxy-address-forwarding"
-      value: "true"
-    - xpath: "/ns1:server/ns1:profile/ns2:subsystem/ns2:server/ns2:https-listener"
-      attribute: "proxy-address-forwarding"
-      value: "true"
+- name: Install keycloak metrics server plugin
+  ansible.builtin.include_tasks: metrics.yaml

+- name: Ensure https and proxy is enabled
+  become: true
+  community.general.xml:
+    path: /opt/keycloak-{{ keycloak_version }}/standalone/configuration/standalone.xml
+    xpath: "{{ item.xpath }}"
+    attribute: "{{ item.attribute }}"
+    value: "{{ item.value }}"
+    namespaces:
+      ns1: urn:jboss:domain:19.0
+      ns2: urn:jboss:domain:undertow:12.0
+  with_items:
+    - xpath: /ns1:server/ns1:profile/ns2:subsystem/ns2:server/ns2:http-listener
+      attribute: proxy-address-forwarding
+      value: "true"
+    - xpath: /ns1:server/ns1:profile/ns2:subsystem/ns2:server/ns2:https-listener
+      attribute: proxy-address-forwarding
+      value: "true"

-  - name: Copy the keycloak systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Keycloak
-        After=network.target
+- name: Copy the keycloak systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Keycloak
+      After=network.target

-        [Service]
-        Environment=LAUNCH_JBOSS_IN_BACKGROUND=1
-        User=keycloak
-        Group=keycloak
-        LimitNOFILE=102642
-        ExecStart=/opt/keycloak-{{ keycloak_version }}/bin/standalone.sh -c standalone.xml -b "0.0.0.0" -Djboss.http.port="{{ keycloak_port }}"
-        WorkingDirectory=/opt/keycloak-{{ keycloak_version }}
+      [Service]
+      Environment=LAUNCH_JBOSS_IN_BACKGROUND=1
+      User=keycloak
+      Group=keycloak
+      LimitNOFILE=102642
+      ExecStart=/opt/keycloak-{{ keycloak_version }}/bin/standalone.sh -c standalone.xml -b "0.0.0.0" -Djboss.http.port="{{ keycloak_port }}"
+      WorkingDirectory=/opt/keycloak-{{ keycloak_version }}

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/keycloak.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _keycloak_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/keycloak.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _keycloak_service

-  - name: Ensure Keycloak is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: keycloak
-      enabled: true
-      state: restarted
-    when: _keycloak_service.changed
+- name: Ensure Keycloak is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: keycloak
+    enabled: true
+    state: restarted
+  when: _keycloak_service.changed

-  - name: Wait for keycloak to be up
-    uri:
-      url: "http://localhost:{{ keycloak_port }}/auth/admin/master/console/"
-      follow_redirects: none
-      method: GET
-    register: _result
-    until: _result.status == 200
-    retries: 30
-    delay: 5
-    changed_when: false
+- name: Wait for keycloak to be up
+  ansible.builtin.uri:
+    url: http://localhost:{{ keycloak_port }}/auth/admin/master/console/
+    follow_redirects: none
+    method: GET
+  register: _result
+  until: _result.status == 200
+  retries: 30
+  delay: 5
+  changed_when: false
diff --git a/roles/keycloak/tasks/main.yaml b/roles/keycloak/tasks/main.yaml
index 7578e648..eaa25c64 100644
--- a/roles/keycloak/tasks/main.yaml
+++ b/roles/keycloak/tasks/main.yaml
@@ -1,8 +1,8 @@
 ---
-  - name: Install keycloak
-    include_tasks: keycloak.yaml
-    when: keycloak_enabled
+- name: Install keycloak
+  ansible.builtin.include_tasks: keycloak.yaml
+  when: keycloak_enabled

-  - name: Install keycloak realm, groups, and roles
-    include_tasks: realm.yaml
-    when: keycloak_enabled
+- name: Install keycloak realm, groups, and roles
+  ansible.builtin.include_tasks: realm.yaml
+  when: keycloak_enabled
diff --git a/roles/keycloak/tasks/metrics.yaml b/roles/keycloak/tasks/metrics.yaml
index 1debd2c5..63276186 100644
--- a/roles/keycloak/tasks/metrics.yaml
+++ b/roles/keycloak/tasks/metrics.yaml
@@ -1,20 +1,20 @@
 ---
-  - name: Download keycloak to local folder
-    become: true
-    get_url:
-      url: "https://github.com/aerogear/keycloak-metrics-spi/releases/download/{{ keycloak_metrics_version }}/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar"
-      checksum: "sha256:{{ keycloak_metrics_sha256 }}"
-      dest: "/opt/keycloak-{{ keycloak_version }}/standalone/deployments/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar"
-      force: false
-      owner: keycloak
-      group: keycloak
-      mode: 0755
+- name: Download keycloak to local folder
+  become: true
+  ansible.builtin.get_url:
+    url: https://github.com/aerogear/keycloak-metrics-spi/releases/download/{{ keycloak_metrics_version }}/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar
+    checksum: sha256:{{ keycloak_metrics_sha256 }}
+    dest: /opt/keycloak-{{ keycloak_version }}/standalone/deployments/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar
+    force: false
+    owner: keycloak
+    group: keycloak
+    mode: "0755"

-  - name: Touch dodeploy file within folder
-    become: true
-    file:
-      path: "/opt/keycloak-{{ keycloak_version }}/standalone/deployments/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar.dodeploy"
-      state: touch
-      owner: keycloak
-      group: keycloak
-      mode: 0755
+- name: Touch dodeploy file within folder
+  become: true
+  ansible.builtin.file:
+    path: /opt/keycloak-{{ keycloak_version }}/standalone/deployments/keycloak-metrics-spi-{{ keycloak_metrics_version }}.jar.dodeploy
+    state: touch
+    owner: keycloak
+    group: keycloak
+    mode: "0755"
diff --git a/roles/keycloak/tasks/realm.yaml b/roles/keycloak/tasks/realm.yaml
index b03679d1..b81c266b 100644
--- a/roles/keycloak/tasks/realm.yaml
+++ b/roles/keycloak/tasks/realm.yaml
@@ -1,61 +1,61 @@
 ---
-  - name: Create or update Keycloak realm
-    community.general.keycloak_realm:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      id: "{{ keycloak_realm }}"
-      realm: "{{ keycloak_realm }}"
-      enabled: true
-      state: present
+- name: Create or update Keycloak realm
+  community.general.keycloak_realm:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    id: "{{ keycloak_realm }}"
+    realm: "{{ keycloak_realm }}"
+    enabled: true
+    state: present

-  - name: Create User Federation for LDAP
-    community.general.keycloak_user_federation:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      realm: "{{ keycloak_realm }}"
-      name: "{{ keycloak_realm }}-ldap"
-      state: present
-      provider_id: ldap
-      provider_type: org.keycloak.storage.UserStorageProvider
-      config:
-        priority: 0
-        enabled: true
-        cachePolicy: DEFAULT
-        batchSizeForSync: 1000
-        editMode: WRITABLE
-        importEnabled: true
-        syncRegistrations: true
-        vendor: other
-        usernameLDAPAttribute: uid
-        rdnLDAPAttribute: uid
-        uuidLDAPAttribute: entryUUID
-        userObjectClasses: inetOrgPerson, organizationalPerson, posixAccount
-        connectionUrl: "{{ openldap_server_uri }}"
-        usersDn: "{{ openldap_base_dn }}"
-        authType: simple
-        bindDn: "{{ openldap_bind_dn }}"
-        bindCredential: "{{ openldap_bind_password }}"
-        searchScope: 1
-        validatePasswordPolicy: false
-        trustEmail: false
-        useTruststoreSpi: ldapsOnly
-        connectionPooling: true
-        pagination: true
-        allowKerberosAuthentication: false
-        debug: false
-        useKerberosForPasswordAuthentication: false
-        fullSyncPeriod: 3600
-        changedSyncPeriod: 3600
-      mappers:
-      - name: "unix uid"
-        providerId: "user-attribute-ldap-mapper"
-        providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+- name: Create User Federation for LDAP
+  community.general.keycloak_user_federation:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    realm: "{{ keycloak_realm }}"
+    name: "{{ keycloak_realm }}-ldap"
+    state: present
+    provider_id: ldap
+    provider_type: org.keycloak.storage.UserStorageProvider
+    config:
+      priority: 0
+      enabled: true
+      cachePolicy: DEFAULT
+      batchSizeForSync: 1000
+      editMode: WRITABLE
+      importEnabled: true
+      syncRegistrations: true
+      vendor: other
+      usernameLDAPAttribute: uid
+      rdnLDAPAttribute: uid
+      uuidLDAPAttribute: entryUUID
+      userObjectClasses: inetOrgPerson, organizationalPerson, posixAccount
+      connectionUrl: "{{ openldap_server_uri }}"
+      usersDn: "{{ openldap_base_dn }}"
+      authType: simple
+      bindDn: "{{ openldap_bind_dn }}"
+      bindCredential: "{{ openldap_bind_password }}"
+      searchScope: 1
+      validatePasswordPolicy: false
+      trustEmail: false
+      useTruststoreSpi: ldapsOnly
+      connectionPooling: true
+      pagination: true
+      allowKerberosAuthentication: false
+      debug: false
+      useKerberosForPasswordAuthentication: false
+      fullSyncPeriod: 3600
+      changedSyncPeriod: 3600
+    mappers:
+      - name: unix uid
+        providerId: user-attribute-ldap-mapper
+        providerType: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
         config:
           ldap.attribute: uidNumber
           user.model.attribute: uidNumber
@@ -63,9 +63,9 @@
           read.only: "false"
           always.read_value.from.ldap: "false"
           attribute.default.value: "{{ keycloak_default_uid }}"
-      - name: "unix gid"
-        providerId: "user-attribute-ldap-mapper"
-        providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+      - name: unix gid
+        providerId: user-attribute-ldap-mapper
+        providerType: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
         config:
           ldap.attribute: gidNumber
           user.model.attribute: gidNumber
@@ -73,9 +73,9 @@
           read.only: "false"
           always.read_value.from.ldap: "false"
           attribute.default.value: "{{ keycloak_default_gid }}"
-      - name: "unix home directory"
-        providerId: "user-attribute-ldap-mapper"
-        providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+      - name: unix home directory
+        providerId: user-attribute-ldap-mapper
+        providerType: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
         config:
           ldap.attribute: homeDirectory
           user.model.attribute: homeDirectory
@@ -83,9 +83,9 @@
           read.only: "false"
           always.read_value.from.ldap: "false"
           attribute.default.value: "{{ keycloak_default_home_directory }}"
-      - name: "unix shell"
-        providerId: "user-attribute-ldap-mapper"
-        providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+      - name: unix shell
+        providerId: user-attribute-ldap-mapper
+        providerType: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
         config:
           ldap.attribute: loginShell
           user.model.attribute: loginShell
@@ -94,28 +94,27 @@
           always.read_value.from.ldap: "false"
           attribute.default.value: "{{ keycloak_default_shell }}"

+- name: Create Keycloak groups
+  community.general.keycloak_group:
+    auth_client_id: admin-cli
+    auth_keycloak_url: http://localhost:{{ keycloak_port }}/auth
+    auth_realm: master
+    auth_username: "{{ keycloak_admin_username }}"
+    auth_password: "{{ keycloak_admin_password }}"
+    realm: "{{ keycloak_realm }}"
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - admin
+    - developer
+    - viewer

-  - name: Create Keycloak groups
-    community.general.keycloak_group:
-      auth_client_id: admin-cli
-      auth_keycloak_url: "http://localhost:{{ keycloak_port }}/auth"
-      auth_realm: master
-      auth_username: "{{ keycloak_admin_username }}"
-      auth_password: "{{ keycloak_admin_password }}"
-      realm: "{{ keycloak_realm }}"
-      name: "{{ item }}"
-      state: present
-    with_items:
-    - admin
-    - developer
-    - viewer
-
-  - name: Enable keycloak metrics endpoint
-    become: true
-    ansible.builtin.shell: |
-      /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh config credentials --server "http://localhost:{{ keycloak_port }}/auth" --realm master --user "{{ keycloak_admin_username }}" --password "{{ keycloak_admin_password }}" && \
-      /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh update events/config -s "eventsEnabled=true" -s "adminEventsEnabled=true" -s "eventsListeners+=metrics-listener" && \
-      /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh update events/config -r "{{ keycloak_realm }}" -s "eventsEnabled=true" -s "adminEventsEnabled=true" -s "eventsListeners+=metrics-listener" && \
-      touch /opt/keycloak-{{ keycloak_version }}/metrics
-    args:
-      creates: "/opt/keycloak-{{ keycloak_version }}/metrics"
+- name: Enable keycloak metrics endpoint
+  become: true
+  ansible.builtin.shell: |
+    /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh config credentials --server "http://localhost:{{ keycloak_port }}/auth" --realm master --user "{{ keycloak_admin_username }}" --password "{{ keycloak_admin_password }}" && \
+    /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh update events/config -s "eventsEnabled=true" -s "adminEventsEnabled=true" -s "eventsListeners+=metrics-listener" && \
+    /opt/keycloak-{{ keycloak_version }}/bin/kcadm.sh update events/config -r "{{ keycloak_realm }}" -s "eventsEnabled=true" -s "adminEventsEnabled=true" -s "eventsListeners+=metrics-listener" && \
+    touch /opt/keycloak-{{ keycloak_version }}/metrics
+  args:
+    creates: /opt/keycloak-{{ keycloak_version }}/metrics
diff --git a/roles/miniforge/defaults/main.yml b/roles/miniforge/defaults/main.yml
index 3464e114..c14e94d7 100644
--- a/roles/miniforge/defaults/main.yml
+++ b/roles/miniforge/defaults/main.yml
@@ -1,10 +1,11 @@
+---
 miniforge_enabled: false
-miniforge_version: "4.11.0-0"
+miniforge_version: 4.11.0-0
 miniforge_sha256: 49268ee30d4418be4de852dda3aa4387f8c95b55a76f43fb1af68dcbf8b205c3
-miniforge_home: "/opt/conda"
+miniforge_home: /opt/conda
 miniforge_envs:
-  - "/opt/conda/envs/"
-  - "/opt/conda-store/conda-store/default/envs/"
-  - "/opt/conda-store/conda-store/developer/envs/"
-  - "/opt/conda-store/conda-store/admin/envs/"
-  - "/opt/conda-store/conda-store/filesystem/envs/"
+  - /opt/conda/envs/
+  - /opt/conda-store/conda-store/default/envs/
+  - /opt/conda-store/conda-store/developer/envs/
+  - /opt/conda-store/conda-store/admin/envs/
+  - /opt/conda-store/conda-store/filesystem/envs/
diff --git a/roles/miniforge/tasks/main.yaml b/roles/miniforge/tasks/main.yaml
index dab6d72d..64359397 100644
--- a/roles/miniforge/tasks/main.yaml
+++ b/roles/miniforge/tasks/main.yaml
@@ -1,4 +1,4 @@
 ---
-  - name: Install miniforge
-    include_tasks: miniforge.yaml
-    when: miniforge_enabled
+- name: Install miniforge
+  ansible.builtin.include_tasks: miniforge.yaml
+  when: miniforge_enabled
diff --git a/roles/miniforge/tasks/miniforge.yaml b/roles/miniforge/tasks/miniforge.yaml
index 289063cf..01213c97 100644
--- a/roles/miniforge/tasks/miniforge.yaml
+++ b/roles/miniforge/tasks/miniforge.yaml
@@ -1,68 +1,61 @@
 ---
-  - name: Check that the conda binary exists
-    stat:
-      path: "{{ miniforge_home }}/bin/mamba"
-    register: stat_conda
-
-
-  - name: download miniconda mambaforge installer
-    get_url:
-      url: "https://github.com/conda-forge/miniforge/releases/download/{{ miniforge_version }}/Mambaforge-{{ miniforge_version }}-Linux-x86_64.sh"
-      checksum: "sha256:{{ miniforge_sha256 }}"
-      dest: "/tmp/miniforge.sh"
-      force: false
-      mode: 0755
-    when: not stat_conda.stat.exists
-
-
-  - name: install miniforge
-    become: yes
-    shell:
-      creates: "{{ miniforge_home }}/bin/mamba"
-      cmd: |
-        /tmp/miniforge.sh -b -p "{{ miniforge_home }}"
-    when: not stat_conda.stat.exists
-
-
-  - name: ensure conda.sh activated in shell
-    become: yes
-    file:
-      src: "{{ miniforge_home }}/etc/profile.d/conda.sh"
-      dest: "/etc/profile.d/conda.sh"
-      state: link
-
-
-  - name: Ensure conda activate directory exists
-    become: true
-    file:
-      path: "{{ miniforge_home }}/etc/conda/activate.d"
-      state: directory
-      mode: '0755'
-
-
-  - name: create conda configuration directory
-    become: yes
-    file:
-      path: /etc/conda
-      state: directory
-
-
-  - name: Remove implicit .condarc file installed by miniforge
-    become: yes
-    file:
-      path: "{{ miniforge_home }}/.condarc"
-      state: absent
-
-
-  - name: Create default condarc for users
-    become: yes
-    copy:
-      dest: /etc/conda/condarc
-      mode: 644
-      content: |
-        channels:
-          - conda-forge
-        envs_dirs:
-        {% for directory in miniforge_envs %}
-          - "{{ directory }}"
-        {% endfor %}
+- name: Check that the conda binary exists
+  ansible.builtin.stat:
+    path: "{{ miniforge_home }}/bin/mamba"
+  register: stat_conda
+
+- name: Download miniconda mambaforge installer
+  ansible.builtin.get_url:
+    url: https://github.com/conda-forge/miniforge/releases/download/{{ miniforge_version }}/Mambaforge-{{ miniforge_version }}-Linux-x86_64.sh
+    checksum: sha256:{{ miniforge_sha256 }}
+    dest: /tmp/miniforge.sh
+    force: false
+    mode: "0755"
+  when: not stat_conda.stat.exists
+
+- name: Install miniforge
+  become: true
+  ansible.builtin.shell:
+    creates: "{{ miniforge_home }}/bin/mamba"
+    cmd: |
+      /tmp/miniforge.sh -b -p "{{ miniforge_home }}"
+  when: not stat_conda.stat.exists
+
+- name: Ensure conda.sh activated in shell
+  become: true
+  ansible.builtin.file:
+    src: "{{ miniforge_home }}/etc/profile.d/conda.sh"
+    dest: /etc/profile.d/conda.sh
+    state: link
+
+- name: Ensure conda activate directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ miniforge_home }}/etc/conda/activate.d"
+    state: directory
+    mode: "0755"
+
+- name: Create conda configuration directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/conda
+    state: directory
+
+- name: Remove implicit .condarc file installed by miniforge
+  become: true
+  ansible.builtin.file:
+    path: "{{ miniforge_home }}/.condarc"
+    state: absent
+
+- name: Create default condarc for users
+  become: true
+  ansible.builtin.copy:
+    dest: /etc/conda/condarc
+    mode: "0644"
+    content: |
+      channels:
+        - conda-forge
+      envs_dirs:
+      {% for directory in miniforge_envs %}
+        - "{{ directory }}"
+      {% endfor %}
diff --git a/roles/minio/defaults/main.yaml b/roles/minio/defaults/main.yaml
index f57b3b4b..c16d8b1c 100644
--- a/roles/minio/defaults/main.yaml
+++ b/roles/minio/defaults/main.yaml
@@ -1,5 +1,6 @@
+---
 minio_enabled: false
-minio_version: "RELEASE.2021-02-11T08-23-43Z"
+minio_version: RELEASE.2021-02-11T08-23-43Z
 minio_sha256: b965214b7e61ef04087addf0c5da62e4ceff88adb2a0af7e38566c40be306e28
 minio_internal_port: "9001"
 minio_external_port: "9000"
diff --git a/roles/minio/tasks/main.yaml b/roles/minio/tasks/main.yaml
index 483e8926..f63aaf52 100644
--- a/roles/minio/tasks/main.yaml
+++ b/roles/minio/tasks/main.yaml
@@ -1,4 +1,4 @@
 ---
-  - name: Install minio
-    include_tasks: minio.yaml
-    when: minio_enabled
+- name: Install minio
+  ansible.builtin.include_tasks: minio.yaml
+  when: minio_enabled
diff --git a/roles/minio/tasks/minio.yaml b/roles/minio/tasks/minio.yaml
index 13963cf2..91c98762 100644
--- a/roles/minio/tasks/minio.yaml
+++ b/roles/minio/tasks/minio.yaml
@@ -1,112 +1,112 @@
 ---
-  - name: Check that the minio binary exists
-    stat:
-      path: "/usr/local/bin/minio"
-    register: _minio_stat
+- name: Check that the minio binary exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/minio
+  register: _minio_stat

-  - name: Download minio binary to local folder
-    become: true
-    get_url:
-      url: "https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.{{ minio_version }}"
-      checksum: "sha256:{{ minio_sha256 }}"
-      dest: "/usr/local/bin/minio"
-      force: false
-      mode: 0755
-    when: not _minio_stat.stat.exists
+- name: Download minio binary to local folder
+  become: true
+  ansible.builtin.get_url:
+    url: https://dl.minio.io/server/minio/release/linux-amd64/archive/minio.{{ minio_version }}
+    checksum: sha256:{{ minio_sha256 }}
+    dest: /usr/local/bin/minio
+    force: false
+    mode: "0755"
+  when: not _minio_stat.stat.exists

-  - name: Create minio group
-    become: true
-    group:
+- name: Create minio group
+  become: true
+  ansible.builtin.group:
     name: minio
     state: present
     system: true

-  - name: Create the minio user
-    become: true
-    user:
-      name: minio
-      groups: minio
- append: true - shell: /usr/sbin/nologin - system: true - create_home: false - home: / +- name: Create the minio user + become: true + ansible.builtin.user: + name: minio + groups: minio + append: true + shell: /usr/sbin/nologin + system: true + create_home: false + home: / - - name: Ensure that minio data directory exists - become: true - file: - path: /var/lib/minio - state: directory - mode: '0700' - owner: minio - group: minio +- name: Ensure that minio data directory exists + become: true + ansible.builtin.file: + path: /var/lib/minio + state: directory + mode: "0700" + owner: minio + group: minio - - name: Ensure that minio buckets exist - become: true - file: - path: "/var/lib/minio/{{ item }}" - state: directory - mode: '0700' - owner: minio - group: minio - with_items: "{{ minio_buckets }}" +- name: Ensure that minio buckets exist + become: true + ansible.builtin.file: + path: /var/lib/minio/{{ item }} + state: directory + mode: "0700" + owner: minio + group: minio + with_items: "{{ minio_buckets }}" - - name: Ensure that minio configuration directory exists - become: true - file: - path: /etc/minio - state: directory - mode: '0700' - owner: minio - group: minio +- name: Ensure that minio configuration directory exists + become: true + ansible.builtin.file: + path: /etc/minio + state: directory + mode: "0700" + owner: minio + group: minio - - name: Copy minio configuration - become: true - copy: - content: | - MINIO_ROOT_USER={{ minio_username }} - MINIO_ROOT_PASSWORD={{ minio_password }} - dest: /etc/minio/minio.env - mode: '0600' - owner: minio - group: minio - register: _minio_configuration +- name: Copy minio configuration + become: true + ansible.builtin.copy: + content: | + MINIO_ROOT_USER={{ minio_username }} + MINIO_ROOT_PASSWORD={{ minio_password }} + dest: /etc/minio/minio.env + mode: "0600" + owner: minio + group: minio + register: _minio_configuration - - name: Copy the prometheus systemd service file - become: true - copy: - content: | - [Unit] - Description=MinIO - Wants=network-online.target - After=network-online.target - AssertFileIsExecutable=/usr/local/bin/minio +- name: Copy the prometheus systemd service file + become: true + ansible.builtin.copy: + content: | + [Unit] + Description=MinIO + Wants=network-online.target + After=network-online.target + AssertFileIsExecutable=/usr/local/bin/minio - [Service] - WorkingDirectory=/var/lib/minio/ - User=minio - Group=minio - EnvironmentFile=/etc/minio/minio.env - ExecStart=/usr/local/bin/minio server /var/lib/minio/ --address ":{{ minio_internal_port }}" - Restart=always - LimitNOFILE=65536 - TasksMax=infinity - TimeoutStopSec=infinity - SendSIGKILL=no + [Service] + WorkingDirectory=/var/lib/minio/ + User=minio + Group=minio + EnvironmentFile=/etc/minio/minio.env + ExecStart=/usr/local/bin/minio server /var/lib/minio/ --address ":{{ minio_internal_port }}" + Restart=always + LimitNOFILE=65536 + TasksMax=infinity + TimeoutStopSec=infinity + SendSIGKILL=no - [Install] - WantedBy=multi-user.target - dest: /etc/systemd/system/minio.service - owner: root - group: root - mode: 0644 - register: _minio_service + [Install] + WantedBy=multi-user.target + dest: /etc/systemd/system/minio.service + owner: root + group: root + mode: "0644" + register: _minio_service - - name: Ensure minio is enabled on boot - become: true - systemd: - daemon_reload: true - name: minio - enabled: true - state: restarted - when: _minio_service.changed or _minio_configuration.changed +- name: Ensure minio is enabled on boot + become: true + 
ansible.builtin.systemd: + daemon_reload: true + name: minio + enabled: true + state: restarted + when: _minio_service.changed or _minio_configuration.changed diff --git a/roles/mysql/defaults/main.yml b/roles/mysql/defaults/main.yml index 17c580cf..8931fe34 100644 --- a/roles/mysql/defaults/main.yml +++ b/roles/mysql/defaults/main.yml @@ -1,3 +1,4 @@ +--- mysql_enabled: false mysql_databases: - slurm @@ -9,7 +10,7 @@ mysql_config: mysql_users: - username: slurm password: W9T0N4ejQBR4RmQCU6GmbbZa - privileges: '*.*:ALL' + privileges: "*.*:ALL" - username: conda-store password: eIbmUditL4RbQm0YPeLozRme - privileges: '*.*:ALL' + privileges: "*.*:ALL" diff --git a/roles/mysql/handlers/main.yaml b/roles/mysql/handlers/main.yaml index a53d914c..fa33d310 100644 --- a/roles/mysql/handlers/main.yaml +++ b/roles/mysql/handlers/main.yaml @@ -1,9 +1,10 @@ --- - - name: "restart services mysql" - become: true - service: - name={{item}} - enabled=yes - state=restarted - with_items: - - "mysql" +- name: Restart services mysql + become: true + ansible.builtin.service: + name: "{{item}}" + enabled: "yes" + state: restarted + cmd: "" + with_items: + - mysql diff --git a/roles/mysql/tasks/main.yml b/roles/mysql/tasks/main.yml index 7d3960d7..67fe5a5f 100644 --- a/roles/mysql/tasks/main.yml +++ b/roles/mysql/tasks/main.yml @@ -1,4 +1,4 @@ --- - - name: Install mysql - include_tasks: mysql.yaml - when: mysql_enabled +- name: Install mysql + ansible.builtin.include_tasks: mysql.yaml + when: mysql_enabled diff --git a/roles/mysql/tasks/mysql.yaml b/roles/mysql/tasks/mysql.yaml index 00ae7c43..19f565f0 100644 --- a/roles/mysql/tasks/mysql.yaml +++ b/roles/mysql/tasks/mysql.yaml @@ -1,40 +1,40 @@ --- - - name: Install mysql - become: true - apt: - name: - - mysql-server - - python3 - - python3-pip - - python3-mysqldb - state: latest - cache_valid_time: 3600 +- name: Install mysql + become: true + ansible.builtin.apt: + name: + - mysql-server + - python3 + - python3-pip + - python3-mysqldb + state: latest + cache_valid_time: 3600 - - name: Ensure mysql settings in file - become: true - community.general.ini_file: - path: "/etc/mysql/mysql.conf.d/mysqld.cnf" - section: mysqld - option: "{{ item.key }}" - value: "{{ item.value }}" - mode: '0644' - backup: yes - with_dict: "{{ mysql_config }}" - notify: restart services mysql +- name: Ensure mysql settings in file + become: true + community.general.ini_file: + path: /etc/mysql/mysql.conf.d/mysqld.cnf + section: mysqld + option: "{{ item.key }}" + value: "{{ item.value }}" + mode: "0644" + backup: true + with_dict: "{{ mysql_config }}" + notify: restart services mysql - - name: Create mysql database - become: true - community.mysql.mysql_db: - name: "{{ item }}" - state: present - with_items: "{{ mysql_databases }}" +- name: Create mysql database + become: true + community.mysql.mysql_db: + name: "{{ item }}" + state: present + with_items: "{{ mysql_databases }}" - - name: Create mysql users - become: true - community.mysql.mysql_user: - name: "{{ item.username }}" - password: "{{ item.password }}" - priv: "{{ item.privileges }}" - state: present - with_items: "{{ mysql_users }}" - no_log: True # Avoid logging user creds +- name: Create mysql users + become: true + community.mysql.mysql_user: + name: "{{ item.username }}" + password: "{{ item.password }}" + priv: "{{ item.privileges }}" + state: present + with_items: "{{ mysql_users }}" + no_log: true # Avoid logging user creds diff --git a/roles/nfs/defaults/main.yaml b/roles/nfs/defaults/main.yaml index 
diff --git a/roles/nfs/defaults/main.yaml b/roles/nfs/defaults/main.yaml
index 3aa470e8..bba52bc3 100644
--- a/roles/nfs/defaults/main.yaml
+++ b/roles/nfs/defaults/main.yaml
@@ -1,6 +1,6 @@
+---
 nfs_server_enabled: false
 nfs_server_exports: []
-
 nfs_client_enabled: false
 # list of {host: ..., path: ...}
 nfs_client_mounts: []
diff --git a/roles/nfs/tasks/client.yaml b/roles/nfs/tasks/client.yaml
index a128679c..ac6300d4 100644
--- a/roles/nfs/tasks/client.yaml
+++ b/roles/nfs/tasks/client.yaml
@@ -1,32 +1,32 @@
 ---
-  - name: Install nfs
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - nfs-common
+- name: Install nfs
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - nfs-common

-  - name: Wait for nfs server at host to be available
-    wait_for:
-      host: "{{ item.host }}"
-      port: 2049
-      timeout: 600
-    with_items: "{{ nfs_client_mounts }}"
+- name: Wait for nfs server at host to be available
+  ansible.builtin.wait_for:
+    host: "{{ item.host }}"
+    port: 2049
+    timeout: 600
+  with_items: "{{ nfs_client_mounts }}"

-  - name: Ensure nfs mounted directories exist
-    become: true
-    file:
-      path: "{{ item.path }}"
-      state: directory
-    with_items: "{{ nfs_client_mounts }}"
+- name: Ensure nfs mounted directories exist
+  become: true
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+  with_items: "{{ nfs_client_mounts }}"

-  - name: Add fstab entries for nfs mounts
-    become: true
-    ansible.posix.mount:
-      src: "{{ item.host }}:{{ item.path }}"
-      path: "{{ item.path }}"
-      opts: "rw,sync,hard,intr"
-      state: mounted
-      fstype: nfs
-    with_items: "{{ nfs_client_mounts }}"
+- name: Add fstab entries for nfs mounts
+  become: true
+  ansible.posix.mount:
+    src: "{{ item.host }}:{{ item.path }}"
+    path: "{{ item.path }}"
+    opts: rw,sync,hard,intr
+    state: mounted
+    fstype: nfs
+  with_items: "{{ nfs_client_mounts }}"
diff --git a/roles/nfs/tasks/main.yaml b/roles/nfs/tasks/main.yaml
index 4ca02624..44868215 100644
--- a/roles/nfs/tasks/main.yaml
+++ b/roles/nfs/tasks/main.yaml
@@ -1,8 +1,8 @@
 ---
-  - name: Install nfs server
-    include_tasks: server.yaml
-    when: nfs_server_enabled
+- name: Install nfs server
+  ansible.builtin.include_tasks: server.yaml
+  when: nfs_server_enabled

-  - name: Install nfs client
-    include_tasks: client.yaml
-    when: nfs_client_enabled
+- name: Install nfs client
+  ansible.builtin.include_tasks: client.yaml
+  when: nfs_client_enabled
diff --git a/roles/nfs/tasks/server.yaml b/roles/nfs/tasks/server.yaml
index 6afb9429..41b91418 100644
--- a/roles/nfs/tasks/server.yaml
+++ b/roles/nfs/tasks/server.yaml
@@ -1,38 +1,38 @@
 ---
-  - name: Install nfs
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - nfs-common
-        - nfs-server
+- name: Install nfs
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - nfs-common
+      - nfs-server

-  - name: Ensure nfs directory created
-    become: true
-    file:
-      path: "{{ item }}"
-      state: directory
-    with_items: "{{ nfs_server_exports }}"
+- name: Ensure nfs directory created
+  become: true
+  ansible.builtin.file:
+    path: "{{ item }}"
+    state: directory
+  with_items: "{{ nfs_server_exports }}"

-  - name: nfs configuration
-    become: true
-    copy:
-      content: |
-        {% for export in nfs_server_exports %}
-        {{ export }} *(rw,sync,no_root_squash,no_subtree_check)
-        {% endfor %}
-      dest: /etc/exports
-      mode: '0644'
-      owner: root
-      group: root
-    register: _nfs_configuration
+- name: NFS configuration
+  become: true
+  ansible.builtin.copy:
+    content: |
+      {% for export in nfs_server_exports %}
+      {{ export }} *(rw,sync,no_root_squash,no_subtree_check)
+      {% endfor %}
+    dest: /etc/exports
+    mode: "0644"
+    owner: root
+    group: root
+  register: _nfs_configuration

-  - name: Ensure nfs server is started
-    become: true
-    systemd:
-      daemon_reload: true
-      name: nfs-kernel-server
-      enabled: true
-      state: restarted
-    when: _nfs_configuration.changed
+- name: Ensure nfs server is started
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: nfs-kernel-server
+    enabled: true
+    state: restarted
+  when: _nfs_configuration.changed
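For context, nfs_client_mounts (empty by default) pairs a server host with an export path, and the client tasks above consume it verbatim; a hedged example entry (the hostname is invented):

nfs_client_mounts:
  - host: hpc-master
    path: /home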
diff --git a/roles/openldap/defaults/main.yaml b/roles/openldap/defaults/main.yaml
index 3e2e8bef..86d74e2d 100644
--- a/roles/openldap/defaults/main.yaml
+++ b/roles/openldap/defaults/main.yaml
@@ -1,8 +1,9 @@
+---
 openldap_server_enabled: false
 openldap_bind_password: EsicntiZOhQaGomPiJZLWJEJ
 openldap_server_uri: ldap://localhost:389
-openldap_bind_dn: "dc=example,dc=com"
-openldap_base_dn: "dc=example,dc=com"
+openldap_bind_dn: dc=example,dc=com
+openldap_base_dn: dc=example,dc=com
 openldap_db_type: "{1}mdb"

 openldap_client_enabled: false
diff --git a/roles/openldap/handlers/main.yaml b/roles/openldap/handlers/main.yaml
index 30fdb3aa..9dbaca48 100644
--- a/roles/openldap/handlers/main.yaml
+++ b/roles/openldap/handlers/main.yaml
@@ -1,11 +1,11 @@
 # handlers file for ansible-role-ldap
 ---
-  - name: "restart services ldap"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "nscd"
-      - "nslcd"
+- name: Restart services ldap
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - nscd
+    - nslcd
diff --git a/roles/openldap/tasks/client.yaml b/roles/openldap/tasks/client.yaml
index 5034bda4..80503277 100644
--- a/roles/openldap/tasks/client.yaml
+++ b/roles/openldap/tasks/client.yaml
@@ -1,53 +1,51 @@
 ---
-  - name: Install ldap client packages
-    become: true
-    apt:
-      name:
-        - libpam-ldapd
-        - nscd
-      state: latest
-      cache_valid_time: 3600
+- name: Install ldap client packages
+  become: true
+  ansible.builtin.apt:
+    name:
+      - libpam-ldapd
+      - nscd
+    state: latest
+    cache_valid_time: 3600

+- name: LDAP Authentication | Create Home Directories on login
+  become: true
+  ansible.builtin.lineinfile:
+    dest: /etc/pam.d/common-account
+    regexp: pam_mkhomedir\.so
+    line: session required pam_mkhomedir.so skel=/etc/skel/ umask=0022
+    state: present
+  notify: Restart services ldap

-  - name: LDAP Authentication | Create Home Directories on login
-    become: true
-    lineinfile:
-      dest: /etc/pam.d/common-account
-      regexp: 'pam_mkhomedir\.so'
-      line: "session required pam_mkhomedir.so skel=/etc/skel/ umask=0022"
-      state: present
-    notify: restart services ldap
+- name: LDAP Authentication | Query ldap in nsswitch.conf
+  become: true
+  ansible.builtin.replace:
+    dest: /etc/nsswitch.conf
+    regexp: ^({{ item }}(?!.*\bldap\b).*)$
+    replace: \1 ldap
+    backup: "yes"
+  with_items:
+    - passwd
+    - shadow
+    - group
+  notify: Restart services ldap

+- name: LDAP Authentication | no cache for ldap in nscd.conf
+  become: true
+  ansible.builtin.replace:
+    dest: /etc/nscd.conf
+    regexp: ^(\s+enable-cache\s+{{ item }})\s+yes\s*$
+    replace: \1 no
+    backup: "yes"
+  with_items:
+    - passwd
+    - group
+  notify: Restart services ldap

-  - name: LDAP Authentication | Query ldap in nsswitch.conf
-    become: true
-    replace:
-      dest: /etc/nsswitch.conf
-      regexp: '^({{ item }}(?!.*\bldap\b).*)$'
-      replace: '\1 ldap'
-      backup: 'yes'
-    with_items:
-      - passwd
-      - shadow
-      - group
-    notify: restart services ldap

-  - name: LDAP Authentication | no cache for ldap in nscd.conf
-    become: true
-    replace:
-      dest: /etc/nscd.conf
-      regexp: '^(\s+enable-cache\s+{{ item }})\s+yes\s*$'
-      replace: '\1 no'
-      backup: 'yes'
-    with_items:
-      - passwd
-      - group
-    notify: restart services ldap

-  - name: "LDAP Authentication | Configure /etc/nslcd.conf"
-    become: true
-    template:
-      src: nslcd.conf.j2
-      dest: /etc/nslcd.conf
-      mode: 0600
-    notify: restart services ldap
+- name: LDAP Authentication | Configure /etc/nslcd.conf
+  become: true
+  ansible.builtin.template:
+    src: nslcd.conf.j2
+    dest: /etc/nslcd.conf
+    mode: "0600"
+  notify: Restart services ldap
diff --git a/roles/openldap/tasks/main.yaml b/roles/openldap/tasks/main.yaml
index 3284624f..073aea81 100644
--- a/roles/openldap/tasks/main.yaml
+++ b/roles/openldap/tasks/main.yaml
@@ -1,8 +1,8 @@
 ---
-  - name: Install openldap
-    include_tasks: openldap.yaml
-    when: openldap_server_enabled
+- name: Install openldap
+  ansible.builtin.include_tasks: openldap.yaml
+  when: openldap_server_enabled

-  - name: Install openldap client
-    include_tasks: client.yaml
-    when: openldap_client_enabled
+- name: Install openldap client
+  ansible.builtin.include_tasks: client.yaml
+  when: openldap_client_enabled
diff --git a/roles/openldap/tasks/openldap.yaml b/roles/openldap/tasks/openldap.yaml
index c98fe489..aff3eaef 100644
--- a/roles/openldap/tasks/openldap.yaml
+++ b/roles/openldap/tasks/openldap.yaml
@@ -1,59 +1,58 @@
 ---
-  - name: Install openldap packages
-    become: true
-    apt:
-      name:
-        - slapd
-        - ldap-utils
-        - python3-ldap
-      state: present
-      update_cache: yes
-      cache_valid_time: 3600
+- name: Install openldap packages
+  become: true
+  ansible.builtin.apt:
+    name:
+      - slapd
+      - ldap-utils
+      - python3-ldap
+    state: present
+    update_cache: true
+    cache_valid_time: 3600

-  - name: Start ldap service
-    become: true
-    service:
-      name: slapd
-      state: started
-      enabled: yes
+- name: Start ldap service
+  become: true
+  ansible.builtin.service:
+    name: slapd
+    state: started
+    enabled: true

-  - name: Register encripted password
-    command: slappasswd -s "{{openldap_bind_password}}"
-    register: openldap_encripted_password
+- name: Register encrypted password
+  ansible.builtin.command: slappasswd -s "{{ openldap_bind_password }}"
+  register: openldap_encrypted_password

-  - name: Copy Grafana Configuration
-    become: true
-    copy:
-      content: |
-        dn: olcDatabase={{openldap_db_type}},cn=config
-        changetype: modify
-        replace: olcSuffix
-        olcSuffix: {{openldap_base_dn}}
-
-        dn: olcDatabase={{openldap_db_type}},cn=config
-        changetype: modify
-        replace: olcRootDN
-        olcRootDN: {{openldap_base_dn}}
-
-        dn: olcDatabase={{openldap_db_type}},cn=config
-        changetype: modify
-        replace: olcRootPW
-        olcRootPW: {{openldap_encripted_password.stdout}}
-      dest: /tmp/db.ldif
-      owner: root
-      group: root
-      mode: 0664
+- name: Copy OpenLDAP database configuration
+  become: true
+  ansible.builtin.copy:
+    content: |
+      dn: olcDatabase={{ openldap_db_type }},cn=config
+      changetype: modify
+      replace: olcSuffix
+      olcSuffix: {{ openldap_base_dn }}
+
+      dn: olcDatabase={{ openldap_db_type }},cn=config
+      changetype: modify
+      replace: olcRootDN
+      olcRootDN: {{ openldap_base_dn }}
+
+      dn: olcDatabase={{ openldap_db_type }},cn=config
+      changetype: modify
+      replace: olcRootPW
+      olcRootPW: {{ openldap_encrypted_password.stdout }}
+    dest: /tmp/db.ldif
+    owner: root
+    group: root
+    mode: "0664"

-  - name: Load ddbb template into ldap
-    become: true
-    command: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/db.ldif
-
-  - name: Load ldap root entry
-    ldap_entry:
-      server_uri: "{{openldap_server_uri}}"
-      bind_dn: "{{openldap_base_dn}}"
-      bind_pw: "{{openldap_bind_password}}"
-      dn: "{{openldap_base_dn}}"
-      objectClass:
-        - top
-        - domain
+- name: Load database template into ldap
+  become: true
+  ansible.builtin.command: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/db.ldif

+- name: Load ldap root entry
+  community.general.ldap_entry:
+    server_uri: "{{ openldap_server_uri }}"
+    bind_dn: "{{ openldap_base_dn }}"
+    bind_pw: "{{ openldap_bind_password }}"
+    dn: "{{ openldap_base_dn }}"
+    objectClass:
+      - top
+      - domain
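The root-entry task above is the pattern for any further directory objects; a hedged sketch adding an organizational unit with the same community.general.ldap_entry module (the OU name is invented):

- name: Load ldap users ou (sketch)
  community.general.ldap_entry:
    server_uri: "{{ openldap_server_uri }}"
    bind_dn: "{{ openldap_bind_dn }}"
    bind_pw: "{{ openldap_bind_password }}"
    dn: "ou=users,{{ openldap_base_dn }}"
    objectClass: organizationalUnit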
diff --git a/roles/postgresql/defaults/main.yml b/roles/postgresql/defaults/main.yml
index c41ea649..edb3fdfc 100644
--- a/roles/postgresql/defaults/main.yml
+++ b/roles/postgresql/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 postgres_enabled: false
 postgres_databases:
   - conda-store
@@ -5,4 +6,4 @@ postgres_databases:
 postgres_users:
   - username: conda-store
     password: eIbmUditL4RbQm0YPeLozRme
-    role: 'CREATEDB,CREATEROLE'
+    role: CREATEDB,CREATEROLE
diff --git a/roles/postgresql/handlers/main.yaml b/roles/postgresql/handlers/main.yaml
index 9f23b41b..2f5a82ce 100644
--- a/roles/postgresql/handlers/main.yaml
+++ b/roles/postgresql/handlers/main.yaml
@@ -1,9 +1,9 @@
 ---
-  - name: "restart services postgres"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "postgresql"
+- name: Restart services postgres
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - postgresql
diff --git a/roles/postgresql/tasks/main.yml b/roles/postgresql/tasks/main.yml
index 62f07c58..4780b848 100644
--- a/roles/postgresql/tasks/main.yml
+++ b/roles/postgresql/tasks/main.yml
@@ -1,4 +1,4 @@
 ---
-  - name: Install postgres
-    include_tasks: postgresql.yaml
-    when: postgres_enabled
+- name: Install postgres
+  ansible.builtin.include_tasks: postgresql.yaml
+  when: postgres_enabled
diff --git a/roles/postgresql/tasks/postgresql.yaml b/roles/postgresql/tasks/postgresql.yaml
index 80c3ce75..6a4fe492 100644
--- a/roles/postgresql/tasks/postgresql.yaml
+++ b/roles/postgresql/tasks/postgresql.yaml
@@ -1,7 +1,7 @@
 ---
-  - name: Install PostgreSQL
-    become: true
-    apt:
+- name: Install PostgreSQL
+  become: true
+  ansible.builtin.apt:
     name:
       - postgresql
       - postgresql-contrib
@@ -10,26 +10,26 @@
     state: latest
     cache_valid_time: 3600

-  - name: Ensure PostgreSQL service is running
-    systemd:
-      name: postgresql
-      state: started
-      enabled: yes
+- name: Ensure PostgreSQL service is running
+  ansible.builtin.systemd:
+    name: postgresql
+    state: started
+    enabled: true

-  - name: "Create database"
-    postgresql_db:
-      state: present
-      name: "{{ item }}"
-    become_user: postgres
-    become: yes
-    with_items: "{{ postgres_databases }}"
+- name: Create database
+  community.postgresql.postgresql_db:
+    state: present
+    name: "{{ item }}"
+  become_user: postgres
+  become: true
+  with_items: "{{ postgres_databases }}"

-  - name: Create PostgreSQL User
-    postgresql_user:
-      db: postgres
-      name: "{{ item.username }}"
-      password: "{{ item.password }}"
-      role_attr_flags: "{{ item.role }}"
-    with_items: "{{ postgres_users }}"
-    become_user: postgres
-    become: yes
+- name: Create PostgreSQL User
+  community.postgresql.postgresql_user:
+    db: postgres
+    name: "{{ item.username }}"
+    password: "{{ item.password }}"
+    role_attr_flags: "{{ item.role }}"
+  with_items: "{{ postgres_users }}"
+  become_user: postgres
+  become: true
diff --git a/roles/prometheus/defaults/main.yml b/roles/prometheus/defaults/main.yml
index 3d54faaa..dc75c480 100644
--- a/roles/prometheus/defaults/main.yml
+++ b/roles/prometheus/defaults/main.yml
@@ -1,15 +1,15 @@
+---
 prometheus_enabled: false
-prometheus_version: "2.23.0"
-prometheus_sha256: 0f54cefdb946852947e35d4db8cfce394911ff586486f927c3887db4183cb643
+prometheus_version: 2.23.0
+prometheus_sha256: "0f54cefdb946852947e35d4db8cfce394911ff586486f927c3887db4183cb643"
 prometheus_port: "9090"

 node_exporter_enabled: false
-node_exporter_version: "1.0.1"
+node_exporter_version: 1.0.1
 node_exporter_sha256: 3369b76cd2b0ba678b6d618deab320e565c3d93ccb5c2a0d5db51a53857768ae
 node_exporter_port: "9100"

 prometheus_additional_scrape_configs: []
-
 # role: jupyterhub
 jupyterhub_proxy_port: "15002"
@@ -21,7 +21,7 @@ slurm_exporter_port: "9341"

 # role: conda_store
 conda_store_port: "5000"
-conda_store_prefix: "/conda-store"
+conda_store_prefix: /conda-store

 # role: keycloak
 keycloak_port: "30020"
diff --git a/roles/prometheus/tasks/main.yml b/roles/prometheus/tasks/main.yml
index e3784177..8c135eca 100644
--- a/roles/prometheus/tasks/main.yml
+++ b/roles/prometheus/tasks/main.yml
@@ -1,8 +1,8 @@
 ---
-  - name: Install prometheus
-    include_tasks: prometheus.yml
-    when: prometheus_enabled
+- name: Install prometheus
+  ansible.builtin.include_tasks: prometheus.yml
+  when: prometheus_enabled

-  - name: Install node_exporter
-    include_tasks: node_exporter.yml
-    when: node_exporter_enabled
+- name: Install node_exporter
+  ansible.builtin.include_tasks: node_exporter.yml
+  when: node_exporter_enabled
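prometheus_additional_scrape_configs, defaulted to an empty list above, is the hook for extra scrape jobs rendered into the prometheus.yaml template; a hedged example (job name and target are invented):

prometheus_additional_scrape_configs:
  - job_name: minio
    static_configs:
      - targets: ["localhost:9000"]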
diff --git a/roles/prometheus/tasks/node_exporter.yml b/roles/prometheus/tasks/node_exporter.yml
index 11baa811..a1a58719 100644
--- a/roles/prometheus/tasks/node_exporter.yml
+++ b/roles/prometheus/tasks/node_exporter.yml
@@ -1,90 +1,90 @@
 ---
-  - name: Check that the node exporter binary exists
-    stat:
-      path: "/usr/local/bin/node_exporter"
-    register: _node_exporter_stat
+- name: Check that the node exporter binary exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/node_exporter
+  register: _node_exporter_stat

-  - name: Download node_exporter binary to local folder
-    get_url:
-      url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
-      checksum: "sha256:{{ node_exporter_sha256 }}"
-      dest: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
-      force: false
-      mode: 0755
-    when: not _node_exporter_stat.stat.exists
+- name: Download node_exporter binary to local folder
+  ansible.builtin.get_url:
+    url: https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz
+    checksum: sha256:{{ node_exporter_sha256 }}
+    dest: /tmp/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz
+    force: false
+    mode: "0755"
+  when: not _node_exporter_stat.stat.exists

-  - name: Unpack node_exporter binary
-    unarchive:
-      src: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
-      remote_src: true
-      dest: "/tmp"
-      creates: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter"
-    when: not _node_exporter_stat.stat.exists
+- name: Unpack node_exporter binary
+  ansible.builtin.unarchive:
+    src: /tmp/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz
+    remote_src: true
+    dest: /tmp
+    creates: /tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter
+  when: not _node_exporter_stat.stat.exists

-  - name: Install node_exporter binary
-    become: true
-    copy:
-      src: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter"
-      dest: "/usr/local/bin/node_exporter"
-      remote_src: true
-      mode: 0755
-      owner: root
-      group: root
-    when: not _node_exporter_stat.stat.exists
+- name: Install node_exporter binary
+  become: true
+  ansible.builtin.copy:
+    src: /tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter
+    dest: /usr/local/bin/node_exporter
+    remote_src: true
+    mode: "0755"
+    owner: root
+    group: root
+  when: not _node_exporter_stat.stat.exists

-  - name: Create node_exporter group
-    become: true
-    group:
+- name: Create node_exporter group
+  become: true
+  ansible.builtin.group:
     name: node_exporter
     state: present
     system: true

-  - name: Create the node_exporter user
-    become: true
-    user:
-      name: node_exporter
-      groups: node_exporter
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
+- name: Create the node_exporter user
+  become: true
+  ansible.builtin.user:
+    name: node_exporter
+    groups: node_exporter
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /

-  - name: Copy the node_exporter systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Prometheus Node Exporter
-        After=network-online.target
+- name: Copy the node_exporter systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Prometheus Node Exporter
+      After=network-online.target

-        [Service]
-        Type=simple
-        User=node_exporter
-        Group=node_exporter
-        ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }}
-        SyslogIdentifier=node_exporter
-        Restart=always
-        RestartSec=1
-        StartLimitInterval=0
-        ProtectSystem=strict
-        ProtectControlGroups=true
-        ProtectKernelModules=true
-        ProtectKernelTunables=yes
+      [Service]
+      Type=simple
+      User=node_exporter
+      Group=node_exporter
+      ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }}
+      SyslogIdentifier=node_exporter
+      Restart=always
+      RestartSec=1
+      StartLimitInterval=0
+      ProtectSystem=strict
+      ProtectControlGroups=true
+      ProtectKernelModules=true
+      ProtectKernelTunables=yes

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/node_exporter.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _node_exporter_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/node_exporter.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _node_exporter_service

-  - name: Ensure Node Exporter is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: node_exporter
-      enabled: true
-      state: started
-    when: _node_exporter_service.changed
+- name: Ensure Node Exporter is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: node_exporter
+    enabled: true
+    state: started
+  when: _node_exporter_service.changed
diff --git a/roles/prometheus/tasks/prometheus.yml b/roles/prometheus/tasks/prometheus.yml
index 67286173..3cd2994f 100644
--- a/roles/prometheus/tasks/prometheus.yml
+++ b/roles/prometheus/tasks/prometheus.yml
@@ -1,129 +1,129 @@
 ---
-  - name: Check that the node exporter binary exists
-    stat:
-      path: "/usr/local/bin/prometheus"
-    register: _prometheus_stat
+- name: Check that the prometheus binary exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/prometheus
+  register: _prometheus_stat

-  - name: Download prometheus binary to local folder
-    get_url:
-      url: "https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz"
-      checksum: "sha256:{{ prometheus_sha256 }}"
-      dest: "/tmp/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz"
-      force: false
-      mode: 0755
-    when: not _prometheus_stat.stat.exists
+- name: Download prometheus binary to local folder
+  ansible.builtin.get_url:
+    url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz
+    checksum: sha256:{{ prometheus_sha256 }}
+    dest: /tmp/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz
+    force: false
+    mode: "0755"
+  when: not _prometheus_stat.stat.exists

-  - name: Unpack prometheus binary
-    unarchive:
-      src: "/tmp/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz"
-      remote_src: true
-      dest: "/tmp"
-      creates: "/tmp/prometheus-{{ prometheus_version }}.linux-amd64/prometheus"
-    when: not _prometheus_stat.stat.exists
+- name: Unpack prometheus binary
+  ansible.builtin.unarchive:
+    src: /tmp/prometheus-{{ prometheus_version }}.linux-amd64.tar.gz
+    remote_src: true
+    dest: /tmp
+    creates: /tmp/prometheus-{{ prometheus_version }}.linux-amd64/prometheus
+  when: not _prometheus_stat.stat.exists

-  - name: Install prometheus binary
-    become: true
-    copy:
-      src: "/tmp/prometheus-{{ prometheus_version }}.linux-amd64/prometheus"
-      dest: "/usr/local/bin/prometheus"
-      remote_src: true
-      mode: 0755
-      owner: root
-      group: root
-    when: not _prometheus_stat.stat.exists
+- name: Install prometheus binary
+  become: true
+  ansible.builtin.copy:
+    src: /tmp/prometheus-{{ prometheus_version }}.linux-amd64/prometheus
+    dest: /usr/local/bin/prometheus
+    remote_src: true
+    mode: "0755"
+    owner: root
+    group: root
+  when: not _prometheus_stat.stat.exists

-  - name: Create prometheus group
-    become: true
-    group:
+- name: Create prometheus group
+  become: true
+  ansible.builtin.group:
     name: prometheus
     state: present
     system: true

-  - name: Create the prometheus user
-    become: true
-    user:
-      name: prometheus
-      groups: prometheus
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
+- name: Create the prometheus user
+  become: true
+  ansible.builtin.user:
+    name: prometheus
+    groups: prometheus
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /

-  - name: Ensure that promethus configuration directory exists
-    become: true
-    file:
-      path: /etc/prometheus
-      state: directory
-      mode: '0700'
-      owner: prometheus
-      group: prometheus
+- name: Ensure that prometheus configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/prometheus
+    state: directory
+    mode: "0700"
+    owner: prometheus
+    group: prometheus

-  - name: Ensure that promethus data directory exists
-    become: true
-    file:
-      path: /var/lib/prometheus
-      state: directory
-      mode: '0755'
-      owner: prometheus
-      group: prometheus
+- name: Ensure that prometheus data directory exists
+  become: true
+  ansible.builtin.file:
+    path: /var/lib/prometheus
+    state: directory
+    mode: "0755"
+    owner: prometheus
+    group: prometheus

-  - name: Copy prometheus configuration
-    become: true
-    template:
-      src: prometheus.yaml
-      dest: /etc/prometheus/prometheus.yaml
-      mode: '0600'
-      owner: prometheus
-      group: prometheus
-    register: _prometheus_configuration
+- name: Copy prometheus configuration
+  become: true
+  ansible.builtin.template:
+    src: prometheus.yaml
+    dest: /etc/prometheus/prometheus.yaml
+    mode: "0600"
+    owner: prometheus
+    group: prometheus
+  register: _prometheus_configuration

-  - name: Copy the prometheus systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Prometheus
-        After=network-online.target
+- name: Copy the prometheus systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Prometheus
+      After=network-online.target

-        [Service]
-        Type=simple
-        User=prometheus
-        Group=prometheus
-        ExecReload=/bin/kill -HUP $MAINPID
-        ExecStart=/usr/local/bin/prometheus --config.file=/etc/prometheus/prometheus.yaml --storage.tsdb.path=/var/lib/prometheus --web.listen-address=:{{ prometheus_port }}
-        CapabilityBoundingSet=CAP_SET_UID
-        LimitNOFILE=65000
-        LockPersonality=true
-        NoNewPrivileges=true
-        MemoryDenyWriteExecute=true
-        PrivateDevices=true
-        PrivateTmp=true
-        ProtectHome=true
-        RemoveIPC=true
-        RestrictSUIDSGID=true
-        PrivateUsers=true
-        ProtectControlGroups=true
-        ProtectKernelModules=true
-        ProtectKernelTunables=true
-        ProtectSystem=strict
-        SyslogIdentifier=prometheus
-        Restart=always
-        ReadWritePaths=/var/lib/prometheus
+      [Service]
+      Type=simple
+      User=prometheus
+      Group=prometheus
+      ExecReload=/bin/kill -HUP $MAINPID
+      ExecStart=/usr/local/bin/prometheus --config.file=/etc/prometheus/prometheus.yaml --storage.tsdb.path=/var/lib/prometheus --web.listen-address=:{{ prometheus_port }}
+      CapabilityBoundingSet=CAP_SET_UID
+      LimitNOFILE=65000
+      LockPersonality=true
+      NoNewPrivileges=true
+      MemoryDenyWriteExecute=true
+      PrivateDevices=true
+      PrivateTmp=true
+      ProtectHome=true
+      RemoveIPC=true
+      RestrictSUIDSGID=true
+      PrivateUsers=true
+      ProtectControlGroups=true
+      ProtectKernelModules=true
+      ProtectKernelTunables=true
+      ProtectSystem=strict
+      SyslogIdentifier=prometheus
+      Restart=always
+      ReadWritePaths=/var/lib/prometheus

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/prometheus.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _prometheus_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/prometheus.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _prometheus_service

-  - name: Ensure Prometheus is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: prometheus
-      enabled: true
-      state: restarted
-    when: _prometheus_service.changed or _prometheus_configuration.changed
+- name: Ensure Prometheus is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: prometheus
+    enabled: true
+    state: restarted
+  when: _prometheus_service.changed or _prometheus_configuration.changed
diff --git a/roles/slurm/defaults/main.yml b/roles/slurm/defaults/main.yml
index 131d0d6e..5fa83f95 100644
--- a/roles/slurm/defaults/main.yml
+++ b/roles/slurm/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 munge_enabled: false
 munge_key: eC36WeTj1JKUGyQEcfqkaRO0fDBoyTVHnkn7kE5sOZ1YUYyqWMSp3qeRZEmtEX9B

@@ -31,7 +32,7 @@ slurmdbd_config:
   LogFile: /var/log/slurm/slurmdbd.log
   PidFile: /var/lib/slurm/slurmdbd.pid

-golang_version: "1.15.6"
+golang_version: 1.15.6
 golang_sha256: 3918e6cc85e7eaaa6f859f1bdbaac772e7a825b0eb423c63d3ae68b21f84b844

 slurm_exporter_enabled: false
@@ -39,7 +40,6 @@ slurm_exporter_version: "0.15"
 slurm_exporter_sha256: 77893b595093427c6d9be12af8b340d1a22c0017855d570b3d422338853b855c
 slurm_exporter_port: "9341"

-
 # role: mysql
 mysql_databases:
   - slurm
@@ -48,7 +48,7 @@ mysql_databases:
 mysql_users:
   - username: slurm
     password: W9T0N4ejQBR4RmQCU6GmbbZa
-    privileges: '*.*:ALL'
+    privileges: "*.*:ALL"
   - username: conda-store
     password: eIbmUditL4RbQm0YPeLozRme
-    privileges: '*.*:ALL'
+    privileges: "*.*:ALL"
diff --git a/roles/slurm/tasks/golang.yaml b/roles/slurm/tasks/golang.yaml
index 209a0ff9..b20c3580 100644
--- a/roles/slurm/tasks/golang.yaml
+++ b/roles/slurm/tasks/golang.yaml
@@ -1,32 +1,32 @@
 ---
-  - name: Check that the go binary exists
-    stat:
-      path: "/usr/local/bin/go"
-    register: _golang_stat
+- name: Check that the go binary exists
+  ansible.builtin.stat:
+    path: /opt/go/bin/go
+  register: _golang_stat

-  - name: Download go tarball to local folder
-    get_url:
-      url: "https://golang.org/dl/go{{ golang_version }}.linux-amd64.tar.gz"
-      checksum: "sha256:{{ golang_sha256 }}"
-      dest: "/tmp/golang-{{ golang_version }}.tar.gz"
-      force: false
-      mode: 0755
-    when: not _golang_stat.stat.exists
+- name: Download go tarball to local folder
+  ansible.builtin.get_url:
+    url: https://golang.org/dl/go{{ golang_version }}.linux-amd64.tar.gz
+    checksum: sha256:{{ golang_sha256 }}
+    dest: /tmp/golang-{{ golang_version }}.tar.gz
+    force: false
+    mode: "0755"
+  when: not _golang_stat.stat.exists

-  - name: Unpack golang
-    become: true
-    unarchive:
-      src: "/tmp/golang-{{ golang_version }}.tar.gz"
-      remote_src: true
-      dest: "/opt"
-      creates: "/opt/go/bin/go"
-    when: not _golang_stat.stat.exists
+- name: Unpack golang
+  become: true
+  ansible.builtin.unarchive:
+    src: /tmp/golang-{{ golang_version }}.tar.gz
+    remote_src: true
+    dest: /opt
+    creates: /opt/go/bin/go
+  when: not _golang_stat.stat.exists

-  - name: Set golang to user path
-    become: true
-    copy:
-      content: |
-        export PATH=/opt/go/bin:$PATH
-      dest: /etc/profile.d/golang.sh
-      mode: '0755'
-    when: not _golang_stat.stat.exists
+- name: Set golang to user path
+  become: true
+  ansible.builtin.copy:
+    content: |
+      export PATH=/opt/go/bin:$PATH
+    dest: /etc/profile.d/golang.sh
+    mode: "0755"
+  when: not _golang_stat.stat.exists
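A hedged follow-up task can confirm the toolchain without depending on a login shell picking up /etc/profile.d/golang.sh (the binary path comes from the unarchive destination above):

- name: Report the installed go version (sketch)
  ansible.builtin.command: /opt/go/bin/go version
  register: _go_version
  changed_when: false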
diff --git a/roles/slurm/tasks/main.yaml b/roles/slurm/tasks/main.yaml
index 9da94d8e..091f8f11 100644
--- a/roles/slurm/tasks/main.yaml
+++ b/roles/slurm/tasks/main.yaml
@@ -1,62 +1,62 @@
 ---
-  - name: Install munge
-    include_tasks: munge.yaml
-    when: munge_enabled
-
-  - name: Install slurm client packages
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - slurm-client
-        - slurm-wlm-doc
-
-  - name: ensure that slurm configuration directory exists
-    become: true
-    file:
-      path: /etc/slurm
-      state: directory
-      mode: '0755'
-      owner: root
-      group: root
-
-  - name: install slurm.conf
-    become: true
-    template:
-      src: "templates/slurm.conf"
-      dest: "/etc/slurm/slurm.conf"
-      owner: root
-      group: root
-      mode: 0444
-    register: _slurm_config
-
-  - name: Install extra execution host configs
-    become: true
-    copy:
-      content: |
-        CgroupAutomount=yes
-        ConstrainCores=yes
-        ConstrainRAMSpace=yes
-        ConstrainSwapSpace=yes
-      dest: "/etc/slurm/cgroup.conf"
-      owner: root
-      group: root
-      mode: 0444
-    register: _slurm_cgroup_config
-
-  - name: Install slurmdbd
-    include_tasks: slurmdbd.yaml
-    when: slurmdbd_enabled
-
-  - name: Install slurmctld
-    include_tasks: slurmctld.yaml
-    when: slurmctld_enabled
-
-  - name: Install slurmd
-    include_tasks: slurmd.yaml
-    when: slurmd_enabled
-
-  - name: Install slurm exporter
-    include_tasks: slurm_exporter.yaml
-    when: slurm_exporter_enabled
+- name: Install munge
+  ansible.builtin.include_tasks: munge.yaml
+  when: munge_enabled
+
+- name: Install slurm client packages
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - slurm-client
+      - slurm-wlm-doc
+
+- name: Ensure that slurm configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/slurm
+    state: directory
+    mode: "0755"
+    owner: root
+    group: root
+
+- name: Install slurm.conf
+  become: true
+  ansible.builtin.template:
+    src: templates/slurm.conf
+    dest: /etc/slurm/slurm.conf
+    owner: root
+    group: root
+    mode: "0444"
+  register: _slurm_config
+
+- name: Install extra execution host configs
+  become: true
+  ansible.builtin.copy:
+    content: |
+      CgroupAutomount=yes
+      ConstrainCores=yes
+      ConstrainRAMSpace=yes
+      ConstrainSwapSpace=yes
+    dest: /etc/slurm/cgroup.conf
+    owner: root
+    group: root
+    mode: "0444"
+  register: _slurm_cgroup_config
+
+- name: Install slurmdbd
+  ansible.builtin.include_tasks: slurmdbd.yaml
+  when: slurmdbd_enabled
+
+- name: Install slurmctld
+  ansible.builtin.include_tasks: slurmctld.yaml
+  when: slurmctld_enabled
+
+- name: Install slurmd
+  ansible.builtin.include_tasks: slurmd.yaml
+  when: slurmd_enabled
+
+- name: Install slurm exporter
+  ansible.builtin.include_tasks: slurm_exporter.yaml
+  when: slurm_exporter_enabled
diff --git a/roles/slurm/tasks/munge.yaml b/roles/slurm/tasks/munge.yaml
index 23efd8f0..63d89761 100644
--- a/roles/slurm/tasks/munge.yaml
+++ b/roles/slurm/tasks/munge.yaml
@@ -1,52 +1,52 @@
 ---
-  # https://stackoverflow.com/questions/57079707/slurm-and-munge-invalid-credential
-  - name: Ensure munge gid is fixed
-    become: true
-    group:
-      name: munge
-      gid: 151
+# https://stackoverflow.com/questions/57079707/slurm-and-munge-invalid-credential
+- name: Ensure munge gid is fixed
+  become: true
+  ansible.builtin.group:
+    name: munge
+    gid: 151

-  - name: Ensure munge uid fixed
-    become: true
-    user:
-      name: munge
-      groups: munge
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
-      uid: 150
+- name: Ensure munge uid fixed
+  become: true
+  ansible.builtin.user:
+    name: munge
+    groups: munge
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /
+    uid: 150

-  - name: Check munge directory
-    become: yes
-    file:
-      path: /etc/munge
-      owner: munge
-      group: munge
-      mode: 0700
-      state: directory
+- name: Check munge directory
+  become: true
+  ansible.builtin.file:
+    path: /etc/munge
+    owner: munge
+    group: munge
+    mode: "0700"
+    state: directory

-  - name: Install munge key
-    become: yes
-    copy:
-      content: "{{ munge_key }}"
-      dest: /etc/munge/munge.key
-      owner: munge
-      group: munge
-      mode: 0400
+- name: Install munge key
+  become: true
+  ansible.builtin.copy:
+    content: "{{ munge_key }}"
+    dest: /etc/munge/munge.key
+    owner: munge
+    group: munge
+    mode: "0400"

-  - name: Install munge controller packages
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - munge
+- name: Install munge controller packages
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - munge

-  - name: Ensure Munge is enabled and running
-    become: yes
-    service:
-      name: munge
-      enabled: true
-      state: started
+- name: Ensure Munge is enabled and running
+  become: true
+  ansible.builtin.service:
+    name: munge
+    enabled: true
+    state: started
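The munge_key must be byte-identical on every node, so it should be minted once and distributed through group_vars; a hedged way to generate a fresh 64-character key matching the format of the example key (the lookup cache path is invented):

munge_key: "{{ lookup('ansible.builtin.password', '/tmp/munge_key length=64 chars=ascii_letters,digits') }}"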
diff --git a/roles/slurm/tasks/slurm_exporter.yaml b/roles/slurm/tasks/slurm_exporter.yaml
index 7a2607dd..dd595cc9 100644
--- a/roles/slurm/tasks/slurm_exporter.yaml
+++ b/roles/slurm/tasks/slurm_exporter.yaml
@@ -1,95 +1,94 @@
 ---
-  - name: Install golang
-    include_tasks: golang.yaml
+- name: Install golang
+  ansible.builtin.include_tasks: golang.yaml

+- name: Check that the slurm exporter binary exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/prometheus_slurm_exporter
+  register: _slurm_exporter_stat

-  - name: Check that the slurm exporter binary exists
-    stat:
-      path: "/usr/local/bin/prometheus_slurm_exporter"
-    register: _slurm_exporter_stat
+- name: Download prometheus-slurm-exporter tarball to local folder
+  ansible.builtin.get_url:
+    url: https://github.com/vpenso/prometheus-slurm-exporter/archive/{{ slurm_exporter_version }}.tar.gz
+    checksum: sha256:{{ slurm_exporter_sha256 }}
+    dest: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}.tar.gz
+    force: false
+    mode: "0755"
+  when: not _slurm_exporter_stat.stat.exists

-  - name: Download prometheus-slurm-exporter tarball to local folder
-    get_url:
-      url: "https://github.com/vpenso/prometheus-slurm-exporter/archive/{{ slurm_exporter_version }}.tar.gz"
-      checksum: "sha256:{{ slurm_exporter_sha256 }}"
-      dest: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}.tar.gz"
-      force: false
-      mode: '0755'
-    when: not _slurm_exporter_stat.stat.exists
+- name: Unpack prometheus slurm exporter
+  ansible.builtin.unarchive:
+    src: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}.tar.gz
+    remote_src: true
+    dest: /tmp
+    creates: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/main.go
+  when: not _slurm_exporter_stat.stat.exists

-  - name: Unpack prometheus slurm exporter
-    unarchive:
-      src: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}.tar.gz"
-      remote_src: true
-      dest: "/tmp"
-      creates: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/main.go"
-    when: not _slurm_exporter_stat.stat.exists
+- name: Build prometheus_slurm_exporter
+  ansible.builtin.shell:
+    chdir: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/
+    cmd: |
+      export GOPATH=/tmp/
+      /opt/go/bin/go mod download
+      /opt/go/bin/go build -o bin/prometheus_slurm_exporter main.go accounts.go cpus.go nodes.go partitions.go queue.go scheduler.go users.go
+    creates: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/bin/prometheus_slurm_exporter
+  when: not _slurm_exporter_stat.stat.exists

-  - name: Build prometheus_slurm_exporter
-    shell:
-      chdir: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/"
-      cmd: |
-        export GOPATH=/tmp/
-        /opt/go/bin/go mod download
-        /opt/go/bin/go build -o bin/prometheus_slurm_exporter main.go accounts.go cpus.go nodes.go partitions.go queue.go scheduler.go users.go
-      creates: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/bin/prometheus_slurm_exporter"
-    when: not _slurm_exporter_stat.stat.exists
+- name: Install prometheus_slurm_exporter binary
+  become: true
+  ansible.builtin.copy:
+    src: /tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/bin/prometheus_slurm_exporter
+    dest: /usr/local/bin/prometheus_slurm_exporter
+    remote_src: true
+    mode: "0755"
+    owner: root
+    group: root
+  when: not _slurm_exporter_stat.stat.exists

-  - name: Install prometheus_slurm_exporter binary
-    become: true
-    copy:
-      src: "/tmp/prometheus-slurm-exporter-{{ slurm_exporter_version }}/bin/prometheus_slurm_exporter"
-      dest: "/usr/local/bin/prometheus_slurm_exporter"
-      remote_src: true
-      mode: '0755'
-      owner: root
-      group: root
-    when: not _slurm_exporter_stat.stat.exists
-
-  - name: Create prometheus_slurm_exporter group
-    become: true
-    group:
+- name: Create prometheus_slurm_exporter group
+  become: true
+  ansible.builtin.group:
     name: prometheus_slurm_exporter
     state: present
     system: true

-  - name: Create the prometheus_slurm_exporter user
-    become: true
-    user:
-      name: prometheus_slurm_exporter
-      groups: prometheus_slurm_exporter
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
+- name: Create the prometheus_slurm_exporter user
+  become: true
+  ansible.builtin.user:
+    name: prometheus_slurm_exporter
+    groups: prometheus_slurm_exporter
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /

-  - name: Copy the prometheus_slurm_exporter systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Prometheus SLURM Exporter
+- name: Copy the prometheus_slurm_exporter systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Prometheus SLURM Exporter

-        [Service]
-        User=prometheus_slurm_exporter
-        Group=prometheus_slurm_exporter
-        ExecStart=/usr/local/bin/prometheus_slurm_exporter -listen-address :{{ slurm_exporter_port }}
-        Restart=always
-        RestartSec=15
+      [Service]
+      User=prometheus_slurm_exporter
+      Group=prometheus_slurm_exporter
+      ExecStart=/usr/local/bin/prometheus_slurm_exporter -listen-address :{{ slurm_exporter_port }}
+      Restart=always
+      RestartSec=15

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/prometheus_slurm_exporter.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _slurm_exporter_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/prometheus_slurm_exporter.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _slurm_exporter_service

-  - name: Ensure prometheus_slurm_exporter is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: prometheus_slurm_exporter
-      enabled: true
-      state: restarted
-    when: _slurm_exporter_service.changed
+- name: Ensure prometheus_slurm_exporter is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: prometheus_slurm_exporter
+    enabled: true
+    state: restarted
+  when: _slurm_exporter_service.changed
diff --git a/roles/slurm/tasks/slurmctld.yaml b/roles/slurm/tasks/slurmctld.yaml
index c9d0229e..0eae5e4a 100644
--- a/roles/slurm/tasks/slurmctld.yaml
+++ b/roles/slurm/tasks/slurmctld.yaml
@@ -1,71 +1,71 @@
 ---
-  - name: Ensure slurm state directory exists
-    become: true
-    file:
-      path: "{{ slurm_config.StateSaveLocation }}"
-      state: directory
-      mode: '0700'
-      owner: slurm
-      group: slurm
+- name: Ensure slurm state directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.StateSaveLocation }}"
+    state: directory
+    mode: "0700"
+    owner: slurm
+    group: slurm

-  - name: Ensure slurm log directory exists
-    become: true
-    file:
-      path: "{{ slurm_config.SlurmctldLogFile | dirname }}"
-      state: directory
-      mode: '0700'
-      owner: slurm
-      group: slurm
+- name: Ensure slurm log directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.SlurmctldLogFile | dirname }}"
+    state: directory
+    mode: "0700"
+    owner: slurm
+    group: slurm

-  - name: Ensure slurm pid directory exists
-    become: true
-    file:
-      path: "{{ slurm_config.SlurmctldPidFile | dirname }}"
-      state: directory
-      mode: '0755'
-      owner: slurm
-      group: slurm
+- name: Ensure slurm pid directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.SlurmctldPidFile | dirname }}"
+    state: directory
+    mode: "0755"
+    owner: slurm
+    group: slurm

-  - name: Copy the slurmctl systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Slurm controller daemon
-        After=network.target munge.service
-        ConditionPathExists=/etc/slurm/slurm.conf
+- name: Copy the slurmctld systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Slurm controller daemon
+      After=network.target munge.service
+      ConditionPathExists=/etc/slurm/slurm.conf

-        [Service]
-        Type=forking
-        User=root
-        ExecStart=/usr/sbin/slurmctld
-        ExecReload=/bin/kill -HUN $MAINPID
-        PIDFile={{ slurm_config.SlurmctldPidFile }}
-        LimitNOFILE=65536
-        TasksMax=infinity
+      [Service]
+      Type=forking
+      User=root
+      ExecStart=/usr/sbin/slurmctld
+      ExecReload=/bin/kill -HUP $MAINPID
+      PIDFile={{ slurm_config.SlurmctldPidFile }}
+      LimitNOFILE=65536
+      TasksMax=infinity

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/slurmctld.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _slurmctld_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/slurmctld.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _slurmctld_service

-  - name: Install slurm controller packages
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - slurmctld
-    register: _slurmctld_package
+- name: Install slurm controller packages
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - slurmctld
+  register: _slurmctld_package

-  - name: Ensure slurmctld is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: slurmctld
-      enabled: true
-      state: restarted
-    when: _slurm_config.changed or _slurm_cgroup_config.changed or _slurmctld_service.changed or _slurmctld_package.changed
+- name: Ensure slurmctld is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: slurmctld
+    enabled: true
+    state: restarted
+  when: _slurm_config.changed or _slurm_cgroup_config.changed or _slurmctld_service.changed or _slurmctld_package.changed
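Once slurmctld restarts cleanly, a hedged smoke test can run from any host with the slurm-client package installed by main.yaml:

- name: Verify the slurm controller responds (sketch)
  ansible.builtin.command: sinfo --noheader
  register: _sinfo
  changed_when: false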
diff --git a/roles/slurm/tasks/slurmd.yaml b/roles/slurm/tasks/slurmd.yaml
index 7a52f2d2..7105c146 100644
--- a/roles/slurm/tasks/slurmd.yaml
+++ b/roles/slurm/tasks/slurmd.yaml
@@ -1,74 +1,74 @@
 ---
-  - name: Create slurm spool directory
-    become: true
-    file:
-      path: "{{ slurm_config.SlurmdSpoolDir }}"
-      owner: root
-      group: root
-      mode: 0755
-      state: directory
+- name: Create slurm spool directory
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.SlurmdSpoolDir }}"
+    owner: root
+    group: root
+    mode: "0755"
+    state: directory

-  - name: Create slurm log directory
-    become: true
-    file:
-      path: "{{ slurm_config.SlurmdLogFile | dirname }}"
-      owner: slurm
-      group: slurm
-      mode: 0755
-      state: directory
+- name: Create slurm log directory
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.SlurmdLogFile | dirname }}"
+    owner: slurm
+    group: slurm
+    mode: "0755"
+    state: directory

-  - name: Ensure slurm pid directory exists
-    become: true
-    file:
-      path: "{{ slurm_config.SlurmdPidFile | dirname }}"
-      state: directory
-      mode: '0755'
-      owner: slurm
-      group: slurm
+- name: Ensure slurm pid directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurm_config.SlurmdPidFile | dirname }}"
+    state: directory
+    mode: "0755"
+    owner: slurm
+    group: slurm

-  - name: Copy the slurmctl systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Slurm node daemon
-        After=network.target munge.service remote-fs.target
-        ConditionPathExists=/etc/slurm/slurm.conf
+- name: Copy the slurmd systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Slurm node daemon
+      After=network.target munge.service remote-fs.target
+      ConditionPathExists=/etc/slurm/slurm.conf

-        [Service]
-        Type=forking
-        User=root
-        ExecStart=/usr/sbin/slurmd
-        ExecReload=/bin/kill -HUN $MAINPID
-        PIDFile={{ slurm_config.SlurmdPidFile }}
-        KillMode=process
-        LimitNOFILE=131072
-        LimitMEMLOCK=infinity
-        LimitSTACK=infinity
-        Delegate=yes
-        TasksMax=infinity
+      [Service]
+      Type=forking
+      User=root
+      ExecStart=/usr/sbin/slurmd
+      ExecReload=/bin/kill -HUP $MAINPID
+      PIDFile={{ slurm_config.SlurmdPidFile }}
+      KillMode=process
+      LimitNOFILE=131072
+      LimitMEMLOCK=infinity
+      LimitSTACK=infinity
+      Delegate=yes
+      TasksMax=infinity

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/slurmd.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _slurmd_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/slurmd.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _slurmd_service

-  - name: Install Slurmd execution host packages
-    become: true
-    package:
-      name: slurmd
-      cache_valid_time: 3600
-      state: present
-    register: _slurmd_package
+- name: Install Slurmd execution host packages
+  become: true
+  ansible.builtin.package:
+    name: slurmd
+    cache_valid_time: 3600
+    state: present
+  register: _slurmd_package

-  - name: Ensure slurmd is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: slurmd
-      enabled: true
-      state: restarted
-    when: _slurm_config.changed or _slurm_cgroup_config.changed or _slurmd_service.changed or _slurmd_package.changed
+- name: Ensure slurmd is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: slurmd
+    enabled: true
+    state: restarted
+  when: _slurm_config.changed or _slurm_cgroup_config.changed or _slurmd_service.changed or _slurmd_package.changed
diff --git a/roles/slurm/tasks/slurmdbd.yaml b/roles/slurm/tasks/slurmdbd.yaml
index 0f7f4af2..3cf1f7e7 100644
--- a/roles/slurm/tasks/slurmdbd.yaml
+++ b/roles/slurm/tasks/slurmdbd.yaml
@@ -1,72 +1,72 @@
 ---
-  - name: Ensure slurmdbd log directory exists
-    become: true
-    file:
-      path: "{{ slurmdbd_config.LogFile | dirname }}"
-      state: directory
-      mode: '0700'
-      owner: slurm
-      group: slurm
+- name: Ensure slurmdbd log directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurmdbd_config.LogFile | dirname }}"
+    state: directory
+    mode: "0700"
+    owner: slurm
+    group: slurm

-  - name: Ensure slurm pid directory exists
-    become: true
-    file:
-      path: "{{ slurmdbd_config.PidFile | dirname }}"
-      state: directory
-      mode: '0755'
-      owner: slurm
-      group: slurm
+- name: Ensure slurm pid directory exists
+  become: true
+  ansible.builtin.file:
+    path: "{{ slurmdbd_config.PidFile | dirname }}"
+    state: directory
+    mode: "0755"
+    owner: slurm
+    group: slurm

-  - name: install slurmdbd.conf
-    become: true
-    template:
-      src: "templates/slurmdbd.conf"
-      dest: "/etc/slurm/slurmdbd.conf"
-      owner: slurm
-      group: slurm
-      mode: 0600
-    register: _slurmdbd_config
+- name: Install slurmdbd.conf
+  become: true
+  ansible.builtin.template:
+    src: templates/slurmdbd.conf
+    dest: /etc/slurm/slurmdbd.conf
+    owner: slurm
+    group: slurm
+    mode: "0600"
+  register: _slurmdbd_config

-  - name: Copy the slurmdbd systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=Slurm DBD accounting daemon
-        After=network.target munge.service
-        ConditionPathExists=/etc/slurm/slurmdbd.conf
+- name: Copy the slurmdbd systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=Slurm DBD accounting daemon
+      After=network.target munge.service
+      ConditionPathExists=/etc/slurm/slurmdbd.conf

-        [Service]
-        Type=forking
-        User=slurm
-        ExecStart=/usr/sbin/slurmdbd
-        ExecReload=/bin/kill -HUN $MAINPID
-        PIDFile={{ slurmdbd_config.PidFile }}
-        LimitNOFILE=65536
-        TasksMax=infinity
+      [Service]
+      Type=forking
+      User=slurm
+      ExecStart=/usr/sbin/slurmdbd
+      ExecReload=/bin/kill -HUP $MAINPID
+      PIDFile={{ slurmdbd_config.PidFile }}
+      LimitNOFILE=65536
+      TasksMax=infinity

-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/slurmdbd.service
-      owner: root
-      group: root
-      mode: 0644
-    register: _slurmdbd_service
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/slurmdbd.service
+    owner: root
+    group: root
+    mode: "0644"
+  register: _slurmdbd_service

-  - name: Install slurm controller packages
-    become: true
-    apt:
-      state: latest
-      cache_valid_time: 3600
-      name:
-        - slurmdbd
-    register: _slurmdbd_package
+- name: Install slurmdbd packages
+  become: true
+  ansible.builtin.apt:
+    state: latest
+    cache_valid_time: 3600
+    name:
+      - slurmdbd
+  register: _slurmdbd_package

-  - name: Ensure slurmdbd is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: slurmdbd
-      enabled: true
-      state: restarted
-    when: _slurmdbd_config.changed or _slurmdbd_service.changed or _slurmdbd_package.changed
+- name: Ensure slurmdbd is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: slurmdbd
+    enabled: true
+    state: restarted
+  when: _slurmdbd_config.changed or _slurmdbd_service.changed or _slurmdbd_package.changed
diff --git a/roles/traefik/defaults/main.yml b/roles/traefik/defaults/main.yml
index 2701b97d..84dfc691 100644
--- a/roles/traefik/defaults/main.yml
+++ b/roles/traefik/defaults/main.yml
@@ -1,9 +1,10 @@
+---
 traefik_enabled: false

-traefik_version: "2.4.0"
+traefik_version: 2.4.0
 traefik_sha256: 1e8cd14a0c09ba44ba8b44a94441caaf1edc3e043a65a7d43f9c870be47f70aa
 traefik_metrics_port: "8082"

-traefik_tls_type: "self-signed"
+traefik_tls_type: self-signed
 # traefik_domain:
 # traefik_letsencrypt_email: # required if type == "letsencrypt"
 # traefik_tls_certificate:
@@ -11,13 +12,13 @@ traefik_tls_type: "self-signed"

 # role: jupyterhub
 jupyterhub_proxy_port: "15002"
-jupyterhub_base_url: "/"
+jupyterhub_base_url: /
 jupyterhub_ssh_internal_port: "8021"
 jupyterhub_ssh_external_port: "8022"

 # role: grafana
 grafana_port: "3000"
-grafana_base_url: "/monitoring"
+grafana_base_url: /monitoring

 # role: dask_gateway
 dask_gateway_api_port: "8010"
@@ -28,7 +29,7 @@ dask_gateway_scheduler_external_port: "8786"
 keycloak_port: "30020"

 # role: conda_store
-conda_store_prefix: "/conda-store"
+conda_store_prefix: /conda-store
 conda_store_port: "5000"

 # role: minio
diff --git a/roles/traefik/handlers/main.yaml b/roles/traefik/handlers/main.yaml
index 08a90b2b..2b111a1a 100644
--- a/roles/traefik/handlers/main.yaml
+++ b/roles/traefik/handlers/main.yaml
@@ -1,9 +1,9 @@
 ---
-  - name: "restart services traefik"
-    become: true
-    service:
-      name={{item}}
-      enabled=yes
-      state=restarted
-    with_items:
-      - "traefik"
+- name: Restart services traefik
+  become: true
+  ansible.builtin.service:
+    name: "{{ item }}"
+    enabled: true
+    state: restarted
+  with_items:
+    - traefik
diff --git a/roles/traefik/tasks/main.yaml b/roles/traefik/tasks/main.yaml
index 4c16567b..5d090dec 100644
--- a/roles/traefik/tasks/main.yaml
+++ b/roles/traefik/tasks/main.yaml
@@ -1,4 +1,4 @@
 ---
-  - name: Install traefik
-    include_tasks: traefik.yaml
-    when: traefik_enabled
+- name: Install traefik
+  ansible.builtin.include_tasks: traefik.yaml
+  when: traefik_enabled
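The commented traefik_domain / traefik_letsencrypt_email defaults above are the switch from self-signed to real certificates; a hedged group_vars sketch (domain and email are invented):

traefik_tls_type: letsencrypt
traefik_domain: hpc.example.com
traefik_letsencrypt_email: ops@example.com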
diff --git a/roles/traefik/tasks/traefik.yaml b/roles/traefik/tasks/traefik.yaml
index c9c76969..48329849 100644
--- a/roles/traefik/tasks/traefik.yaml
+++ b/roles/traefik/tasks/traefik.yaml
@@ -1,218 +1,206 @@
 ---
-  - name: Check that the traefik binary exists
-    stat:
-      path: "/usr/local/bin/traefik"
-    register: _traefik_stat
-
-  - name: Download traefik binary
-    become: true
-    get_url:
-      url: "https://github.com/traefik/traefik/releases/download/v{{ traefik_version }}/traefik_v{{ traefik_version }}_linux_amd64.tar.gz"
-      checksum: "sha256:{{ traefik_sha256 }}"
-      dest: "/tmp/traefik_v{{ traefik_version }}_linux_amd64.tar.gz"
-      force: false
-      mode: 0755
-    when: not _traefik_stat.stat.exists
-
-  - name: Unpack traefik binary
-    unarchive:
-      src: "/tmp/traefik_v{{ traefik_version }}_linux_amd64.tar.gz"
-      remote_src: true
-      dest: "/tmp"
-      creates: "/tmp/traefik"
-    when: not _traefik_stat.stat.exists
-
-  - name: Install traefik binary
-    become: true
-    copy:
-      src: "/tmp/traefik"
-      dest: "/usr/local/bin/traefik"
-      remote_src: true
-      mode: 0755
-      owner: root
-      group: root
-    when: not _traefik_stat.stat.exists
-
-
-  - name: Create traefik group
-    become: true
-    group:
-      name: traefik
-      state: present
-      system: true
-
-
-  - name: Create the traefik user
-    become: true
-    user:
-      name: traefik
-      groups: traefik
-      append: true
-      shell: /usr/sbin/nologin
-      system: true
-      create_home: false
-      home: /
-
-  - name: Ensure that traefik configuration directory exists
-    become: true
-    file:
-      path: /etc/traefik
-      state: directory
-      mode: '0700'
-      owner: traefik
-      group: traefik
-
-
-  - name: Ensure that traefik acme configuration directory exists
-    become: true
-    file:
-      path: /etc/traefik/acme
-      state: directory
-      mode: '0700'
-      owner: traefik
-      group: traefik
-
-
-  - name: Ensure that traefik certs configuration directory exists
-    become: true
-    file:
-      path: /etc/traefik/certs
-      state: directory
-      mode: '0700'
-      owner: traefik
-      group: traefik
-    when: traefik_tls_certificate is defined
-
-
-  - name: Copy TLS certificate if provided
-    become: true
-    copy:
-      src: "{{ traefik_tls_certificate }}"
-      dest: /etc/traefik/certs/{{ traefik_tls_certificate | basename }}
-      remote_src: "{{ traefik_tls_certificate_remote_src | default(false) }}"
-      mode: '444'
-      owner: traefik
-      group: traefik
-    when: traefik_tls_certificate is defined
-    notify: restart services traefik
-    register: _traefik_tls_certificate
-
-
-  - name: Copy TLS key if provided
-    become: true
-    copy:
-      src: "{{ traefik_tls_key }}"
-      dest: /etc/traefik/certs/{{ traefik_tls_key | basename }}
-      remote_src: "{{ traefik_tls_key_remote_src | default(false) }}"
-      mode: '0400'
-      owner: traefik
-      group: traefik
-    when: traefik_tls_key is defined
-    notify: restart services traefik
-    register: _traefik_tls_key
-
-
-  - name: Copy traefik configuration
-    become: true
-    template:
-      src: templates/traefik.yaml
-      dest: /etc/traefik/traefik.yaml
-      mode: '0600'
-      owner: traefik
-      group: traefik
-    notify: restart services traefik
-
-  - name: Copy traefik dynamic configuration
-    become: true
-    template:
-      src: templates/traefik_dynamic.yaml
-      dest: /etc/traefik/traefik_dynamic.yaml
-      mode: '0600'
-      owner: traefik
-      group: traefik
-    notify: restart services traefik
-
-  - name: Copy the traefik systemd service file
-    become: true
-    copy:
-      content: |
-        [Unit]
-        Description=traefik proxy
-        After=network-online.target
-        Wants=network-online.target systemd-networkd-wait-online.service
-
-        [Service]
-        Restart=always
-        User=traefik
-        Group=traefik
-        ExecStart=/usr/local/bin/traefik --configfile=/etc/traefik/traefik.yaml
-        LimitNOFILE=1048576
-        PrivateTmp=true
-        PrivateDevices=false
-        ProtectHome=true
-        ProtectSystem=full
-        ReadWriteDirectories=/etc/traefik/acme
-        CapabilityBoundingSet=CAP_NET_BIND_SERVICE
-        AmbientCapabilities=CAP_NET_BIND_SERVICE
-        NoNewPrivileges=true
-
-        [Install]
-        WantedBy=multi-user.target
-      dest: /etc/systemd/system/traefik.service
-      owner: root
-      group: root
-      mode: 0644
-    notify: restart services traefik
-
-
-  - name: Ensure Traefik is enabled on boot
-    become: true
-    systemd:
-      daemon_reload: true
-      name: traefik
-      enabled: true
-      state: started
-
-
-  - name: Allow traefik http through firewall
-    become: true
-    community.general.ufw:
-      rule: allow
-      port: "80"
-      proto: tcp
-    when: firewall_enabled
-
-
-  - name: Allow traefik https through firewall
-    become: true
-    community.general.ufw:
-      rule: allow
-      port: "443"
-      proto: tcp
-    when: firewall_enabled
-
-
-  - name: Allow jupyterhub-ssh through firewall
-    become: true
-    community.general.ufw:
-      rule: allow
-      port: "{{ jupyterhub_ssh_external_port }}"
-      proto: tcp
-    when: firewall_enabled
-
-
-  - name: Allow dask-gateway-scheduler through firewall
-    become: true
-    community.general.ufw:
-      rule: allow
-      port: "{{ dask_gateway_scheduler_external_port }}"
-      proto: tcp
-    when: firewall_enabled
-
-  - name: Allow minio through firewall
-    become: true
-    community.general.ufw:
-      rule: allow
-      port: "{{ minio_external_port }}"
-      proto: tcp
-    when: firewall_enabled
+- name: Check that the traefik binary exists
+  ansible.builtin.stat:
+    path: /usr/local/bin/traefik
+  register: _traefik_stat
+
+- name: Download traefik binary
+  become: true
+  ansible.builtin.get_url:
+    url: https://github.com/traefik/traefik/releases/download/v{{ traefik_version }}/traefik_v{{ traefik_version }}_linux_amd64.tar.gz
+    checksum: sha256:{{ traefik_sha256 }}
+    dest: /tmp/traefik_v{{ traefik_version }}_linux_amd64.tar.gz
+    force: false
+    mode: "0755"
+  when: not _traefik_stat.stat.exists
+
+- name: Unpack traefik binary
+  ansible.builtin.unarchive:
+    src: /tmp/traefik_v{{ traefik_version }}_linux_amd64.tar.gz
+    remote_src: true
+    dest: /tmp
+    creates: /tmp/traefik
+  when: not _traefik_stat.stat.exists
+
+- name: Install traefik binary
+  become: true
+  ansible.builtin.copy:
+    src: /tmp/traefik
+    dest: /usr/local/bin/traefik
+    remote_src: true
+    mode: "0755"
+    owner: root
+    group: root
+  when: not _traefik_stat.stat.exists
+
+- name: Create traefik group
+  become: true
+  ansible.builtin.group:
+    name: traefik
+    state: present
+    system: true
+
+- name: Create the traefik user
+  become: true
+  ansible.builtin.user:
+    name: traefik
+    groups: traefik
+    append: true
+    shell: /usr/sbin/nologin
+    system: true
+    create_home: false
+    home: /
+
+- name: Ensure that traefik configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/traefik
+    state: directory
+    mode: "0700"
+    owner: traefik
+    group: traefik
+
+- name: Ensure that traefik acme configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/traefik/acme
+    state: directory
+    mode: "0700"
+    owner: traefik
+    group: traefik
+
+- name: Ensure that traefik certs configuration directory exists
+  become: true
+  ansible.builtin.file:
+    path: /etc/traefik/certs
+    state: directory
+    mode: "0700"
+    owner: traefik
+    group: traefik
+  when: traefik_tls_certificate is defined
+
+- name: Copy TLS certificate if provided
+  become: true
+  ansible.builtin.copy:
+    src: "{{ traefik_tls_certificate }}"
+    dest: /etc/traefik/certs/{{ traefik_tls_certificate | basename }}
+    remote_src: "{{ traefik_tls_certificate_remote_src | default(false) }}"
+    mode: "0444"
+    owner: traefik
+    group: traefik
+  when: traefik_tls_certificate is defined
+  notify: Restart services traefik
+  register: _traefik_tls_certificate
+
+- name: Copy TLS key if provided
+  become: true
+  ansible.builtin.copy:
+    src: "{{ traefik_tls_key }}"
+    dest: /etc/traefik/certs/{{ traefik_tls_key | basename }}
+    remote_src: "{{ traefik_tls_key_remote_src | default(false) }}"
+    mode: "0400"
+    owner: traefik
+    group: traefik
+  when: traefik_tls_key is defined
+  notify: Restart services traefik
+  register: _traefik_tls_key
+
+- name: Copy traefik configuration
+  become: true
+  ansible.builtin.template:
+    src: templates/traefik.yaml
+    dest: /etc/traefik/traefik.yaml
+    mode: "0600"
+    owner: traefik
+    group: traefik
+  notify: Restart services traefik
+
+- name: Copy traefik dynamic configuration
+  become: true
+  ansible.builtin.template:
+    src: templates/traefik_dynamic.yaml
+    dest: /etc/traefik/traefik_dynamic.yaml
+    mode: "0600"
+    owner: traefik
+    group: traefik
+  notify: Restart services traefik
+
+- name: Copy the traefik systemd service file
+  become: true
+  ansible.builtin.copy:
+    content: |
+      [Unit]
+      Description=traefik proxy
+      After=network-online.target
+      Wants=network-online.target systemd-networkd-wait-online.service
+
+      [Service]
+      Restart=always
+      User=traefik
+      Group=traefik
+      ExecStart=/usr/local/bin/traefik --configfile=/etc/traefik/traefik.yaml
+      LimitNOFILE=1048576
+      PrivateTmp=true
+      PrivateDevices=false
+      ProtectHome=true
+      ProtectSystem=full
+      ReadWriteDirectories=/etc/traefik/acme
+      CapabilityBoundingSet=CAP_NET_BIND_SERVICE
+      AmbientCapabilities=CAP_NET_BIND_SERVICE
+      NoNewPrivileges=true
+
+      [Install]
+      WantedBy=multi-user.target
+    dest: /etc/systemd/system/traefik.service
+    owner: root
+    group: root
+    mode: "0644"
+  notify: Restart services traefik
+
+- name: Ensure Traefik is enabled on boot
+  become: true
+  ansible.builtin.systemd:
+    daemon_reload: true
+    name: traefik
+    enabled: true
+    state: started
+
+- name: Allow traefik http through firewall
+  become: true
+  community.general.ufw:
+    rule: allow
+    port: "80"
+    proto: tcp
+  when: firewall_enabled
+
+- name: Allow traefik https through firewall
+  become: true
+  community.general.ufw:
+    rule: allow
+    port: "443"
+    proto: tcp
+  when: firewall_enabled
+
+- name: Allow jupyterhub-ssh through firewall
+  become: true
+  community.general.ufw:
+    rule: allow
+    port: "{{ jupyterhub_ssh_external_port }}"
+    proto: tcp
+  when: firewall_enabled
+
+- name: Allow dask-gateway-scheduler through firewall
+  become: true
+  community.general.ufw:
+    rule: allow
+    port: "{{ dask_gateway_scheduler_external_port }}"
+    proto: tcp
+  when: firewall_enabled
+
+- name: Allow minio through firewall
+  become: true
+  community.general.ufw:
+    rule: allow
+    port: "{{ minio_external_port }}"
+    proto: tcp
+  when: firewall_enabled
diff --git a/tasks/copy_files.yaml b/tasks/copy_files.yaml
index ac98c5c7..47301fd3 100644
--- a/tasks/copy_files.yaml
+++ b/tasks/copy_files.yaml
@@ -1,12 +1,12 @@
 ---
-  - name: Copy arbitrary files onto nodes
-    become: True
-    copy:
-      src: "{{ item.src }}"
-      dest: "{{ item.dest }}"
-      owner: "{{ item.owner | default(omit) }}"
-      group: "{{ item.group | default(omit) }}"
-      mode: "{{ item.mode | default(omit) }}"
-      directory_mode: "{{ item.directory_mode | default(omit) }}"
-    when: (vars['copy_files_' + myhost]) is defined
-    loop: "{{ vars['copy_files_' + myhost] }}"
+- name: Copy arbitrary files onto nodes
+  become: true
+  ansible.builtin.copy:
+    src: "{{ 
item.src }}" + dest: "{{ item.dest }}" + owner: "{{ item.owner | default(omit) }}" + group: "{{ item.group | default(omit) }}" + mode: "{{ item.mode | default(omit) }}" + directory_mode: "{{ item.directory_mode | default(omit) }}" + when: (vars['copy_files_' + myhost]) is defined + loop: "{{ vars['copy_files_' + myhost] }}"