---
# file: install.yml
# This file is used for installing a container host
# For reasons not entirely clear, this was needed to produce an /etc/hosts that actually works with the Ubuntu image on OpenStack
- hosts: master:worker
become: yes
become_user: root
  # needed for Ubuntu 16.04 LTS and above, which do not ship Python 2 (fact gathering requires it)
gather_facts: no
pre_tasks:
    - name: 'install python2'
      # python-simplejson pulls in Python 2, which Ansible modules need to run;
      # become already elevates this task, so the sudo prefix is unnecessary
      raw: apt-get -y install python-simplejson
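    # A slightly more defensive bootstrap (a sketch, not what this playbook does) would
    # skip apt entirely when Python is already present, keeping repeat runs fast:
    # - name: 'install python2 only when missing'
    #   raw: test -e /usr/bin/python || (apt-get -y update && apt-get -y install python-simplejson)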
  # Idempotent way to build a /etc/hosts file with Ansible, using the Ansible hosts inventory as the source.
  # Will include all hosts the playbook is run on.
  # Inspired by http://xmeblog.blogspot.com/2013/06/ansible-dynamicaly-update-etchosts.html
tasks:
- name: Prep python
apt: name=python update_cache=yes
    # - name: Build hosts file for all hosts
    #   lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}" state=present
    #   when: hostvars[item].ansible_default_ipv4.address is defined and docker is not defined
    #   with_items: "{{ groups['all'] }}"  # quoted form; a bare groups['all'] is deprecated in Ansible 2.x
- name: Setting hostname to {{ inventory_hostname }}
hostname: name={{ inventory_hostname }}
    - name: Disable automatic retrieval of hostname from DHCP on Amazon EC2
template: src=roles/common/templates/dhclient.conf dest=/etc/dhcp/dhclient.conf
- name: Disable setting hostname from /etc/hostname on Amazon EC2
template: src=roles/common/templates/hostname.conf dest=/etc/init/hostname.conf
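    # (The dhclient.conf template itself is not shown in this file; typically such a
    # template pins the name with a dhclient "supersede" directive, e.g.
    #   supersede host-name "{{ inventory_hostname }}";
    # so a DHCP lease renewal cannot overwrite the hostname set above.)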
    - name: Disable ephemeral mount at /mnt on Amazon EC2
shell: umount /mnt
register: umount_output
ignore_errors: yes
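    # An idempotent alternative (a sketch; not what this playbook does) would use
    # the mount module rather than shelling out, e.g.:
    # - name: Disable ephemeral mount at /mnt on Amazon EC2
    #   mount: name=/mnt state=unmounted   # 'name=' on older Ansible; 'path=' from 2.3 on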
- name: Create directory for downloads
file: path=/home/ubuntu/downloads owner=ubuntu group=ubuntu state=directory
# This is a super-fugly way of determining the total number of hosts; see the Ansible bug referenced in the included task file
- hosts: master:worker
  become: yes
  become_user: root
tasks:
- name: Determine number of hosts in fugly way
include: roles/common/tasks/determine_num_hosts.yml
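    # determine_num_hosts.yml is not reproduced here; conceptually it records the total
    # host count as a fact, equivalent to something like (hypothetical sketch):
    # - name: Determine number of hosts
    #   set_fact: num_hosts="{{ groups['all'] | length }}"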
- hosts: master
roles:
- { role: common }
  # broken in Ansible 2.0
#- vars:
# single_node_lvm: True
# shared_storage_system: lvm
# vendor_data: /datastore
# lvm_device_whitelist: /dev/xvdd,/dev/xvde
#include: roles/storage/common/tasks/play_deploy_lvm_storage.yml
- hosts: master:worker
become: yes
become_user: root
tasks:
- name: Make sure ubuntu owns the datastore
file: path=/datastore owner=ubuntu group=ubuntu state=directory
- hosts: master:worker
become: yes
become_user: root
vars:
swap_on: False
roles:
- { role: mount_device, when: device is defined }
    # 9000 MB is approximately 10 GB of swap; we skip the role when the instance already has
    # that much swap, since the role was most probably already applied. (A hedged sketch of
    # such a role follows this play's roles list.)
- { role: swap-enabled, when: swap_on and ansible_swaptotal_mb < 9000 }
- { role: java, java_provider: Oracle8 }
- { role: install-docker }
# - { role: seqware-container }
# - { role: workflow-dewrapper-dependencies, when: (workflow_name is defined and 'DEWrapper' in workflow_name ) }
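# A minimal sketch of what the swap-enabled role's tasks might contain (an assumption;
# the real roles/swap-enabled is not included in this file):
# - name: Create a 10 GB swap file
#   command: fallocate -l 10G /swapfile creates=/swapfile
# - name: Restrict swap file permissions
#   file: path=/swapfile owner=root group=root mode=0600
# - name: Format and enable the swap file
#   shell: mkswap /swapfile && swapon /swapfile
#   when: ansible_swaptotal_mb < 9000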
#- hosts: master:worker
# become: yes
# become_user: root
# vars:
# user_name: ubuntu
# tasks:
# - name: Copy gnos.pem keys to worker
# copy: src={{ item }} dest=/home/{{ user_name }}/.gnos/ owner={{ user_name }} group={{ user_name }}
# with_fileglob:
# - /home/{{ user_name }}/.gnos/*
#- hosts: master
# become: yes
# become_user: root
# vars:
# installed_workflows_path: /workflows
# target_workflow_path: /workflows
# user_name: ubuntu
# roles:
# - { role: workflow, when: http_workflows is defined or s3_workflows is defined }
#- hosts: master
# become: yes
# become_user: root
# roles:
# - { role: containers, when: containers is defined or http_containers is defined or s3_containers is defined }
# Install monitoring components onto the client once it is set up with all necessary containers.
# Perhaps this should happen before the Consonance setup...
# Not strictly needed for Consonance 2 development
# - hosts: master
# become_user: root
# include: ../monitoring-bag/site.yml sensu_server_host=sensu-server
- hosts: master
become: yes
become_user: root
roles:
- { role: consonance }
#- hosts: master
# roles:
# - { role: test, when: test_workflows is defined and test_workflows }