# OpenShift Inventory Template.
# Note that when the infrastructure is generated by Terraform, this file is
# expanded into './inventory.cfg', based on the rules in:
#
# ./modules/openshift/08-inventory.tf
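#
# As a rough illustration only (this is not the actual contents of
# 08-inventory.tf), the expansion might use a Terraform template_file data
# source along these lines, with vars matching the placeholders in this
# file and purely hypothetical values:
#
#   data "template_file" "inventory" {
#     template = file("inventory.template.cfg")
#     vars = {
#       public_hostname = "54.10.20.30.xip.io"
#       access_key      = "AKIA..."
#       secret_key      = "..."
#       cluster_id      = "openshift"
#       master_hostname = "ip-10-0-1-10.ec2.internal"
#       node1_hostname  = "ip-10-0-1-11.ec2.internal"
#       node2_hostname  = "ip-10-0-1-12.ec2.internal"
#     }
#   }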

# Create an OSEv3 group that contains the masters, etcd, and nodes groups.
[OSEv3:children]
masters
etcd
nodes

# Set variables common to all OSEv3 hosts.
[OSEv3:vars]
# SSH user; this user must be able to authenticate over SSH without a password.
ansible_ssh_user=ec2-user
# If ansible_ssh_user is not root, ansible_become must be set to true
ansible_become=true
# Deploy OKD 3.11.
openshift_deployment_type=origin
openshift_release=v3.11
# We need wildcard DNS for public access to services; fortunately, the
# superb xip.io service gives us one for free.
openshift_public_hostname=${public_hostname}
openshift_master_default_subdomain=${public_hostname}
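# For example, if the master's public IP were 54.10.20.30 (a hypothetical
# address), the expanded value would be 54.10.20.30.xip.io, and routes would
# be exposed on subdomains such as app.54.10.20.30.xip.io.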
# Use an htpasswd file as the identity provider.
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
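# With this provider, users are typically added on the master after the
# install completes; a hedged example, assuming the default htpasswd file
# location in OKD 3.11 and a hypothetical 'admin' user:
#
#   sudo htpasswd -b /etc/origin/master/htpasswd admin <password>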
# Uncomment the line below to enable metrics for the cluster.
# openshift_hosted_metrics_deploy=true
# Use API keys rather than instance roles, so that tenant containers don't
# get OpenShift's EC2/EBS permissions.
openshift_cloudprovider_kind=aws
openshift_cloudprovider_aws_access_key=${access_key}
openshift_cloudprovider_aws_secret_key=${secret_key}
# Set the cluster_id.
openshift_clusterid=${cluster_id}
# Define the standard set of node groups, as per:
# https://github.com/openshift/openshift-ansible#node-group-definition-and-mapping
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true']}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true']}, {'name': 'node-config-master-infra', 'labels': ['node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true']}, {'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/infra=true,node-role.kubernetes.io/master=true,node-role.kubernetes.io/compute=true']}]
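# Each host in the [nodes] group below selects one of the groups above via
# its openshift_node_group_name setting.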

# Create the masters host group. Note that due to:
#   https://github.com/dwmkerr/terraform-aws-openshift/issues/40
# we cannot use the internal DNS names (such as master.openshift.local), as
# there is a bug in the installer when using the AWS cloud provider.
# Note that we use the master node as an infra node as well, which is not
# recommended for production use.
[masters]
${master_hostname}

# Host group for etcd.
[etcd]
${master_hostname}

# All nodes, along with their openshift_node_group_name settings.
[nodes]
${master_hostname} openshift_node_group_name='node-config-master-infra' openshift_schedulable=true
${node1_hostname} openshift_node_group_name='node-config-compute'
${node2_hostname} openshift_node_group_name='node-config-compute'
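
# Once this template has been expanded into ./inventory.cfg, the cluster is
# installed by running the openshift-ansible playbooks against it. A sketch,
# assuming a checkout of openshift-ansible on the release-3.11 branch:
#
#   ansible-playbook -i inventory.cfg openshift-ansible/playbooks/prerequisites.yml
#   ansible-playbook -i inventory.cfg openshift-ansible/playbooks/deploy_cluster.yml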