forked from Marco-Aga/TAP_MATE_Marco_Agatello
-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yaml
129 lines (120 loc) · 3.66 KB
/
docker-compose.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
# Docker Compose stack: Portainer (management UI) + Kafka/Zookeeper ingestion
# + Logstash -> Elasticsearch -> Kibana, with a Spark streaming application.
# NOTE: the top-level `version` key is obsolete in Compose v2+; kept only for
# compatibility with older docker-compose binaries.
version: '3.8'
services:

  # Web UI for managing the Docker host (needs the docker socket mounted).
  portainer:
    container_name: portainer
    image: portainer/portainer-ce
    restart: always
    ports:
      - "9000:9000/tcp"
    environment:
      - TZ=Europe/Rome
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./portainer:/data

  zookeeper:
    container_name: zookeeper
    image: confluentinc/cp-zookeeper:7.4.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    ports:
      # Quoted: unquoted `a:b` port mappings risk YAML 1.1 base-60 integer
      # parsing and are a documented Compose pitfall.
      - "22181:2181"
    healthcheck:
      # Health check: passes once the client port accepts TCP connections.
      # NOTE(review): assumes `nc` is present in the cp-zookeeper image — confirm.
      test: ["CMD", "nc", "-z", "localhost", "2181"]
      interval: 10s
      timeout: 10s
      retries: 15

  kafka:
    container_name: kafka
    image: confluentinc/cp-kafka:7.4.4
    ports:
      - "29092:29092"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # NOTE(review): OUTSIDE advertises a hard-coded LAN IP; clients outside
      # that network cannot reach the broker. Consider parameterizing, e.g.
      # OUTSIDE://${KAFKA_ADVERTISED_HOST:-192.168.1.15}:29092.
      KAFKA_ADVERTISED_LISTENERS: INSIDE://kafka:9092,OUTSIDE://192.168.1.15:29092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_LISTENERS: INSIDE://0.0.0.0:9092,OUTSIDE://:29092
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      # NOTE(review): KAFKA_CREATE_TOPICS is a wurstmeister/kafka feature and
      # is ignored by confluentinc/cp-kafka. The topic presumably gets
      # auto-created on first use (auto.create.topics.enable defaults to
      # true); verify, or create it explicitly with an init container.
      KAFKA_CREATE_TOPICS: "stream_input:1:1"
    depends_on:
      # Start the broker only after Zookeeper reports healthy.
      zookeeper:
        condition: service_healthy
    healthcheck:
      # Same TCP-reachability check, against the broker's internal listener.
      test: ["CMD", "nc", "-z", "localhost", "9092"]
      interval: 10s
      timeout: 10s
      retries: 15

  # Web UI for inspecting the Kafka cluster, topics and messages.
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - "8080:8080"
    depends_on:
      - kafka
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
      KAFKA_CLUSTERS_0_NAME: MATE
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092

  logstash:
    container_name: logstash
    image: docker.elastic.co/logstash/logstash:8.13.2
    volumes:
      - ./logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - ./Buffer:/usr/share/logstash/Buffer
    ports:
      - "5000:5000"
    environment:
      XPACK_MONITORING_ENABLED: 'false'
      # NOTE(review): placeholder secret committed to the compose file —
      # inject via an .env file or secret store instead.
      TAP_TOKEN: your_api_key_here
    # Wait until the Kafka broker is reachable before shipping data.
    depends_on:
      kafka:
        condition: service_healthy

  elasticsearch:
    container_name: elasticsearch
    image: docker.arvancloud.ir/elasticsearch:7.16.3
    environment:
      - bootstrap.memory_lock=true
      - discovery.type=single-node
      - xpack.security.enabled=true
      # NOTE(review): plaintext credentials committed to the compose file —
      # move to an .env file or secret store. ELASTIC_USERNAME is not a real
      # ES setting (the `elastic` superuser is built in); harmless but unused.
      - ELASTIC_PASSWORD=1234Catania
      - ELASTIC_USERNAME=elastic
    # FIX: was `4 GB`, which is not a valid Compose byte value (no space
    # allowed; unit suffixes are b/k/m/g) and breaks file parsing.
    mem_limit: 4g
    ports:
      - "9200:9200"
      - "9300:9300"
    # Health check for dependents (Spark): cluster must be green or yellow.
    healthcheck:
      test: ["CMD-SHELL", "curl -u elastic:1234Catania -s http://localhost:9200/_cluster/health | grep -q '\"status\":\"green\"\\|\"status\":\"yellow\"' || exit 1"]
      interval: 10s
      timeout: 10s
      retries: 15

  kibana:
    container_name: kibana
    image: docker.arvancloud.ir/kibana:7.16.3
    environment:
      # FIX: ELASTICSEARCH_URL is a Kibana 6.x setting removed in 7.0;
      # Kibana 7.x ignores it and defaults to http://localhost:9200 (wrong
      # inside a container). ELASTICSEARCH_HOSTS is the 7.x equivalent.
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=1234Catania
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

  spark:
    container_name: spark-app
    build:
      context: ./spark-app
    environment:
      TAP_ES_HOST: elasticsearch
      TAP_KAFKA_HOST: kafka
      # Quoted so the value stays a string rather than a YAML integer.
      TAP_KAFKA_PORT: "9092"
    # Wait until both Kafka and Elasticsearch are reachable before starting
    # to consume from Kafka and write results to Elasticsearch.
    depends_on:
      kafka:
        condition: service_healthy
      elasticsearch:
        condition: service_healthy