# docker-compose.yml
version: '3.6'
volumes:
  apache-nifi_data:
    driver: local
  zookeeper-data:
    driver: local
  zookeeper-log:
    driver: local
  kafka-data:
    driver: local
  hadoop_namenode:
    driver: local
  hadoop-datanode-1:
    driver: local
  hadoop-datanode-2:
    driver: local
  hadoop-datanode-3:
    driver: local
  hadoop_historyserver:
    driver: local
  elasticsearch-data:
    driver: local
  logstash-data:
    driver: local
  postgres_data:
    driver: local
  pgadmin-data:
    driver: local
  rabbitmq_data:
    driver: local
networks:
  platform_network:
    ipam:
      config:
        - subnet: 192.168.1.0/24
services:
  # Apache Tika Server
  tika_server:
    image: apache/tika:1.24
    container_name: tika_server
    networks:
      platform_network:
        ipv4_address: 192.168.1.13
    ports:
      - "9997:9998"
  # Apache Tika Server with OCR
  tika_server_ocr:
    image: apache/tika:1.24-full
    container_name: tika_server_ocr
    networks:
      platform_network:
        ipv4_address: 192.168.1.14
    ports:
      - "9998:9998"
  # Easy to use SFTP (SSH File Transfer Protocol) server with OpenSSH.
  sftp:
    image: atmoz/sftp
    container_name: sftp
    volumes:
      - ./sftp/users.conf:/etc/sftp/users.conf:ro
      - ./sftp/upload:/home/ssanchez/uploads
    networks:
      platform_network:
        ipv4_address: 192.168.1.15
    ports:
      - "2222:22"
  # ZooKeeper is a centralized service for maintaining configuration information,
  # naming, providing distributed synchronization, and providing group services.
  # It provides distributed coordination for our Kafka cluster.
  # http://zookeeper.apache.org/
  zookeeper:
    image: confluentinc/cp-zookeeper
    container_name: zookeeper
    # ZooKeeper is designed to "fail-fast", so it is important to allow it to
    # restart automatically.
    restart: unless-stopped
    volumes:
      - zookeeper-data:/var/lib/zookeeper/data
      - zookeeper-log:/var/lib/zookeeper/log
    networks:
      platform_network:
        ipv4_address: 192.168.1.16
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
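  # A quick health-check sketch (assumption: the cp-zookeeper image ships the Confluent
  # zookeeper-shell wrapper, which recent versions do). Listing the root znodes confirms
  # the ensemble answers on 2181:
  #   docker exec zookeeper zookeeper-shell localhost:2181 ls /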
  # Unofficial convenience binaries and Docker images for Apache NiFi
  nifi:
    build:
      context: ./nifi
    container_name: nifi
    volumes:
      - 'apache-nifi_data:/apache/nifi'
    environment:
      - NIFI_WEB_HTTP_PORT=8080
      - NIFI_CLUSTER_IS_NODE=true
      - NIFI_CLUSTER_NODE_PROTOCOL_PORT=8082
      - NIFI_ZK_CONNECT_STRING=zookeeper:2181
      - NIFI_ELECTION_MAX_WAIT=1 min
    networks:
      platform_network:
        ipv4_address: 192.168.1.17
    ports:
      - '8080:8080'
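  # A verification sketch (assumption: the ./nifi build keeps the stock NiFi HTTP API).
  # The flow designer UI is served at http://localhost:8080/nifi once startup and leader
  # election finish; the REST API answers underneath it, e.g.:
  #   curl http://localhost:8080/nifi-api/system-diagnostics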
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-namenode
    restart: always
    ports:
      - 8089:9870
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    networks:
      platform_network:
        ipv4_address: 192.168.1.18
    env_file:
      - ./hadoop.env
  datanode1:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-datanode-1
    restart: always
    volumes:
      - hadoop-datanode-1:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    networks:
      platform_network:
        ipv4_address: 192.168.1.19
    env_file:
      - ./hadoop.env
  datanode2:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-datanode-2
    restart: always
    volumes:
      - hadoop-datanode-2:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    networks:
      platform_network:
        ipv4_address: 192.168.1.20
    env_file:
      - ./hadoop.env
  datanode3:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-datanode-3
    restart: always
    volumes:
      - hadoop-datanode-3:/hadoop/dfs/data
    environment:
      SERVICE_PRECONDITION: "namenode:9870"
    networks:
      platform_network:
        ipv4_address: 192.168.1.21
    env_file:
      - ./hadoop.env
  resourcemanager:
    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-resourcemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864"
    env_file:
      - ./hadoop.env
    networks:
      platform_network:
        ipv4_address: 192.168.1.22
    ports:
      - '8081:8088'
  nodemanager:
    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-nodemanager
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864 resourcemanager:8088"
    env_file:
      - ./hadoop.env
  historyserver:
    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
    container_name: hadoop-historyserver
    restart: always
    environment:
      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 datanode3:9864 resourcemanager:8088"
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    networks:
      platform_network:
        ipv4_address: 192.168.1.23
    env_file:
      - ./hadoop.env
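  # A cluster-check sketch (assumption: the bde2020 images keep the Hadoop binaries on the
  # PATH, which they normally do). The NameNode web UI is published at http://localhost:8089
  # and the YARN ResourceManager UI at http://localhost:8081; HDFS itself can be inspected
  # from inside the namenode container:
  #   docker exec hadoop-namenode hdfs dfsadmin -report
  #   docker exec hadoop-namenode hdfs dfs -ls /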
  # Kafka is a distributed streaming platform. It is used to build real-time streaming
  # data pipelines that reliably move data between systems and platforms, and to build
  # real-time streaming applications that transform or react to the streams of data.
  # http://kafka.apache.org/
  kafka:
    image: confluentinc/cp-kafka
    container_name: kafka
    volumes:
      - kafka-data:/var/lib/kafka
    environment:
      # Required. Instructs Kafka how to get in touch with ZooKeeper.
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_NUM_PARTITIONS: 1
      KAFKA_COMPRESSION_TYPE: gzip
      # Required when running in a single-node cluster, as we are. We would be able to
      # take the default if we had three or more nodes in the cluster.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      # Required. Kafka will publish this address to ZooKeeper so clients know
      # how to get in touch with Kafka. "PLAINTEXT" indicates that no authentication
      # mechanism will be used.
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
    networks:
      platform_network:
        ipv4_address: 192.168.1.24
    links:
      - zookeeper
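  # A smoke-test sketch (assumptions: the cp-kafka image ships the standard Kafka CLI tools
  # and is recent enough to accept --bootstrap-server; older images want --zookeeper instead).
  # The advertised listener is kafka:9092 and no host port is published, so run the tools
  # inside the broker container:
  #   docker exec kafka kafka-topics --bootstrap-server localhost:9092 --list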
  # The Kafka REST Proxy provides a RESTful interface to a Kafka cluster.
  # It makes it easy to produce and consume messages, view the state
  # of the cluster, and perform administrative actions without using
  # the native Kafka protocol or clients.
  # https://github.com/confluentinc/kafka-rest
  kafka-rest-proxy:
    image: confluentinc/cp-kafka-rest:latest
    container_name: kafka-rest-proxy
    environment:
      # Specifies the ZooKeeper connection string. This service connects
      # to ZooKeeper so that it can broadcast its endpoints as well as
      # react to the dynamic topology of the Kafka cluster.
      KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper:2181
      # The address on which Kafka REST will listen for API requests.
      KAFKA_REST_LISTENERS: http://0.0.0.0:8082/
      # Required. This is the hostname used to generate absolute URLs in responses.
      # It defaults to the Java canonical hostname for the container, which might
      # not be resolvable in a Docker environment.
      KAFKA_REST_HOST_NAME: kafka-rest-proxy
      # The list of Kafka brokers to connect to. This is only used for bootstrapping:
      # the addresses provided here are used to connect to the cluster initially,
      # after which the full broker topology is discovered dynamically. Thanks, ZooKeeper!
      KAFKA_REST_BOOTSTRAP_SERVERS: kafka:9092
    # Kafka REST relies upon Kafka and ZooKeeper.
    # This instructs Docker Compose to wait until those services are up
    # before attempting to start Kafka REST.
    networks:
      platform_network:
        ipv4_address: 192.168.1.25
    depends_on:
      - zookeeper
      - kafka
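  # A probe sketch (assumption: the Confluent REST Proxy API, whose GET /topics endpoint
  # lists topic names). No host port is published for this service, so call it from a
  # container attached to platform_network; the network name below depends on your Compose
  # project name and is an assumption:
  #   docker run --rm --network <project>_platform_network curlimages/curl \
  #     http://kafka-rest-proxy:8082/topics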
  # Browse Kafka topics and understand what's happening on your cluster.
  # Find topics / view topic metadata / browse topic data
  # (kafka messages) / view topic configuration / download data.
  # https://github.com/Landoop/kafka-topics-ui
  kafka-topics-ui:
    image: landoop/kafka-topics-ui:latest
    container_name: kafka-topics-ui
    ports:
      - "8082:8000"
    networks:
      platform_network:
        ipv4_address: 192.168.1.26
    environment:
      # Required. Instructs the UI where it can find the Kafka REST Proxy.
      KAFKA_REST_PROXY_URL: "http://kafka-rest-proxy:8082/"
      # This instructs the Docker image to use Caddy to proxy traffic to kafka-topics-ui.
      PROXY: "true"
    # kafka-topics-ui relies upon Kafka REST.
    # This instructs Docker Compose to wait until that service is up
    # before attempting to start kafka-topics-ui.
    depends_on:
      - kafka-rest-proxy
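  # Once the stack is up, the topic browser should be reachable on the host at
  # http://localhost:8082 (container port 8000 is published there); it talks to the
  # REST proxy over the internal network using the KAFKA_REST_PROXY_URL above, e.g.:
  #   curl -I http://localhost:8082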
  mongo:
    image: mongo
    container_name: mongo
    env_file:
      - .env
    restart: always
    networks:
      platform_network:
        ipv4_address: 192.168.1.27
    environment:
      - MONGO_INITDB_ROOT_USERNAME=${MONGO_ROOT_USER}
      - MONGO_INITDB_ROOT_PASSWORD=${MONGO_ROOT_PASSWORD}
      - MONGO_INITDB_DATABASE=${MONGO_DB}
  # Web-based MongoDB admin interface, written with Node.js and express
  mongo-express:
    image: mongo-express
    container_name: mongo-express
    env_file:
      - .env
    restart: always
    environment:
      - ME_CONFIG_MONGODB_SERVER=mongo
      - ME_CONFIG_MONGODB_PORT=27017
      - ME_CONFIG_MONGODB_ENABLE_ADMIN=true
      - ME_CONFIG_MONGODB_AUTH_DATABASE=admin
      - ME_CONFIG_MONGODB_ADMINUSERNAME=${MONGO_ROOT_USER}
      - ME_CONFIG_MONGODB_ADMINPASSWORD=${MONGO_ROOT_PASSWORD}
      - ME_CONFIG_BASICAUTH_USERNAME=${MONGOEXPRESS_LOGIN}
      - ME_CONFIG_BASICAUTH_PASSWORD=${MONGOEXPRESS_PASSWORD}
    depends_on:
      - mongo
    networks:
      platform_network:
        ipv4_address: 192.168.1.28
    ports:
      - "8083:8081"
  # Elasticsearch is a powerful open source search and analytics engine that makes data easy to explore.
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2
    container_name: elasticsearch
    environment:
      - ELASTIC_PASSWORD=ssanchez00
      - "ES_JAVA_OPTS=-Xmx256m -Xms256m"
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      platform_network:
        ipv4_address: 192.168.1.29
    volumes:
      - elasticsearch-data:/usr/share/elasticsearch/data
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./elasticsearch/certificate/elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12
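  # A health-check sketch (assumptions: no host port is published for Elasticsearch, and
  # whether HTTP or HTTPS is required depends on the mounted elasticsearch.yml, which is
  # not shown). From another container on platform_network, with the usual project-prefixed
  # network name:
  #   docker run --rm --network <project>_platform_network curlimages/curl \
  #     -u elastic:ssanchez00 http://elasticsearch:9200/_cluster/health?pretty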
  # Logstash is a tool for managing events and logs: an open source data collection engine
  # with real-time pipelining capabilities that can dynamically unify data from disparate
  # sources and normalize it into destinations of your choice.
  logstash:
    build:
      context: ./logstash
      dockerfile: Dockerfile
    container_name: logstash
    networks:
      platform_network:
        ipv4_address: 192.168.1.30
    volumes:
      - logstash-data:/usr/share/logstash/data
      - ./logstash/pipeline/:/usr/share/logstash/pipeline/
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml
      - ./logstash/certificate/:/etc/logstash/keys/
    restart: always
    environment:
      - "LS_JAVA_OPTS=-Xmx400m -Xms400m"
  kibana:
    image: docker.elastic.co/kibana/kibana:7.6.2
    container_name: kibana
    environment:
      - ELASTICSEARCH_USERNAME=elastic
      - ELASTICSEARCH_PASSWORD=ssanchez00
    networks:
      platform_network:
        ipv4_address: 192.168.1.31
    ports:
      - "8084:5601"
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
      - ./kibana/certificate/:/etc/kibana/keys/
    depends_on:
      - elasticsearch
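  # A readiness sketch: Kibana is published at http://localhost:8084 and its status API
  # reports when it has connected to Elasticsearch (assumption: whether credentials are
  # required here depends on the mounted kibana.yml):
  #   curl -u elastic:ssanchez00 http://localhost:8084/api/status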
  postgres:
    build:
      context: ./keycloak
    container_name: keycloak_db
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      platform_network:
        ipv4_address: 192.168.1.32
    environment:
      POSTGRES_DB: keycloak
      POSTGRES_USER: keycloak
      POSTGRES_PASSWORD: ssanchez00
  pgadmin:
    image: dpage/pgadmin4
    container_name: keycloak_db_ui
    restart: always
    environment:
      PGADMIN_DEFAULT_EMAIL: [email protected]
      PGADMIN_DEFAULT_PASSWORD: ssanchez00
      PGADMIN_LISTEN_PORT: 80
    networks:
      platform_network:
        ipv4_address: 192.168.1.33
    ports:
      - 8085:80
    volumes:
      - pgadmin-data:/var/lib/pgadmin
  keycloak:
    image: jboss/keycloak
    container_name: keycloak
    environment:
      DB_VENDOR: POSTGRES
      DB_ADDR: postgres
      DB_DATABASE: keycloak
      DB_USER: keycloak
      DB_SCHEMA: public
      DB_PASSWORD: ssanchez00
      KEYCLOAK_USER: admin
      KEYCLOAK_PASSWORD: ssanchez00
    networks:
      platform_network:
        ipv4_address: 192.168.1.34
    ports:
      - 8086:8080
    depends_on:
      - postgres
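  # An access sketch: the Keycloak admin console is published at http://localhost:8086/auth
  # (the /auth prefix is the default for the jboss/keycloak image) and accepts the
  # KEYCLOAK_USER/KEYCLOAK_PASSWORD pair above. The OpenID Connect discovery document of
  # the built-in master realm makes a convenient liveness probe:
  #   curl http://localhost:8086/auth/realms/master/.well-known/openid-configuration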
  consul:
    image: consul:latest
    container_name: consul-server
    command: agent -server -ui -node=server1 -bootstrap-expect=1 -client=0.0.0.0
    networks:
      platform_network:
        ipv4_address: 192.168.1.35
    ports:
      - 8087:8500
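  # A quick check sketch: the Consul UI and HTTP API are published at http://localhost:8087,
  # and the consul binary inside the container can list cluster members:
  #   curl http://localhost:8087/v1/status/leader
  #   docker exec consul-server consul members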
  # File Management Service
  file_management_service:
    image: ssanchez11/file_management_service:0.0.1-SNAPSHOT
    container_name: file_management_service
    restart: unless-stopped
    networks:
      platform_network:
        ipv4_address: 192.168.1.36
  # File Metadata Service
  file_metadata_service:
    image: ssanchez11/file_metadata_service:0.0.1-SNAPSHOT
    container_name: file_metadata_service
    restart: unless-stopped
    networks:
      platform_network:
        ipv4_address: 192.168.1.37
  # File Search Service
  file_search_service:
    image: ssanchez11/file_search_service:0.0.1-SNAPSHOT
    container_name: file_search_service
    restart: unless-stopped
    networks:
      platform_network:
        ipv4_address: 192.168.1.38
  # File Notification Service
  file_notification_service:
    image: ssanchez11/file_notifications_service:0.0.1-SNAPSHOT
    container_name: file_notifications_service
    restart: unless-stopped
    networks:
      platform_network:
        ipv4_address: 192.168.1.39
  # File API Gateway Service
  files_api_gateway_service:
    image: ssanchez11/files_api_gateway_service:0.0.1-SNAPSHOT
    container_name: files_api_gateway_service
    restart: unless-stopped
    networks:
      platform_network:
        ipv4_address: 192.168.1.40
    ports:
      - "2223:22"
  # RabbitMQ with STOMP support
  rabbitmq-stomp:
    image: activiti/rabbitmq-stomp
    container_name: rabbitmq-stomp
    volumes:
      - 'rabbitmq_data:/var/lib/rabbitmq'
    networks:
      platform_network:
        ipv4_address: 192.168.1.41
    ports:
      - '8088:15672'
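  # A management sketch (assumption: this image keeps RabbitMQ's default guest/guest
  # credentials). Container port 15672 is the management UI, published on the host as 8088:
  #   curl -u guest:guest http://localhost:8088/api/overview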