diff --git a/docker-compose.yml b/docker-compose.yml index ef5f7f94..d5e5e2db 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -64,25 +64,45 @@ services: - CM_MONGO_API_PASSWORD=${CM_MONGO_API_PASSWORD} - CM_MONGO_USER_USERNAME=${CM_MONGO_USER_USERNAME} - CM_MONGO_USER_PASSWORD=${CM_MONGO_USER_PASSWORD} + - CM_DATABASE_NAME=${CM_DATABASE_NAME} + - CM_DATABASE_STORAGE_COLLECTION_NAME=${CM_DATABASE_STORAGE_COLLECTION_NAME} + - CM_DATABASE_SIZE_GB=${CM_DATABASE_SIZE_GB} + - CM_DATABASE_SIZE_TARGET_PERCENT=${CM_DATABASE_SIZE_TARGET_PERCENT} + - CM_DATABASE_DELETE_THRESHOLD_PERCENT=${CM_DATABASE_DELETE_THRESHOLD_PERCENT} + - CM_DATABASE_MAX_TTL_RETENTION_SECONDS=${CM_DATABASE_MAX_TTL_RETENTION_SECONDS} + - CM_DATABASE_MIN_TTL_RETENTION_SECONDS=${CM_DATABASE_MIN_TTL_RETENTION_SECONDS} + - CM_DATABASE_COMPACTION_TRIGGER_PERCENT=${CM_DATABASE_COMPACTION_TRIGGER_PERCENT} ports: - "27017:27017" volumes: - mongodb_data_container:/data/db + - ./docker/mongo/manage-volume-cron:/docker-entrypoint-initdb.d/manage-volume-cron - ./docker/mongo/keyfile.txt:/data/keyfile.txt - ./docker/mongo/a_init_replicas.js:/docker-entrypoint-initdb.d/a_init_replicas.js - ./docker/mongo/b_create_indexes.js:/docker-entrypoint-initdb.d/b_create_indexes.js + - ./docker/mongo/manage_volume.js:/docker-entrypoint-initdb.d/manage_volume.js healthcheck: test: | test $$(mongosh --username ${MONGO_INITDB_ROOT_USERNAME} --password ${MONGO_INITDB_ROOT_PASSWORD} --quiet --eval "try { rs.initiate({ _id: 'rs0', members: [{ _id: 0, host: '${DB_HOST_IP}' }] }).ok } catch (_) { rs.status().ok }") -eq 1 interval: 10s - start_period: 30s + start_period: 60s entrypoint: - bash - -c - - | + - | + apt update + apt install -y cron gettext systemctl dos2unix + systemctl start cron + systemctl enable cron + envsubst < /docker-entrypoint-initdb.d/manage-volume-cron > /etc/cron.d/manage-volume-cron + dos2unix /etc/cron.d/manage-volume-cron + chmod 644 /etc/cron.d/manage-volume-cron + systemctl restart cron chmod 
400 /data/keyfile.txt chown 999:999 /data/keyfile.txt + exec docker-entrypoint.sh $$@ + command: ["mongod", "--replSet", "rs0", "--bind_ip_all", "--keyFile", "/data/keyfile.txt"] logging: diff --git a/docker/mongo/manage_volume.js b/docker/mongo/manage_volume.js index 3dbf65d8..39bc7986 100644 --- a/docker/mongo/manage_volume.js +++ b/docker/mongo/manage_volume.js @@ -35,8 +35,9 @@ const CM_MONGO_ROOT_USERNAME = process.env.MONGO_INITDB_ROOT_USERNAME || "root"; const CM_MONGO_ROOT_PASSWORD = process.env.MONGO_INITDB_ROOT_PASSWORD || "root"; const MS_PER_HOUR = 60 * 60 * 1000; -const DB_TARGET_SIZE_BYTES = CM_DATABASE_SIZE_GB * CM_DATABASE_SIZE_TARGET_PERCENT * 1024 * 1024 * 1024; -const DB_DELETE_SIZE_BYETS = CM_DATABASE_SIZE_GB * CM_DATABASE_DELETE_THRESHOLD_PERCENT * 1024 * 1024 * 1024; +const BYTE_TO_GB = 1024 * 1024 * 1024; +const DB_TARGET_SIZE_BYTES = CM_DATABASE_SIZE_GB * CM_DATABASE_SIZE_TARGET_PERCENT * BYTE_TO_GB; +const DB_DELETE_SIZE_BYETS = CM_DATABASE_SIZE_GB * CM_DATABASE_DELETE_THRESHOLD_PERCENT * BYTE_TO_GB; print("Managing Mongo Data Volumes"); @@ -56,9 +57,13 @@ class CollectionStats{ class StorageRecord{ - constructor(collectionStats){ + constructor(collectionStats, totalAllocatedStorage, totalFreeSpace, totalIndexSize){ this.collectionStats = collectionStats; this.recordGeneratedAt = ISODate(); + this.totalAllocatedStorage = totalAllocatedStorage; + this.totalFreeSpace = totalFreeSpace; + this.totalIndexSize = totalIndexSize; + this.totalSize = totalAllocatedStorage + totalFreeSpace + totalIndexSize; } } @@ -103,36 +108,32 @@ function updateTTL(){ const growth = ema_deltas(sizes); const oldestSpat = db.getCollection("ProcessedSpat").find().sort({"recordGeneratedAt":1}).limit(1); - let new_tll = ttl; + let new_ttl = ttl; let possible_ttl = ttl; // Check if collection is still growing to capacity, or if it in steady state if(oldestSpat.recordGeneratedAt > ISODate() - ttl + MS_PER_HOUR && growth > 0){ possible_ttl = DB_TARGET_SIZE_BYTES / growth; 
}else{ - possible_ttl = 3600 * ((DB_TARGET_SIZE_BYTES - sizes[0])/1024/1024/1024) + ttl; // Shift the TTL by roughly 1 hour for every GB of data over or under + possible_ttl = 3600 * ((DB_TARGET_SIZE_BYTES - sizes[0])/BYTE_TO_GB) + ttl; // Shift the TTL by roughly 1 hour for every GB of data over or under } // Clamp TTL and assign to new TTL; if(!isNaN(possible_ttl) && possible_ttl != 0){ if(possible_ttl > CM_DATABASE_MAX_TTL_RETENTION_SECONDS){ - new_tll = CM_DATABASE_MAX_TTL_RETENTION_SECONDS; + new_ttl = CM_DATABASE_MAX_TTL_RETENTION_SECONDS; }else if(possible_ttl < CM_DATABASE_MIN_TTL_RETENTION_SECONDS){ - new_tll = CM_DATABASE_MIN_TTL_RETENTION_SECONDS; + new_ttl = CM_DATABASE_MIN_TTL_RETENTION_SECONDS; }else{ - new_tll = Math.round(possible_ttl); + new_ttl = Math.round(possible_ttl); } - print("Calculated New TTL for MongoDB: " + new_tll); - applyNewTTL(new_tll); + new_ttl = Number(new_ttl); + print("Calculated New TTL for MongoDB: " + new_ttl); + applyNewTTL(new_ttl); }else{ print("Not Updating TTL New TTL is NaN"); } - - - - - } function getLatestTTL(){ @@ -177,6 +178,7 @@ function addNewStorageRecord(){ var collections = db.getCollectionNames(); let totalAllocatedStorage = 0; let totalFreeSpace = 0; + let totalIndexSize = 0; let records = []; @@ -191,13 +193,14 @@ function addNewStorageRecord(){ records.push(new CollectionStats(collections[i], allocatedStorage, freeSpace, indexSize)); - totalAllocatedStorage += allocatedStorage + indexSize; + totalAllocatedStorage += allocatedStorage; totalFreeSpace += freeSpace; + totalIndexSize += indexSize; - print(collections[i], allocatedStorage / 1024 / 1024 / 1024, freeSpace/ 1024 / 1024 / 1024); + print(collections[i], allocatedStorage / BYTE_TO_GB, freeSpace/ BYTE_TO_GB, indexSize / BYTE_TO_GB); } - const storageRecord = new StorageRecord(records); + const storageRecord = new StorageRecord(records, totalAllocatedStorage, totalFreeSpace, totalIndexSize); 
db.getCollection(CM_DATABASE_STORAGE_COLLECTION_NAME).insertOne(storageRecord); } @@ -222,10 +225,12 @@ function compactCollections(){ let allocatedStorage = Number(blockManager["file size in bytes"]); // If free space makes up a significant proportion of allocated storage - if(freeSpace > allocatedStorage * CM_DATABASE_COMPACTION_TRIGGER_PERCENT && allocatedStorage > (1024 * 1024 * 1024)){ + if(freeSpace > allocatedStorage * CM_DATABASE_COMPACTION_TRIGGER_PERCENT && allocatedStorage > (1 * BYTE_TO_GB)){ if(!activeCompactions.includes(collections[i])){ print("Compacting Collection", collections[i]); db.runCommand({compact: collections[i], force:true}); + }else{ + print("Skipping Compaction, Collection Compaction is already scheduled"); } } } diff --git a/sample.env b/sample.env index 235b1962..7dc285dc 100644 --- a/sample.env +++ b/sample.env @@ -17,11 +17,12 @@ # (Required) The IP address of Docker host machine which can be found by running "ifconfig" # Hint: look for "inet addr:" within "eth0" or "en0" for OSX -# For Local deployments DB_HOST_IP and KAFKA_BROKER_IP should be set to DOCKER_HOST_IP, for remote deployments, these can be set independently. + DOCKER_HOST_IP= DB_HOST_IP= KAFKA_BROKER_IP= -KAFKA_CONNECT_IP=localhost +KAFKA_CONNECT_IP= + # Set to "CONFLUENT" if broker is a Confluent Cloud broker KAFKA_TYPE= @@ -29,12 +30,21 @@ KAFKA_TYPE= CONFLUENT_KEY= CONFLUENT_SECRET= -# The Username and passwords to use for accessing mongoDB. 
+# Setup MongoDB username and password MONGO_INITDB_ROOT_USERNAME=root -MONGO_INITDB_ROOT_PASSWORD=root -CM_MONGO_CONNECTOR_USERNAME=connector -CM_MONGO_CONNECTOR_PASSWORD=connector -CM_MONGO_API_USERNAME=api -CM_MONGO_API_PASSWORD=api -CM_MONGO_USER_USERNAME=user -CM_MONGO_USER_PASSWORD=user +MONGO_INITDB_ROOT_PASSWORD= +CM_MONGO_CONNECTOR_USERNAME=connector +CM_MONGO_CONNECTOR_PASSWORD= +CM_MONGO_API_USERNAME=api +CM_MONGO_API_PASSWORD= +CM_MONGO_USER_USERNAME=user +CM_MONGO_USER_PASSWORD= + +CM_DATABASE_NAME=ConflictMonitor +CM_DATABASE_STORAGE_COLLECTION_NAME=MongoStorage +CM_DATABASE_SIZE_GB=1000 +CM_DATABASE_SIZE_TARGET_PERCENT=0.8 +CM_DATABASE_DELETE_THRESHOLD_PERCENT=0.9 +CM_DATABASE_MAX_TTL_RETENTION_SECONDS=5184000 # 60 days +CM_DATABASE_MIN_TTL_RETENTION_SECONDS=604800 # 7 days +CM_DATABASE_COMPACTION_TRIGGER_PERCENT=0.5