From d51c2171aae8d05c7328993b6c3505e2b4214641 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Tue, 25 Aug 2020 09:23:30 -0400 Subject: [PATCH 01/14] Updated templates to support Boto3 for KC and new config file --- templates/kobo-env/envfiles/aws.txt.tpl | 2 +- templates/kobo-env/envfiles/kobocat.txt.tpl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/kobo-env/envfiles/aws.txt.tpl b/templates/kobo-env/envfiles/aws.txt.tpl index d85d525..e5c4481 100644 --- a/templates/kobo-env/envfiles/aws.txt.tpl +++ b/templates/kobo-env/envfiles/aws.txt.tpl @@ -11,7 +11,7 @@ ${USE_AWS}AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} # To use S3, the specified buckets must already exist and the owner of your `AWS_ACCESS_KEY_ID` must have the appropriate S3 permissions. -${USE_AWS}KOBOCAT_DEFAULT_FILE_STORAGE=storages.backends.s3boto.S3BotoStorage +${USE_AWS}KOBOCAT_DEFAULT_FILE_STORAGE=storages.backends.s3boto3.S3Boto3Storage ${USE_AWS}KOBOCAT_AWS_STORAGE_BUCKET_NAME=${AWS_BUCKET_NAME} ${USE_AWS}KPI_DEFAULT_FILE_STORAGE=storages.backends.s3boto3.S3Boto3Storage diff --git a/templates/kobo-env/envfiles/kobocat.txt.tpl b/templates/kobo-env/envfiles/kobocat.txt.tpl index de847e6..e3148cc 100644 --- a/templates/kobo-env/envfiles/kobocat.txt.tpl +++ b/templates/kobo-env/envfiles/kobocat.txt.tpl @@ -2,7 +2,7 @@ KOBOCAT_DJANGO_DEBUG=${DEBUG} TEMPLATE_DEBUG=${DEBUG} ${USE_X_FORWARDED_HOST}USE_X_FORWARDED_HOST=True -DJANGO_SETTINGS_MODULE=onadata.settings.kc_environ +DJANGO_SETTINGS_MODULE=onadata.settings.prod ENKETO_VERSION=Express KOBOCAT_BROKER_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD_URL_ENCODED}@{% endif REDIS_PASSWORD %}redis-main.${PRIVATE_DOMAIN_NAME}:${REDIS_MAIN_PORT}/2 From 85a96adca6975e607e5bbcb45fc528385dd14d6e Mon Sep 17 00:00:00 2001 From: Joshua Beretta Date: Wed, 7 Oct 2020 15:10:37 +0200 Subject: [PATCH 02/14] changing KOBO_DOCKER_BRANCH in helpers/config.py --- helpers/config.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/helpers/config.py b/helpers/config.py index a8f3313..60b7f05 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -32,7 +32,7 @@ class Config(with_metaclass(Singleton)): DEFAULT_PROXY_PORT = "8080" DEFAULT_NGINX_PORT = "80" DEFAULT_NGINX_HTTPS_PORT = "443" - KOBO_DOCKER_BRANCH = '2.020.37' + KOBO_DOCKER_BRANCH = 'support-kobokitten' KOBO_INSTALL_VERSION = '3.2.1' def __init__(self): From 928efad5414884ee557a0842194b3eb9f492a9e9 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Mon, 8 Feb 2021 15:07:13 -0500 Subject: [PATCH 03/14] Merge with support-kobokitten --- conftest.py | 28 + helpers/aws_validation.py | 113 + helpers/cli.py | 174 +- helpers/command.py | 596 ++-- helpers/config.py | 2427 +++++++++-------- helpers/network.py | 124 +- helpers/setup.py | 220 +- helpers/singleton.py | 17 - helpers/template.py | 433 +-- helpers/updater.py | 7 +- helpers/upgrading.py | 344 ++- readme.md | 69 +- requirements_py2_tests.txt | 14 - ...ts_py3_tests.txt => requirements_tests.txt | 2 +- run.py | 89 +- setup.py | 5 +- ...r-compose.backend.primary.override.yml.tpl | 4 +- ...compose.backend.secondary.override.yml.tpl | 4 +- .../kobo-env/enketo_express/config.json.tpl | 1 + templates/kobo-env/envfiles/databases.txt.tpl | 3 + .../postgres/primary/postgres.conf.tpl | 4 + tests/test_config.py | 862 +++--- tests/test_run.py | 69 +- tests/utils.py | 106 +- tox.ini | 11 +- 25 files changed, 3349 insertions(+), 2377 deletions(-) create mode 100644 conftest.py create mode 100644 helpers/aws_validation.py delete mode 100644 requirements_py2_tests.txt rename requirements_py3_tests.txt => requirements_tests.txt (85%) diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..6436cbf --- /dev/null +++ b/conftest.py @@ -0,0 +1,28 @@ +# coding: utf-8 +import os +import pytest + + +def clean_up(): + """ + Removes files created by tests + """ + files = ['.uniqid', + 'upsert_db_users'] + for file_ in files: + try: + os.remove(os.path.join('/tmp', 
file_)) + except FileNotFoundError: + pass + + +@pytest.fixture(scope="session", autouse=True) +def setup(request): + # Clean up before tests begin in case of orphan files. + clean_up() + request.addfinalizer(_tear_down) + + +def _tear_down(): + clean_up() + pass diff --git a/helpers/aws_validation.py b/helpers/aws_validation.py new file mode 100644 index 0000000..965f568 --- /dev/null +++ b/helpers/aws_validation.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +import datetime +import hashlib +import hmac +from urllib.error import HTTPError +from urllib.request import Request, urlopen + + +class AWSValidation: + """ + A class to validate AWS credentials without using boto3 as a dependency. + + The structure and methods have been adapted from the AWS documentation: + http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python + """ + + METHOD = 'POST' + SERVICE = 'sts' + REGION = 'us-east-1' + HOST = 'sts.amazonaws.com' + ENDPOINT = 'https://sts.amazonaws.com' + REQUEST_PARAMETERS = 'Action=GetCallerIdentity&Version=2011-06-15' + CANONICAL_URI = '/' + SIGNED_HEADERS = 'host;x-amz-date' + PAYLOAD_HASH = hashlib.sha256(''.encode()).hexdigest() + ALGORITHM = 'AWS4-HMAC-SHA256' + + def __init__(self, aws_access_key_id, aws_secret_access_key): + self.access_key = aws_access_key_id + self.secret_key = aws_secret_access_key + + @staticmethod + def _sign(key, msg): + return hmac.new(key, msg.encode(), hashlib.sha256).digest() + + @classmethod + def _get_signature_key(cls, key, date_stamp, region_name, service_name): + k_date = cls._sign(('AWS4' + key).encode(), date_stamp) + k_region = cls._sign(k_date, region_name) + k_service = cls._sign(k_region, service_name) + return cls._sign(k_service, 'aws4_request') + + def _get_request_url_and_headers(self): + t = datetime.datetime.utcnow() + amzdate = t.strftime('%Y%m%dT%H%M%SZ') + datestamp = t.strftime('%Y%m%d') + + canonical_querystring = self.REQUEST_PARAMETERS + + canonical_headers = 
'\n'.join( + [f'host:{self.HOST}', f'x-amz-date:{amzdate}', ''] + ) + + canonical_request = '\n'.join( + [ + self.METHOD, + self.CANONICAL_URI, + canonical_querystring, + canonical_headers, + self.SIGNED_HEADERS, + self.PAYLOAD_HASH, + ] + ) + + credential_scope = '/'.join( + [datestamp, self.REGION, self.SERVICE, 'aws4_request'] + ) + + string_to_sign = '\n'.join( + [ + self.ALGORITHM, + amzdate, + credential_scope, + hashlib.sha256(canonical_request.encode()).hexdigest(), + ] + ) + + signing_key = self._get_signature_key( + self.secret_key, datestamp, self.REGION, self.SERVICE + ) + + signature = hmac.new( + signing_key, string_to_sign.encode(), hashlib.sha256 + ).hexdigest() + + authorization_header = ( + '{} Credential={}/{}, SignedHeaders={}, Signature={}'.format( + self.ALGORITHM, + self.access_key, + credential_scope, + self.SIGNED_HEADERS, + signature, + ) + ) + + headers = {'x-amz-date': amzdate, 'Authorization': authorization_header} + request_url = '?'.join([self.ENDPOINT, canonical_querystring]) + + return request_url, headers + + def validate_credentials(self): + request_url, headers = self._get_request_url_and_headers() + req = Request(request_url, headers=headers, method=self.METHOD) + + try: + with urlopen(req) as res: + if res.status == 200: + return True + else: + return False + except HTTPError as e: + return False + diff --git a/helpers/cli.py b/helpers/cli.py index 1bd794f..5383994 100644 --- a/helpers/cli.py +++ b/helpers/cli.py @@ -1,42 +1,108 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - -import re import subprocess import sys +import re +import textwrap -PY2 = sys.version_info[0] == 2 -if PY2: - input = raw_input - string_type = unicode -else: - string_type = str +class CLI: -class CLI(object): + NO_COLOR = '\033[0;0m' + COLOR_ERROR = '\033[0;31m' # dark red + COLOR_SUCCESS = '\033[0;32m' # dark green + COLOR_INFO = '\033[1;34m' # blue + COLOR_WARNING = '\033[1;31m' # red + COLOR_QUESTION = 
'\033[1;33m' # dark yellow + COLOR_DEFAULT = '\033[1;37m' # white - NO_COLOR = "\033[0m" - COLOR_ERROR = "\033[91m" - COLOR_SUCCESS = "\033[92m" - COLOR_INFO = "\033[94m" - COLOR_WARNING = "\033[95m" + EMPTY_CHARACTER = '-' + + DEFAULT_CHOICES = { + '1': True, + '2': False, + } + # We need an inverted dict version of `DEFAULT_CHOICES` to be able to + # retrieve keys from the values + DEFAULT_RESPONSES = dict(zip(DEFAULT_CHOICES.values(), + DEFAULT_CHOICES.keys())) + + @classmethod + def colored_input(cls, message, color=NO_COLOR, default=None): + text = cls.get_message_with_default(message, default) + input_ = input(cls.colorize(text, color)) + + # User wants to delete value previously entered. + if input_ == '-': + default = '' + input_ = '' - EMPTY_CHARACTER = "-" + return input_ if input_ is not None and input_ != '' else default + + @classmethod + def colored_print(cls, message, color=NO_COLOR): + print(cls.colorize(message, color)) + + @classmethod + def colorize(cls, message, color=NO_COLOR): + return '{}{}{}'.format(color, message, cls.NO_COLOR) + + @classmethod + def framed_print(cls, message, color=COLOR_WARNING, columns=70): + border = '═' * (columns - 2) + framed_message = [ + '╔{}╗'.format(border), + '║ {} ║'.format(' ' * (columns - 4)), + ] + + if not isinstance(message, list): + paragraphs = message.split('\n') + else: + paragraphs = ''.join(message).split('\n') + + for paragraph in paragraphs: + if paragraph == '': + framed_message.append( + '║ {} ║'.format(' ' * (columns - 4)) + ) + continue + + for line in textwrap.wrap(paragraph, columns - 4): + message_length = len(line) + spacer = ' ' * (columns - 4 - message_length) + framed_message.append( + '║ {}{} ║'.format(line, spacer) + ) + + framed_message.append('║ {} ║'.format(' ' * (columns - 4))) + framed_message.append('╚{}╝'.format(border)) + cls.colored_print('\n'.join(framed_message), color=color) @classmethod - def get_response(cls, validators=None, default="", to_lower=True, + def 
get_response(cls, validators=None, default='', to_lower=True, error_msg="Sorry, I didn't understand that!"): - + + use_default = False + # If not validators are provided, let's use default validation + # "Yes/No", where "Yes" equals 1, and "No" equals 2 + # Example: + # Are you sure? + # 1) Yes + # 2) No + if validators is None: + use_default = True + default = cls.DEFAULT_RESPONSES[default] + validators = cls.DEFAULT_CHOICES.keys() + while True: try: - response = cls.colored_input("", cls.COLOR_WARNING, default) + response = cls.colored_input('', cls.COLOR_QUESTION, default) if (response.lower() in map(lambda x: x.lower(), validators) or - validators is None or - (isinstance(validators, string_type) and - validators.startswith("~") and - re.match(validators[1:], response) - )): + validators is None or + (isinstance(validators, str) and + validators.startswith('~') and + re.match(validators[1:], response) + )): break else: cls.colored_print(error_msg, @@ -44,39 +110,29 @@ def get_response(cls, validators=None, default="", to_lower=True, except ValueError: cls.colored_print("Sorry, I didn't understand that.", cls.COLOR_ERROR) - - return response.lower() if to_lower else response - @classmethod - def colored_print(cls, message, color=NO_COLOR): - print(cls.colorize(message, color)) - - @classmethod - def colored_input(cls, message, color=NO_COLOR, default=None): - text = cls.get_message_with_default(message, default) - input_ = input(cls.colorize(text, color)) + if use_default: + return cls.DEFAULT_CHOICES[response] - # User wants to delete value previously entered. 
- if input_ == "-": - default = "" - input_ = "" - - return input_ if input_ is not None and input_ != "" else default - - @classmethod - def colorize(cls, message, color=NO_COLOR): - return "{}{}{}".format(color, message, cls.NO_COLOR) + return response.lower() if to_lower else response @classmethod def get_message_with_default(cls, message, default): - message = "{} ".format(message) if message else "" - default = "{}[{}]{}: ".format(cls.COLOR_WARNING, default, cls.NO_COLOR) \ - if default else "" + message = '{} '.format(message) if message else '' + + if default is None: + default = '' + else: + default = '{white}[{off}{default}{white}]{off}: '.format( + white=cls.COLOR_DEFAULT, + off=cls.NO_COLOR, + default=default + ) if message: - message = "{}: ".format(message.strip()) if not default else message + message = '{}: '.format(message.strip()) if not default else message - return "{}{}".format(message, default) + return '{}{}'.format(message, default) @classmethod def run_command(cls, command, cwd=None, polling=False): @@ -84,13 +140,10 @@ def run_command(cls, command, cwd=None, polling=False): process = subprocess.Popen(command, stdout=subprocess.PIPE, cwd=cwd) while True: output = process.stdout.readline() - if output == "" and process.poll() is not None: + if output == '' and process.poll() is not None: break if output: - if PY2: - print(output.strip()) - else: - print(output.decode().strip()) + print(output.decode().strip()) return process.poll() else: try: @@ -102,6 +155,17 @@ def run_command(cls, command, cwd=None, polling=False): # ^^^ this doesn't seem to be true? 
let's write it explicitly # see https://docs.python.org/3/library/subprocess.html#subprocess.check_output sys.stderr.write(cpe.output) - cls.colored_print("An error has occurred", CLI.COLOR_ERROR) + cls.colored_print('An error has occurred', CLI.COLOR_ERROR) sys.exit(1) return stdout + + @classmethod + def yes_no_question(cls, question, default=True, + labels=['Yes', 'No']): + cls.colored_print(question, color=cls.COLOR_QUESTION) + for index, label in enumerate(labels): + cls.colored_print('\t{index}) {label}'.format( + index=index + 1, + label=label + )) + return cls.get_response(default=default) diff --git a/helpers/command.py b/helpers/command.py index 40e638d..fea6dff 100644 --- a/helpers/command.py +++ b/helpers/command.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals import sys import time @@ -9,41 +8,47 @@ from helpers.config import Config from helpers.network import Network from helpers.template import Template -from helpers.upgrading import migrate_single_to_two_databases +from helpers.upgrading import Upgrading class Command: @staticmethod def help(): - print(("Usage: python run.py [options]\n" - "\n" - " Options:\n" - " -i, --info\n" - " Show KoBoToolbox Url and super user credentials\n" - " -l, --logs\n" - " Display docker logs\n" - " -b, --build\n" - " Build kpi and kobocat (only on dev/staging mode)\n" - " -bkf, --build-kpi\n" - " Build kpi (only on dev/staging mode)\n" - " -bkc, --build-kobocat\n" - " Build kobocat (only on dev/staging mode)\n" - " -s, --setup\n" - " Prompt questions to rebuild configuration. 
Restart KoBoToolbox\n" - " -S, --stop\n" - " Stop KoBoToolbox\n" - " -u, --update, --upgrade [branch or tag]\n" - " Update KoBoToolbox\n" - " -cf, --compose-frontend [docker-compose arguments]\n" - " Run a docker-compose command in the front-end environment\n" - " -cb, --compose-backend [docker-compose arguments]\n" - " Run a docker-compose command in the back-end environment\n" - " -m, --maintenance\n" - " Activate maintenance mode. All traffic is redirected to maintenance page\n" - " -v, --version\n" - " Display current version\n" - )) + output = [ + 'Usage: python run.py [options]', + '', + ' Options:', + ' -i, --info', + ' Show KoBoToolbox Url and super user credentials', + ' -l, --logs', + ' Display docker logs', + ' -b, --build', + ' Build kpi and kobocat (only on dev/staging mode)', + ' -bkf, --build-kpi', + ' Build kpi (only on dev/staging mode)', + ' -bkc, --build-kobocat', + ' Build kobocat (only on dev/staging mode)', + ' -s, --setup', + ' Prompt questions to (re)write configuration files', + ' -S, --stop', + ' Stop KoBoToolbox', + ' -u, --update, --upgrade [branch or tag]', + ' Update KoBoToolbox', + ' -cf, --compose-frontend [docker-compose arguments]', + ' Run a docker-compose command in the front-end ' + 'environment', + ' -cb, --compose-backend [docker-compose arguments]', + ' Run a docker-compose command in the back-end ' + 'environment', + ' -m, --maintenance', + ' Activate maintenance mode. 
All traffic is ' + 'redirected to maintenance page', + ' -v, --version', + ' Display current version', + '' + ] + print('\n'.join(output)) @classmethod def build(cls, image=None): @@ -53,222 +58,239 @@ def build(cls, image=None): :param image: str """ - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - if config_object.dev_mode or config_object.staging_mode: + if config.dev_mode or config.staging_mode: def build_image(image_): - frontend_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "build", "--force-rm", "--no-cache", - image_] - - CLI.run_command(frontend_command, config.get("kobodocker_path")) - - if image is None or image == "kf": - config["kpi_dev_build_id"] = "{prefix}{timestamp}".format( - prefix=config_object.get_prefix("frontend"), + frontend_command = [ + 'docker-compose', + '-f', + 'docker-compose.frontend.yml', + '-f', + 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'build', '--force-rm', '--no-cache', + image_ + ] + + CLI.run_command(frontend_command, dict_['kobodocker_path']) + + if image is None or image == 'kf': + dict_['kpi_dev_build_id'] = '{prefix}{timestamp}'.format( + prefix=config.get_prefix('frontend'), timestamp=str(int(time.time())) ) - config_object.write_config() - Template.render(config_object) - build_image("kpi") + config.write_config() + Template.render(config) + build_image('kpi') - if image is None or image == "kc": - pull_base_command = ["docker", - "pull", - "kobotoolbox/koboform_base"] + if image is None or image == 'kc': + pull_base_command = ['docker', + 'pull', + 'kobotoolbox/koboform_base'] - CLI.run_command(pull_base_command, config.get("kobodocker_path")) + CLI.run_command(pull_base_command, dict_['kobodocker_path']) - config["kc_dev_build_id"] = "{prefix}{timestamp}".format( - 
prefix=config_object.get_prefix("frontend"), + dict_['kc_dev_build_id'] = '{prefix}{timestamp}'.format( + prefix=config.get_prefix('frontend'), timestamp=str(int(time.time())) ) - config_object.write_config() - Template.render(config_object) - build_image("kobocat") + config.write_config() + Template.render(config) + build_image('kobocat') @classmethod def compose_frontend(cls, args): - config_object = Config() - config = config_object.get_config() - command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend")] + config = Config() + dict_ = config.get_dict() + command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend')] command.extend(args) - subprocess.call(command, cwd=config.get("kobodocker_path")) + subprocess.call(command, cwd=dict_['kobodocker_path']) @classmethod def compose_backend(cls, args): - config_object = Config() - config = config_object.get_config() - backend_role = config.get("backend_server_role", "primary") + config = Config() + dict_ = config.get_dict() + backend_role = dict_['backend_server_role'] command = [ - "docker-compose", - "-f", "docker-compose.backend.{}.yml".format(backend_role), - "-f", "docker-compose.backend.{}.override.yml".format(backend_role), - "-p", config_object.get_prefix("backend") + 'docker-compose', + '-f', 'docker-compose.backend.{}.yml'.format(backend_role), + '-f', 'docker-compose.backend.{}.override.yml'.format(backend_role), + '-p', config.get_prefix('backend') ] command.extend(args) - subprocess.call(command, cwd=config.get("kobodocker_path")) + subprocess.call(command, cwd=dict_['kobodocker_path']) @classmethod def info(cls, timeout=600): - config_object = Config() - config = config_object.get_config() - - main_url = "{}://{}.{}{}".format( - "https" if config.get("https") == Config.TRUE else "http", - 
config.get("kpi_subdomain"), - config.get("public_domain_name"), - ":{}".format(config.get("exposed_nginx_docker_port")) if ( - config.get("exposed_nginx_docker_port") and - str(config.get("exposed_nginx_docker_port")) != Config.DEFAULT_NGINX_PORT - ) else "" + config = Config() + dict_ = config.get_dict() + + nginx_port = dict_['exposed_nginx_docker_port'] + + main_url = '{}://{}.{}{}'.format( + 'https' if dict_['https'] else 'http', + dict_['kpi_subdomain'], + dict_['public_domain_name'], + ':{}'.format(nginx_port) if ( + nginx_port and + str(nginx_port) != Config.DEFAULT_NGINX_PORT + ) else '' ) stop = False start = int(time.time()) success = False - hostname = "{}.{}".format(config.get("kpi_subdomain"), config.get("public_domain_name")) - nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) if config.get("https") == Config.TRUE \ - else int(config.get("exposed_nginx_docker_port", Config.DEFAULT_NGINX_PORT)) - https = config.get("https") == Config.TRUE + hostname = '{}.{}'.format(dict_['kpi_subdomain'], + dict_['public_domain_name']) + https = dict_['https'] + nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) \ + if https else int(dict_['exposed_nginx_docker_port']) already_retried = False while not stop: - if Network.status_check(hostname, "/service_health/", nginx_port, https) == Network.STATUS_OK_200: + if Network.status_check(hostname, + '/service_health/', + nginx_port, https) == Network.STATUS_OK_200: stop = True success = True elif int(time.time()) - start >= timeout: if timeout > 0: CLI.colored_print( - "\n`KoBoToolbox` has not started yet. This is can be normal with low CPU/RAM computers.\n", + '\n`KoBoToolbox` has not started yet. 
' + 'This is can be normal with low CPU/RAM computers.\n', CLI.COLOR_INFO) - CLI.colored_print("Wait for another {} seconds?".format(timeout), CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - response = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - - if response == Config.TRUE: + question = 'Wait for another {} seconds?'.format(timeout) + response = CLI.yes_no_question(question) + if response: start = int(time.time()) continue else: - if already_retried is False: + if not already_retried: already_retried = True - CLI.colored_print(("\nSometimes frontend containers " - "can not communicate with backend containers.\n" - "Restarting the frontend containers usually fixes it.\n"), - CLI.COLOR_INFO) - CLI.colored_print("Do you want to try?".format(timeout), CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - response = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - if response == Config.TRUE: + CLI.colored_print( + '\nSometimes front-end containers cannot ' + 'communicate with back-end containers.\n' + 'Restarting the front-end containers usually ' + 'fixes it.\n', CLI.COLOR_INFO) + question = 'Would you like to try?' 
+ response = CLI.yes_no_question(question) + if response: start = int(time.time()) cls.restart_frontend() continue stop = True else: - sys.stdout.write(".") + sys.stdout.write('.') sys.stdout.flush() time.sleep(10) # Create a new line - print("") + print('') if success: - username = config.get("super_user_username") - password = config.get("super_user_password") - username_chars_count = len(username) + 6 - password_chars_count = len(password) + 10 - url_chars_count = len(main_url) + 6 - max_chars_count = max(username_chars_count, password_chars_count, url_chars_count) - - CLI.colored_print("╔═{}═╗".format("═" * max_chars_count), CLI.COLOR_WARNING) - CLI.colored_print("║ Ready {} ║".format( - " " * (max_chars_count - len("Ready "))), CLI.COLOR_WARNING) - CLI.colored_print("║ URL: {}/{} ║".format( - main_url, " " * (max_chars_count - url_chars_count)), CLI.COLOR_WARNING) - CLI.colored_print("║ User: {}{} ║".format( - username, " " * (max_chars_count - username_chars_count)), CLI.COLOR_WARNING) - CLI.colored_print("║ Password: {}{} ║".format( - password, " " * (max_chars_count - password_chars_count)), CLI.COLOR_WARNING) - CLI.colored_print("╚═{}═╝".format("═" * max_chars_count), CLI.COLOR_WARNING) + username = dict_['super_user_username'] + password = dict_['super_user_password'] + + message = ( + 'Ready\n' + 'URL: {url}\n' + 'User: {username}\n' + 'Password: {password}' + ).format( + url=main_url, + username=username, + password=password) + CLI.framed_print(message, + color=CLI.COLOR_SUCCESS) + else: - CLI.colored_print("KoBoToolbox could not start! " - "Please try `python3 run.py --logs` to see the logs.", - CLI.COLOR_ERROR) + message = ( + 'KoBoToolbox could not start!\n' + 'Please try `python3 run.py --logs` to see the logs.' 
+ ) + CLI.framed_print(message, color=CLI.COLOR_ERROR) return success @classmethod def logs(cls): - config_object = Config() - config = config_object.get_config() - - if config_object.primary_backend or config_object.secondary_backend: - backend_role = config.get("backend_server_role", "primary") - - backend_command = ["docker-compose", - "-f", "docker-compose.backend.{}.yml".format(backend_role), - "-f", "docker-compose.backend.{}.override.yml".format(backend_role), - "-p", config_object.get_prefix("backend"), - "logs", "-f"] - CLI.run_command(backend_command, config.get("kobodocker_path"), True) - - if config_object.frontend: - frontend_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "logs", "-f"] - CLI.run_command(frontend_command, config.get("kobodocker_path"), True) + config = Config() + dict_ = config.get_dict() + + if config.primary_backend or config.secondary_backend: + backend_role = dict_['backend_server_role'] + + backend_command = ['docker-compose', + '-f', + 'docker-compose.backend.{}.yml'.format( + backend_role), + '-f', + 'docker-compose.backend.{}.override.yml'.format( + backend_role), + '-p', + config.get_prefix('backend'), + 'logs', + '-f' + ] + CLI.run_command(backend_command, + dict_['kobodocker_path'], + True) + + if config.frontend: + frontend_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'logs', '-f'] + CLI.run_command(frontend_command, + dict_['kobodocker_path'], + True) @classmethod def configure_maintenance(cls): - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - if not config_object.multi_servers or config_object.frontend: + if not config.multi_servers or config.frontend: - config_object.maintenance() - Template.render_maintenance(config_object) - 
config['maintenance_enabled'] = True - config_object.write_config() + config.maintenance() + Template.render_maintenance(config) + dict_['maintenance_enabled'] = True + config.write_config() cls.stop_nginx() cls.start_maintenance() @classmethod def stop_nginx(cls): - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - nginx_stop_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "stop", "nginx"] + nginx_stop_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'stop', 'nginx'] - CLI.run_command(nginx_stop_command, config.get("kobodocker_path")) + CLI.run_command(nginx_stop_command, dict_['kobodocker_path']) @classmethod def start_maintenance(cls): - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - frontend_command = ["docker-compose", - "-f", "docker-compose.maintenance.yml", - "-f", "docker-compose.maintenance.override.yml", - "-p", config_object.get_prefix("maintenance"), - "up", "-d"] + frontend_command = ['docker-compose', + '-f', 'docker-compose.maintenance.yml', + '-f', 'docker-compose.maintenance.override.yml', + '-p', config.get_prefix('maintenance'), + 'up', '-d'] - CLI.run_command(frontend_command, config.get("kobodocker_path")) - CLI.colored_print("Maintenance mode has been started", + CLI.run_command(frontend_command, dict_['kobodocker_path']) + CLI.colored_print('Maintenance mode has been started', CLI.COLOR_SUCCESS) @classmethod @@ -277,100 +299,105 @@ def restart_frontend(cls): @classmethod def start(cls, frontend_only=False): - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() cls.stop(output=False, frontend_only=frontend_only) if frontend_only: - 
CLI.colored_print("Launching frontend containers", CLI.COLOR_SUCCESS) + CLI.colored_print('Launching front-end containers', CLI.COLOR_INFO) else: - CLI.colored_print("Launching environment", CLI.COLOR_SUCCESS) + CLI.colored_print('Launching environment', CLI.COLOR_INFO) # Test if ports are available ports = [] - if config_object.proxy: - nginx_port = int(config.get("nginx_proxy_port", 80)) + if config.proxy: + nginx_port = int(dict_['nginx_proxy_port']) else: - nginx_port = int(config.get("exposed_nginx_docker_port", 80)) + nginx_port = int(dict_['exposed_nginx_docker_port']) - if frontend_only or config_object.frontend or \ - not config_object.multi_servers: + if frontend_only or config.frontend or \ + not config.multi_servers: ports.append(nginx_port) - if (not frontend_only or config_object.primary_backend or - config_object.secondary_backend) and \ - config_object.expose_backend_ports: - ports.append(config.get("postgresql_port", 5432)) - ports.append(config.get("mongo_port", 27017)) - ports.append(config.get("redis_main_port", 6379)) - ports.append(config.get("redis_cache_port", 6380)) + if (not frontend_only or config.primary_backend or + config.secondary_backend) and \ + config.expose_backend_ports: + ports.append(dict_['postgresql_port']) + ports.append(dict_['mongo_port']) + ports.append(dict_['redis_main_port']) + ports.append(dict_['redis_cache_port']) for port in ports: if Network.is_port_open(port): - CLI.colored_print("Port {} is already open. " - "KoboToolbox can't start".format(port), + CLI.colored_print('Port {} is already open. 
' + 'KoboToolbox cannot start'.format(port), CLI.COLOR_ERROR) sys.exit(1) # Start the back-end containers - if not frontend_only and config_object.backend: - backend_role = config.get("backend_server_role", "primary") - - backend_command = ["docker-compose", - "-f", - "docker-compose.backend.{}.yml".format( - backend_role), - "-f", - "docker-compose.backend.{}.override.yml".format( - backend_role), - "-p", config_object.get_prefix("backend"), - "up", "-d"] - CLI.run_command(backend_command, config.get("kobodocker_path")) + if not frontend_only and config.backend: + + backend_role = dict_['backend_server_role'] + + backend_command = [ + 'docker-compose', + '-f', + 'docker-compose.backend.{}.yml'.format(backend_role), + '-f', + 'docker-compose.backend.{}.override.yml'.format(backend_role), + '-p', + config.get_prefix('backend'), + 'up', + '-d' + ] + CLI.run_command(backend_command, dict_['kobodocker_path']) # Start the front-end containers - if config_object.frontend: + if config.frontend: - # If this was previously a shared-database setup, migrate to separate - # databases for KPI and KoBoCAT - migrate_single_to_two_databases() + # If this was previously a shared-database setup, migrate to + # separate databases for KPI and KoBoCAT + Upgrading.migrate_single_to_two_databases(config) - frontend_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "up", "-d"] + frontend_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'up', '-d'] - if config.get('maintenance_enabled', False): + if dict_['maintenance_enabled']: cls.start_maintenance() # Start all front-end services except the non-maintenance NGINX frontend_command.extend([ - s for s in config_object.get_service_names() if s != 'nginx' + s for s in config.get_service_names() if s != 'nginx' ]) - 
CLI.run_command(frontend_command, config.get("kobodocker_path")) + CLI.run_command(frontend_command, dict_['kobodocker_path']) # Start reverse proxy if user uses it. - if config_object.use_letsencrypt: - proxy_command = ["docker-compose", - "up", "-d"] + if config.use_letsencrypt: + proxy_command = ['docker-compose', + 'up', '-d'] CLI.run_command(proxy_command, - config_object.get_letsencrypt_repo_path()) - - if config.get('maintenance_enabled', False): - CLI.colored_print("Maintenance mode is enabled. To resume " - "normal operation, use `--stop-maintenance`", - CLI.COLOR_INFO) + config.get_letsencrypt_repo_path()) + + if dict_['maintenance_enabled']: + CLI.colored_print( + 'Maintenance mode is enabled. To resume ' + 'normal operation, use `--stop-maintenance`', + CLI.COLOR_INFO, + ) elif not frontend_only: - if not config_object.multi_servers or config_object.frontend: - CLI.colored_print("Waiting for environment to be ready. " - "It can take a few minutes.", CLI.COLOR_SUCCESS) + if not config.multi_servers or config.frontend: + CLI.colored_print('Waiting for environment to be ready. 
' + 'It can take a few minutes.', CLI.COLOR_INFO) cls.info() else: CLI.colored_print( - ("{} backend server is starting up and should be " - "up & running soon!\nPlease look at docker logs for " - "further information: `python3 run.py -cb logs -f`".format( - config.get('backend_server_role'))), + ('{} back-end server is starting up and should be ' + 'up & running soon!\nPlease look at docker logs for ' + 'further information: `python3 run.py -cb logs -f`'.format( + dict_['backend_server_role'])), CLI.COLOR_WARNING) @classmethod @@ -378,92 +405,93 @@ def stop(cls, output=True, frontend_only=False): """ Stop containers """ - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - if not config_object.multi_servers or config_object.frontend: + if not config.multi_servers or config.frontend: # Shut down maintenance container in case it's up&running maintenance_down_command = [ - "docker-compose", - "-f", "docker-compose.maintenance.yml", - "-f", "docker-compose.maintenance.override.yml", - "-p", config_object.get_prefix("maintenance"), - "down"] + 'docker-compose', + '-f', 'docker-compose.maintenance.yml', + '-f', 'docker-compose.maintenance.override.yml', + '-p', config.get_prefix('maintenance'), + 'down'] CLI.run_command(maintenance_down_command, - config.get("kobodocker_path")) + dict_['kobodocker_path']) - # Shut down frontend containers - frontend_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "down"] - CLI.run_command(frontend_command, config.get("kobodocker_path")) + # Shut down front-end containers + frontend_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'down'] + CLI.run_command(frontend_command, dict_['kobodocker_path']) # Stop reverse proxy if user uses it. 
- if config_object.use_letsencrypt: - proxy_command = ["docker-compose", - "down"] - CLI.run_command(proxy_command, config_object.get_letsencrypt_repo_path()) + if config.use_letsencrypt: + proxy_command = ['docker-compose', + 'down'] + CLI.run_command(proxy_command, + config.get_letsencrypt_repo_path()) - if not frontend_only and config_object.backend: - backend_role = config.get("backend_server_role", "primary") + if not frontend_only and config.backend: + backend_role = dict_['backend_server_role'] backend_command = [ - "docker-compose", - "-f", - "docker-compose.backend.{}.yml".format(backend_role), - "-f", - "docker-compose.backend.{}.override.yml".format(backend_role), - "-p", config_object.get_prefix("backend"), - "down" + 'docker-compose', + '-f', + 'docker-compose.backend.{}.yml'.format(backend_role), + '-f', + 'docker-compose.backend.{}.override.yml'.format(backend_role), + '-p', config.get_prefix('backend'), + 'down' ] - CLI.run_command(backend_command, config.get("kobodocker_path")) + CLI.run_command(backend_command, dict_['kobodocker_path']) if output: - CLI.colored_print("KoBoToolbox has been stopped", CLI.COLOR_SUCCESS) + CLI.colored_print('KoBoToolbox has been stopped', CLI.COLOR_SUCCESS) @classmethod def stop_maintenance(cls): """ - Stop containers + Stop maintenance mode """ - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() - if not config_object.multi_servers or config_object.frontend: + if not config.multi_servers or config.frontend: # Shut down maintenance container in case it's up&running maintenance_down_command = [ - "docker-compose", - "-f", "docker-compose.maintenance.yml", - "-f", "docker-compose.maintenance.override.yml", - "-p", config_object.get_prefix("maintenance"), - "down"] + 'docker-compose', + '-f', 'docker-compose.maintenance.yml', + '-f', 'docker-compose.maintenance.override.yml', + '-p', config.get_prefix('maintenance'), + 'down'] 
CLI.run_command(maintenance_down_command, - config.get("kobodocker_path")) + dict_['kobodocker_path']) # Create and start NGINX container - frontend_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "up", "-d", "nginx"] - CLI.run_command(frontend_command, config.get("kobodocker_path")) - - CLI.colored_print("Maintenance mode has been stopped", + frontend_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'up', '-d', 'nginx'] + CLI.run_command(frontend_command, dict_['kobodocker_path']) + + CLI.colored_print('Maintenance mode has been stopped', CLI.COLOR_SUCCESS) - config['maintenance_enabled'] = False - config_object.write_config() + dict_['maintenance_enabled'] = False + config.write_config() @classmethod def version(cls): - git_commit_version_command = ["git", "rev-parse", "HEAD"] + git_commit_version_command = ['git', 'rev-parse', 'HEAD'] stdout = CLI.run_command(git_commit_version_command) - CLI.colored_print("KoBoInstall Version: {} (build {})".format( + CLI.colored_print('kobo-install Version: {} (build {})'.format( Config.KOBO_INSTALL_VERSION, stdout.strip()[0:7], ), CLI.COLOR_SUCCESS) diff --git a/helpers/config.py b/helpers/config.py index 60b7f05..bf1aab0 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals, division - import binascii import json import os @@ -13,50 +11,54 @@ from datetime import datetime from random import choice +from helpers.aws_validation import AWSValidation from helpers.cli import CLI from helpers.network import Network -from helpers.singleton import Singleton, with_metaclass +from helpers.singleton import Singleton +from helpers.upgrading import Upgrading # Use this class as a singleton to get the same configuration # 
for each instantiation. -class Config(with_metaclass(Singleton)): - - CONFIG_FILE = ".run.conf" - UNIQUE_ID_FILE = ".uniqid" - UPSERT_DB_USERS_TRIGGER_FILE = ".upsert_db_users" - TRUE = "1" - FALSE = "2" - LETSENCRYPT_DOCKER_DIR = "nginx-certbot" - ENV_FILES_DIR = "kobo-env" - DEFAULT_PROXY_PORT = "8080" - DEFAULT_NGINX_PORT = "80" - DEFAULT_NGINX_HTTPS_PORT = "443" - KOBO_DOCKER_BRANCH = 'support-kobokitten' - KOBO_INSTALL_VERSION = '3.2.1' +class Config(metaclass=Singleton): + + CONFIG_FILE = '.run.conf' + UNIQUE_ID_FILE = '.uniqid' + UPSERT_DB_USERS_TRIGGER_FILE = '.upsert_db_users' + LETSENCRYPT_DOCKER_DIR = 'nginx-certbot' + ENV_FILES_DIR = 'kobo-env' + DEFAULT_PROXY_PORT = '8080' + DEFAULT_NGINX_PORT = '80' + DEFAULT_NGINX_HTTPS_PORT = '443' + KOBO_DOCKER_BRANCH = 'block-publicly-internal-domain' + KOBO_INSTALL_VERSION = '4.4.2' + MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 def __init__(self): - self.__config = self.read_config() self.__first_time = None - self.__primary_ip = Network.get_primary_ip() + self.__dict = self.read_config() @property def advanced_options(self): """ Checks whether advanced options should be displayed - :return: bool + + Returns: + bool """ - return self.__config.get("advanced") == Config.TRUE + return self.__dict['advanced'] def auto_detect_network(self): """ Tries to detect new ip - :return: bool + + Returns: + bool: `True` if network has changed """ changed = False - local_interfaces = Network.get_local_interfaces(all=True) + local_interfaces = Network.get_local_interfaces(all_=True) - if self.__config.get("local_interface_ip") not in local_interfaces.values(): + if self.__dict['local_interface_ip'] not in local_interfaces.values(): self.__detect_network() self.write_config() changed = True @@ -65,33 +67,35 @@ def auto_detect_network(self): @property def aws(self): """ - Checks whether questions are backend only - :return: bool + Checks whether questions are back end only + + Returns: + bool """ - return self.__config.get("use_aws") == 
Config.TRUE + return self.__dict['use_aws'] @property def backend(self): return not self.multi_servers or self.primary_backend or \ - self.secondary_backend + self.secondary_backend @property def block_common_http_ports(self): - return self.use_letsencrypt or self.__config.get("block_common_http_ports") == Config.TRUE + return self.use_letsencrypt or self.__dict['block_common_http_ports'] @property def expose_backend_ports(self): - return self.__config.get("expose_backend_ports") == Config.TRUE + return self.__dict['expose_backend_ports'] def get_env_files_path(self): current_path = os.path.realpath(os.path.normpath(os.path.join( - self.__config.get("kobodocker_path"), - "..", + self.__dict['kobodocker_path'], + '..', Config.ENV_FILES_DIR ))) old_path = os.path.realpath(os.path.normpath(os.path.join( - self.__config.get("kobodocker_path"), + self.__dict['kobodocker_path'], '..', 'kobo-deployments' ))) @@ -104,8 +108,8 @@ def get_env_files_path(self): def get_letsencrypt_repo_path(self): return os.path.realpath(os.path.normpath(os.path.join( - self.__config.get("kobodocker_path"), - "..", + self.__dict['kobodocker_path'], + '..', Config.LETSENCRYPT_DOCKER_DIR ))) @@ -119,38 +123,66 @@ def get_prefix(self, role): try: prefix_ = roles[role] except KeyError: - CLI.colored_print("Invalid composer file", CLI.COLOR_ERROR) - sys.exit(-1) + CLI.colored_print('Invalid composer file', CLI.COLOR_ERROR) + sys.exit(1) - if not self.__config.get("docker_prefix"): + if not self.__dict['docker_prefix']: return prefix_ - return "{}-{}".format(self.__config.get("docker_prefix"), - prefix_) + return '{}-{}'.format(self.__dict['docker_prefix'], prefix_) + + def get_upgraded_dict(self): + """ + Sometimes during upgrades, some keys are changed/deleted/added. 
+ This method helps to get a compliant dict to expected config + + Returns: + dict + """ + + upgraded_dict = self.get_template() + upgraded_dict.update(self.__dict) + + # Upgrade to use two databases + upgraded_dict = Upgrading.two_databases(upgraded_dict, self.__dict) + + # Upgrade to use new terminology primary/secondary + upgraded_dict = Upgrading.new_terminology(upgraded_dict) + + # Upgrade to use booleans in `self.__dict` + upgraded_dict = Upgrading.use_booleans(upgraded_dict) + + return upgraded_dict @property def backend_questions(self): """ - Checks whether questions are backend only - :return: bool + Checks whether questions are back end only + + Returns: + bool """ return not self.multi_servers or not self.frontend def build(self): """ - Build configuration based on user's answer - :return: dict + Build configuration based on user's answers + + Returns: + dict: all values from user's responses needed to create + configuration files """ - if not self.__primary_ip: - CLI.colored_print("╔══════════════════════════════════════════════════════╗", CLI.COLOR_ERROR) - CLI.colored_print("║ No valid networks detected. Can not continue! ║", CLI.COLOR_ERROR) - CLI.colored_print("║ Please connect to a network and re-run the command. ║", CLI.COLOR_ERROR) - CLI.colored_print("╚══════════════════════════════════════════════════════╝", CLI.COLOR_ERROR) + if not Network.get_primary_ip(): + message = ( + 'No valid networks detected. Can not continue!\n' + 'Please connect to a network and re-run the command.' 
+ ) + CLI.framed_print(message, color=CLI.COLOR_ERROR) sys.exit(1) else: - self.__config = self.__get_upgraded_config() self.__welcome() + self.__dict = self.get_upgraded_dict() self.__create_directory() self.__questions_advanced_options() @@ -197,36 +229,43 @@ def build(self): self.write_config() - return self.__config + return self.__dict @property def dev_mode(self): - return self.__config.get("dev_mode") == Config.TRUE + return self.__dict['dev_mode'] is True @property def first_time(self): """ Checks whether setup is running for the first time - :return: bool + + Returns: + bool """ if self.__first_time is None: - self.__first_time = self.__config.get("date_created") is None + self.__first_time = self.__dict.get('date_created') is None return self.__first_time @property def frontend(self): """ - Checks whether setup is running on a frontend server - :return: bool + Checks whether setup is running on a front-end server + + Returns: + dict: all values from user's responses needed to create + configuration files """ return not self.multi_servers or \ - self.__config.get("server_role") == "frontend" + self.__dict['server_role'] == 'frontend' @property def frontend_questions(self): """ - Checks whether questions are frontend only - :return: bool + Checks whether questions are front-end only + + Returns: + bool """ return not self.multi_servers or self.frontend @@ -234,128 +273,184 @@ def frontend_questions(self): def generate_password(cls): """ Generate 12 characters long random password - :return: str + + Returns: + str """ characters = string.ascii_letters \ - + "!$%+-_^~@#{}[]()/\'\"`~,;:.<>" \ + + '!$%+-_^~@#{}[]()/\'\'`~,;:.<>' \ + string.digits required_chars_count = 12 return ''.join(choice(characters) for _ in range(required_chars_count)) - def get_config(self): - return self.__config + def get_dict(self): + return self.__dict @classmethod - def get_config_template(cls): + def get_template(cls): primary_ip = Network.get_primary_ip() return { - "advanced": 
"2", - "debug": Config.FALSE, - "kobodocker_path": os.path.realpath(os.path.normpath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "..", - "..", - "kobo-docker")) - ), - "internal_domain_name": "docker.internal", - "private_domain_name": "kobo.private", - "public_domain_name": "kobo.local", - "kpi_subdomain": "kf", - "kc_subdomain": "kc", - "ee_subdomain": "ee", - "kc_postgres_db": "kobocat", - "kpi_postgres_db": "koboform", - "postgres_user": "kobo", - "postgres_password": Config.generate_password(), - "kc_path": "", - "kpi_path": "", - "super_user_username": "super_admin", - "super_user_password": Config.generate_password(), - "postgres_replication_password": Config.generate_password(), - "use_aws": Config.FALSE, - "use_private_dns": Config.FALSE, - "primary_backend_ip": primary_ip, - "local_interface_ip": primary_ip, - "multi": Config.FALSE, - "postgres_settings": Config.FALSE, - "postgres_ram": "2", - "postgres_profile": "Mixed", - "postgres_max_connections": "100", - "postgres_hard_drive_type": "hdd", - "postgres_settings_content": "", - "custom_secret_keys": Config.FALSE, - "enketo_api_token": binascii.hexlify(os.urandom(60)).decode("utf-8"), - "enketo_encryption_key": binascii.hexlify(os.urandom(60)).decode("utf-8"), - "django_secret_key": binascii.hexlify(os.urandom(50)).decode("utf-8"), + 'advanced': False, + 'aws_access_key': '', + 'aws_backup_bucket_deletion_rule_enabled': False, + 'aws_backup_bucket_name': '', + 'aws_backup_daily_retention': '30', + 'aws_backup_monthly_retention': '12', + 'aws_backup_upload_chunk_size': '15', + 'aws_backup_weekly_retention': '4', + 'aws_backup_yearly_retention': '2', + 'aws_bucket_name': '', + 'aws_credentials_valid': False, + 'aws_mongo_backup_minimum_size': '50', + 'aws_postgres_backup_minimum_size': '50', + 'aws_redis_backup_minimum_size': '5', + 'aws_secret_key': '', + 'aws_validate_credentials': True, + 'backend_server_role': 'primary', + 'backup_from_primary': True, + 'block_common_http_ports': 
True, + 'custom_secret_keys': False, + 'customized_ports': False, + 'debug': False, + 'default_from_email': 'support@kobo.local', + 'dev_mode': False, + 'django_secret_key': binascii.hexlify(os.urandom(50)).decode(), + 'docker_prefix': '', + 'ee_subdomain': 'ee', + 'enketo_api_token': binascii.hexlify(os.urandom(60)).decode(), + 'enketo_encryption_key': binascii.hexlify(os.urandom(60)).decode(), # default value from enketo. Because it was not customizable before # we want to keep the same value when users upgrade. - "enketo_less_secure_encryption_key": 'this $3cr3t key is crackable', - "use_backup": Config.FALSE, - "kobocat_media_schedule": "0 0 * * 0", - "mongo_backup_schedule": "0 1 * * 0", - "postgres_backup_schedule": "0 2 * * 0", - "redis_backup_schedule": "0 3 * * 0", - "aws_backup_bucket_name": "", - "aws_backup_yearly_retention": "2", - "aws_backup_monthly_retention": "12", - "aws_backup_weekly_retention": "4", - "aws_backup_daily_retention": "30", - "aws_mongo_backup_minimum_size": "50", - "aws_postgres_backup_minimum_size": "50", - "aws_redis_backup_minimum_size": "5", - "aws_backup_upload_chunk_size": "15", - "aws_backup_bucket_deletion_rule_enabled": Config.FALSE, - "backend_server_role": "primary", - "use_letsencrypt": Config.TRUE, - "proxy": Config.TRUE, - "https": Config.TRUE, - "nginx_proxy_port": Config.DEFAULT_PROXY_PORT, - "exposed_nginx_docker_port": Config.DEFAULT_NGINX_PORT, - "expose_backend_ports": Config.FALSE, - "postgresql_port": "5432", - "mongo_port": "27017", - "redis_main_port": "6379", - "redis_cache_port": "6380", - "local_installation": Config.FALSE, - "block_common_http_ports": Config.TRUE, - "npm_container": Config.TRUE, - "mongo_root_username": "root", - "mongo_root_password": Config.generate_password(), - "mongo_user_username": "kobo", - "mongo_user_password": Config.generate_password(), - "redis_password": Config.generate_password(), - "uwsgi_workers_start": "1", - "uwsgi_workers_max": "2", - "uwsgi_max_requests": "512", - 
"uwsgi_soft_limit": "128", - "uwsgi_harakiri": "120", - "uwsgi_worker_reload_mercy": "120", - "backup_from_primary": Config.TRUE, + 'enketo_less_secure_encryption_key': 'this $3cr3t key is crackable', + 'expose_backend_ports': False, + 'exposed_nginx_docker_port': Config.DEFAULT_NGINX_PORT, + 'google_api_key': '', + 'google_ua': '', + 'https': True, + 'internal_domain_name': 'docker.internal', + 'kc_dev_build_id': '', + 'kc_path': '', + 'kc_postgres_db': 'kobocat', + 'kc_subdomain': 'kc', + 'kobocat_media_backup_schedule': '0 0 * * 0', + 'kobocat_media_schedule': '0 0 * * 0', + 'kobocat_raven': '', + 'kobodocker_path': os.path.realpath(os.path.normpath(os.path.join( + os.path.dirname(os.path.realpath(__file__)), + '..', + '..', + 'kobo-docker')) + ), + 'kpi_dev_build_id': '', + 'kpi_path': '', + 'kpi_postgres_db': 'koboform', + 'kpi_raven': '', + 'kpi_raven_js': '', + 'kpi_subdomain': 'kf', + 'local_installation': False, + 'local_interface': Network.get_primary_interface(), + 'local_interface_ip': primary_ip, + 'letsencrypt_email': 'support@kobo.local', + 'maintenance_date_iso': '', + 'maintenance_date_str': '', + 'maintenance_email': 'support@kobo.local', + 'maintenance_enabled': False, + 'maintenance_eta': '2 hours', + 'mongo_backup_schedule': '0 1 * * 0', + 'mongo_port': '27017', + 'mongo_root_password': Config.generate_password(), + 'mongo_root_username': 'root', + 'mongo_user_password': Config.generate_password(), + 'mongo_user_username': 'kobo', + 'multi': False, + 'nginx_proxy_port': Config.DEFAULT_PROXY_PORT, + 'npm_container': True, + 'postgres_backup_schedule': '0 2 * * 0', + 'postgres_hard_drive_type': 'hdd', + 'postgres_max_connections': '100', + 'postgres_password': Config.generate_password(), + 'postgres_profile': 'Mixed', + 'postgres_ram': '2', + 'postgres_replication_password': Config.generate_password(), + 'postgres_settings': False, + 'postgres_settings_content': '\n'.join([ + '# Memory Configuration', + 'shared_buffers = 512MB', + 
'effective_cache_size = 2GB', + 'work_mem = 10MB', + 'maintenance_work_mem = 128MB', + '', + '# Checkpoint Related Configuration', + 'min_wal_size = 512MB', + 'max_wal_size = 2GB', + 'checkpoint_completion_target = 0.9', + 'wal_buffers = 15MB', + '', + '# Network Related Configuration', + "listen_addresses = '*'", + 'max_connections = 100', + ]), + 'postgres_user': 'kobo', + 'postgresql_port': '5432', + 'primary_backend_ip': primary_ip, + 'private_domain_name': 'kobo.private', + 'proxy': True, + 'public_domain_name': 'kobo.local', + 'raven_settings': False, + 'redis_backup_schedule': '0 3 * * 0', + 'redis_cache_port': '6380', + 'redis_main_port': '6379', + 'redis_password': Config.generate_password(), + 'review_host': True, + 'server_role': 'frontend', + 'smtp_host': '', + 'smtp_password': '', + 'smtp_port': '25', + 'smtp_user': '', + 'smtp_use_tls': False, + 'staging_mode': False, + 'super_user_password': Config.generate_password(), + 'super_user_username': 'super_admin', + 'two_databases': True, + 'use_aws': False, + 'use_backup': False, + 'use_letsencrypt': True, + 'use_private_dns': False, + 'use_wal_e': False, + 'uwsgi_harakiri': '120', + 'uwsgi_max_requests': '512', + 'uwsgi_settings': False, + 'uwsgi_soft_limit': '128', + 'uwsgi_worker_reload_mercy': '120', + 'uwsgi_workers_max': '2', + 'uwsgi_workers_start': '1', } + # Keep properties sorted alphabetically def get_service_names(self): - service_list_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "config", "--services"] + service_list_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + 'config', '--services'] - services = CLI.run_command(service_list_command, self.__config["kobodocker_path"]) + services = CLI.run_command(service_list_command, + self.__dict['kobodocker_path']) return services.strip().split('\n') @property def is_secure(self): - return 
self.__config.get("https") == Config.TRUE + return self.__dict['https'] is True def init_letsencrypt(self): if self.use_letsencrypt: reverse_proxy_path = self.get_letsencrypt_repo_path() reverse_proxy_command = [ - "/bin/bash", - "init-letsencrypt.sh" + '/bin/bash', + 'init-letsencrypt.sh' ] CLI.run_command(reverse_proxy_command, reverse_proxy_path) @@ -363,9 +458,11 @@ def init_letsencrypt(self): def local_install(self): """ Checks whether installation is for `Workstation`s - :return: bool + + Returns: + bool """ - return self.__config.get("local_installation") == Config.TRUE + return self.__dict['local_installation'] def maintenance(self): self.__questions_maintenance() @@ -373,63 +470,86 @@ def maintenance(self): @property def primary_backend(self): """ - Checks whether setup is running on a primary backend server - :return: bool + Checks whether setup is running on a primary back-end server + + Returns: + bool """ return self.multi_servers and \ - self.__config.get('server_role') == 'backend' and \ - self.__config.get("backend_server_role") == "primary" + self.__dict['server_role'] == 'backend' and \ + self.__dict['backend_server_role'] == 'primary' @property def multi_servers(self): """ - Checks whether installation is for separate frontend and backend servers - :return: bool + Checks whether installation is for separate front-end and back-end + servers + + Returns: + bool """ - return self.__config.get("multi") == Config.TRUE + return self.__dict['multi'] @property def proxy(self): """ Checks whether installation is using a proxy or a load balancer - :return: bool + + Returns: + bool """ - return self.__config.get("proxy") == Config.TRUE + return self.__dict['proxy'] def read_config(self): """ Reads config from file `Config.CONFIG_FILE` if exists - :return: dict + + Returns: + dict """ - config = {} + dict_ = {} try: - base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + base_dir = os.path.dirname( + 
os.path.dirname(os.path.realpath(__file__))) config_file = os.path.join(base_dir, Config.CONFIG_FILE) - with open(config_file, "r") as f: - config = json.loads(f.read()) + with open(config_file, 'r') as f: + dict_ = json.loads(f.read()) except IOError: pass - self.__config = config + self.__dict = dict_ unique_id = self.read_unique_id() if not unique_id: - self.__config["unique_id"] = int(time.time()) + self.__dict['unique_id'] = int(time.time()) - return config + return dict_ def read_unique_id(self): """ Reads unique id from file `Config.UNIQUE_ID_FILE` - :return: str + + Returns: + str """ unique_id = None try: - unique_id_file = os.path.join(self.__config.get("kobodocker_path"), + unique_id_file = os.path.join(self.__dict['kobodocker_path'], Config.UNIQUE_ID_FILE) - with open(unique_id_file, "r") as f: + except KeyError: + if self.first_time: + return None + else: + CLI.framed_print('Bad configuration! The path of kobo-docker ' + 'path is missing. Please delete `.run.conf` ' + 'and start from scratch', + color=CLI.COLOR_ERROR) + sys.exit(1) + + try: + with open(unique_id_file, 'r') as f: unique_id = f.read().strip() - except Exception as e: + except FileNotFoundError: pass return unique_id @@ -437,59 +557,71 @@ def read_unique_id(self): @property def secondary_backend(self): """ - Checks whether setup is running on a secondary backend server - :return: bool + Checks whether setup is running on a secondary back-end server + + Returns: + bool """ return self.multi_servers and \ - self.__config.get('server_role') == 'backend' and \ - self.__config.get("backend_server_role") == "secondary" + self.__dict['server_role'] == 'backend' and \ + self.__dict['backend_server_role'] == 'secondary' def set_config(self, value): - self.__config = value + self.__dict = value @property def staging_mode(self): - return self.__config.get("staging_mode") == Config.TRUE + return self.__dict['staging_mode'] @property def use_letsencrypt(self): - return self.local_install is False and 
\ - self.__config["use_letsencrypt"] == Config.TRUE + return not self.local_install and self.__dict['use_letsencrypt'] @property def use_private_dns(self): - return self.__config["use_private_dns"] == Config.TRUE + return self.__dict['use_private_dns'] + + def validate_aws_credentials(self): + validation = AWSValidation( + aws_access_key_id=self.__dict['aws_access_key'], + aws_secret_access_key=self.__dict['aws_secret_key'], + ) + self.__dict['aws_credentials_valid'] = validation.validate_credentials() def write_config(self): """ Writes config to file `Config.CONFIG_FILE`. """ - # Adds `date_created`. This field will be use to determine first usage of the setup option. - if self.__config.get("date_created") is None: - self.__config["date_created"] = int(time.time()) - self.__config["date_modified"] = int(time.time()) + # Adds `date_created`. This field will be use to determine + # first usage of the setup option. + if self.__dict.get('date_created') is None: + self.__dict['date_created'] = int(time.time()) + self.__dict['date_modified'] = int(time.time()) try: - base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + base_dir = os.path.dirname( + os.path.dirname(os.path.realpath(__file__))) config_file = os.path.join(base_dir, Config.CONFIG_FILE) - with open(config_file, "w") as f: - f.write(json.dumps(self.__config, indent=2, sort_keys=True)) + with open(config_file, 'w') as f: + f.write(json.dumps(self.__dict, indent=2, sort_keys=True)) os.chmod(config_file, stat.S_IWRITE | stat.S_IREAD) except IOError: - CLI.colored_print("Could not write configuration file", CLI.COLOR_ERROR) + CLI.colored_print('Could not write configuration file', + CLI.COLOR_ERROR) sys.exit(1) def write_unique_id(self): try: - unique_id_file = os.path.join(self.__config.get("kobodocker_path"), Config.UNIQUE_ID_FILE) - with open(unique_id_file, "w") as f: - f.write(str(self.__config.get("unique_id"))) + unique_id_file = os.path.join(self.__dict['kobodocker_path'], + 
Config.UNIQUE_ID_FILE) + with open(unique_id_file, 'w') as f: + f.write(str(self.__dict['unique_id'])) os.chmod(unique_id_file, stat.S_IWRITE | stat.S_IREAD) except (IOError, OSError): - CLI.colored_print("Could not write unique_id file", CLI.COLOR_ERROR) + CLI.colored_print('Could not write unique_id file', CLI.COLOR_ERROR) return False return True @@ -498,22 +630,23 @@ def __create_directory(self): """ Create repository directory if it doesn't exist. """ - CLI.colored_print("Where do you want to install?", CLI.COLOR_SUCCESS) + CLI.colored_print('Where do you want to install?', CLI.COLOR_QUESTION) while True: - kobodocker_path = CLI.colored_input("", CLI.COLOR_SUCCESS, - self.__config.get("kobodocker_path")) - - if kobodocker_path.startswith("."): - base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - kobodocker_path = os.path.normpath(os.path.join(base_dir, kobodocker_path)) - - CLI.colored_print("Please confirm path [{}]".format(kobodocker_path), - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") + kobodocker_path = CLI.colored_input( + '', + CLI.COLOR_QUESTION, + self.__dict['kobodocker_path'] + ) - if CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) == Config.TRUE: + if kobodocker_path.startswith('.'): + base_dir = os.path.dirname( + os.path.dirname(os.path.realpath(__file__))) + kobodocker_path = os.path.normpath( + os.path.join(base_dir, kobodocker_path)) + question = 'Please confirm path [{}]'.format(kobodocker_path) + response = CLI.yes_no_question(question) + if response is True: if os.path.isdir(kobodocker_path): break else: @@ -521,18 +654,23 @@ def __create_directory(self): os.makedirs(kobodocker_path) break except OSError: - CLI.colored_print("Could not create directory {}!".format(kobodocker_path), CLI.COLOR_ERROR) - CLI.colored_print("Please make sure you have permissions and path is correct", CLI.COLOR_ERROR) - - self.__config["kobodocker_path"] = kobodocker_path + CLI.colored_print( + 
'Could not create directory {}!'.format( + kobodocker_path), CLI.COLOR_ERROR) + CLI.colored_print( + 'Please make sure you have permissions ' + 'and path is correct', + CLI.COLOR_ERROR) + + self.__dict['kobodocker_path'] = kobodocker_path self.write_unique_id() self.__validate_installation() def __clone_repo(self, repo_path, repo_name): if repo_path: - if repo_path.startswith("."): + if repo_path.startswith('.'): full_repo_path = os.path.normpath(os.path.join( - self.__config["kobodocker_path"], + self.__dict['kobodocker_path'], repo_path )) else: @@ -543,193 +681,244 @@ def __clone_repo(self, repo_path, repo_name): try: os.makedirs(full_repo_path) except OSError: - CLI.colored_print("Please verify permissions.", CLI.COLOR_ERROR) + CLI.colored_print('Please verify permissions.', + CLI.COLOR_ERROR) sys.exit(1) # Only clone if folder is empty - if not os.path.isdir(os.path.join(full_repo_path, ".git")): + if not os.path.isdir(os.path.join(full_repo_path, '.git')): git_command = [ - "git", "clone", "https://github.com/kobotoolbox/{}".format(repo_name), + 'git', 'clone', + 'https://github.com/kobotoolbox/{}'.format(repo_name), full_repo_path ] - CLI.colored_print("Cloning `{}` repository to `{}` ".format( + CLI.colored_print('Cloning `{}` repository to `{}` '.format( repo_name, full_repo_path), CLI.COLOR_INFO) - CLI.run_command(git_command, cwd=os.path.dirname(full_repo_path)) + CLI.run_command(git_command, + cwd=os.path.dirname(full_repo_path)) def __detect_network(self): - self.__config["local_interface_ip"] = Network.get_primary_ip() + self.__dict['local_interface_ip'] = Network.get_primary_ip() if self.frontend: - self.__config["primary_backend_ip"] = self.__config["local_interface_ip"] + self.__dict['primary_backend_ip'] = self.__dict[ + 'local_interface_ip'] if self.advanced_options: - CLI.colored_print("Please choose which network interface you want to use?", CLI.COLOR_SUCCESS) + CLI.colored_print( + 'Please choose which network interface you want to use?', + 
CLI.COLOR_QUESTION) interfaces = Network.get_local_interfaces() - all_interfaces = Network.get_local_interfaces(all=True) - docker_interface = "docker0" - interfaces.update({"other": "Other"}) + all_interfaces = Network.get_local_interfaces(all_=True) + docker_interface = 'docker0' + interfaces.update({'other': 'Other'}) - if self.__config.get("local_interface") == docker_interface and \ + if self.__dict['local_interface'] == docker_interface and \ docker_interface in all_interfaces: - interfaces.update({docker_interface: all_interfaces.get(docker_interface)}) + interfaces.update( + {docker_interface: all_interfaces.get(docker_interface)}) for interface, ip_address in interfaces.items(): - CLI.colored_print("\t{}) {}".format(interface, ip_address)) + CLI.colored_print('\t{}) {}'.format(interface, ip_address)) choices = [str(interface) for interface in interfaces.keys()] - choices.append("other") + choices.append('other') response = CLI.get_response( choices, - self.__config.get("local_interface", Network.get_primary_interface())) + default=self.__dict['local_interface'] + ) - if response == "other": - interfaces = Network.get_local_interfaces(all=True) + if response == 'other': + interfaces = Network.get_local_interfaces(all_=True) for interface, ip_address in interfaces.items(): - CLI.colored_print("\t{}) {}".format(interface, ip_address)) + CLI.colored_print('\t{}) {}'.format(interface, ip_address)) choices = [str(interface) for interface in interfaces.keys()] - self.__config["local_interface"] = CLI.get_response( + self.__dict['local_interface'] = CLI.get_response( choices, - self.__config.get("local_interface", Network.get_primary_interface())) + self.__dict['local_interface'] + ) else: - self.__config["local_interface"] = response + self.__dict['local_interface'] = response - self.__config["local_interface_ip"] = interfaces[self.__config.get("local_interface")] + self.__dict['local_interface_ip'] = interfaces[ + self.__dict['local_interface']] if 
self.frontend: - self.__config["primary_backend_ip"] = self.__config.get("local_interface_ip") - - def __get_upgraded_config(self): - """ - Sometimes during upgrades, some keys are changed/deleted/added. - This method helps to get a compliant dict to expected config - - :return: dict - """ - - upgraded_config = self.get_config_template() - upgraded_config.update(self.__config) - - # If the configuration came from a previous version that had a - # single Postgres database, we need to make sure the new - # `kc_postgres_db` is set to the name of that single database, - # *not* the default from `get_config_template()` - if ( - self.__config.get("postgres_db") - and not self.__config.get("kc_postgres_db") - ): - upgraded_config["kc_postgres_db"] = self.__config["postgres_db"] - - # Force update user's config to use new terminology. - backend_role = upgraded_config.get('backend_server_role') - if backend_role in ['master', 'slave']: - upgraded_config['backend_server_role'] = 'primary' \ - if backend_role == 'master' else 'secondary' - - return upgraded_config + self.__dict['primary_backend_ip'] = self.__dict[ + 'local_interface_ip'] def __questions_advanced_options(self): """ Asks if user wants to see advanced options """ - CLI.colored_print("Do you want to see advanced options?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["advanced"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("advanced", Config.FALSE)) + self.__dict['advanced'] = CLI.yes_no_question( + 'Do you want to see advanced options?', + default=self.__dict['advanced']) def __questions_aws(self): """ Asks if user wants to see AWS option and asks for credentials if needed. 
""" - CLI.colored_print("Do you want to use AWS S3 storage?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["use_aws"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("use_aws", Config.FALSE)) - if self.__config["use_aws"] == Config.TRUE: - self.__config["aws_access_key"] = CLI.colored_input("AWS Access Key", CLI.COLOR_SUCCESS, - self.__config.get("aws_access_key", "")) - self.__config["aws_secret_key"] = CLI.colored_input("AWS Secret Key", CLI.COLOR_SUCCESS, - self.__config.get("aws_secret_key", "")) - self.__config["aws_bucket_name"] = CLI.colored_input("AWS Bucket name", CLI.COLOR_SUCCESS, - self.__config.get("aws_bucket_name", "")) + self.__dict['use_aws'] = CLI.yes_no_question( + 'Do you want to use AWS S3 storage?', + default=self.__dict['use_aws'] + ) + self.__questions_aws_configuration() + self.__questions_aws_validate_credentials() + + def __questions_aws_configuration(self): + + if self.__dict['use_aws']: + self.__dict['aws_access_key'] = CLI.colored_input( + 'AWS Access Key', CLI.COLOR_QUESTION, + self.__dict['aws_access_key']) + self.__dict['aws_secret_key'] = CLI.colored_input( + 'AWS Secret Key', CLI.COLOR_QUESTION, + self.__dict['aws_secret_key']) + self.__dict['aws_bucket_name'] = CLI.colored_input( + 'AWS Bucket name', CLI.COLOR_QUESTION, + self.__dict['aws_bucket_name']) else: - self.__config["aws_access_key"] = "" - self.__config["aws_secret_key"] = "" - self.__config["aws_bucket_name"] = "" + self.__dict['aws_access_key'] = '' + self.__dict['aws_secret_key'] = '' + self.__dict['aws_bucket_name'] = '' + + def __questions_aws_validate_credentials(self): + """ + Prompting user whether they would like to validate their entered AWS + credentials or continue without validation. 
+ """ + # Resetting validation when setup is rerun + self.__dict['aws_credentials_valid'] = False + aws_credential_attempts = 0 + + if self.__dict['use_aws']: + self.__dict['aws_validate_credentials'] = CLI.yes_no_question( + 'Would you like to validate your AWS credentials?', + default=self.__dict['aws_validate_credentials'], + ) + + if self.__dict['use_aws'] and self.__dict['aws_validate_credentials']: + while ( + not self.__dict['aws_credentials_valid'] + and aws_credential_attempts + <= self.MAXIMUM_AWS_CREDENTIAL_ATTEMPTS + ): + aws_credential_attempts += 1 + self.validate_aws_credentials() + attempts_remaining = ( + self.MAXIMUM_AWS_CREDENTIAL_ATTEMPTS + - aws_credential_attempts + ) + if ( + not self.__dict['aws_credentials_valid'] + and attempts_remaining > 0 + ): + CLI.colored_print( + 'Invalid credentials, please try again.', + CLI.COLOR_WARNING, + ) + CLI.colored_print( + 'Attempts remaining for AWS validation: {}'.format( + attempts_remaining + ), + CLI.COLOR_INFO, + ) + self.__questions_aws_configuration() + else: + if not self.__dict['aws_credentials_valid']: + CLI.colored_print( + 'Please restart configuration', CLI.COLOR_ERROR + ) + sys.exit(1) + else: + CLI.colored_print( + 'AWS credentials successfully validated', + CLI.COLOR_SUCCESS + ) def __questions_aws_backup_settings(self): - self.__config["aws_backup_bucket_name"] = CLI.colored_input( - "AWS Backups bucket name", CLI.COLOR_SUCCESS, - self.__config.get("aws_backup_bucket_name", "")) + self.__dict['aws_backup_bucket_name'] = CLI.colored_input( + 'AWS Backups bucket name', CLI.COLOR_QUESTION, + self.__dict['aws_backup_bucket_name']) - if self.__config["aws_backup_bucket_name"] != "": + if self.__dict['aws_backup_bucket_name'] != '': - backup_from_primary = self.__config["backup_from_primary"] == Config.TRUE + backup_from_primary = self.__dict['backup_from_primary'] - CLI.colored_print("How many yearly backups to keep?", CLI.COLOR_SUCCESS) - self.__config["aws_backup_yearly_retention"] = 
CLI.get_response( - r"~^\d+$", self.__config.get("aws_backup_yearly_retention")) + CLI.colored_print('How many yearly backups to keep?', + CLI.COLOR_QUESTION) + self.__dict['aws_backup_yearly_retention'] = CLI.get_response( + r'~^\d+$', self.__dict['aws_backup_yearly_retention']) - CLI.colored_print("How many monthly backups to keep?", CLI.COLOR_SUCCESS) - self.__config["aws_backup_monthly_retention"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_backup_monthly_retention")) + CLI.colored_print('How many monthly backups to keep?', + CLI.COLOR_QUESTION) + self.__dict['aws_backup_monthly_retention'] = CLI.get_response( + r'~^\d+$', self.__dict['aws_backup_monthly_retention']) - CLI.colored_print("How many weekly backups to keep?", CLI.COLOR_SUCCESS) - self.__config["aws_backup_weekly_retention"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_backup_weekly_retention")) + CLI.colored_print('How many weekly backups to keep?', + CLI.COLOR_QUESTION) + self.__dict['aws_backup_weekly_retention'] = CLI.get_response( + r'~^\d+$', self.__dict['aws_backup_weekly_retention']) - CLI.colored_print("How many daily backups to keep?", CLI.COLOR_SUCCESS) - self.__config["aws_backup_daily_retention"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_backup_daily_retention")) + CLI.colored_print('How many daily backups to keep?', + CLI.COLOR_QUESTION) + self.__dict['aws_backup_daily_retention'] = CLI.get_response( + r'~^\d+$', self.__dict['aws_backup_daily_retention']) if (not self.multi_servers or (self.primary_backend and backup_from_primary) or (self.secondary_backend and not backup_from_primary)): - CLI.colored_print("PostgresSQL backup minimum size (in MB)?", - CLI.COLOR_SUCCESS) + CLI.colored_print('PostgresSQL backup minimum size (in MB)?', + CLI.COLOR_QUESTION) CLI.colored_print( - "Files below this size will be ignored when rotating backups.", + 'Files below this size will be ignored when ' + 'rotating backups.', CLI.COLOR_INFO) - 
self.__config["aws_postgres_backup_minimum_size"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_postgres_backup_minimum_size")) + self.__dict[ + 'aws_postgres_backup_minimum_size'] = CLI.get_response( + r'~^\d+$', + self.__dict['aws_postgres_backup_minimum_size']) if self.primary_backend or not self.multi_servers: - CLI.colored_print("MongoDB backup minimum size (in MB)?", - CLI.COLOR_SUCCESS) + CLI.colored_print('MongoDB backup minimum size (in MB)?', + CLI.COLOR_QUESTION) CLI.colored_print( - "Files below this size will be ignored when rotating backups.", + 'Files below this size will be ignored when ' + 'rotating backups.', CLI.COLOR_INFO) - self.__config["aws_mongo_backup_minimum_size"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_mongo_backup_minimum_size")) + self.__dict[ + 'aws_mongo_backup_minimum_size'] = CLI.get_response( + r'~^\d+$', + self.__dict['aws_mongo_backup_minimum_size']) - CLI.colored_print("Redis backup minimum size (in MB)?", - CLI.COLOR_SUCCESS) + CLI.colored_print('Redis backup minimum size (in MB)?', + CLI.COLOR_QUESTION) CLI.colored_print( - "Files below this size will be ignored when rotating backups.", + 'Files below this size will be ignored when ' + 'rotating backups.', CLI.COLOR_INFO) - self.__config["aws_redis_backup_minimum_size"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_redis_backup_minimum_size")) - - CLI.colored_print("Chunk size of multipart uploads (in MB)?", - CLI.COLOR_SUCCESS) - self.__config["aws_backup_upload_chunk_size"] = CLI.get_response( - r"~^\d+$", self.__config.get("aws_backup_upload_chunk_size")) - - CLI.colored_print("Use AWS LifeCycle deletion rule?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["aws_backup_bucket_deletion_rule_enabled"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("aws_backup_bucket_deletion_rule_enabled", - Config.FALSE)) + self.__dict[ + 'aws_redis_backup_minimum_size'] = 
CLI.get_response( + r'~^\d+$', + self.__dict['aws_redis_backup_minimum_size']) + + CLI.colored_print('Chunk size of multipart uploads (in MB)?', + CLI.COLOR_QUESTION) + self.__dict['aws_backup_upload_chunk_size'] = CLI.get_response( + r'~^\d+$', self.__dict['aws_backup_upload_chunk_size']) + + response = CLI.yes_no_question( + 'Use AWS LifeCycle deletion rule?', + default=self.__dict['aws_backup_bucket_deletion_rule_enabled'] + ) + self.__dict['aws_backup_bucket_deletion_rule_enabled'] = response def __questions_backup(self): """ @@ -737,85 +926,113 @@ def __questions_backup(self): """ if self.backend_questions or (self.frontend_questions and not self.aws): - CLI.colored_print("Do you want to activate backups?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["use_backup"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("use_backup", Config.FALSE)) + self.__dict['use_backup'] = CLI.yes_no_question( + 'Do you want to activate backups?', + default=self.__dict['use_backup'] + ) - if self.__config.get("use_backup") == Config.TRUE: + if self.__dict['use_backup']: if self.advanced_options: if self.backend_questions and not self.frontend_questions: self.__questions_aws() - schedule_regex_pattern = (r"^((((\d+(,\d+)*)|(\d+-\d+)|(\*(\/\d+)?)))" - r"(\s+(((\d+(,\d+)*)|(\d+\-\d+)|(\*(\/\d+)?)))){4})$") - CLI.colored_print("╔═════════════════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ Schedules use linux cron syntax with UTC datetimes. ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ For example, schedule at 12:00 AM E.S.T every Sunday would be: ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ 0 5 * * 0 ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ Please visit https://crontab.guru/ to generate a cron schedule. 
║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═════════════════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) + # Prompting user whether they want to use WAL-E for + # continuous archiving - only if they are using aws + # for backups + if self.aws: + if self.primary_backend or not self.multi_servers: + self.__dict['use_wal_e'] = CLI.yes_no_question( + 'Do you want to use WAL-E for continuous ' + 'archiving of PostgreSQL backups?', + default=self.__dict['use_wal_e'] + ) + if self.__dict['use_wal_e']: + self.__dict['backup_from_primary'] = True + else: + # WAL-E cannot run on secondary + self.__dict['use_wal_e'] = False + else: + # WAL-E is only supported with AWS + self.__dict['use_wal_e'] = False + + schedule_regex_pattern = ( + r'^((((\d+(,\d+)*)|(\d+-\d+)|(\*(\/\d+)?)))' + r'(\s+(((\d+(,\d+)*)|(\d+\-\d+)|(\*(\/\d+)?)))){4})$') + message = ( + 'Schedules use linux cron syntax with UTC datetimes.\n' + 'For example, schedule at 12:00 AM E.S.T every Sunday ' + 'would be:\n' + '0 5 * * 0\n' + '\n' + 'Please visit https://crontab.guru/ to generate a ' + 'cron schedule.' 
+ ) + CLI.framed_print(message, color=CLI.COLOR_INFO) if self.frontend_questions and not self.aws: - CLI.colored_print("KoBoCat media backup schedule?", CLI.COLOR_SUCCESS) - self.__config["kobocat_media_backup_schedule"] = CLI.get_response( - "~{}".format(schedule_regex_pattern), - self.__config.get( - "kobocat_media_backup_schedule", - "0 0 * * 0")) + CLI.colored_print('KoBoCat media backup schedule?', + CLI.COLOR_QUESTION) + self.__dict[ + 'kobocat_media_backup_schedule'] = CLI.get_response( + '~{}'.format(schedule_regex_pattern), + self.__dict['kobocat_media_backup_schedule']) if self.backend_questions: + if self.__dict['use_wal_e'] is True: + self.__dict['backup_from_primary'] = True + else: + if self.primary_backend: + response = CLI.yes_no_question( + 'Run PostgreSQL backup from primary ' + 'backend server?', + default=self.__dict['backup_from_primary'] + ) + self.__dict['backup_from_primary'] = response + elif self.secondary_backend: + self.__dict['backup_from_primary'] = False + else: + self.__dict['backup_from_primary'] = True - if self.primary_backend: - CLI.colored_print("Run PostgreSQL backup from primary backend server?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["backup_from_primary"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("backup_from_primary", Config.TRUE)) + backup_from_primary = self.__dict['backup_from_primary'] - backup_from_primary = self.__config["backup_from_primary"] == Config.TRUE if (not self.multi_servers or - (self.primary_backend and backup_from_primary) or - (self.secondary_backend and not backup_from_primary)): - CLI.colored_print("PostgreSQL backup schedule?", CLI.COLOR_SUCCESS) - self.__config["postgres_backup_schedule"] = CLI.get_response( - "~{}".format(schedule_regex_pattern), - self.__config.get( - "postgres_backup_schedule", - "0 2 * * 0")) + (self.primary_backend and backup_from_primary) + or + (self.secondary_backend and + not 
backup_from_primary)): + CLI.colored_print('PostgreSQL backup schedule?', + CLI.COLOR_QUESTION) + self.__dict[ + 'postgres_backup_schedule'] = CLI.get_response( + '~{}'.format(schedule_regex_pattern), + self.__dict['postgres_backup_schedule']) if self.primary_backend or not self.multi_servers: - - CLI.colored_print("MongoDB backup schedule?", CLI.COLOR_SUCCESS) - self.__config["mongo_backup_schedule"] = CLI.get_response( - "~{}".format(schedule_regex_pattern), - self.__config.get( - "mongo_backup_schedule", - "0 1 * * 0")) - - CLI.colored_print("Redis backup schedule?", CLI.COLOR_SUCCESS) - self.__config["redis_backup_schedule"] = CLI.get_response( - "~{}".format(schedule_regex_pattern), - self.__config.get( - "redis_backup_schedule", - "0 3 * * 0")) + CLI.colored_print('MongoDB backup schedule?', + CLI.COLOR_QUESTION) + self.__dict[ + 'mongo_backup_schedule'] = CLI.get_response( + '~{}'.format(schedule_regex_pattern), + self.__dict['mongo_backup_schedule']) + + CLI.colored_print('Redis backup schedule?', + CLI.COLOR_QUESTION) + self.__dict[ + 'redis_backup_schedule'] = CLI.get_response( + '~{}'.format(schedule_regex_pattern), + self.__dict['redis_backup_schedule']) if self.aws: self.__questions_aws_backup_settings() - + else: + # Back to default value + self.__dict['backup_from_primary'] = True + else: + # Back to default value + self.__dict['backup_from_primary'] = True else: - self.__config["use_backup"] = Config.FALSE + self.__dict['use_backup'] = False + self.__dict['backup_from_primary'] = True # Back to default value def __questions_dev_mode(self): """ @@ -831,221 +1048,227 @@ def __questions_dev_mode(self): if self.local_install: # NGinX different port - CLI.colored_print("Web server port?", CLI.COLOR_SUCCESS) - self.__config["exposed_nginx_docker_port"] = CLI.get_response( - r"~^\d+$", self.__config.get("exposed_nginx_docker_port", - Config.DEFAULT_NGINX_PORT)) - CLI.colored_print("Developer mode?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - 
CLI.colored_print("\t2) No") - self.__config["dev_mode"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("dev_mode", Config.FALSE)) - self.__config["staging_mode"] = Config.FALSE + CLI.colored_print('Web server port?', CLI.COLOR_QUESTION) + self.__dict['exposed_nginx_docker_port'] = CLI.get_response( + r'~^\d+$', self.__dict['exposed_nginx_docker_port']) + self.__dict['dev_mode'] = CLI.yes_no_question( + 'Use developer mode?', + default=self.__dict['dev_mode'] + ) + self.__dict['staging_mode'] = False else: - - CLI.colored_print("Staging mode?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["staging_mode"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("staging_mode", Config.FALSE)) - self.__config["dev_mode"] = Config.FALSE + self.__dict['staging_mode'] = CLI.yes_no_question( + 'Use staging mode?', + default=self.__dict['staging_mode'] + ) + self.__dict['dev_mode'] = False if self.dev_mode or self.staging_mode: - CLI.colored_print("╔═══════════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ Where are the files located locally? It can be absolute ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ or relative to the directory of `kobo-docker`. ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ Leave empty if you don't need to overload the repository. 
║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═══════════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - self.__config["kc_path"] = CLI.colored_input( - "KoBoCat files location", CLI.COLOR_SUCCESS, - self.__config.get("kc_path")) - - self.__clone_repo(self.__config["kc_path"], "kobocat") - self.__config["kpi_path"] = CLI.colored_input( - "KPI files location", CLI.COLOR_SUCCESS, - self.__config.get("kpi_path")) - self.__clone_repo(self.__config["kpi_path"], "kpi") - - # Create an unique id to build fresh image when starting containers - if (self.__config.get("kc_dev_build_id", "") == "" or - self.__config.get("kc_path") != self.__config.get("kc_path")): - self.__config["kc_dev_build_id"] = "{prefix}{timestamp}".format( - prefix=self.get_prefix("frontend"), + message = ( + 'Where are the files located locally? It can be absolute ' + 'or relative to the directory of `kobo-docker`.\n\n' + 'Leave empty if you do not need to overload the repository.' + ) + CLI.framed_print(message, color=CLI.COLOR_INFO) + + kc_path = self.__dict['kc_path'] + self.__dict['kc_path'] = CLI.colored_input( + 'KoBoCat files location?', CLI.COLOR_QUESTION, + self.__dict['kc_path']) + self.__clone_repo(self.__dict['kc_path'], 'kobocat') + + kpi_path = self.__dict['kpi_path'] + self.__dict['kpi_path'] = CLI.colored_input( + 'KPI files location?', CLI.COLOR_QUESTION, + self.__dict['kpi_path']) + self.__clone_repo(self.__dict['kpi_path'], 'kpi') + + # Create an unique id to build fresh image + # when starting containers + if ( + not self.__dict['kc_dev_build_id'] or + self.__dict['kc_path'] != kc_path + ): + build_id = '{prefix}{timestamp}'.format( + prefix=self.get_prefix('frontend'), timestamp=str(int(time.time())) ) - if (self.__config.get("kpi_dev_build_id", "") == "" or - self.__config.get("kpi_path") != self.__config.get("kpi_path")): - self.__config["kpi_dev_build_id"] = "{prefix}{timestamp}".format( - prefix=self.get_prefix("frontend"), + 
self.__dict['kc_dev_build_id'] = build_id + + if ( + not self.__dict['kpi_dev_build_id'] == '' or + self.__dict['kpi_path'] != kpi_path + ): + build_id = '{prefix}{timestamp}'.format( + prefix=self.get_prefix('frontend'), timestamp=str(int(time.time())) ) + self.__dict['kpi_dev_build_id'] = build_id + if self.dev_mode: - # Debug - CLI.colored_print("Enable DEBUG?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) True") - CLI.colored_print("\t2) False") - self.__config["debug"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("debug", Config.TRUE)) - - # Frontend development - CLI.colored_print("How do you want to run `npm`?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) From within the container") - CLI.colored_print("\t2) Locally") - self.__config["npm_container"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("npm_container", Config.TRUE)) + self.__dict['debug'] = CLI.yes_no_question( + 'Enable DEBUG?', + default=self.__dict['debug'] + ) + + # Front-end development + self.__dict['npm_container'] = CLI.yes_no_question( + 'How do you want to run `npm`?', + default=self.__dict['npm_container'], + labels=[ + 'From within the container', + 'Locally', + ] + ) else: # Force reset paths self.__reset(dev=True, reset_nginx_port=self.staging_mode) def __questions_docker_prefix(self): """ - Asks for Docker compose prefix. It allows to start containers with a custom prefix + Asks for Docker compose prefix. It allows to start + containers with a custom prefix """ - self.__config["docker_prefix"] = CLI.colored_input("Docker Compose prefix? (leave empty for default)", - CLI.COLOR_SUCCESS, - self.__config.get("docker_prefix", "")) + self.__dict['docker_prefix'] = CLI.colored_input( + 'Docker Compose prefix? 
(leave empty for default)', + CLI.COLOR_QUESTION, + self.__dict['docker_prefix']) def __questions_google(self): """ Asks for Google's keys """ # Google Analytics - self.__config["google_ua"] = CLI.colored_input("Google Analytics Identifier", CLI.COLOR_SUCCESS, - self.__config.get("google_ua", "")) + self.__dict['google_ua'] = CLI.colored_input( + 'Google Analytics Identifier', CLI.COLOR_QUESTION, + self.__dict['google_ua']) # Google API Key - self.__config["google_api_key"] = CLI.colored_input("Google API Key", CLI.COLOR_SUCCESS, - self.__config.get("google_api_key", "")) + self.__dict['google_api_key'] = CLI.colored_input( + 'Google API Key', + CLI.COLOR_QUESTION, + self.__dict['google_api_key']) def __questions_https(self): """ Asks for HTTPS usage """ - CLI.colored_print("Do you want to use HTTPS?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["https"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("https", Config.TRUE)) - + self.__dict['https'] = CLI.yes_no_question( + 'Do you want to use HTTPS?', + default=self.__dict['https'] + ) if self.is_secure: - CLI.colored_print("╔════════════════════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ Please note that certificates must be installed on a reverse-proxy ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ or a load balancer. ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ KoBoInstall can install one, if needed. ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚════════════════════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) + message = ( + 'Please note that certificates must be installed on a ' + 'reverse-proxy or a load balancer.' + 'kobo-install can install one, if needed.' 
+ ) + CLI.framed_print(message, color=CLI.COLOR_INFO) def __questions_installation_type(self): """ Asks for installation type """ - - CLI.colored_print("What kind of installation do you need?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) On your workstation") - CLI.colored_print("\t2) On a server") - self.__config["local_installation"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("local_installation", Config.FALSE)) + self.__dict['local_installation'] = CLI.yes_no_question( + 'What kind of installation do you need?', + default=self.__dict['local_installation'], + labels=[ + 'On your workstation', + 'On a server', + ] + ) if self.local_install: # Reset previous choices, in case server role is not the same. self.__reset(local_install=True, private_dns=True) def __questions_maintenance(self): if self.first_time: - CLI.colored_print("╔═══════════════════════════════════════════════════╗", CLI.COLOR_WARNING) - CLI.colored_print("║ You must run setup first: `python run.py --setup` ║", CLI.COLOR_WARNING) - CLI.colored_print("╚═══════════════════════════════════════════════════╝", CLI.COLOR_WARNING) + message = ( + 'You must run setup first: `python3 run.py --setup` ' + ) + CLI.framed_print(message, color=CLI.COLOR_INFO) sys.exit(1) def _round_nearest_quarter(dt): - - minutes = int(15 * round((float(dt.minute) + float(dt.second) / 60) / 15)) + minutes = int( + 15 * round((float(dt.minute) + float(dt.second) / 60) / 15)) return datetime(dt.year, dt.month, dt.day, dt.hour, minutes if minutes < 60 else 0) - CLI.colored_print("How long do you plan to this maintenance will last?", - CLI.COLOR_SUCCESS) - self.__config["maintenance_eta"] = CLI.get_response( - r"~^[\w\ ]+$", - self.__config.get("maintenance_eta", "2 hours")) + CLI.colored_print('How long do you plan to this maintenance will last?', + CLI.COLOR_QUESTION) + self.__dict['maintenance_eta'] = CLI.get_response( + r'~^[\w\ ]+$', + self.__dict['maintenance_eta']) date_start = 
_round_nearest_quarter(datetime.utcnow()) iso_format = '%Y%m%dT%H%M' - CLI.colored_print("Start Date/Time (ISO format) GMT?", - CLI.COLOR_SUCCESS) - self.__config["maintenance_date_iso"] = CLI.get_response( - r"~^\d{8}T\d{4}$", date_start.strftime(iso_format)) - self.__config["maintenance_date_iso"] = self.__config["maintenance_date_iso"].upper() - - date_iso = self.__config["maintenance_date_iso"] - self.__config["maintenance_date_str"] = datetime.strptime(date_iso, iso_format). \ + CLI.colored_print('Start Date/Time (ISO format) GMT?', + CLI.COLOR_QUESTION) + self.__dict['maintenance_date_iso'] = CLI.get_response( + r'~^\d{8}T\d{4}$', date_start.strftime(iso_format)) + self.__dict['maintenance_date_iso'] = self.__dict[ + 'maintenance_date_iso'].upper() + + date_iso = self.__dict['maintenance_date_iso'] + self.__dict['maintenance_date_str'] = datetime.strptime(date_iso, + iso_format). \ strftime('%A, %B %d at %H:%M GMT') - self.__config["maintenance_email"] = CLI.colored_input( - "Contact during maintenance?", - CLI.COLOR_SUCCESS, - self.__config.get("maintenance_email", - self.__config.get("default_from_email"))) + self.__dict['maintenance_email'] = CLI.colored_input( + 'Contact during maintenance?', + CLI.COLOR_QUESTION, + self.__dict['maintenance_email'] + ) self.write_config() def __questions_mongo(self): """ Ask for MongoDB credentials only when server is for: - - primary backend + - primary back end - single server installation """ if self.primary_backend or not self.multi_servers: - mongo_user_username = self.__config["mongo_user_username"] - mongo_user_password = self.__config["mongo_user_password"] - mongo_root_username = self.__config["mongo_root_username"] - mongo_root_password = self.__config["mongo_root_password"] + mongo_user_username = self.__dict['mongo_user_username'] + mongo_user_password = self.__dict['mongo_user_password'] + mongo_root_username = self.__dict['mongo_root_username'] + mongo_root_password = self.__dict['mongo_root_password'] 
CLI.colored_print("MongoDB root's username?", - CLI.COLOR_SUCCESS) - self.__config["mongo_root_username"] = CLI.get_response( - r"~^\w+$", - self.__config.get("mongo_root_username"), + CLI.COLOR_QUESTION) + self.__dict['mongo_root_username'] = CLI.get_response( + r'~^\w+$', + self.__dict['mongo_root_username'], to_lower=False) - CLI.colored_print("MongoDB root's password?", CLI.COLOR_SUCCESS) - self.__config["mongo_root_password"] = CLI.get_response( - r"~^.{8,}$", - self.__config.get("mongo_root_password"), + CLI.colored_print("MongoDB root's password?", CLI.COLOR_QUESTION) + self.__dict['mongo_root_password'] = CLI.get_response( + r'~^.{8,}$', + self.__dict['mongo_root_password'], to_lower=False, error_msg='Too short. 8 characters minimum.') CLI.colored_print("MongoDB user's username?", - CLI.COLOR_SUCCESS) - self.__config["mongo_user_username"] = CLI.get_response( - r"~^\w+$", - self.__config.get("mongo_user_username"), + CLI.COLOR_QUESTION) + self.__dict['mongo_user_username'] = CLI.get_response( + r'~^\w+$', + self.__dict['mongo_user_username'], to_lower=False) - CLI.colored_print("MongoDB user's password?", CLI.COLOR_SUCCESS) - self.__config["mongo_user_password"] = CLI.get_response( - r"~^.{8,}$", - self.__config.get("mongo_user_password"), + CLI.colored_print("MongoDB user's password?", CLI.COLOR_QUESTION) + self.__dict['mongo_user_password'] = CLI.get_response( + r'~^.{8,}$', + self.__dict['mongo_user_password'], to_lower=False, error_msg='Too short. 
8 characters minimum.') - if (self.__config.get("mongo_secured") != Config.TRUE or - mongo_user_username != self.__config.get("mongo_user_username") or - mongo_user_password != self.__config.get("mongo_user_password") or - mongo_root_username != self.__config.get("mongo_root_username") or - mongo_root_password != self.__config.get("mongo_root_password")) and \ - not self.first_time: + if ( + not self.__dict.get('mongo_secured') + or mongo_user_username != self.__dict['mongo_user_username'] + or mongo_user_password != self.__dict['mongo_user_password'] + or mongo_root_username != self.__dict['mongo_root_username'] + or mongo_root_password != self.__dict['mongo_root_password'] + ) and not self.first_time: # Because chances are high we cannot communicate with DB # (e.g ports not exposed, containers down), we delegate the task @@ -1059,48 +1282,44 @@ def __questions_mongo(self): # Its format should be: `` content = '' - if (mongo_user_username != self.__config.get("mongo_user_username") or - mongo_root_username != self.__config.get("mongo_root_username")): - - CLI.colored_print("╔══════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ MongoDB root's and/or user's usernames have changed! ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚══════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - CLI.colored_print("Do you want to remove old users?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - delete_users = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - - if delete_users == Config.TRUE: + if ( + mongo_user_username != self.__dict['mongo_user_username'] + or mongo_root_username != self.__dict['mongo_root_username'] + ): + + message = ( + 'WARNING!\n\n' + "MongoDB root's and/or user's usernames have changed!" + ) + CLI.framed_print(message) + question = 'Do you want to remove old users?' 
+ response = CLI.yes_no_question(question) + if response is True: usernames_by_db = { mongo_user_username: 'formhub', mongo_root_username: 'admin' } for username, db in usernames_by_db.items(): - if username != "": - content += "{cr}{username}\t{db}".format( - cr="\n" if content else "", + if username != '': + content += '{cr}{username}\t{db}'.format( + cr='\n' if content else '', username=username, db=db ) self.__write_upsert_db_users_trigger_file(content, 'mongo') - self.__config["mongo_secured"] = Config.TRUE + self.__dict['mongo_secured'] = True def __questions_multi_servers(self): """ Asks if installation is for only one server - or different frontend and backend servers. + or different front-end and back-end servers. """ - CLI.colored_print("Do you want to use separate servers for frontend and backend?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["multi"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("multi", Config.FALSE)) + self.__dict['multi'] = CLI.yes_no_question( + 'Do you want to use separate servers for front end and back end?', + default=self.__dict['multi'] + ) def __questions_postgres(self): """ @@ -1108,75 +1327,71 @@ def __questions_postgres(self): Settings can be tweaked thanks to pgconfig.org API """ - CLI.colored_print("KoBoCat PostgreSQL database name?", - CLI.COLOR_SUCCESS) + CLI.colored_print('KoBoCat PostgreSQL database name?', + CLI.COLOR_QUESTION) kc_postgres_db = CLI.get_response( - r"~^\w+$", - self.__config.get("kc_postgres_db"), + r'~^\w+$', + self.__dict['kc_postgres_db'], to_lower=False ) - CLI.colored_print("KPI PostgreSQL database name?", - CLI.COLOR_SUCCESS) + CLI.colored_print('KPI PostgreSQL database name?', + CLI.COLOR_QUESTION) kpi_postgres_db = CLI.get_response( - r"~^\w+$", - self.__config.get("kpi_postgres_db"), + r'~^\w+$', + self.__dict['kpi_postgres_db'], to_lower=False) while kpi_postgres_db == kc_postgres_db: kpi_postgres_db = 
CLI.colored_input( - "KPI must use its own PostgreSQL database, not share one with " - "KoBoCAT. Please enter another database", + 'KPI must use its own PostgreSQL database, not share one with ' + 'KoBoCAT. Please enter another database', CLI.COLOR_ERROR, - Config.get_config_template()["kpi_postgres_db"], + Config.get_template()['kpi_postgres_db'], ) - if (kc_postgres_db != self.__config["kc_postgres_db"] or - (kpi_postgres_db != self.__config["kpi_postgres_db"] and - self.__config.get("two_databases") == Config.TRUE)): - CLI.colored_print("╔══════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ PostgreSQL database names have changed! ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ KoBoInstall does not support database name changes ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ after database initialization. ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ Data will not appear in KPI and/or KoBoCat. ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚══════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - - CLI.colored_print("Do you want to continue?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - - if CLI.get_response([Config.TRUE, Config.FALSE], Config.FALSE) == Config.FALSE: - sys.exit() - - self.__config["kc_postgres_db"] = kc_postgres_db - self.__config["kpi_postgres_db"] = kpi_postgres_db - self.__config["two_databases"] = Config.TRUE - - postgres_user = self.__config["postgres_user"] - postgres_password = self.__config["postgres_password"] + if (kc_postgres_db != self.__dict['kc_postgres_db'] or + (kpi_postgres_db != self.__dict['kpi_postgres_db'] and + self.__dict['two_databases'])): + message = ( + 'WARNING!\n\n' + 'PostgreSQL database names have changed!\n' + 'kobo-install does not support database name changes after ' + 'database initialization.\n' + 'Data will not appear in KPI and/or KoBoCAT.' 
+ ) + CLI.framed_print(message) + + response = CLI.yes_no_question( + 'Do you want to continue?', + default=False + ) + if response is False: + sys.exit(0) + + self.__dict['kc_postgres_db'] = kc_postgres_db + self.__dict['kpi_postgres_db'] = kpi_postgres_db + self.__dict['two_databases'] = True + + postgres_user = self.__dict['postgres_user'] + postgres_password = self.__dict['postgres_password'] CLI.colored_print("PostgreSQL user's username?", - CLI.COLOR_SUCCESS) - self.__config["postgres_user"] = CLI.get_response( - r"~^\w+$", - self.__config.get("postgres_user"), + CLI.COLOR_QUESTION) + self.__dict['postgres_user'] = CLI.get_response( + r'~^\w+$', + self.__dict['postgres_user'], to_lower=False) - CLI.colored_print("PostgreSQL user's password?", CLI.COLOR_SUCCESS) - self.__config["postgres_password"] = CLI.get_response( - r"~^.{8,}$", - self.__config.get("postgres_password"), + CLI.colored_print("PostgreSQL user's password?", CLI.COLOR_QUESTION) + self.__dict['postgres_password'] = CLI.get_response( + r'~^.{8,}$', + self.__dict['postgres_password'], to_lower=False, error_msg='Too short. 8 characters minimum.') - if (postgres_user != self.__config.get("postgres_user") or - postgres_password != self.__config.get("postgres_password")) and \ + if (postgres_user != self.__dict['postgres_user'] or + postgres_password != self.__dict['postgres_password']) and \ not self.first_time: # Because chances are high we cannot communicate with DB @@ -1188,44 +1403,41 @@ def __questions_postgres(self): # action. # `content` will be read by PostgreSQL container at next boot - # It should always contain previous username and a boolean for deletion. + # It should always contain previous username and a boolean + # for deletion. 
# Its format should be: `` content = '{username}\tfalse'.format(username=postgres_user) - if postgres_user != self.__config.get("postgres_user"): + if postgres_user != self.__dict['postgres_user']: - CLI.colored_print("PostgreSQL user's username has changed!", CLI.COLOR_WARNING) - CLI.colored_print("Do you want to remove old user?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - delete_user = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - - if delete_user == Config.TRUE: + CLI.colored_print("PostgreSQL user's username has changed!", + CLI.COLOR_WARNING) + question = 'Do you want to remove old user?' + response = CLI.yes_no_question(question) + if response is True: content = '{username}\ttrue'.format(username=postgres_user) - CLI.colored_print("╔══════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ WARNING! User cannot be deleted if it has been used  ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ to initialize PostgreSQL server.                     ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ You will need to do it manually!                     ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚══════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) + message = ( + 'WARNING!\n\n' + 'User cannot be deleted if it has been used to ' + 'initialize PostgreSQL server.\n' + 'You will need to do it manually!'
+ ) + CLI.framed_print(message) self.__write_upsert_db_users_trigger_file(content, 'postgres') if self.backend_questions: # Postgres settings - CLI.colored_print("Do you want to tweak PostgreSQL settings?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["postgres_settings"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("postgres_settings", Config.FALSE)) - - if self.__config["postgres_settings"] == Config.TRUE: + self.__dict['postgres_settings'] = CLI.yes_no_question( + 'Do you want to tweak PostgreSQL settings?', + default=self.__dict['postgres_settings'] + ) + if self.__dict['postgres_settings']: - # pgconfig.org API is often unresponsive and make kobo-install hang forever. + CLI.colored_print('Launching pgconfig.org API container...', + CLI.COLOR_INFO) + # pgconfig.org API is often unresponsive and make kobo-install + # hang forever. # A docker image is available, let's use it instead. # (Hope docker hub is not down too). 
@@ -1243,523 +1455,601 @@ def __questions_postgres(self): 'sebastianwebber/pgconfig-api'] CLI.run_command(docker_command) - CLI.colored_print("Total Memory in GB?", CLI.COLOR_SUCCESS) - self.__config["postgres_ram"] = CLI.get_response(r"~^\d+$", self.__config.get("postgres_ram")) - - CLI.colored_print("Storage type?", CLI.COLOR_SUCCESS) - CLI.colored_print("\thdd) Hard Disk Drive") - CLI.colored_print("\tssd) Solid State Drive") - CLI.colored_print("\tsan) Storage Area Network") - self.__config["postgres_hard_drive_type"] = CLI.get_response( - ["hdd", "ssd", "san"], - self.__config.get("postgres_hard_drive_type").lower()) - - CLI.colored_print("Number of connections?", CLI.COLOR_SUCCESS) - self.__config["postgres_max_connections"] = CLI.get_response( - r"~^\d+$", - self.__config.get("postgres_max_connections")) + # From https://docs.pgconfig.org/api/#available-parameters + # Parameters are case-sensitive, for example + # `environment_name` must be one these values: + # - `WEB` + # - `OLTP`, + # - `DW` + # - `Mixed` + # - `Desktop` + # It's case-sensitive. 
+ + CLI.colored_print('Total Memory in GB?', CLI.COLOR_QUESTION) + self.__dict['postgres_ram'] = CLI.get_response( + r'~^\d+$', + self.__dict['postgres_ram']) + + CLI.colored_print('Storage type?', CLI.COLOR_QUESTION) + CLI.colored_print('\thdd) Hard Disk Drive') + CLI.colored_print('\tssd) Solid State Drive') + CLI.colored_print('\tsan) Storage Area Network') + self.__dict['postgres_hard_drive_type'] = CLI.get_response( + ['hdd', 'ssd', 'san'], + self.__dict['postgres_hard_drive_type'].lower()) + + CLI.colored_print('Number of connections?', CLI.COLOR_QUESTION) + self.__dict['postgres_max_connections'] = CLI.get_response( + r'~^\d+$', + self.__dict['postgres_max_connections']) if self.multi_servers: - multi_servers_profiles = ["web", "oltp", "dw"] - if self.__config["postgres_profile"].lower() not in multi_servers_profiles: - self.__config["postgres_profile"] = "web" + multi_servers_profiles = ['web', 'oltp', 'dw'] + if self.__dict['postgres_profile'].lower() \ + not in multi_servers_profiles: + self.__dict['postgres_profile'] = 'web' - CLI.colored_print("Application profile?", CLI.COLOR_SUCCESS) - CLI.colored_print("\tweb) General Web application") - CLI.colored_print("\toltp) ERP or long transaction applications") - CLI.colored_print("\tdw) DataWare house") + CLI.colored_print('Application profile?', CLI.COLOR_QUESTION) + CLI.colored_print('\tweb) General Web application') + CLI.colored_print( + '\toltp) ERP or long transaction applications') + CLI.colored_print('\tdw) DataWare house') - self.__config["postgres_profile"] = CLI.get_response(["web", "oltp", "dw"], - self.__config.get("postgres_profile").lower()) + self.__dict['postgres_profile'] = CLI.get_response( + ['web', 'oltp', 'dw'], + self.__dict['postgres_profile'].lower()) - self.__config["postgres_profile"] = self.__config["postgres_profile"].upper() + self.__dict['postgres_profile'] = self.__dict[ + 'postgres_profile'].upper() elif self.dev_mode: - self.__config["postgres_profile"] = "Desktop" + 
self.__dict['postgres_profile'] = 'Desktop' else: - self.__config["postgres_profile"] = "Mixed" - - endpoint = "http://127.0.0.1:{open_port}/v1/tuning/get-config?environment_name={profile}" \ - "&format=conf&include_pgbadger=false&max_connections={max_connections}&" \ - "pg_version=9.5&total_ram={ram}GB&drive_type={drive_type}".format( - open_port=open_port, - profile=self.__config["postgres_profile"], - ram=self.__config["postgres_ram"], - max_connections=self.__config["postgres_max_connections"], - drive_type=self.__config["postgres_hard_drive_type"].upper() - ) + self.__dict['postgres_profile'] = 'Mixed' + + endpoint = 'http://127.0.0.1:{open_port}/v1/tuning/get-config' \ + '?environment_name={profile}&format=conf' \ + '&include_pgbadger=false' \ + '&max_connections={max_connections}' \ + '&pg_version=9.5' \ + '&total_ram={ram}GB' \ + '&drive_type={drive_type}' + endpoint = endpoint.format( + open_port=open_port, + profile=self.__dict['postgres_profile'], + ram=self.__dict['postgres_ram'], + max_connections=self.__dict['postgres_max_connections'], + drive_type=self.__dict['postgres_hard_drive_type'].upper() + ) response = Network.curl(endpoint) if response: - self.__config["postgres_settings_content"] = re.sub( - r"(log|lc_).+(\n|$)", "", response) + self.__dict['postgres_settings_content'] = re.sub( + r'(log|lc_).+(\n|$)', '', response) else: - if self.__config["postgres_settings_content"] == '': - CLI.colored_print("Use default settings.", - CLI.COLOR_INFO) - # If no response from API, keep defaults - self.__config["postgres_settings"] = Config.FALSE - else: - CLI.colored_print("\nKeep current settings.", - CLI.COLOR_INFO) + CLI.colored_print('\nAn error has occurred. 
Current ' + 'PostgreSQL settings will be used', + CLI.COLOR_INFO) # Stop container - docker_command = ['docker', 'stop', '-t', '0', 'pgconfig_container'] + docker_command = ['docker', 'stop', '-t', '0', + 'pgconfig_container'] CLI.run_command(docker_command) + CLI.colored_print('pgconfig.org API container has been stopped!', + CLI.COLOR_INFO) + else: + # Forcing the default settings to remain even if there + # is an existing value in .run.conf. Without this, + # the value for `postgres_settings_content` would not update + default_postgres_settings_content = '\n'.join([ + '# Memory Configuration', + 'shared_buffers = 512MB', + 'effective_cache_size = 2GB', + 'work_mem = 10MB', + 'maintenance_work_mem = 128MB', + '', + '# Checkpoint Related Configuration', + 'min_wal_size = 512MB', + 'max_wal_size = 2GB', + 'checkpoint_completion_target = 0.9', + 'wal_buffers = 15MB', + '', + '# Network Related Configuration', + "listen_addresses = '*'", + 'max_connections = 100', + ]) + self.__dict['postgres_settings_content'] = \ + default_postgres_settings_content def __questions_ports(self): """ Customize services ports """ + def reset_ports(): - self.__config["postgresql_port"] = "5432" - self.__config["mongo_port"] = "27017" - self.__config["redis_main_port"] = "6379" - self.__config["redis_cache_port"] = "6380" + self.__dict['postgresql_port'] = '5432' + self.__dict['mongo_port'] = '27017' + self.__dict['redis_main_port'] = '6379' + self.__dict['redis_cache_port'] = '6380' if not self.multi_servers: - CLI.colored_print("Do you want to expose backend container ports " - "(`PostgreSQL`, `MongoDB`, `redis`) ?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["expose_backend_ports"] = CLI.get_response( - [Config.TRUE, Config.FALSE], self.__config.get("expose_backend_ports", - Config.FALSE)) + self.__dict['expose_backend_ports'] = CLI.yes_no_question( + 'Do you want to expose back-end container ports ' + '(`PostgreSQL`, `MongoDB`, 
`redis`)?', + default=self.__dict['expose_backend_ports'] + ) else: - self.__config["expose_backend_ports"] = Config.TRUE + self.__dict['expose_backend_ports'] = True if not self.expose_backend_ports: reset_ports() return - CLI.colored_print("╔═════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ WARNING! When exposing backend container ports, ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ it's STRONGLY recommended to use a firewall to ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ grant access to frontend containers only. ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - - CLI.colored_print("Do you want to customize service ports?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["customized_ports"] = CLI.get_response( - [Config.TRUE, Config.FALSE], self.__config.get("customized_ports", - Config.FALSE)) - - if self.__config.get("customized_ports") == Config.FALSE: + if self.backend: + message = ( + 'WARNING!\n\n' + 'When exposing back-end container ports, it is STRONGLY ' + 'recommended to use a firewall to grant access to front-end ' + 'containers only.' 
+ ) + CLI.framed_print(message) + + self.__dict['customized_ports'] = CLI.yes_no_question( + 'Do you want to customize service ports?', + default=self.__dict['customized_ports'] + ) + + if not self.__dict['customized_ports']: reset_ports() return - CLI.colored_print("PostgreSQL?", CLI.COLOR_SUCCESS) - self.__config["postgresql_port"] = CLI.get_response( - r"~^\d+$", self.__config.get("postgresql_port", "5432")) + CLI.colored_print('PostgreSQL?', CLI.COLOR_QUESTION) + self.__dict['postgresql_port'] = CLI.get_response( + r'~^\d+$', self.__dict['postgresql_port']) - CLI.colored_print("MongoDB?", CLI.COLOR_SUCCESS) - self.__config["mongo_port"] = CLI.get_response( - r"~^\d+$", self.__config.get("mongo_port", "27017")) + CLI.colored_print('MongoDB?', CLI.COLOR_QUESTION) + self.__dict['mongo_port'] = CLI.get_response( + r'~^\d+$', self.__dict['mongo_port']) - CLI.colored_print("Redis (main)?", CLI.COLOR_SUCCESS) - self.__config["redis_main_port"] = CLI.get_response( - r"~^\d+$", self.__config.get("redis_main_port", "6379")) + CLI.colored_print('Redis (main)?', CLI.COLOR_QUESTION) + self.__dict['redis_main_port'] = CLI.get_response( + r'~^\d+$', self.__dict['redis_main_port']) - CLI.colored_print("Redis (cache)?", CLI.COLOR_SUCCESS) - self.__config["redis_cache_port"] = CLI.get_response( - r"~^\d+$", self.__config.get("redis_cache_port", "6380")) + CLI.colored_print('Redis (cache)?', CLI.COLOR_QUESTION) + self.__dict['redis_cache_port'] = CLI.get_response( + r'~^\d+$', self.__dict['redis_cache_port']) def __questions_private_routes(self): """ - Asks if configuration uses a DNS for private domain names for communication - between frontend and backend. - Otherwise, it will create entries in `extra_hosts` in composer file based - on the provided ip. + Asks if configuration uses a DNS for private domain names + for communication between front end and back end. + Otherwise, it will create entries in `extra_hosts` in composer + file based on the provided ip. 
""" - CLI.colored_print("Do you use DNS for private routes?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - - self.__config["use_private_dns"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("use_private_dns", - Config.FALSE)) - - if self.__config["use_private_dns"] == Config.FALSE: - CLI.colored_print("IP address (IPv4) of primary backend server?", CLI.COLOR_SUCCESS) - self.__config["primary_backend_ip"] = CLI.get_response( - r"~\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", - self.__config.get("primary_backend_ip", self.__primary_ip)) + self.__dict['use_private_dns'] = CLI.yes_no_question( + 'Do you use DNS for private routes?', + default=self.__dict['use_private_dns'] + ) + if self.__dict['use_private_dns'] is False: + CLI.colored_print('IP address (IPv4) of primary back-end server?', + CLI.COLOR_QUESTION) + self.__dict['primary_backend_ip'] = CLI.get_response( + r'~\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', + self.__dict['primary_backend_ip']) else: - self.__config["private_domain_name"] = CLI.colored_input("Private domain name", - CLI.COLOR_SUCCESS, - self.__config.get( - "private_domain_name", "")) + self.__dict['private_domain_name'] = CLI.colored_input( + 'Private domain name?', + CLI.COLOR_QUESTION, + self.__dict['private_domain_name']) def __questions_public_routes(self): """ Asks for public domain names """ - self.__config["public_domain_name"] = CLI.colored_input("Public domain name", CLI.COLOR_SUCCESS, - self.__config.get("public_domain_name", "")) - self.__config["kpi_subdomain"] = CLI.colored_input("KPI sub domain", CLI.COLOR_SUCCESS, - self.__config.get("kpi_subdomain", "")) - self.__config["kc_subdomain"] = CLI.colored_input("KoBoCat sub domain", CLI.COLOR_SUCCESS, - self.__config.get("kc_subdomain", "")) - self.__config["ee_subdomain"] = CLI.colored_input("Enketo Express sub domain name", - CLI.COLOR_SUCCESS, - self.__config.get("ee_subdomain", "")) + self.__dict['public_domain_name'] = CLI.colored_input( 
+ 'Public domain name?', CLI.COLOR_QUESTION, + self.__dict['public_domain_name']) + self.__dict['kpi_subdomain'] = CLI.colored_input( + 'KPI sub domain?', + CLI.COLOR_QUESTION, + self.__dict['kpi_subdomain'] + ) + self.__dict['kc_subdomain'] = CLI.colored_input( + 'KoBoCat sub domain?', + CLI.COLOR_QUESTION, + self.__dict['kc_subdomain'] + ) + self.__dict['ee_subdomain'] = CLI.colored_input( + 'Enketo Express sub domain name?', + CLI.COLOR_QUESTION, + self.__dict['ee_subdomain'] + ) - parts = self.__config.get("public_domain_name", "").split(".") - self.__config["internal_domain_name"] = "{}.internal".format( - ".".join(parts[:-1]) + parts = self.__dict['public_domain_name'].split('.') + self.__dict['internal_domain_name'] = '{}.internal'.format( + '.'.join(parts[:-1]) ) if not self.multi_servers or \ (self.multi_servers and not self.use_private_dns): - self.__config["private_domain_name"] = "{}.private".format( - ".".join(parts[:-1]) + self.__dict['private_domain_name'] = '{}.private'.format( + '.'.join(parts[:-1]) ) def __questions_raven(self): - CLI.colored_print("Do you want to use Sentry?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["raven_settings"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("raven_settings", Config.FALSE)) - - if self.__config.get("raven_settings") == Config.TRUE: - self.__config["kpi_raven"] = CLI.colored_input("KPI Raven token", CLI.COLOR_SUCCESS, - self.__config.get("kpi_raven", "")) - self.__config["kobocat_raven"] = CLI.colored_input("KoBoCat Raven token", CLI.COLOR_SUCCESS, - self.__config.get("kobocat_raven", "")) - self.__config["kpi_raven_js"] = CLI.colored_input("KPI Raven JS token", CLI.COLOR_SUCCESS, - self.__config.get("kpi_raven_js", "")) + self.__dict['raven_settings'] = CLI.yes_no_question( + 'Do you want to use Sentry?', + default=self.__dict['raven_settings'] + ) + if self.__dict['raven_settings'] is True: + self.__dict['kpi_raven'] = 
CLI.colored_input( + 'KPI Raven token', + CLI.COLOR_QUESTION, + self.__dict['kpi_raven']) + self.__dict['kobocat_raven'] = CLI.colored_input( + 'KoBoCat Raven token', CLI.COLOR_QUESTION, + self.__dict['kobocat_raven']) + self.__dict['kpi_raven_js'] = CLI.colored_input( + 'KPI Raven JS token', CLI.COLOR_QUESTION, + self.__dict['kpi_raven_js']) else: - self.__config["kpi_raven"] = "" - self.__config["kobocat_raven"] = "" - self.__config["kpi_raven_js"] = "" + self.__dict['kpi_raven'] = '' + self.__dict['kobocat_raven'] = '' + self.__dict['kpi_raven_js'] = '' def __questions_redis(self): """ Ask for redis password only when server is for: - - primary backend + - primary back end - single server installation """ if self.primary_backend or not self.multi_servers: - CLI.colored_print("Redis password?", CLI.COLOR_SUCCESS) - self.__config["redis_password"] = CLI.get_response( - r"~^.{8,}|$", - self.__config.get("redis_password"), + CLI.colored_print('Redis password?', CLI.COLOR_QUESTION) + self.__dict['redis_password'] = CLI.get_response( + r'~^.{8,}|$', + self.__dict['redis_password'], to_lower=False, error_msg='Too short. 8 characters minimum.') - if not self.__config["redis_password"]: - CLI.colored_print("╔═════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ WARNING! it's STRONGLY recommended to set a ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ password for Redis as well. ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - - CLI.colored_print("Do you want to continue?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - - if CLI.get_response([Config.TRUE, Config.FALSE], Config.FALSE) == Config.FALSE: + if not self.__dict['redis_password']: + message = ( + 'WARNING!\n\n' + 'It is STRONGLY recommended to set a password for Redis ' + 'as well.' 
+ ) + CLI.framed_print(message) + response = CLI.yes_no_question( + 'Do you want to continue without password?', + default=False + ) + if response is False: self.__questions_redis() def __questions_reverse_proxy(self): if self.is_secure: - CLI.colored_print("Auto-install HTTPS certificates with Let's Encrypt?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No - Use my own reverse-proxy/load-balancer") - self.__config["use_letsencrypt"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("use_letsencrypt", Config.TRUE)) - self.__config["proxy"] = Config.TRUE - self.__config["exposed_nginx_docker_port"] = Config.DEFAULT_NGINX_PORT + self.__dict['use_letsencrypt'] = CLI.yes_no_question( + "Auto-install HTTPS certificates with Let's Encrypt?", + default=self.__dict['use_letsencrypt'], + labels=[ + 'Yes', + 'No - Use my own reverse-proxy/load-balancer', + ] + ) + self.__dict['proxy'] = True + self.__dict[ + 'exposed_nginx_docker_port'] = Config.DEFAULT_NGINX_PORT if self.use_letsencrypt: - self.__config["nginx_proxy_port"] = Config.DEFAULT_PROXY_PORT + self.__dict['nginx_proxy_port'] = Config.DEFAULT_PROXY_PORT - CLI.colored_print("╔════════════════════════════════════════════════╗", CLI.COLOR_WARNING) - CLI.colored_print("║ Domain names must be publicly accessible. ║", CLI.COLOR_WARNING) - CLI.colored_print("║ Otherwise Let's Encrypt won't be able to valid ║", CLI.COLOR_WARNING) - CLI.colored_print("║ your certificates. ║", CLI.COLOR_WARNING) - CLI.colored_print("╚════════════════════════════════════════════════╝", CLI.COLOR_WARNING) - - while True: - letsencrypt_email = CLI.colored_input("Email address for Let's Encrypt", CLI.COLOR_SUCCESS, - self.__config.get("letsencrypt_email")) + message = ( + 'WARNING!\n\n' + 'Domain names must be publicly accessible.\n' + "Otherwise Let's Encrypt will not be able to valid your " + 'certificates.' 
+ ) + CLI.framed_print(message) - CLI.colored_print("Please confirm [{}]".format(letsencrypt_email), - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") + if self.first_time: + email = self.__dict['default_from_email'] + self.__dict['letsencrypt_email'] = email - if CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) == Config.TRUE: - self.__config["letsencrypt_email"] = letsencrypt_email + while True: + letsencrypt_email = CLI.colored_input( + "Email address for Let's Encrypt?", + CLI.COLOR_QUESTION, + self.__dict['letsencrypt_email']) + question = 'Please confirm [{}]'.format(letsencrypt_email) + response = CLI.yes_no_question(question) + if response is True: + self.__dict['letsencrypt_email'] = letsencrypt_email break - self.__clone_repo(self.get_letsencrypt_repo_path(), "nginx-certbot") + self.__clone_repo(self.get_letsencrypt_repo_path(), + 'nginx-certbot') else: if self.advanced_options: - CLI.colored_print("Is `KoBoToolbox` behind a reverse-proxy/load-balancer?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["proxy"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("proxy", Config.FALSE)) - self.__config["use_letsencrypt"] = Config.FALSE + self.__dict['proxy'] = CLI.yes_no_question( + 'Are kobo-docker containers behind a ' + 'reverse-proxy/load-balancer?', + default=self.__dict['proxy'] + ) + self.__dict['use_letsencrypt'] = False else: - self.__config["proxy"] = Config.FALSE + self.__dict['proxy'] = False if self.proxy: # When proxy is enabled, public port is 80 or 443. # @TODO Give the user the possibility to customize it too. 
- self.__config["exposed_nginx_docker_port"] = Config.DEFAULT_NGINX_PORT + self.__dict[ + 'exposed_nginx_docker_port'] = Config.DEFAULT_NGINX_PORT if self.advanced_options: if not self.use_letsencrypt: - CLI.colored_print("Is your reverse-proxy/load-balancer installed on this server?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["block_common_http_ports"] = CLI.get_response( - [Config.TRUE, Config.FALSE], - self.__config.get("block_common_http_ports", Config.FALSE)) + response = CLI.yes_no_question( + 'Is your reverse-proxy/load-balancer installed on ' + 'this server?', + default=self.__dict['block_common_http_ports'] + ) + self.__dict['block_common_http_ports'] = response else: - self.__config["block_common_http_ports"] = Config.TRUE + self.__dict['block_common_http_ports'] = True - if not self.__is_port_allowed(self.__config["nginx_proxy_port"]): + if not self.__is_port_allowed( + self.__dict['nginx_proxy_port']): # Force nginx proxy port if port is not allowed - self.__config["nginx_proxy_port"] = Config.DEFAULT_PROXY_PORT + self.__dict[ + 'nginx_proxy_port'] = Config.DEFAULT_PROXY_PORT - CLI.colored_print("Internal port used by reverse proxy?", CLI.COLOR_SUCCESS) + CLI.colored_print('Internal port used by reverse proxy?', + CLI.COLOR_QUESTION) while True: - self.__config["nginx_proxy_port"] = CLI.get_response(r"~^\d+$", - self.__config.get("nginx_proxy_port")) - if self.__is_port_allowed(self.__config["nginx_proxy_port"]): + self.__dict['nginx_proxy_port'] = CLI.get_response( + r'~^\d+$', + self.__dict['nginx_proxy_port']) + if self.__is_port_allowed( + self.__dict['nginx_proxy_port']): break else: - CLI.colored_print("Ports 80 and 443 are reserved!", CLI.COLOR_ERROR) + CLI.colored_print('Ports 80 and 443 are reserved!', + CLI.COLOR_ERROR) else: - self.__config["block_common_http_ports"] = Config.TRUE + self.__dict['block_common_http_ports'] = True if not self.use_letsencrypt: - CLI.colored_print("Internal 
port used by reverse proxy is {}.".format( - Config.DEFAULT_PROXY_PORT - ), CLI.COLOR_WARNING) - self.__config["nginx_proxy_port"] = Config.DEFAULT_PROXY_PORT + CLI.colored_print( + 'Internal port used by reverse proxy is {}.'.format( + Config.DEFAULT_PROXY_PORT + ), CLI.COLOR_WARNING) + self.__dict['nginx_proxy_port'] = Config.DEFAULT_PROXY_PORT else: - self.__config["use_letsencrypt"] = Config.FALSE - self.__config["nginx_proxy_port"] = Config.DEFAULT_NGINX_PORT - self.__config["block_common_http_ports"] = Config.FALSE + self.__dict['use_letsencrypt'] = False + self.__dict['nginx_proxy_port'] = Config.DEFAULT_NGINX_PORT + self.__dict['block_common_http_ports'] = False def __questions_roles(self): - CLI.colored_print("Which role do you want to assign to this server?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) frontend") - CLI.colored_print("\t2) backend") - self.__config["server_role"] = CLI.get_response(["backend", "frontend"], - self.__config.get("server_role", "frontend")) - - if self.__config.get("server_role") == "backend": - CLI.colored_print("Which role do you want to assign to this backend server?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) primary") - CLI.colored_print("\t2) secondary") - self.__config["backend_server_role"] = CLI.get_response(["primary", "secondary"], - self.__config.get("backend_server_role", "primary")) + CLI.colored_print('Which role do you want to assign to this server?', + CLI.COLOR_QUESTION) + CLI.colored_print('\t1) frontend') + CLI.colored_print('\t2) backend') + self.__dict['server_role'] = CLI.get_response( + ['backend', 'frontend'], + self.__dict['server_role']) + + if self.__dict['server_role'] == 'backend': + CLI.colored_print( + 'Which role do you want to assign to this back-end server?', + CLI.COLOR_QUESTION) + CLI.colored_print('\t1) primary') + CLI.colored_print('\t2) secondary') + self.__dict['backend_server_role'] = CLI.get_response( + ['primary', 'secondary'], + self.__dict['backend_server_role']) else: - # It 
may be useless to force backend role when using multi servers. - self.__config["backend_server_role"] = "primary" + # It may be useless to force back-end role when using multi servers. + self.__dict['backend_server_role'] = 'primary' def __questions_secret_keys(self): - CLI.colored_print("Do you want to customize the application secret keys?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["custom_secret_keys"] = CLI.get_response( - [Config.TRUE, Config.FALSE], self.__config.get("custom_secret_keys")) - - if self.__config["custom_secret_keys"] == Config.TRUE: - CLI.colored_print("Django's secret key?", CLI.COLOR_SUCCESS) - self.__config["django_secret_key"] = CLI.get_response( - r"~^.{50,}$", - self.__config.get("django_secret_key"), + self.__dict['custom_secret_keys'] = CLI.yes_no_question( + 'Do you want to customize the application secret keys?', + default=self.__dict['custom_secret_keys'] + ) + if self.__dict['custom_secret_keys'] is True: + CLI.colored_print("Django's secret key?", CLI.COLOR_QUESTION) + self.__dict['django_secret_key'] = CLI.get_response( + r'~^.{50,}$', + self.__dict['django_secret_key'], to_lower=False, error_msg='Too short. 50 characters minimum.') - CLI.colored_print("Enketo's api key?", CLI.COLOR_SUCCESS) - self.__config["enketo_api_token"] = CLI.get_response( - r"~^.{50,}$", - self.__config.get("enketo_api_token"), + CLI.colored_print("Enketo's api key?", CLI.COLOR_QUESTION) + self.__dict['enketo_api_token'] = CLI.get_response( + r'~^.{50,}$', + self.__dict['enketo_api_token'], to_lower=False, error_msg='Too short. 
50 characters minimum.') - CLI.colored_print("Enketo's encryption key?", CLI.COLOR_SUCCESS) - self.__config["enketo_encryption_key"] = CLI.get_response( - r"~^.{50,}$", - self.__config.get("enketo_encryption_key"), + CLI.colored_print("Enketo's encryption key?", CLI.COLOR_QUESTION) + self.__dict['enketo_encryption_key'] = CLI.get_response( + r'~^.{50,}$', + self.__dict['enketo_encryption_key'], to_lower=False, error_msg='Too short. 50 characters minimum.') - CLI.colored_print("Enketo's less secure encryption key?", CLI.COLOR_SUCCESS) - self.__config["enketo_less_secure_encryption_key"] = CLI.get_response( - r"~^.{10,}$", - self.__config.get("enketo_less_secure_encryption_key"), + CLI.colored_print("Enketo's less secure encryption key?", + CLI.COLOR_QUESTION) + self.__dict[ + 'enketo_less_secure_encryption_key'] = CLI.get_response( + r'~^.{10,}$', + self.__dict['enketo_less_secure_encryption_key'], to_lower=False, error_msg='Too short. 10 characters minimum.') def __questions_smtp(self): - self.__config["smtp_host"] = CLI.colored_input("SMTP server", CLI.COLOR_SUCCESS, - self.__config.get("smtp_host")) - self.__config["smtp_port"] = CLI.colored_input("SMTP port", CLI.COLOR_SUCCESS, - self.__config.get("smtp_port", "25")) - self.__config["smtp_user"] = CLI.colored_input("SMTP user", CLI.COLOR_SUCCESS, - self.__config.get("smtp_user", "")) - if self.__config.get("smtp_user"): - self.__config["smtp_password"] = CLI.colored_input("SMTP password", CLI.COLOR_SUCCESS, - self.__config.get("smtp_password")) - CLI.colored_print("Use TLS?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) True") - CLI.colored_print("\t2) False") - self.__config["smtp_use_tls"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("smtp_use_tls", Config.TRUE)) - self.__config["default_from_email"] = CLI.colored_input("From email address", CLI.COLOR_SUCCESS, - self.__config.get("default_from_email", - "support@{}".format( - self.__config.get( - "public_domain_name")))) + 
self.__dict['smtp_host'] = CLI.colored_input('SMTP server?', + CLI.COLOR_QUESTION, + self.__dict['smtp_host']) + self.__dict['smtp_port'] = CLI.colored_input('SMTP port?', + CLI.COLOR_QUESTION, + self.__dict['smtp_port']) + self.__dict['smtp_user'] = CLI.colored_input('SMTP user?', + CLI.COLOR_QUESTION, + self.__dict['smtp_user']) + if self.__dict['smtp_user']: + self.__dict['smtp_password'] = CLI.colored_input( + 'SMTP password', + CLI.COLOR_QUESTION, + self.__dict['smtp_password'] + ) + self.__dict['smtp_use_tls'] = CLI.yes_no_question( + 'Use TLS?', + default=self.__dict['smtp_use_tls'] + ) + + if self.first_time: + domain_name = self.__dict['public_domain_name'] + self.__dict['default_from_email'] = 'support@{}'.format(domain_name) + + self.__dict['default_from_email'] = CLI.colored_input( + 'From email address?', + CLI.COLOR_QUESTION, + self.__dict['default_from_email'] + ) def __questions_super_user_credentials(self): # Super user. Only ask for credentials the first time. # Super user is created if db doesn't exists. - username = CLI.colored_input("Super user's username", CLI.COLOR_SUCCESS, - self.__config.get("super_user_username")) - password = CLI.colored_input("Super user's password", CLI.COLOR_SUCCESS, - self.__config.get("super_user_password")) - - if username == self.__config.get("super_user_username") and \ - password != self.__config.get("super_user_password") and \ + username = CLI.colored_input("Super user's username?", + CLI.COLOR_QUESTION, + self.__dict['super_user_username']) + password = CLI.colored_input("Super user's password?", + CLI.COLOR_QUESTION, + self.__dict['super_user_password']) + + if username == self.__dict['super_user_username'] and \ + password != self.__dict['super_user_password'] and \ not self.first_time: - - _message_lines = [ - "╔════════════════════════════════════════════════════════════════╗", - "║ You have configured a new password for the super user. 
║", - "║ This change will *not* take effect if KoBoToolbox has ever ║", - "║ been started before. Please use the web interface to change ║", - "║ passwords for existing users. ║", - "║ If you've forgotten your password: ║", - "║ 1. Enter the KPI container with `./run.py -cf exec kpi bash`; ║", - "║ 2. Create a new super user with `./manage.py createsuperuser`; ║", - "║ 3. Type `exit` to leave the KPI container; ║", - "╚════════════════════════════════════════════════════════════════╝" - ] - CLI.colored_print('\n'.join(_message_lines), CLI.COLOR_WARNING) - - self.__config["super_user_username"] = username - self.__config["super_user_password"] = password + message = ( + 'WARNING!\n\n' + 'You have configured a new password for the super user.\n' + 'This change will *not* take effect if KoBoToolbox has ever ' + 'been started before. Please use the web interface to change ' + 'passwords for existing users.\n' + 'If you have forgotten your password:\n' + '1. Enter the KPI container with `python3 run.py -cf exec kpi ' + 'bash`;\n' + '2. Create a new super user with `./manage.py ' + 'createsuperuser`;\n' + '3. 
Type `exit` to leave the KPI container;' + ) + CLI.framed_print(message) + self.__dict['super_user_username'] = username + self.__dict['super_user_password'] = password def __questions_uwsgi(self): if not self.dev_mode: - CLI.colored_print("Do you want to tweak uWSGI settings?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - self.__config["uwsgi_settings"] = CLI.get_response([Config.TRUE, Config.FALSE], - self.__config.get("uwsgi_settings", Config.FALSE)) - - if self.__config.get("uwsgi_settings") == Config.TRUE: - CLI.colored_print("Number of uWSGi workers to start?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_workers_start"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_workers_start")) - - CLI.colored_print("Maximum uWSGi workers?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_workers_max"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_workers_max")) - - CLI.colored_print("Maximum number of requests per worker?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_max_requests"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_max_requests")) - - CLI.colored_print("Maximum memory per workers in MB?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_soft_limit"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_soft_limit")) - - CLI.colored_print("Maximum time (in seconds) before killing an " - "unresponsive worker?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_harakiri"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_harakiri")) - - CLI.colored_print("Maximum time (in seconds) a worker can take " - "to reload/shutdown?", CLI.COLOR_SUCCESS) - self.__config["uwsgi_worker_reload_mercy"] = CLI.get_response( - r"~^\d+$", - self.__config.get("uwsgi_worker_reload_mercy")) + self.__dict['uwsgi_settings'] = CLI.yes_no_question( + 'Do you want to tweak uWSGI settings?', + default=self.__dict['uwsgi_settings'] + ) + + if self.__dict['uwsgi_settings']: + CLI.colored_print('Number of uWSGI workers to 
start?', + CLI.COLOR_QUESTION) + self.__dict['uwsgi_workers_start'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_workers_start']) + + CLI.colored_print('Maximum uWSGI workers?', CLI.COLOR_QUESTION) + self.__dict['uwsgi_workers_max'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_workers_max']) + + CLI.colored_print('Maximum number of requests per worker?', + CLI.COLOR_QUESTION) + self.__dict['uwsgi_max_requests'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_max_requests']) + + CLI.colored_print('Stop spawning workers if uWSGI memory use ', + 'exceeds this many MB: ', + CLI.COLOR_QUESTION) + self.__dict['uwsgi_soft_limit'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_soft_limit']) + + CLI.colored_print('Maximum time (in seconds) before killing an ' + 'unresponsive worker?', CLI.COLOR_QUESTION) + self.__dict['uwsgi_harakiri'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_harakiri']) + + CLI.colored_print('Maximum time (in seconds) a worker can take ' + 'to reload/shutdown?', CLI.COLOR_QUESTION) + self.__dict['uwsgi_worker_reload_mercy'] = CLI.get_response( + r'~^\d+$', + self.__dict['uwsgi_worker_reload_mercy']) return - self.__config["uwsgi_workers_start"] = "1" - self.__config["uwsgi_workers_max"] = "2" - self.__config["uwsgi_max_requests"] = "512" - self.__config["uwsgi_soft_limit"] = "128" - self.__config["uwsgi_harakiri"] = "120" - self.__config["uwsgi_worker_reload_mercy"] = "120" + self.__dict['uwsgi_workers_start'] = '1' + self.__dict['uwsgi_workers_max'] = '2' + self.__dict['uwsgi_max_requests'] = '512' + self.__dict['uwsgi_soft_limit'] = '128' + self.__dict['uwsgi_harakiri'] = '120' + self.__dict['uwsgi_worker_reload_mercy'] = '120' def __is_port_allowed(self, port): - return not (self.block_common_http_ports and port in [Config.DEFAULT_NGINX_PORT, - Config.DEFAULT_NGINX_HTTPS_PORT]) + return not (self.block_common_http_ports and port in [ + Config.DEFAULT_NGINX_PORT, + Config.DEFAULT_NGINX_HTTPS_PORT]) def 
__reset(self, **kwargs): """ Resets several properties to their default. - It can be useful, if user changes the type of installation on the same server - :return: bool + It can be useful, if user changes the type of installation on + the same server + + Returns: + bool """ all = True if not kwargs else False - dev_mode = kwargs.get("dev", False) - local_install = kwargs.get("local_install", False) - private_dns = kwargs.get("private_dns", False) - reset_nginx_port = kwargs.get("reset_nginx_port", False) + dev_mode = kwargs.get('dev', False) + local_install = kwargs.get('local_install', False) + private_dns = kwargs.get('private_dns', False) + reset_nginx_port = kwargs.get('reset_nginx_port', False) if dev_mode or all: - self.__config["dev_mode"] = Config.FALSE - self.__config["staging_mode"] = Config.FALSE - self.__config["kc_path"] = "" - self.__config["kpi_path"] = "" - self.__config["debug"] = Config.FALSE + self.__dict['dev_mode'] = False + self.__dict['staging_mode'] = False + self.__dict['kc_path'] = '' + self.__dict['kpi_path'] = '' + self.__dict['debug'] = False if reset_nginx_port: - self.__config["exposed_nginx_docker_port"] = Config.DEFAULT_NGINX_PORT + self.__dict[ + 'exposed_nginx_docker_port'] = Config.DEFAULT_NGINX_PORT if private_dns or all: - self.__config["use_private_dns"] = Config.FALSE + self.__dict['use_private_dns'] = False if local_install or all: - self.__config["multi"] = Config.FALSE - self.__config["https"] = Config.FALSE - self.__config["proxy"] = Config.FALSE - self.__config["nginx_proxy_port"] = Config.DEFAULT_NGINX_PORT - self.__config["use_letsencrypt"] = Config.FALSE + self.__dict['multi'] = False + self.__dict['https'] = False + self.__dict['proxy'] = False + self.__dict['nginx_proxy_port'] = Config.DEFAULT_NGINX_PORT + self.__dict['use_letsencrypt'] = False def __secure_mongo(self): """ Force creations of MongoDB users/passwords when users upgrade from - a non secure version of KoBoInstall + a non secure version of kobo-install 
""" # ToDo remove duplicated code with `__questions_mongo` - - if self.__config.get("mongo_secured") != Config.TRUE and not self.first_time: + if not self.__dict.get('mongo_secured') and not self.first_time: self.__write_upsert_db_users_trigger_file('', 'mongo') - self.__config["mongo_secured"] = Config.TRUE + self.__dict['mongo_secured'] = True def __validate_installation(self): """ @@ -1768,71 +2058,84 @@ def __validate_installation(self): :return: bool """ if self.first_time: - mongo_dir_path = os.path.join(self.__config["kobodocker_path"], ".vols", "mongo") - postgres_dir_path = os.path.join(self.__config["kobodocker_path"], ".vols", "db") - mongo_data_exists = (os.path.exists(mongo_dir_path) and os.path.isdir(mongo_dir_path) and - os.listdir(mongo_dir_path)) - postgres_data_exists = os.path.exists(postgres_dir_path) and os.path.isdir(postgres_dir_path) + mongo_dir_path = os.path.join(self.__dict['kobodocker_path'], + '.vols', 'mongo') + postgres_dir_path = os.path.join(self.__dict['kobodocker_path'], + '.vols', 'db') + mongo_data_exists = ( + os.path.exists(mongo_dir_path) and os.path.isdir( + mongo_dir_path) and + os.listdir(mongo_dir_path)) + postgres_data_exists = os.path.exists( + postgres_dir_path) and os.path.isdir(postgres_dir_path) if mongo_data_exists or postgres_data_exists: - # Not a reliable way to detect whether folder contains `KoBoInstall` files - # We assume that if `docker-compose.backend.template.yml` is there, - # Docker images are the good ones. + # Not a reliable way to detect whether folder contains + # kobo-install files. We assume that if + # `docker-compose.backend.template.yml` is there, Docker + # images are the good ones. 
# TODO Find a better way - docker_composer_file_path = os.path.join(self.__config["kobodocker_path"], - "docker-compose.backend.template.yml") + docker_composer_file_path = os.path.join( + self.__dict['kobodocker_path'], + 'docker-compose.backend.template.yml') if not os.path.exists(docker_composer_file_path): - CLI.colored_print("╔════════════════════════════════════════════════════╗", CLI.COLOR_WARNING) - CLI.colored_print("║ WARNING !!! ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ You are installing over existing data. ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ It's recommended to backup your data and import it ║", CLI.COLOR_WARNING) - CLI.colored_print("║ to a fresh installed (by KoBoInstall) database. ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ KoBoInstall uses these images: ║", CLI.COLOR_WARNING) - CLI.colored_print("║ - MongoDB: mongo:3.4 ║", CLI.COLOR_WARNING) - CLI.colored_print("║ - PostgreSQL: mdillon/postgis:9.5 ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ Be sure to upgrade to these versions before ║", CLI.COLOR_WARNING) - CLI.colored_print("║ going further! 
║", CLI.COLOR_WARNING) - CLI.colored_print("╚════════════════════════════════════════════════════╝", CLI.COLOR_WARNING) - CLI.colored_print("Are you sure you want to continue?", CLI.COLOR_SUCCESS) - CLI.colored_print("\tyes") - CLI.colored_print("\tno") - response = CLI.get_response(["yes", "no"], "no") - if response == "no": - sys.exit() + message = ( + 'WARNING!\n\n' + 'You are installing over existing data.\n' + '\n' + 'It is recommended to backup your data and import it ' + 'to a fresh installed (by KoBoInstall) database.\n' + '\n' + 'kobo-install uses these images:\n' + ' - MongoDB: mongo:3.4\n' + ' - PostgreSQL: mdillon/postgis:9.5\n' + '\n' + 'Be sure to upgrade to these versions before going ' + 'further!' + ) + CLI.framed_print(message) + response = CLI.yes_no_question( + 'Are you sure you want to continue?', + default=False + ) + if response is False: + sys.exit(0) else: - CLI.colored_print("Administrator privilege escalation is needed to prepare DB", - CLI.COLOR_WARNING) - # Write `kobo_first_run` file to run postgres container's entrypoint flawlessly. - os.system("echo $(date) | sudo tee -a {} > /dev/null".format( - os.path.join(self.__config["kobodocker_path"], ".vols", "db", "kobo_first_run") - )) - - def __welcome(self): - CLI.colored_print("╔═══════════════════════════════════════════════════════════════╗", CLI.COLOR_WARNING) - CLI.colored_print("║ Welcome to KoBoInstall! ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ You are going to be asked some questions that will ║", CLI.COLOR_WARNING) - CLI.colored_print("║ determine how to build the configuration of `KoBoToolBox`. ║", CLI.COLOR_WARNING) - CLI.colored_print("║ ║", CLI.COLOR_WARNING) - CLI.colored_print("║ Some questions already have default values (within brackets). 
║", CLI.COLOR_WARNING) - CLI.colored_print("║ Just press `enter` to accept the default value or enter `-` ║", CLI.COLOR_WARNING) - CLI.colored_print("║ to remove previously entered value. ║", CLI.COLOR_WARNING) - CLI.colored_print("║ Otherwise choose between choices or type your answer. ║", CLI.COLOR_WARNING) - CLI.colored_print("╚═══════════════════════════════════════════════════════════════╝", CLI.COLOR_WARNING) + CLI.colored_print( + 'Privileges escalation is needed to prepare DB', + CLI.COLOR_WARNING) + # Write `kobo_first_run` file to run postgres + # container's entrypoint flawlessly. + os.system( + 'echo $(date) | sudo tee -a {} > /dev/null'.format( + os.path.join(self.__dict['kobodocker_path'], + '.vols', 'db', 'kobo_first_run') + )) + + @staticmethod + def __welcome(): + message = ( + 'Welcome to kobo-install.\n' + '\n' + 'You are going to be asked some questions that will determine how ' + 'to build the configuration of `KoBoToolBox`.\n' + '\n' + 'Some questions already have default values (within brackets).\n' + 'Just press `enter` to accept the default value or enter `-` to ' + 'remove previously entered value.\n' + 'Otherwise choose between choices or type your answer. 
' + ) + CLI.framed_print(message, color=CLI.COLOR_INFO) def __write_upsert_db_users_trigger_file(self, content, destination): try: - trigger_file = os.path.join(self.__config.get("kobodocker_path"), + trigger_file = os.path.join(self.__dict['kobodocker_path'], destination, Config.UPSERT_DB_USERS_TRIGGER_FILE) - with open(trigger_file, "w") as f: + with open(trigger_file, 'w') as f: f.write(content) except (IOError, OSError): - CLI.colored_print("Could not write {} file".format( + CLI.colored_print('Could not write {} file'.format( Config.UPSERT_DB_USERS_TRIGGER_FILE), CLI.COLOR_ERROR) return False diff --git a/helpers/network.py b/helpers/network.py index 4dda650..708baa0 100644 --- a/helpers/network.py +++ b/helpers/network.py @@ -1,20 +1,12 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - import array import fcntl -try: - import httplib - from urllib2 import urlopen -except: - from http import client as httplib - from urllib.request import urlopen - import platform -import re import socket import struct import sys +from http import client as httplib +from urllib.request import urlopen from helpers.cli import CLI @@ -24,31 +16,38 @@ class Network: STATUS_OK_200 = 200 @staticmethod - def get_local_interfaces(all=False): + def get_local_interfaces(all_=False): """ - Returns a dictionary of name:ip key value pairs. - Linux Only! - Source: https://gist.github.com/bubthegreat/24c0c43ad159d8dfed1a5d3f6ca99f9b - - :param all: bool If False, filter virtual interfaces such VMWare, Docker etc... - :return: dict + Returns a dictionary of name:ip key value pairs. + Linux Only! + Source: https://gist.github.com/bubthegreat/24c0c43ad159d8dfed1a5d3f6ca99f9b + + Args: + all_ (bool): If False, filter virtual interfaces such VMWare, + Docker etc... 
+ Returns: + dict """ ip_dict = {} - excluded_interfaces = ("lo", "docker", "br-", "veth", "vmnet") + excluded_interfaces = ('lo', 'docker', 'br-', 'veth', 'vmnet') - if platform.system() == "Linux": - # Max possible bytes for interface result. Will truncate if more than 4096 characters to describe interfaces. + if platform.system() == 'Linux': + # Max possible bytes for interface result. + # Will truncate if more than 4096 characters to describe interfaces. MAX_BYTES = 4096 - # We're going to make a blank byte array to operate on. This is our fill char. + # We're going to make a blank byte array to operate on. + # This is our fill char. FILL_CHAR = b'\0' - # Command defined in ioctl.h for the system operation for get iface list - # Defined at https://code.woboq.org/qt5/include/bits/ioctls.h.html under - # /* Socket configuration controls. */ section. + # Command defined in ioctl.h for the system operation for get iface + # list. + # Defined at https://code.woboq.org/qt5/include/bits/ioctls.h.html + # under /* Socket configuration controls. */ section. SIOCGIFCONF = 0x8912 - # Make a dgram socket to use as our file descriptor that we'll operate on. + # Make a dgram socket to use as our file descriptor that we'll + # operate on. sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Make a byte array with our fill character. @@ -61,29 +60,38 @@ def get_local_interfaces(all=False): mutable_byte_buffer = struct.pack('iL', MAX_BYTES, names_address) # mutate our mutable_byte_buffer with the results of get_iface_list. 
- # NOTE: mutated_byte_buffer is just a reference to mutable_byte_buffer - for the sake of clarity we've defined them as - # separate variables, however they are the same address space - that's how fcntl.ioctl() works since the mutate_flag=True + # NOTE: mutated_byte_buffer is just a reference to + # mutable_byte_buffer - for the sake of clarity we've defined + # them as separate variables, however they are the same address + # space - that's how fcntl.ioctl() works since the mutate_flag=True # by default. - mutated_byte_buffer = fcntl.ioctl(sock.fileno(), SIOCGIFCONF, mutable_byte_buffer) + mutated_byte_buffer = fcntl.ioctl(sock.fileno(), + SIOCGIFCONF, + mutable_byte_buffer) - # Get our max_bytes of our mutated byte buffer that points to the names variable address space. - max_bytes_out, names_address_out = struct.unpack('iL', mutated_byte_buffer) + # Get our max_bytes of our mutated byte buffer + # that points to the names variable address space. + max_bytes_out, names_address_out = struct.unpack( + 'iL', + mutated_byte_buffer) - # Convert names to a bytes array - keep in mind we've mutated the names array, so now our bytes out should represent - # the bytes results of the get iface list ioctl command. - namestr = names.tostring() + # Convert names to a bytes array - keep in mind we've mutated the + # names array, so now our bytes out should represent the bytes + # results of the get iface list ioctl command. + namestr = names.tobytes() namestr[:max_bytes_out] bytes_out = namestr[:max_bytes_out] - # Each entry is 40 bytes long. The first 16 bytes are the name string. - # the 20-24th bytes are IP address octet strings in byte form - one for each byte. + # Each entry is 40 bytes long. The first 16 bytes are the + # name string. The 20-24th bytes are IP address octet strings in + # byte form - one for each byte. # Don't know what 17-19 are, or bytes 25:40. 
for i in range(0, max_bytes_out, 40): name = namestr[i: i + 16].split(FILL_CHAR, 1)[0] - name = name.decode('utf-8') + name = name.decode() ip_bytes = namestr[i + 20:i + 24] full_addr = [] for netaddr in ip_bytes: @@ -91,20 +99,32 @@ def get_local_interfaces(all=False): full_addr.append(str(netaddr)) elif isinstance(netaddr, str): full_addr.append(str(ord(netaddr))) - if not name.startswith(excluded_interfaces) or all: + if not name.startswith(excluded_interfaces) or all_: ip_dict[name] = '.'.join(full_addr) else: try: import netifaces except ImportError: - CLI.colored_print("You must install netinfaces first! Please type `pip install netifaces --user`", CLI.COLOR_ERROR) + CLI.colored_print('You must install netinfaces first! Please ' + 'type `pip install netifaces --user`', + CLI.COLOR_ERROR) sys.exit(1) for interface in netifaces.interfaces(): - if not interface.startswith(excluded_interfaces) or all: + if not interface.startswith(excluded_interfaces) or all_: ifaddresses = netifaces.ifaddresses(interface) - if ifaddresses.get(netifaces.AF_INET) and ifaddresses.get(netifaces.AF_INET)[0].get("addr"): - ip_dict[interface] = ifaddresses.get(netifaces.AF_INET)[0].get("addr") + if ( + ifaddresses.get(netifaces.AF_INET) + and ifaddresses.get(netifaces.AF_INET)[0].get('addr') + ): + addresses = ifaddresses.get(netifaces.AF_INET) + ip_dict[interface] = addresses[0].get('addr') + for i in range(1, len(addresses)): + virtual_interface = '{interface}:{idx}'.format( + interface=interface, + idx=i + ) + ip_dict[virtual_interface] = addresses[i]['addr'] return ip_dict @@ -117,7 +137,7 @@ def get_primary_ip(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: # doesn't even have to be reachable - s.connect(("10.255.255.255", 1)) + s.connect(('10.255.255.255', 1)) ip_address = s.getsockname()[0] except: ip_address = None @@ -137,16 +157,20 @@ def get_primary_interface(cls): if ip_address == primary_ip: return interface - return "eth0" + return 'eth0' @staticmethod def 
status_check(hostname, endpoint, port=80, secure=False): try: if secure: - conn = httplib.HTTPSConnection("{}:{}".format(hostname, port), timeout=10) + conn = httplib.HTTPSConnection( + '{}:{}'.format(hostname, port), + timeout=10) else: - conn = httplib.HTTPConnection("{}:{}".format(hostname, port), timeout=10) - conn.request("GET", endpoint) + conn = httplib.HTTPConnection( + '{}:{}'.format(hostname, port), + timeout=10) + conn.request('GET', endpoint) response = conn.getresponse() return response.status except: @@ -157,7 +181,7 @@ def status_check(hostname, endpoint, port=80, secure=False): @staticmethod def is_port_open(port): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - result = sock.connect_ex(("127.0.0.1", int(port))) + result = sock.connect_ex(('127.0.0.1', int(port))) return result == 0 @staticmethod @@ -166,9 +190,11 @@ def curl(url): response = urlopen(url) data = response.read() if isinstance(data, str): - return data # Python 2 + # Python 2 + return data else: - return data.decode(response.headers.get_content_charset()) # Python 3 + # Python 3 + return data.decode(response.headers.get_content_charset()) except Exception as e: pass return diff --git a/helpers/setup.py b/helpers/setup.py index 899796d..bc3355c 100644 --- a/helpers/setup.py +++ b/helpers/setup.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals import os import shutil import sys @@ -14,99 +13,90 @@ class Setup: @classmethod - def clone_kobodocker(cls, config_object): + def clone_kobodocker(cls, config): """ - :param config_object: `Config` + Args: + config (helpers.config.Config) """ - config = config_object.get_config() - do_update = config_object.first_time + dict_ = config.get_dict() + do_update = config.first_time - if not os.path.isdir(os.path.join(config["kobodocker_path"], ".git")): + if not os.path.isdir(os.path.join(dict_['kobodocker_path'], '.git')): # Move unique id file to /tmp in order to clone without errors # 
(e.g. not empty directory) tmp_dirpath = tempfile.mkdtemp() - shutil.move(os.path.join(config["kobodocker_path"], + shutil.move(os.path.join(dict_['kobodocker_path'], Config.UNIQUE_ID_FILE), os.path.join(tmp_dirpath, Config.UNIQUE_ID_FILE)) # clone project git_command = [ - "git", "clone", "https://github.com/kobotoolbox/kobo-docker", - config["kobodocker_path"] + 'git', 'clone', 'https://github.com/kobotoolbox/kobo-docker', + dict_['kobodocker_path'] ] CLI.run_command(git_command, cwd=os.path.dirname( - config["kobodocker_path"])) + dict_['kobodocker_path'])) shutil.move(os.path.join(tmp_dirpath, Config.UNIQUE_ID_FILE), - os.path.join(config["kobodocker_path"], + os.path.join(dict_['kobodocker_path'], Config.UNIQUE_ID_FILE)) shutil.rmtree(tmp_dirpath) do_update = True # Force update if do_update: - cls.update_kobodocker(config) + cls.update_kobodocker(dict_) @classmethod def post_update(cls, cron): - config_object = Config() + config = Config() # When `cron` is True, we want to bypass question and just recreate # YML and environment files from new templates if cron is True: - current_config = config_object.get_config_template() - current_config.update(config_object.get_config()) - config_object.set_config(current_config) - Template.render(config_object, force=True) + current_dict = config.get_upgraded_dict() + config.set_config(current_dict) + config.write_config() + Template.render(config, force=True) sys.exit(0) - CLI.colored_print("╔═════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ After an update, it's strongly recommended to run ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ `./run.py --setup` to regenerate environment files. 
║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - - CLI.colored_print("Do you want to proceed?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - response = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - if response == Config.TRUE: - current_config = config_object.build() - Template.render(config_object) - config_object.init_letsencrypt() - Setup.update_hosts(current_config) - - CLI.colored_print("Do you want to (re)start containers?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - response = CLI.get_response([Config.TRUE, Config.FALSE], Config.TRUE) - if response == Config.TRUE: + message = ( + 'After an update, it is strongly recommended to run\n' + '`python3 run.py --setup` to regenerate environment files.' + ) + CLI.framed_print(message, color=CLI.COLOR_INFO) + response = CLI.yes_no_question('Do you want to proceed?') + if response is True: + current_dict = config.build() + Template.render(config) + config.init_letsencrypt() + Setup.update_hosts(current_dict) + question = 'Do you want to (re)start containers?' 
+ response = CLI.yes_no_question(question) + if response is True: Command.start() @staticmethod - def update_kobodocker(config=None): + def update_kobodocker(dict_=None): """ - :param config: Config().get_config() + Args: + dict_ (dict): Dictionary provided by `Config.get_dict()` """ - if not config: - config_object = Config() - config = config_object.get_config() + if not dict_: + config = Config() + dict_ = config.get_dict() # fetch new tags and prune - git_command = ["git", "fetch", "-p"] - CLI.run_command(git_command, cwd=config["kobodocker_path"]) + git_command = ['git', 'fetch', '-p'] + CLI.run_command(git_command, cwd=dict_['kobodocker_path']) # checkout branch - git_command = ["git", "checkout", "--force", Config.KOBO_DOCKER_BRANCH] - CLI.run_command(git_command, cwd=config["kobodocker_path"]) + git_command = ['git', 'checkout', '--force', Config.KOBO_DOCKER_BRANCH] + CLI.run_command(git_command, cwd=dict_['kobodocker_path']) # update code - git_command = ["git", "pull", "origin", Config.KOBO_DOCKER_BRANCH] - CLI.run_command(git_command, cwd=config["kobodocker_path"]) + git_command = ['git', 'pull', 'origin', Config.KOBO_DOCKER_BRANCH] + CLI.run_command(git_command, cwd=dict_['kobodocker_path']) @staticmethod def update_koboinstall(version): @@ -115,44 +105,51 @@ def update_koboinstall(version): CLI.run_command(git_fetch_prune_command) # checkout branch - git_command = ["git", "checkout", "--force", version] + git_command = ['git', 'checkout', '--force', version] CLI.run_command(git_command) # update code - git_command = ["git", "pull", "origin", version] + git_command = ['git', 'pull', 'origin', version] CLI.run_command(git_command) @classmethod - def update_hosts(cls, config): + def update_hosts(cls, dict_): + """ + + Args: + dict_ (dict): Dictionary provided by `Config.get_dict()` + """ + if dict_['local_installation']: + start_sentence = '### (BEGIN) KoBoToolbox local routes' + end_sentence = '### (END) KoBoToolbox local routes' - if 
config.get("local_installation") == Config.TRUE: - start_sentence = "### (BEGIN) KoBoToolbox local routes" - end_sentence = "### (END) KoBoToolbox local routes" + _, tmp_file_path = tempfile.mkstemp() - with open("/etc/hosts", "r") as f: + with open('/etc/hosts', 'r') as f: tmp_host = f.read() start_position = tmp_host.find(start_sentence) end_position = tmp_host.find(end_sentence) if start_position > -1: - tmp_host = tmp_host[0: start_position] + tmp_host[end_position + len(end_sentence) + 1:] - - routes = "{ip_address} " \ - "{kpi_subdomain}.{public_domain_name} " \ - "{kc_subdomain}.{public_domain_name} " \ - "{ee_subdomain}.{public_domain_name}".format( - ip_address=config.get("local_interface_ip"), - public_domain_name=config.get("public_domain_name"), - kpi_subdomain=config.get("kpi_subdomain"), - kc_subdomain=config.get("kc_subdomain"), - ee_subdomain=config.get("ee_subdomain") + tmp_host = tmp_host[0: start_position] \ + + tmp_host[end_position + len(end_sentence) + 1:] + + routes = '{ip_address} ' \ + '{kpi_subdomain}.{public_domain_name} ' \ + '{kc_subdomain}.{public_domain_name} ' \ + '{ee_subdomain}.{public_domain_name}'.format( + ip_address=dict_['local_interface_ip'], + public_domain_name=dict_['public_domain_name'], + kpi_subdomain=dict_['kpi_subdomain'], + kc_subdomain=dict_['kc_subdomain'], + ee_subdomain=dict_['ee_subdomain'] ) - tmp_host = ("{bof}" - "\n{start_sentence}" - "\n{routes}" - "\n{end_sentence}" + tmp_host = ('{bof}' + '\n{start_sentence}' + '\n{routes}' + '\n{end_sentence}' ).format( bof=tmp_host.strip(), start_sentence=start_sentence, @@ -160,31 +157,36 @@ def update_hosts(cls, config): end_sentence=end_sentence ) - with open("/tmp/etchosts", "w") as f: + with open(tmp_file_path, 'w') as f: f.write(tmp_host) - if config.get("review_host") != Config.FALSE: - CLI.colored_print("╔═══════════════════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ Administrative privileges are required to 
update your /etc/hosts. ║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═══════════════════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - CLI.colored_print("Do you want to review your /etc/hosts file before overwriting it?", - CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - config["review_host"] = CLI.get_response([Config.TRUE, Config.FALSE], - config.get("review_host", Config.FALSE)) - if config["review_host"] == Config.TRUE: - print(tmp_host) - CLI.colored_input("Press any keys when ready") - - # Save 'review_host' - config_ = Config() - config_.write_config() - - return_value = os.system("sudo mv /etc/hosts /etc/hosts.old && sudo mv /tmp/etchosts /etc/hosts") + message = ( + 'Privileges escalation is required to update ' + 'your `/etc/hosts`.' + ) + CLI.framed_print(message, color=CLI.COLOR_INFO) + dict_['review_host'] = CLI.yes_no_question( + 'Do you want to review your /etc/hosts file ' + 'before overwriting it?', + default=dict_['review_host'] + ) + if dict_['review_host']: + print(tmp_host) + CLI.colored_input('Press any keys when ready') + + # Save 'review_host' + config = Config() + config.write_config() + + cmd = ( + 'sudo cp /etc/hosts /etc/hosts.old ' + '&& sudo cp {tmp_file_path} /etc/hosts' + ).format(tmp_file_path=tmp_file_path) + + return_value = os.system(cmd) + + os.unlink(tmp_file_path) + if return_value != 0: sys.exit(1) @@ -195,28 +197,18 @@ def validate_already_run(): pulled and checked out before going further. 
""" - config_object = Config() - config = config_object.get_config() + config = Config() + dict_ = config.get_dict() def display_error_message(message): - max_chars_count = 51 - message_length = len(message) - spacer = " " * (max_chars_count - message_length) - - CLI.colored_print("╔═════════════════════════════════════════════════════╗", - CLI.COLOR_ERROR) - CLI.colored_print("║ {}{} ║".format(message, spacer), - CLI.COLOR_ERROR) - CLI.colored_print("║ Please run `./run.py --setup` first . ║", - CLI.COLOR_ERROR) - CLI.colored_print("╚═════════════════════════════════════════════════════╝", - CLI.COLOR_ERROR) + message += '\nPlease run `python3 run.py --setup` first.' + CLI.framed_print(message, color=CLI.COLOR_ERROR) sys.exit(1) try: - config['kobodocker_path'] + dict_['kobodocker_path'] except KeyError: display_error_message('No configuration file found.') - if not os.path.isdir(os.path.join(config["kobodocker_path"], ".git")): + if not os.path.isdir(os.path.join(dict_['kobodocker_path'], '.git')): display_error_message('`kobo-docker` repository is missing!') diff --git a/helpers/singleton.py b/helpers/singleton.py index 3ac501d..7b13106 100644 --- a/helpers/singleton.py +++ b/helpers/singleton.py @@ -1,21 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - - -# Copy this method from `six` library to avoid import -# Remove it when dropping Python2 support -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - class Singleton(type): _instances = {} diff --git a/helpers/template.py b/helpers/template.py index 098f080..03c1948 100644 --- a/helpers/template.py +++ b/helpers/template.py @@ -1,84 +1,87 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - import fnmatch import json import os import re import stat import sys -try: - from urllib.parse import quote_plus -except ImportError: - from urllib import quote_plus - from string import Template as PyTemplate +from urllib.parse import quote_plus from helpers.cli import CLI from helpers.config import Config class Template: - UNIQUE_ID_FILE = ".uniqid" + UNIQUE_ID_FILE = '.uniqid' @classmethod - def render(cls, config_object, force=False): + def render(cls, config, force=False): + """ + Write configuration files based on `config` + + Args: + config (helpers.config.Config) + force (bool) + """ - config = config_object.get_config() - template_variables = cls.__get_template_variables(config_object) + dict_ = config.get_dict() + template_variables = cls.__get_template_variables(config) - environment_directory = config_object.get_env_files_path() + environment_directory = config.get_env_files_path() unique_id = cls.__read_unique_id(environment_directory) - if force is not True and \ - unique_id is not None and str(config.get("unique_id", "")) != str(unique_id): - CLI.colored_print("╔═════════════════════════════════════════════════════════════════════╗", - CLI.COLOR_WARNING) - CLI.colored_print("║ WARNING! ║", - CLI.COLOR_WARNING) - CLI.colored_print("║ Existing environment files are detected. Files will be overwritten. 
║", - CLI.COLOR_WARNING) - CLI.colored_print("╚═════════════════════════════════════════════════════════════════════╝", - CLI.COLOR_WARNING) - - CLI.colored_print("Do you want to continue?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - - if CLI.get_response([Config.TRUE, Config.FALSE], Config.FALSE) == Config.FALSE: - sys.exit() - - cls.__write_unique_id(environment_directory, config.get("unique_id")) + if ( + not force and unique_id + and str(dict_.get('unique_id', '')) != str(unique_id) + ): + message = ( + 'WARNING!\n\n' + 'Existing environment files are detected. Files will be ' + 'overwritten.' + ) + CLI.framed_print(message) + response = CLI.yes_no_question( + 'Do you want to continue?', + default=False + ) + if not response: + sys.exit(0) + + cls.__write_unique_id(environment_directory, dict_['unique_id']) base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - templates_path_parent = os.path.join(base_dir, "templates") + templates_path_parent = os.path.join(base_dir, 'templates') # Environment - templates_path = os.path.join(templates_path_parent, Config.ENV_FILES_DIR, "") + templates_path = os.path.join(templates_path_parent, + Config.ENV_FILES_DIR, + '') for root, dirnames, filenames in os.walk(templates_path): - destination_directory = cls.__create_directory(environment_directory, - root, - templates_path) + destination_directory = cls.__create_directory( + environment_directory, + root, + templates_path) cls.__write_templates(template_variables, root, destination_directory, filenames) # kobo-docker - templates_path = os.path.join(templates_path_parent, "kobo-docker") + templates_path = os.path.join(templates_path_parent, 'kobo-docker') for root, dirnames, filenames in os.walk(templates_path): - destination_directory = config.get("kobodocker_path") + destination_directory = dict_['kobodocker_path'] cls.__write_templates(template_variables, root, destination_directory, filenames) # nginx-certbox - 
if config_object.use_letsencrypt: + if config.use_letsencrypt: templates_path = os.path.join(templates_path_parent, - Config.LETSENCRYPT_DOCKER_DIR, "") + Config.LETSENCRYPT_DOCKER_DIR, '') for root, dirnames, filenames in os.walk(templates_path): destination_directory = cls.__create_directory( - config_object.get_letsencrypt_repo_path(), + config.get_letsencrypt_repo_path(), root, templates_path) cls.__write_templates(template_variables, @@ -87,178 +90,206 @@ def render(cls, config_object, force=False): filenames) @classmethod - def render_maintenance(cls, config_object): + def render_maintenance(cls, config): - config = config_object.get_config() - template_variables = cls.__get_template_variables(config_object) + dict_ = config.get_dict() + template_variables = cls.__get_template_variables(config) base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - templates_path_parent = os.path.join(base_dir, "templates") + templates_path_parent = os.path.join(base_dir, 'templates') # kobo-docker - templates_path = os.path.join(templates_path_parent, "kobo-docker") + templates_path = os.path.join(templates_path_parent, 'kobo-docker') for root, dirnames, filenames in os.walk(templates_path): - filenames = [filename for filename in filenames if 'maintenance' in filename] - destination_directory = config.get("kobodocker_path") + filenames = [filename + for filename in filenames if 'maintenance' in filename] + destination_directory = dict_['kobodocker_path'] cls.__write_templates(template_variables, root, destination_directory, filenames) @classmethod - def __create_directory(cls, template_root_directory, path="", base_dir=""): + def __create_directory(cls, template_root_directory, path='', base_dir=''): + + # Handle case when path is root and equals ''. + path = os.path.join(path, '') - path = os.path.join(path, "") # Handle case when path is root and equals "". 
destination_directory = os.path.realpath(os.path.join( template_root_directory, - path.replace(base_dir, "") + path.replace(base_dir, '') )) + if not os.path.isdir(destination_directory): try: os.makedirs(destination_directory) except OSError: CLI.colored_print( - "Can not create {}. " - "Please verify permissions!".format(destination_directory), + 'Can not create {}. ' + 'Please verify permissions!'.format(destination_directory), CLI.COLOR_ERROR) sys.exit(1) return destination_directory @staticmethod - def __get_template_variables(config_object): + def __get_template_variables(config): + """ + Write configuration files based on `config` + + Args: + config (helpers.config.Config) + """ - config = config_object.get_config() + dict_ = config.get_dict() - def _get_value(property_, true_value="", false_value="#", - comparison_value=Config.TRUE): + def _get_value(property_, true_value='', false_value='#', + comparison_value=True): return true_value \ - if config.get(property_) == comparison_value \ + if dict_[property_] == comparison_value \ else false_value - if config_object.proxy: - nginx_port = config.get("nginx_proxy_port") + if config.proxy: + nginx_port = dict_['nginx_proxy_port'] else: - nginx_port = config.get("exposed_nginx_docker_port") + nginx_port = dict_['exposed_nginx_docker_port'] return { - "PROTOCOL": _get_value("https", "https", "http"), - "USE_HTTPS": _get_value("https"), - "USE_AWS": _get_value("use_aws"), - "AWS_ACCESS_KEY_ID": config.get("aws_access_key", ""), - "AWS_SECRET_ACCESS_KEY": config.get("aws_secret_key", ""), - "AWS_BUCKET_NAME": config.get("aws_bucket_name", ""), - "GOOGLE_UA": config.get("google_ua", ""), - "GOOGLE_API_KEY": config.get("google_api_key", ""), - "INTERNAL_DOMAIN_NAME": config.get("internal_domain_name", ""), - "PRIVATE_DOMAIN_NAME": config.get("private_domain_name", ""), - "PUBLIC_DOMAIN_NAME": config.get("public_domain_name", ""), - "KOBOFORM_SUBDOMAIN": config.get("kpi_subdomain", ""), - "KOBOCAT_SUBDOMAIN": 
config.get("kc_subdomain", ""), - "ENKETO_SUBDOMAIN": config.get("ee_subdomain", ""), - "KOBO_SUPERUSER_USERNAME": config.get("super_user_username", ""), - "KOBO_SUPERUSER_PASSWORD": config.get("super_user_password", ""), - "ENKETO_API_KEY": config.get("enketo_api_token"), - "DJANGO_SECRET_KEY": config.get("django_secret_key"), - "ENKETO_ENCRYPTION_KEY": config.get("enketo_encryption_key"), - "ENKETO_LESS_SECURE_ENCRYPTION_KEY": config.get("enketo_less_secure_encryption_key"), - "KOBOCAT_RAVEN_DSN": config.get("kobocat_raven", ""), - "KPI_RAVEN_DSN": config.get("kpi_raven", ""), - "KPI_RAVEN_JS_DSN": config.get("kpi_raven_js", ""), - "KC_POSTGRES_DB": config.get("kc_postgres_db", ""), - "KPI_POSTGRES_DB": config.get("kpi_postgres_db", ""), - "POSTGRES_USER": config.get("postgres_user", ""), - "POSTGRES_PASSWORD": config.get("postgres_password", ""), - "POSTGRES_PASSWORD_URL_ENCODED": quote_plus(config.get("postgres_password", "")), - "DEBUG": config.get("debug", False) == Config.TRUE, - "SMTP_HOST": config.get("smtp_host", ""), - "SMTP_PORT": config.get("smtp_port", ""), - "SMTP_USER": config.get("smtp_user", ""), - "SMTP_PASSWORD": config.get("smtp_password", ""), - "SMTP_USE_TLS": config.get("smtp_use_tls", Config.TRUE) == Config.TRUE, - "DEFAULT_FROM_EMAIL": config.get("default_from_email", ""), - "PRIMARY_BACKEND_IP": config.get("primary_backend_ip"), - "LOCAL_INTERFACE_IP": config.get("local_interface_ip"), - "KC_PATH": config.get("kc_path", ""), - "KPI_PATH": config.get("kpi_path", ""), - "USE_KPI_DEV_MODE": _get_value("kpi_path", - true_value="#", - false_value="", - comparison_value=""), - "USE_KC_DEV_MODE": _get_value("kc_path", - true_value="#", - false_value="", - comparison_value=""), - "KC_DEV_BUILD_ID": config.get("kc_dev_build_id", ""), - "KPI_DEV_BUILD_ID": config.get("kpi_dev_build_id", ""), - "NGINX_PUBLIC_PORT": config.get("exposed_nginx_docker_port", - Config.DEFAULT_NGINX_PORT), - "NGINX_EXPOSED_PORT": nginx_port, - "UWSGI_WORKERS_MAX": 
config.get("uwsgi_workers_max"), - "UWSGI_WORKERS_START": config.get("uwsgi_workers_start"), - "UWSGI_MAX_REQUESTS": config.get("uwsgi_max_requests"), - "UWSGI_SOFT_LIMIT": int(config.get("uwsgi_soft_limit")) * 1024 * 1024, - "UWSGI_HARAKIRI": config.get("uwsgi_harakiri"), - "UWSGI_WORKER_RELOAD_MERCY": config.get("uwsgi_worker_reload_mercy"), - "UWSGI_PASS_TIMEOUT": int(config.get("uwsgi_harakiri")) + 10, - "POSTGRES_REPLICATION_PASSWORD": config.get("postgres_replication_password"), - "WSGI_SERVER": "runserver_plus" if config_object.dev_mode else "uWSGI", - "USE_X_FORWARDED_HOST": "" if config_object.dev_mode else "#", - "OVERRIDE_POSTGRES_SETTINGS": _get_value("postgres_settings"), - "POSTGRES_APP_PROFILE": config.get("postgres_profile", ""), - "POSTGRES_RAM": config.get("postgres_ram", ""), - "POSTGRES_SETTINGS": config.get("postgres_settings_content", ""), - "POSTGRES_BACKUP_FROM_SECONDARY": _get_value("backup_from_primary", - comparison_value=Config.FALSE), - "POSTGRES_PORT": config.get("postgresql_port", "5432"), - "MONGO_PORT": config.get("mongo_port", "27017"), - "REDIS_MAIN_PORT": config.get("redis_main_port", "6739"), - "REDIS_CACHE_PORT": config.get("redis_cache_port", "6380"), - "USE_BACKUP": "" if config.get("use_backup") == Config.TRUE else "#", - "USE_AWS_BACKUP": "" if config_object.aws and - config.get("use_backup") == Config.TRUE and - config.get("aws_backup_bucket_name") != "" else "#", - "USE_MEDIA_BACKUP": "" if (not config_object.aws and - config.get("use_backup") == Config.TRUE) else "#", - "KOBOCAT_MEDIA_BACKUP_SCHEDULE": config.get("kobocat_media_backup_schedule"), - "MONGO_BACKUP_SCHEDULE": config.get("mongo_backup_schedule"), - "POSTGRES_BACKUP_SCHEDULE": config.get("postgres_backup_schedule"), - "REDIS_BACKUP_SCHEDULE": config.get("redis_backup_schedule"), - "AWS_BACKUP_BUCKET_NAME": config.get("aws_backup_bucket_name"), - "AWS_BACKUP_YEARLY_RETENTION": config.get("aws_backup_yearly_retention"), - "AWS_BACKUP_MONTHLY_RETENTION": 
config.get("aws_backup_monthly_retention"), - "AWS_BACKUP_WEEKLY_RETENTION": config.get("aws_backup_weekly_retention"), - "AWS_BACKUP_DAILY_RETENTION": config.get("aws_backup_daily_retention"), - "AWS_MONGO_BACKUP_MINIMUM_SIZE": config.get("aws_mongo_backup_minimum_size"), - "AWS_POSTGRES_BACKUP_MINIMUM_SIZE": config.get("aws_postgres_backup_minimum_size"), - "AWS_REDIS_BACKUP_MINIMUM_SIZE": config.get("aws_redis_backup_minimum_size"), - "AWS_BACKUP_UPLOAD_CHUNK_SIZE": config.get("aws_backup_upload_chunk_size"), - "AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED": _get_value( - "aws_backup_bucket_deletion_rule_enabled", "True", "False"), - "LETSENCRYPT_EMAIL": config.get("letsencrypt_email"), - "MAINTENANCE_ETA": config.get("maintenance_eta", ""), - "MAINTENANCE_DATE_ISO": config.get("maintenance_date_iso", ""), - "MAINTENANCE_DATE_STR": config.get("maintenance_date_str", ""), - "MAINTENANCE_EMAIL": config.get("maintenance_email", ""), - "USE_NPM_FROM_HOST": "" if (config_object.dev_mode and - config.get("npm_container") == Config.FALSE) else "#", - "DOCKER_PREFIX": config_object.get_prefix("backend"), - "USE_BACKEND_NETWORK": _get_value("expose_backend_ports", - comparison_value=Config.FALSE), - "EXPOSE_BACKEND_PORTS": _get_value("expose_backend_ports"), - "USE_FAKE_DNS": _get_value("local_installation"), - "ADD_BACKEND_EXTRA_HOSTS": "" if (config_object.expose_backend_ports and - not config_object.use_private_dns) else "#", - "USE_EXTRA_HOSTS": "" if (config_object.local_install or - config_object.expose_backend_ports and - not config_object.use_private_dns) else "#", - "MONGO_ROOT_USERNAME": config.get("mongo_root_username"), - "MONGO_ROOT_PASSWORD": config.get("mongo_root_password"), - "MONGO_USER_USERNAME": config.get("mongo_user_username"), - "MONGO_USER_PASSWORD": config.get("mongo_user_password"), - "REDIS_PASSWORD": config.get("redis_password"), - "REDIS_PASSWORD_URL_ENCODED": quote_plus(config.get("redis_password")), - "REDIS_PASSWORD_JS_ENCODED": 
json.dumps(config.get("redis_password")), + 'PROTOCOL': _get_value('https', 'https', 'http'), + 'USE_HTTPS': _get_value('https'), + 'USE_AWS': _get_value('use_aws'), + 'AWS_ACCESS_KEY_ID': dict_['aws_access_key'], + 'AWS_SECRET_ACCESS_KEY': dict_['aws_secret_key'], + 'AWS_BUCKET_NAME': dict_['aws_bucket_name'], + 'GOOGLE_UA': dict_['google_ua'], + 'GOOGLE_API_KEY': dict_['google_api_key'], + 'INTERNAL_DOMAIN_NAME': dict_['internal_domain_name'], + 'PRIVATE_DOMAIN_NAME': dict_['private_domain_name'], + 'PUBLIC_DOMAIN_NAME': dict_['public_domain_name'], + 'KOBOFORM_SUBDOMAIN': dict_['kpi_subdomain'], + 'KOBOCAT_SUBDOMAIN': dict_['kc_subdomain'], + 'ENKETO_SUBDOMAIN': dict_['ee_subdomain'], + 'KOBO_SUPERUSER_USERNAME': dict_['super_user_username'], + 'KOBO_SUPERUSER_PASSWORD': dict_['super_user_password'], + 'ENKETO_API_KEY': dict_['enketo_api_token'], + 'DJANGO_SECRET_KEY': dict_['django_secret_key'], + 'ENKETO_ENCRYPTION_KEY': dict_['enketo_encryption_key'], + 'ENKETO_LESS_SECURE_ENCRYPTION_KEY': dict_[ + 'enketo_less_secure_encryption_key'], + 'KOBOCAT_RAVEN_DSN': dict_['kobocat_raven'], + 'KPI_RAVEN_DSN': dict_['kpi_raven'], + 'KPI_RAVEN_JS_DSN': dict_['kpi_raven_js'], + 'KC_POSTGRES_DB': dict_['kc_postgres_db'], + 'KPI_POSTGRES_DB': dict_['kpi_postgres_db'], + 'POSTGRES_USER': dict_['postgres_user'], + 'POSTGRES_PASSWORD': dict_['postgres_password'], + 'POSTGRES_PASSWORD_URL_ENCODED': quote_plus( + dict_['postgres_password']), + 'DEBUG': dict_['debug'], + 'SMTP_HOST': dict_['smtp_host'], + 'SMTP_PORT': dict_['smtp_port'], + 'SMTP_USER': dict_['smtp_user'], + 'SMTP_PASSWORD': dict_['smtp_password'], + 'SMTP_USE_TLS': dict_['smtp_use_tls'], + 'DEFAULT_FROM_EMAIL': dict_['default_from_email'], + 'PRIMARY_BACKEND_IP': dict_['primary_backend_ip'], + 'LOCAL_INTERFACE_IP': dict_['local_interface_ip'], + 'KC_PATH': dict_['kc_path'], + 'KPI_PATH': dict_['kpi_path'], + 'USE_KPI_DEV_MODE': _get_value('kpi_path', + true_value='#', + false_value='', + comparison_value=''), + 
'USE_KC_DEV_MODE': _get_value('kc_path', + true_value='#', + false_value='', + comparison_value=''), + 'KC_DEV_BUILD_ID': dict_['kc_dev_build_id'], + 'KPI_DEV_BUILD_ID': dict_['kpi_dev_build_id'], + 'NGINX_PUBLIC_PORT': dict_['exposed_nginx_docker_port'], + 'NGINX_EXPOSED_PORT': nginx_port, + 'UWSGI_WORKERS_MAX': dict_['uwsgi_workers_max'], + 'UWSGI_WORKERS_START': dict_['uwsgi_workers_start'], + 'UWSGI_MAX_REQUESTS': dict_['uwsgi_max_requests'], + 'UWSGI_SOFT_LIMIT': int( + dict_['uwsgi_soft_limit']) * 1024 * 1024, + 'UWSGI_HARAKIRI': dict_['uwsgi_harakiri'], + 'UWSGI_WORKER_RELOAD_MERCY': dict_[ + 'uwsgi_worker_reload_mercy'], + 'UWSGI_PASS_TIMEOUT': int(dict_['uwsgi_harakiri']) + 10, + 'POSTGRES_REPLICATION_PASSWORD': dict_[ + 'postgres_replication_password'], + 'WSGI_SERVER': 'runserver_plus' if config.dev_mode else 'uWSGI', + 'USE_X_FORWARDED_HOST': '' if config.dev_mode else '#', + 'OVERRIDE_POSTGRES_SETTINGS': _get_value('postgres_settings'), + 'POSTGRES_APP_PROFILE': dict_['postgres_profile'], + 'POSTGRES_RAM': dict_['postgres_ram'], + 'POSTGRES_SETTINGS': dict_['postgres_settings_content'], + 'POSTGRES_BACKUP_FROM_SECONDARY': _get_value( + 'backup_from_primary', + comparison_value=False), + 'POSTGRES_PORT': dict_['postgresql_port'], + 'MONGO_PORT': dict_['mongo_port'], + 'REDIS_MAIN_PORT': dict_['redis_main_port'], + 'REDIS_CACHE_PORT': dict_['redis_cache_port'], + 'USE_BACKUP': '' if dict_['use_backup'] else '#', + 'USE_WAL_E': _get_value('use_wal_e'), + 'USE_AWS_BACKUP': '' if (config.aws and + dict_['aws_backup_bucket_name'] != '' and + dict_['use_backup']) else '#', + 'USE_MEDIA_BACKUP': '' if (not config.aws and + dict_['use_backup']) else '#', + 'KOBOCAT_MEDIA_BACKUP_SCHEDULE': dict_[ + 'kobocat_media_backup_schedule'], + 'MONGO_BACKUP_SCHEDULE': dict_['mongo_backup_schedule'], + 'POSTGRES_BACKUP_SCHEDULE': dict_['postgres_backup_schedule'], + 'REDIS_BACKUP_SCHEDULE': dict_['redis_backup_schedule'], + 'AWS_BACKUP_BUCKET_NAME': 
dict_['aws_backup_bucket_name'], + 'AWS_BACKUP_YEARLY_RETENTION': dict_[ + 'aws_backup_yearly_retention'], + 'AWS_BACKUP_MONTHLY_RETENTION': dict_[ + 'aws_backup_monthly_retention'], + 'AWS_BACKUP_WEEKLY_RETENTION': dict_[ + 'aws_backup_weekly_retention'], + 'AWS_BACKUP_DAILY_RETENTION': dict_[ + 'aws_backup_daily_retention'], + 'AWS_MONGO_BACKUP_MINIMUM_SIZE': dict_[ + 'aws_mongo_backup_minimum_size'], + 'AWS_POSTGRES_BACKUP_MINIMUM_SIZE': dict_[ + 'aws_postgres_backup_minimum_size'], + 'AWS_REDIS_BACKUP_MINIMUM_SIZE': dict_[ + 'aws_redis_backup_minimum_size'], + 'AWS_BACKUP_UPLOAD_CHUNK_SIZE': dict_[ + 'aws_backup_upload_chunk_size'], + 'AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED': _get_value( + 'aws_backup_bucket_deletion_rule_enabled', 'True', 'False'), + 'LETSENCRYPT_EMAIL': dict_['letsencrypt_email'], + 'MAINTENANCE_ETA': dict_['maintenance_eta'], + 'MAINTENANCE_DATE_ISO': dict_['maintenance_date_iso'], + 'MAINTENANCE_DATE_STR': dict_['maintenance_date_str'], + 'MAINTENANCE_EMAIL': dict_['maintenance_email'], + 'USE_NPM_FROM_HOST': '' if (config.dev_mode and + not dict_['npm_container']) else '#', + 'DOCKER_PREFIX': config.get_prefix('backend'), + 'USE_BACKEND_NETWORK': _get_value('expose_backend_ports', + comparison_value=False), + 'EXPOSE_BACKEND_PORTS': _get_value('expose_backend_ports'), + 'USE_FAKE_DNS': _get_value('local_installation'), + 'ADD_BACKEND_EXTRA_HOSTS': '' if ( + config.expose_backend_ports and + not config.use_private_dns) else '#', + 'USE_EXTRA_HOSTS': '' if (config.local_install or + config.expose_backend_ports and + not config.use_private_dns) else '#', + 'MONGO_ROOT_USERNAME': dict_['mongo_root_username'], + 'MONGO_ROOT_PASSWORD': dict_['mongo_root_password'], + 'MONGO_USER_USERNAME': dict_['mongo_user_username'], + 'MONGO_USER_PASSWORD': dict_['mongo_user_password'], + 'REDIS_PASSWORD': dict_['redis_password'], + 'REDIS_PASSWORD_URL_ENCODED': quote_plus( + dict_['redis_password']), + 'REDIS_PASSWORD_JS_ENCODED': json.dumps( + 
dict_['redis_password']), } @staticmethod @@ -267,12 +298,13 @@ def __read_unique_id(destination_directory): Reads unique id from file `Template.UNIQUE_ID_FILE` :return: str """ - unique_id = "" + unique_id = '' if os.path.isdir(destination_directory): try: - unique_id_file = os.path.join(destination_directory, Template.UNIQUE_ID_FILE) - with open(unique_id_file, "r") as f: + unique_id_file = os.path.join(destination_directory, + Template.UNIQUE_ID_FILE) + with open(unique_id_file, 'r') as f: unique_id = f.read().strip() except IOError: pass @@ -282,27 +314,30 @@ def __read_unique_id(destination_directory): return unique_id @staticmethod - def __write_templates(template_variables_, root_, destination_directory_, filenames_): + def __write_templates(template_variables_, root_, destination_directory_, + filenames_): for filename in fnmatch.filter(filenames_, '*.tpl'): - with open(os.path.join(root_, filename), "r") as template: + with open(os.path.join(root_, filename), 'r') as template: t = ExtendedPyTemplate(template.read(), template_variables_) - with open(os.path.join(destination_directory_, filename[:-4]), "w") as f: + with open(os.path.join(destination_directory_, filename[:-4]), + 'w') as f: f.write(t.substitute(template_variables_)) @classmethod def __write_unique_id(cls, destination_directory, unique_id): try: - unique_id_file = os.path.join(destination_directory, Template.UNIQUE_ID_FILE) + unique_id_file = os.path.join(destination_directory, + Template.UNIQUE_ID_FILE) # Ensure kobo-deployment is created. 
cls.__create_directory(destination_directory) - with open(unique_id_file, "w") as f: + with open(unique_id_file, 'w') as f: f.write(str(unique_id)) os.chmod(unique_id_file, stat.S_IWRITE | stat.S_IREAD) except (IOError, OSError): - CLI.colored_print("Could not write unique_id file", CLI.COLOR_ERROR) + CLI.colored_print('Could not write unique_id file', CLI.COLOR_ERROR) return False return True @@ -315,10 +350,10 @@ class ExtendedPyTemplate(PyTemplate): Usage example: ``` { - "host": "redis-cache.kobo.local", - "port": "6379"{% if REDIS_PASSWORD %},{% endif REDIS_PASSWORD %} + 'host': 'redis-cache.kobo.local', + 'port': '6379'{% if REDIS_PASSWORD %},{% endif REDIS_PASSWORD %} {% if REDIS_PASSWORD %} - "password": ${REDIS_PASSWORD} + 'password': ${REDIS_PASSWORD} {% endif REDIS_PASSWORD %} } ``` @@ -326,17 +361,17 @@ class ExtendedPyTemplate(PyTemplate): If `REDIS_PASSWORD` equals '123456', output would be: ``` { - "host": "redis-cache.kobo.local", - "port": "6379", - "password": '123456' + 'host': 'redis-cache.kobo.local', + 'port': '6379', + 'password': '123456' } ``` If `REDIS_PASSWORD` equals '' (or `False` or `None`), output would be: ``` { - "host": "redis-cache.kobo.local", - "port": "6379" + 'host': 'redis-cache.kobo.local', + 'port': '6379' } ``` @@ -350,11 +385,13 @@ def __init__(self, template, template_variables_): if self.IF_PATTERN.format(key) in template: if value: if_pattern = r'{}\s*'.format(self.IF_PATTERN.format(key)) - endif_pattern = r'\s*{}'.format(self.ENDIF_PATTERN.format(key)) + endif_pattern = r'\s*{}'.format( + self.ENDIF_PATTERN.format(key)) template = re.sub(if_pattern, '', template) template = re.sub(endif_pattern, '', template) else: - pattern = r'{}(.|\s)*?{}'.format(self.IF_PATTERN.format(key), - self.ENDIF_PATTERN.format(key)) + pattern = r'{}(.|\s)*?{}'.format( + self.IF_PATTERN.format(key), + self.ENDIF_PATTERN.format(key)) template = re.sub(pattern, '', template) super(ExtendedPyTemplate, self).__init__(template) diff --git 
a/helpers/updater.py b/helpers/updater.py index ccd432e..2b80e83 100644 --- a/helpers/updater.py +++ b/helpers/updater.py @@ -1,6 +1,4 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - import os import sys @@ -23,7 +21,8 @@ def run(cls, version='stable', cron=False, update_self=True): if update_self: # Update kobo-install first Setup.update_koboinstall(version) - CLI.colored_print("KoBoInstall has been updated", CLI.COLOR_SUCCESS) + CLI.colored_print('kobo-install has been updated', + CLI.COLOR_SUCCESS) # Reload this script to use `version`. # NB:`argv[0]` does not automatically get set to the executable @@ -34,5 +33,5 @@ def run(cls, version='stable', cron=False, update_self=True): # Update kobo-docker Setup.update_kobodocker() - CLI.colored_print("KoBoToolbox has been updated", CLI.COLOR_SUCCESS) + CLI.colored_print('kobo-docker has been updated', CLI.COLOR_SUCCESS) Setup.post_update(cron) diff --git a/helpers/upgrading.py b/helpers/upgrading.py index 3097905..8405b3e 100644 --- a/helpers/upgrading.py +++ b/helpers/upgrading.py @@ -1,125 +1,249 @@ # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - import subprocess import sys from helpers.cli import CLI -from helpers.config import Config -def migrate_single_to_two_databases(): - """ - Check the contents of the databases. If KPI's is empty or doesn't exist - while KoBoCAT's has user data, then we are migrating from a - single-database setup - """ - config_object = Config() - config = config_object.get_config() - backend_role = config.get("backend_server_role", "primary") +class Upgrading: - def _kpi_db_alias_kludge(command): + @staticmethod + def migrate_single_to_two_databases(config): """ - Sorry, this is not very nice. See - https://github.com/kobotoolbox/kobo-docker/issues/264. + Check the contents of the databases. 
If KPI's is empty or doesn't exist + while KoBoCAT's has user data, then we are migrating from a + single-database setup + + Args + config (helpers.config.Config) """ - set_env = 'DATABASE_URL="${KPI_DATABASE_URL}"' - return [ - "bash", "-c", - "{} {}".format(set_env, command) - ] + dict_ = config.get_dict() + backend_role = dict_['backend_server_role'] - kpi_run_command = ["docker-compose", - "-f", "docker-compose.frontend.yml", - "-f", "docker-compose.frontend.override.yml", - "-p", config_object.get_prefix("frontend"), - "run", "--rm", "kpi"] - - # Make sure Postgres is running - # We add this message to users because when AWS backups are activated, - # it takes a long time to install the virtualenv in PostgreSQL container, - # so the `wait_for_database` below sits there a while. - # It makes us think KoBoInstall is frozen. - CLI.colored_print("Waiting for PostgreSQL database to be up & running...", - CLI.COLOR_INFO) - frontend_command = kpi_run_command + _kpi_db_alias_kludge(" ".join([ - "python", "manage.py", - "wait_for_database", "--retries", "45" - ])) - CLI.run_command(frontend_command, config.get("kobodocker_path")) - CLI.colored_print("The PostgreSQL database is running!", CLI.COLOR_SUCCESS) - - frontend_command = kpi_run_command + _kpi_db_alias_kludge(" ".join([ - "python", "manage.py", - "is_database_empty", "kpi", "kobocat" - ])) - output = CLI.run_command(frontend_command, config.get("kobodocker_path")) - # TODO: read only stdout and don't consider stderr unless the exit code - # is non-zero. Currently, `output` combines both stdout and stderr - kpi_kc_db_empty = output.strip().split("\n")[-1] - - if kpi_kc_db_empty == "True\tFalse": - # KPI empty but KC is not: run the two-database upgrade script + def _kpi_db_alias_kludge(command): + """ + Sorry, this is not very nice. See + https://github.com/kobotoolbox/kobo-docker/issues/264. 
+ """ + set_env = 'DATABASE_URL="${KPI_DATABASE_URL}"' + return [ + 'bash', '-c', + '{} {}'.format(set_env, command) + ] + + kpi_run_command = ['docker-compose', + '-f', 'docker-compose.frontend.yml', + '-f', 'docker-compose.frontend.override.yml', + '-p', config.get_prefix('frontend'), + 'run', '--rm', 'kpi'] + + # Make sure Postgres is running + # We add this message to users because when AWS backups are activated, + # it takes a long time to install the virtualenv in PostgreSQL + # container, so the `wait_for_database` below sits there a while. + # It makes us think kobo-install is frozen. CLI.colored_print( - "Upgrading from single-database setup to separate databases " - "for KPI and KoBoCAT", - CLI.COLOR_INFO - ) - _message_lines = [ - '╔══════════════════════════════════════════════════════════════╗', - '║ Upgrading to separate databases is required to run the ║', - '║ latest release of KoBoToolbox, but it may be a slow process ║', - '║ if you have a lot of data. Expect at least one minute of ║', - '║ downtime for every 1,500 KPI assets. Assets are surveys and ║', - '║ library items: questions, blocks, and templates. ║', - '║ Survey *submissions* are not involved. 
║', - '║ ║', - '║ To postpone this process, downgrade to the last ║', - '║ single-database release by stopping this script and ║', - '║ executing the following commands: ║', - '║ ║', - '║ python3 run.py --stop ║', - '║ git fetch ║', - '║ git checkout shared-database-obsolete ║', - '║ python3 run.py --update ║', - '║ python3 run.py --setup ║', - '║ ║', - '╚══════════════════════════════════════════════════════════════╝', - 'For help, visit https://community.kobotoolbox.org/t/upgrading-to-separate-databases-for-kpi-and-kobocat/7202.', - ] - CLI.colored_print('\n'.join(_message_lines), CLI.COLOR_WARNING) - CLI.colored_print("Do you want to proceed?", CLI.COLOR_SUCCESS) - CLI.colored_print("\t1) Yes") - CLI.colored_print("\t2) No") - response = CLI.get_response([Config.TRUE, Config.FALSE], Config.FALSE) - if response != Config.TRUE: - sys.exit(0) - - backend_command = [ - "docker-compose", - "-f", - "docker-compose.backend.{}.yml".format(backend_role), - "-f", - "docker-compose.backend.{}.override.yml".format(backend_role), - "-p", config_object.get_prefix("backend"), - "exec", "postgres", "bash", - "/kobo-docker-scripts/primary/clone_data_from_kc_to_kpi.sh", - "--noinput" - ] - try: - subprocess.check_call( - backend_command, cwd=config.get("kobodocker_path") + 'Waiting for PostgreSQL database to be up & running...', + CLI.COLOR_INFO) + frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([ + 'python', 'manage.py', + 'wait_for_database', '--retries', '45' + ])) + CLI.run_command(frontend_command, dict_['kobodocker_path']) + CLI.colored_print('The PostgreSQL database is running!', + CLI.COLOR_SUCCESS) + + frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([ + 'python', 'manage.py', + 'is_database_empty', 'kpi', 'kobocat' + ])) + output = CLI.run_command(frontend_command, dict_['kobodocker_path']) + # TODO: read only stdout and don't consider stderr unless the exit code + # is non-zero. 
Currently, `output` combines both stdout and stderr + kpi_kc_db_empty = output.strip().split('\n')[-1] + + if kpi_kc_db_empty == 'True\tFalse': + # KPI empty but KC is not: run the two-database upgrade script + CLI.colored_print( + 'Upgrading from single-database setup to separate databases ' + 'for KPI and KoBoCAT', + CLI.COLOR_INFO + ) + message = ( + 'Upgrading to separate databases is required to run the latest ' + 'release of KoBoToolbox, but it may be a slow process if you ' + 'have a lot of data. Expect at least one minute of downtime ' + 'for every 1,500 KPI assets. Assets are surveys and library ' + 'items: questions, blocks, and templates.\n' + '\n' + 'To postpone this process, downgrade to the last ' + 'single-database release by stopping this script and executing ' + 'the following commands:\n' + '\n' + ' python3 run.py --stop\n' + ' git fetch\n' + ' git checkout shared-database-obsolete\n' + ' python3 run.py --update\n' + ' python3 run.py --setup\n' ) - except subprocess.CalledProcessError: - CLI.colored_print("An error has occurred", CLI.COLOR_ERROR) + CLI.framed_print(message) + message = ( + 'For help, visit https://community.kobotoolbox.org/t/upgrading-' + 'to-separate-databases-for-kpi-and-kobocat/7202.' 
+ ) + CLI.colored_print(message, CLI.COLOR_WARNING) + response = CLI.yes_no_question( + 'Do you want to proceed?', + default=False + ) + if response is False: + sys.exit(0) + + backend_command = [ + 'docker-compose', + '-f', + 'docker-compose.backend.{}.yml'.format(backend_role), + '-f', + 'docker-compose.backend.{}.override.yml'.format(backend_role), + '-p', config.get_prefix('backend'), + 'exec', 'postgres', 'bash', + '/kobo-docker-scripts/primary/clone_data_from_kc_to_kpi.sh', + '--noinput' + ] + try: + subprocess.check_call( + backend_command, cwd=dict_['kobodocker_path'] + ) + except subprocess.CalledProcessError: + CLI.colored_print('An error has occurred', CLI.COLOR_ERROR) + sys.exit(1) + + elif kpi_kc_db_empty not in [ + 'True\tTrue', + 'False\tTrue', + 'False\tFalse', + ]: + # The output was invalid + CLI.colored_print('An error has occurred', CLI.COLOR_ERROR) + sys.stderr.write(kpi_kc_db_empty) sys.exit(1) - elif kpi_kc_db_empty not in [ - "True\tTrue", - "False\tTrue", - "False\tFalse", - ]: - # The output was invalid - CLI.colored_print("An error has occurred", CLI.COLOR_ERROR) - sys.stderr.write(kpi_kc_db_empty) - sys.exit(1) + @staticmethod + def new_terminology(upgraded_dict): + """ + Updates configuration to use new `kobo-docker` terminology. 
+ See: https://github.com/kobotoolbox/kobo-docker/pull/294 + + Args: + upgraded_dict (dict): Configuration values to be upgraded + + Returns: + dict + """ + + backend_role = upgraded_dict['backend_server_role'] + if backend_role in ['master', 'slave']: + upgraded_dict['backend_server_role'] = 'primary' \ + if backend_role == 'master' else 'secondary' + + return upgraded_dict + + @staticmethod + def two_databases(upgraded_dict, current_dict): + """ + If the configuration came from a previous version that had a single + Postgres database, we need to make sure the new `kc_postgres_db` is + set to the name of that single database, *not* the default from + `Config.get_template()` + + Args: + upgraded_dict (dict): Configuration values to be upgraded + current_dict (dict): Current configuration values + (i.e. `Config.get_dict()`) + Returns: + dict + + """ + + try: + current_dict['postgres_db'] + except KeyError: + # Install has been made with two databases. + return upgraded_dict + + try: + current_dict['kc_postgres_db'] + except KeyError: + # Configuration does not have names of KPI and KoBoCAT databases. + # Let's copy old single database name to KoBoCAT database name + upgraded_dict['kc_postgres_db'] = current_dict['postgres_db'] + + # Force this property to False. It helps to detect whether the + # database names have changed in `Config.__questions_postgres()` + upgraded_dict['two_databases'] = False + + return upgraded_dict + + @staticmethod + def use_booleans(upgraded_dict): + """ + Until version 3.x, two constants (`Config.TRUE` and `Config.FALSE`) were + used to store "Yes/No" users' responses. It made the code more + complex than it should have been. + This method converts these values to boolean. 
+ - `Config.TRUE` -> `True` + - `Config.FALSE` -> False` + Args: + upgraded_dict (dict): Configuration values to be upgraded + + Returns: + dict + """ + try: + upgraded_dict['use_booleans_v4'] + except KeyError: + pass + else: + return upgraded_dict + + boolean_properties = [ + 'advanced', + 'aws_backup_bucket_deletion_rule_enabled', + 'backup_from_primary', + 'block_common_http_ports', + 'custom_secret_keys', + 'customized_ports', + 'debug', + 'dev_mode', + 'expose_backend_ports', + 'https', + 'local_installation', + 'multi', + 'npm_container', + 'postgres_settings', + 'proxy', + 'raven_settings', + 'review_host', + 'smtp_use_tls', + 'staging_mode', + 'two_databases', + 'use_aws', + 'use_backup', + 'use_letsencrypt', + 'use_private_dns', + 'use_wal_e', + 'uwsgi_settings', + ] + for property_ in boolean_properties: + try: + if isinstance(upgraded_dict[property_], bool): + continue + except KeyError: + pass + else: + upgraded_dict[property_] = True \ + if upgraded_dict[property_] == '1' else False + + upgraded_dict['use_booleans_v4'] = True + + return upgraded_dict diff --git a/readme.md b/readme.md index 8e6295f..ebb134d 100644 --- a/readme.md +++ b/readme.md @@ -56,10 +56,10 @@ Get version: Build kpi and kobocat (dev mode): `$kobo-install> python3 run.py --build` -Run docker commands on frontend containers: +Run docker commands on front-end containers: `$kobo-install> python run.py --compose-frontend [docker-compose arguments]` -Run docker commands on backend containers: +Run docker commands on back-end containers: `$kobo-install> python run.py --compose-backend [docker-compose arguments]` Start maintenance mode: @@ -79,50 +79,53 @@ User can choose between 2 types of installations: |Option|Default|Workstation|Server |---|---|---|---| |Installation directory| **../kobo-docker** | ✓ | ✓ | -|SMTP information| | ✓ | ✓ (frontend only) | -|Public domain name| **kobo.local** | | ✓ (frontend only) | -|Subdomain names| **kf, kc, ee** | | ✓ (frontend only) | -|Use HTTPS1| 
**False** (Workstation)
**True** (Server) | | ✓ (frontend only) | -|Super user's username| **super_admin** | ✓ | ✓ (frontend only) | -|Super user's password| **Random string** | ✓ | ✓ (frontend only) | -|Activate backups2| **False** | ✓ | ✓ (backend only) | +|SMTP information| | ✓ | ✓ (front end only) | +|Public domain name| **kobo.local** | | ✓ (front end only) | +|Subdomain names| **kf, kc, ee** | | ✓ (front end only) | +|Use HTTPS1| **False** (Workstation)
**True** (Server) | | ✓ (front end only) | +|Super user's username| **super_admin** | ✓ | ✓ (front end only) | +|Super user's password| **Random string** | ✓ | ✓ (front end only) | +|Activate backups2| **False** | ✓ | ✓ (back end only) | ### Advanced Options |Option|Default|Workstation|Server |---|---|---|---| |Webserver port| **80** | ✓ | | -|Reverse proxy interal port| **8080** | | ✓ (frontend only) | -|Network interface| **Autodetected** | ✓ | ✓ (frontend only) | +|Reverse proxy interal port| **8080** | | ✓ (front end only) | +|Network interface| **Autodetected** | ✓ | ✓ (front end only) | |Use separate servers| **No** | | ✓ | -|Use DNS for private routes| **No** | | ✓ (frontend only) | -|Primary backend IP _(if previous answer is no)_| **Local IP** | | ✓ (frontend only) | +|Use DNS for private routes| **No** | | ✓ (front end only) | +|Primary back end IP _(if previous answer is no)_| **Local IP** | | ✓ (front end only) | |PostgreSQL DB| **kobo** | ✓ | ✓ | |PostgreSQL user's username| **kobo** | ✓ | ✓ | |PostgreSQL user's password| **Autogenerate** | ✓ | ✓ | -|PostgreSQL number of connections3| **100** | ✓ | ✓ (backend only) | -|PostgreSQL RAM3| **2** | ✓ | ✓ (backend only) | -|PostgreSQL Application Profile3| **Mixed** | ✓ | ✓ (backend only) | -|PostgreSQL Storage3| **HDD** | ✓ | ✓ (backend only) | +|PostgreSQL number of connections3| **100** | ✓ | ✓ (back end only) | +|PostgreSQL RAM3| **2** | ✓ | ✓ (back end only) | +|PostgreSQL Application Profile3| **Mixed** | ✓ | ✓ (back end only) | +|PostgreSQL Storage3| **HDD** | ✓ | ✓ (back end only) | |MongoDB super user's username| **root** | ✓ | ✓ | |MongoDB super user's password| **Autogenerate** | ✓ | ✓ | |MongoDB user's username| **kobo** | ✓ | ✓ | |MongoDB user's password| **Autogenerate** | ✓ | ✓ | |Redis password4| **Autogenerate** | ✓ | ✓ | -|Use AWS storage| **No** | ✓ | ✓ (frontend only) | -|uWGI workers| **start: 2, max: 4** | ✓ | ✓ (frontend only) | -|uWGI memory limit| **128 MB** | ✓ | ✓ (frontend only) | 
-|uWGI harakiri timeout | **120s** | ✓ | ✓ (frontend only) | -|uWGI worker reload timeout | **120s** | ✓ | ✓ (frontend only) | -|Google UA| | ✓ | ✓ (frontend only) | -|Google API Key| | ✓ | ✓ (frontend only) | -|Raven tokens| | ✓ | ✓ (frontend only) | +|Use AWS storage5| **No** | ✓ | ✓ | +|Use WAL-E PostgreSQL backups6 | **No** | ✓ | ✓ (back end only) | +|uWGI workers| **start: 2, max: 4** | ✓ | ✓ (front end only) | +|uWGI memory limit| **128 MB** | ✓ | ✓ (front end only) | +|uWGI harakiri timeout | **120s** | ✓ | ✓ (front end only) | +|uWGI worker reload timeout | **120s** | ✓ | ✓ (front end only) | +|Google UA| | ✓ | ✓ (front end only) | +|Google API Key| | ✓ | ✓ (front end only) | +|Raven tokens| | ✓ | ✓ (front end only) | |Debug| **False** | ✓ | | |Developer mode| **False** | ✓ | | -|Staging mode| **False** | | ✓ (frontend only) | +|Staging mode| **False** | | ✓ (front end only) | 1) _HTTPS certificates must be installed on a Reverse Proxy. -`KoBoInstall` can install one and use `Let's Encrypt` to generate certificates thanks to [nginx-certbot project](https://github.com/wmnnd/nginx-certbot "")_ +`kobo-install` can install one and use `Let's Encrypt` to generate certificates + thanks + to [nginx-certbot project](https://github.com/wmnnd/nginx-certbot "")_ 2) _If AWS credentials are provided, backups are sent to configured bucket_ @@ -130,17 +133,21 @@ User can choose between 2 types of installations: 4) _Redis password is optional but **strongly** recommended_ +5) _If AWS storage is selected, credentials must be provided if backups are activated_ + +6) _WAL-E backups for PostgreSQL are only available when using AWS storage_ + ℹ Intercom App ID [must now](https://github.com/kobotoolbox/kpi/pull/2285) be configured through "Per user settings" in the Django admin interface of KPI. 
## Requirements - Linux 5 / macOS 6 -- Python 2.7/3.5+ _Python2 support will be dropped in a future release_ +- Python 3.5+ - [Docker](https://www.docker.com/get-started "") & [Docker Compose](https://docs.docker.com/compose/install/ "") - Available TCP Ports: 7 1. 80 NGINX - 1. 443 NGINX (if you use KoBoInstall with LetsEncrypt proxy) + 1. 443 NGINX (if you use kobo-install with LetsEncrypt proxy) 2. Additional ports when `expose ports` advanced option has been selected 1. 5432 PostgreSQL 3. 6379-6380 redis @@ -148,8 +155,8 @@ User can choose between 2 types of installations: _**WARNING:**_ - - _If you use a firewall, be sure to open traffic publicly on NGINX port, otherwise KoBoInstall cannot work_ - - _By default, additional ports are not exposed except when using multi servers configuration. If you choose to expose them, **be sure to not expose them publicly** (e.g. use a firewall and allow traffic between frontend and backend containers only. NGINX port still has to stay publicly opened though)._ + - _If you use a firewall, be sure to open traffic publicly on NGINX port, otherwise kobo-install cannot work_ + - _By default, additional ports are not exposed except when using multi servers configuration. If you choose to expose them, **be sure to not expose them publicly** (e.g. use a firewall and allow traffic between front-end and back-end containers only. 
NGINX port still has to stay publicly opened though)._ 5) _It has been tested with Ubuntu 14.04, 16.04 and 18.04, CentOS 8_ diff --git a/requirements_py2_tests.txt b/requirements_py2_tests.txt deleted file mode 100644 index 6b554c7..0000000 --- a/requirements_py2_tests.txt +++ /dev/null @@ -1,14 +0,0 @@ -argparse==1.2.1 -atomicwrites==1.2.1 -attrs==18.2.0 -funcsigs==1.0.2 -mock==2.0.0 -more-itertools==4.3.0 -pathlib2==2.3.2 -pbr==5.0.0 -pluggy==0.8.0 -py==1.7.0 -pytest==3.9.2 -scandir==1.9.0 -six==1.11.0 -wsgiref==0.1.2 diff --git a/requirements_py3_tests.txt b/requirements_tests.txt similarity index 85% rename from requirements_py3_tests.txt rename to requirements_tests.txt index 71007f5..1aa9b23 100644 --- a/requirements_py3_tests.txt +++ b/requirements_tests.txt @@ -5,4 +5,4 @@ pathlib2==2.3.2 pluggy==0.8.0 py==1.7.0 pytest==3.9.2 -six==1.11.0 +netifaces==0.10.7 diff --git a/run.py b/run.py index 1684b9b..86d6d3f 100755 --- a/run.py +++ b/run.py @@ -1,11 +1,18 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -from __future__ import print_function, unicode_literals - import platform import sys from helpers.cli import CLI +if sys.version_info[0] == 2: + message = ( + 'Python 2.7 has reached the end of its life on ' + 'January 1st, 2020. Please upgrade your Python as Python 2.7 is ' + 'not maintained anymore.' + ) + CLI.framed_print(message, color=CLI.COLOR_ERROR) + sys.exit(1) + from helpers.command import Command from helpers.config import Config from helpers.setup import Setup @@ -15,40 +22,32 @@ def run(force_setup=False): - if sys.version_info[0] == 2: - CLI.colored_print("╔═══════════════════════════════════════════════════════════════╗", CLI.COLOR_ERROR) - CLI.colored_print("║ DEPRECATION: Python 2.7 has reached the end of its life on ║", CLI.COLOR_ERROR) - CLI.colored_print("║ January 1st, 2020. Please upgrade your Python as Python 2.7 ║", CLI.COLOR_ERROR) - CLI.colored_print("║ is not maintained anymore. 
║", CLI.COLOR_ERROR) - CLI.colored_print("║ A future version of KoBoInstall will drop support for it. ║", CLI.COLOR_ERROR) - CLI.colored_print("╚═══════════════════════════════════════════════════════════════╝", CLI.COLOR_ERROR) - - if not platform.system() in ["Linux", "Darwin"]: - CLI.colored_print("Not compatible with this OS", CLI.COLOR_ERROR) + if not platform.system() in ['Linux', 'Darwin']: + CLI.colored_print('Not compatible with this OS', CLI.COLOR_ERROR) else: config = Config() - current_config = config.get_config() + dict_ = config.get_dict() if config.first_time: force_setup = True if force_setup: - current_config = config.build() + dict_ = config.build() Setup.clone_kobodocker(config) Template.render(config) config.init_letsencrypt() - Setup.update_hosts(current_config) + Setup.update_hosts(dict_) else: if config.auto_detect_network(): Template.render(config) - Setup.update_hosts(current_config) + Setup.update_hosts(dict_) Command.start() -if __name__ == "__main__": +if __name__ == '__main__': try: - # avoid inifinte self-updating loops + # avoid infinite self-updating loops update_self = Updater.NO_UPDATE_SELF_OPTION not in sys.argv while True: try: @@ -57,53 +56,55 @@ def run(force_setup=False): break if len(sys.argv) > 2: - if sys.argv[1] == "-cf" or sys.argv[1] == "--compose-frontend": + if sys.argv[1] == '-cf' or sys.argv[1] == '--compose-frontend': Command.compose_frontend(sys.argv[2:]) - elif sys.argv[1] == "-cb" or sys.argv[1] == "--compose-backend": + elif sys.argv[1] == '-cb' or sys.argv[1] == '--compose-backend': Command.compose_backend(sys.argv[2:]) - elif sys.argv[1] == "-u" or sys.argv[1] == "--update": + elif sys.argv[1] == '-u' or sys.argv[1] == '--update': Updater.run(sys.argv[2], update_self=update_self) - elif sys.argv[1] == "--upgrade": + elif sys.argv[1] == '--upgrade': Updater.run(sys.argv[2], update_self=update_self) - elif sys.argv[1] == "--auto-update": + elif sys.argv[1] == '--auto-update': Updater.run(sys.argv[2], cron=True, 
update_self=update_self) else: - CLI.colored_print("Bad syntax. Try 'run.py --help'", CLI.COLOR_ERROR) + CLI.colored_print("Bad syntax. Try 'run.py --help'", + CLI.COLOR_ERROR) elif len(sys.argv) == 2: - if sys.argv[1] == "-h" or sys.argv[1] == "--help": + if sys.argv[1] == '-h' or sys.argv[1] == '--help': Command.help() - elif sys.argv[1] == "-u" or sys.argv[1] == "--update": + elif sys.argv[1] == '-u' or sys.argv[1] == '--update': Updater.run(update_self=update_self) - elif sys.argv[1] == "--upgrade": - # "update" was called "upgrade" in a previous release; accept - # either "update" or "upgrade" here to ease the transition + elif sys.argv[1] == '--upgrade': + # 'update' was called 'upgrade' in a previous release; accept + # either 'update' or 'upgrade' here to ease the transition Updater.run(update_self=update_self) - elif sys.argv[1] == "--auto-update": + elif sys.argv[1] == '--auto-update': Updater.run(cron=True, update_self=update_self) - elif sys.argv[1] == "-i" or sys.argv[1] == "--info": + elif sys.argv[1] == '-i' or sys.argv[1] == '--info': Command.info(0) - elif sys.argv[1] == "-s" or sys.argv[1] == "--setup": + elif sys.argv[1] == '-s' or sys.argv[1] == '--setup': run(force_setup=True) - elif sys.argv[1] == "-S" or sys.argv[1] == "--stop": + elif sys.argv[1] == '-S' or sys.argv[1] == '--stop': Command.stop() - elif sys.argv[1] == "-l" or sys.argv[1] == "--logs": + elif sys.argv[1] == '-l' or sys.argv[1] == '--logs': Command.logs() - elif sys.argv[1] == "-b" or sys.argv[1] == "--build": + elif sys.argv[1] == '-b' or sys.argv[1] == '--build': Command.build() - elif sys.argv[1] == "-bkf" or sys.argv[1] == "--build-kpi": - Command.build("kf") - elif sys.argv[1] == "-bkc" or sys.argv[1] == "--build-kobocat": - Command.build("kc") - elif sys.argv[1] == "-v" or sys.argv[1] == "--version": + elif sys.argv[1] == '-bkf' or sys.argv[1] == '--build-kpi': + Command.build('kf') + elif sys.argv[1] == '-bkc' or sys.argv[1] == '--build-kobocat': + Command.build('kc') + 
elif sys.argv[1] == '-v' or sys.argv[1] == '--version': Command.version() - elif sys.argv[1] == "-m" or sys.argv[1] == "--maintenance": + elif sys.argv[1] == '-m' or sys.argv[1] == '--maintenance': Command.configure_maintenance() - elif sys.argv[1] == "-sm" or sys.argv[1] == "--stop-maintenance": + elif sys.argv[1] == '-sm' or sys.argv[1] == '--stop-maintenance': Command.stop_maintenance() else: - CLI.colored_print("Bad syntax. Try 'run.py --help'", CLI.COLOR_ERROR) + CLI.colored_print("Bad syntax. Try 'run.py --help'", + CLI.COLOR_ERROR) else: run() except KeyboardInterrupt: - CLI.colored_print("\nUser interrupted execution", CLI.COLOR_INFO) + CLI.colored_print('\nUser interrupted execution', CLI.COLOR_INFO) diff --git a/setup.py b/setup.py index c9c425d..9283065 100644 --- a/setup.py +++ b/setup.py @@ -3,9 +3,10 @@ from helpers.config import Config setup( - name='KoBoInstall', + name='kobo-install', version=Config.KOBO_INSTALL_VERSION, - packages=find_packages(exclude=['tests']), # Include all the python modules except `tests`, + # Include all the python modules except `tests`, + packages=find_packages(exclude=['tests']), url='https://github.com/kobotoolbox/kobo-install/', license='', author='KoBoToolbox', diff --git a/templates/kobo-docker/docker-compose.backend.primary.override.yml.tpl b/templates/kobo-docker/docker-compose.backend.primary.override.yml.tpl index be9ee07..f7a8277 100644 --- a/templates/kobo-docker/docker-compose.backend.primary.override.yml.tpl +++ b/templates/kobo-docker/docker-compose.backend.primary.override.yml.tpl @@ -4,8 +4,8 @@ version: '2.2' services: postgres: - ${OVERRIDE_POSTGRES_SETTINGS}volumes: - ${OVERRIDE_POSTGRES_SETTINGS} - ../kobo-env/postgres/primary/postgres.conf:/kobo-docker-scripts/primary/postgres.conf + volumes: + - ../kobo-env/postgres/primary/postgres.conf:/kobo-docker-scripts/primary/postgres.conf ${POSTGRES_BACKUP_FROM_SECONDARY}environment: ${POSTGRES_BACKUP_FROM_SECONDARY} - POSTGRES_BACKUP_FROM_SECONDARY=True 
${EXPOSE_BACKEND_PORTS}ports: diff --git a/templates/kobo-docker/docker-compose.backend.secondary.override.yml.tpl b/templates/kobo-docker/docker-compose.backend.secondary.override.yml.tpl index 0560cde..d839393 100644 --- a/templates/kobo-docker/docker-compose.backend.secondary.override.yml.tpl +++ b/templates/kobo-docker/docker-compose.backend.secondary.override.yml.tpl @@ -6,8 +6,8 @@ services: extends: file: docker-compose.backend.template.yml service: postgres - ${OVERRIDE_POSTGRES_SETTINGS}volumes: - ${OVERRIDE_POSTGRES_SETTINGS} - ../kobo-env/postgres/secondary/postgres.conf:/kobo-docker-scripts/secondary/postgres.conf + volumes: + - ../kobo-env/postgres/secondary/postgres.conf:/kobo-docker-scripts/secondary/postgres.conf ports: - ${POSTGRES_PORT}:5432 ${ADD_BACKEND_EXTRA_HOSTS}extra_hosts: diff --git a/templates/kobo-env/enketo_express/config.json.tpl b/templates/kobo-env/enketo_express/config.json.tpl index 6e8877d..00f9047 100644 --- a/templates/kobo-env/enketo_express/config.json.tpl +++ b/templates/kobo-env/enketo_express/config.json.tpl @@ -7,6 +7,7 @@ }, "encryption key": "${ENKETO_ENCRYPTION_KEY}", "less secure encryption key": "${ENKETO_LESS_SECURE_ENCRYPTION_KEY}", + "support": "${DEFAULT_FROM_EMAIL}", "widgets": [ "note", "select-desktop", diff --git a/templates/kobo-env/envfiles/databases.txt.tpl b/templates/kobo-env/envfiles/databases.txt.tpl index 2206ef5..789bfa4 100644 --- a/templates/kobo-env/envfiles/databases.txt.tpl +++ b/templates/kobo-env/envfiles/databases.txt.tpl @@ -42,6 +42,9 @@ KOBO_POSTGRES_PRIMARY_ENDPOINT=primary.postgres.${PRIVATE_DOMAIN_NAME} # Default Postgres backup schedule is weekly at 02:00 AM UTC on Sunday. 
${USE_BACKUP}POSTGRES_BACKUP_SCHEDULE=${POSTGRES_BACKUP_SCHEDULE} +# WAL-E archiving and backup support +${USE_WAL_E}USE_WAL_E=1 + #-------------------------------------------------------------------------------- # REDIS #-------------------------------------------------------------------------------- diff --git a/templates/kobo-env/postgres/primary/postgres.conf.tpl b/templates/kobo-env/postgres/primary/postgres.conf.tpl index f5078fb..141c8df 100644 --- a/templates/kobo-env/postgres/primary/postgres.conf.tpl +++ b/templates/kobo-env/postgres/primary/postgres.conf.tpl @@ -14,3 +14,7 @@ # Total Memory (RAM): ${POSTGRES_RAM}GB ${POSTGRES_SETTINGS} + +${USE_WAL_E}archive_mode = on +${USE_WAL_E}archive_command = 'envdir $$PGDATA/wal-e.d/env wal-e wal-push %p' +${USE_WAL_E}archive_timeout = 60 diff --git a/tests/test_config.py b/tests/test_config.py index 4d3e3f2..7960543 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,295 +1,440 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals - -import pytest import os +import pytest +import random import shutil import tempfile import time -try: - from unittest.mock import patch, MagicMock -except ImportError: - from mock import patch, MagicMock +from unittest.mock import patch, MagicMock from helpers.cli import CLI from helpers.config import Config from .utils import ( read_config, write_trigger_upsert_db_users, + MockAWSValidation ) +CHOICE_YES = '1' +CHOICE_NO = '2' + def test_read_config(): - config_object = read_config() + config = read_config() def test_advanced_options(): - config_object = read_config() - with patch.object(CLI, "colored_input", return_value=Config.TRUE) as mock_ci: - config_object._Config__questions_advanced_options() - assert config_object.advanced_options + config = read_config() + with patch.object(CLI, 'colored_input', + return_value=CHOICE_YES) as mock_ci: + config._Config__questions_advanced_options() + assert config.advanced_options - with patch.object(CLI, 
"colored_input", return_value=Config.FALSE) as mock_ci: - config_object._Config__questions_advanced_options() - assert not config_object.advanced_options + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_advanced_options() + assert not config.advanced_options def test_installation(): - config_object = read_config() - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci: - config_object._Config__questions_installation_type() - assert not config_object.local_install + config = read_config() + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_installation_type() + assert not config.local_install - with patch.object(CLI, "colored_input", return_value=Config.TRUE) as mock_ci: - config_object._Config__questions_installation_type() - assert config_object.local_install - assert not config_object.multi_servers - assert not config_object.use_letsencrypt + with patch.object(CLI, 'colored_input', + return_value=CHOICE_YES) as mock_ci: + config._Config__questions_installation_type() + assert config.local_install + assert not config.multi_servers + assert not config.use_letsencrypt - return config_object + return config -@patch("helpers.config.Config._Config__clone_repo", MagicMock(return_value=True)) +@patch('helpers.config.Config._Config__clone_repo', + MagicMock(return_value=True)) def test_staging_mode(): - config_object = read_config() + config = read_config() kc_repo_path = tempfile.mkdtemp() kpi_repo_path = tempfile.mkdtemp() - with patch("helpers.cli.CLI.colored_input") as mock_colored_input: - mock_colored_input.side_effect = iter([Config.TRUE, kc_repo_path, kpi_repo_path]) - config_object._Config__questions_dev_mode() - config = config_object.get_config() - assert not config_object.dev_mode - assert config_object.staging_mode - assert config.get("kpi_path") == kpi_repo_path and config.get("kc_path") == kc_repo_path + with 
patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter([CHOICE_YES, + kc_repo_path, + kpi_repo_path]) + config._Config__questions_dev_mode() + dict_ = config.get_dict() + assert not config.dev_mode + assert config.staging_mode + assert dict_['kpi_path'] == kpi_repo_path and \ + dict_['kc_path'] == kc_repo_path shutil.rmtree(kc_repo_path) shutil.rmtree(kpi_repo_path) -@patch("helpers.config.Config._Config__clone_repo", MagicMock(return_value=True)) +@patch('helpers.config.Config._Config__clone_repo', + MagicMock(return_value=True)) def test_dev_mode(): - config_object = test_installation() + config = test_installation() kc_repo_path = tempfile.mkdtemp() kpi_repo_path = tempfile.mkdtemp() - with patch("helpers.cli.CLI.colored_input") as mock_colored_input: - - mock_colored_input.side_effect = iter(["8080", Config.TRUE, kc_repo_path, kpi_repo_path, - Config.FALSE, Config.FALSE]) - - config_object._Config__questions_dev_mode() - config = config_object.get_config() - assert config_object.dev_mode - assert not config_object.staging_mode - assert config_object.get_config().get("exposed_nginx_docker_port") == "8080" - assert config.get("kpi_path") == kpi_repo_path and config.get("kc_path") == kc_repo_path - assert config.get("npm_container") == Config.FALSE + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter(['8080', + CHOICE_YES, + kc_repo_path, + kpi_repo_path, + CHOICE_YES, + CHOICE_NO, + ]) + + config._Config__questions_dev_mode() + dict_ = config.get_dict() + assert config.dev_mode + assert not config.staging_mode + assert config.get_dict().get('exposed_nginx_docker_port') == '8080' + assert dict_['kpi_path'] == kpi_repo_path and \ + dict_['kc_path'] == kc_repo_path + assert dict_['npm_container'] is False shutil.rmtree(kc_repo_path) shutil.rmtree(kpi_repo_path) - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci: - 
config_object._Config__questions_dev_mode() - config = config_object.get_config() - assert not config_object.dev_mode - assert config.get("kpi_path") == "" and config.get("kc_path") == "" + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_dev_mode() + dict_ = config.get_dict() + assert not config.dev_mode + assert dict_['kpi_path'] == '' and dict_['kc_path'] == '' def test_server_roles_questions(): - config_object = read_config() - assert config_object.frontend_questions - assert config_object.backend_questions + config = read_config() + assert config.frontend_questions + assert config.backend_questions - with patch("helpers.cli.CLI.colored_input") as mock_colored_input: + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter( + [CHOICE_YES, 'frontend', 'backend', 'secondary']) - mock_colored_input.side_effect = iter([Config.TRUE, "frontend", "backend", "secondary"]) + config._Config__questions_multi_servers() - config_object._Config__questions_multi_servers() + config._Config__questions_roles() + assert config.frontend_questions + assert not config.backend_questions - config_object._Config__questions_roles() - assert config_object.frontend_questions - assert not config_object.backend_questions - - config_object._Config__questions_roles() - assert not config_object.frontend_questions - assert config_object.backend_questions - assert config_object.secondary_backend + config._Config__questions_roles() + assert not config.frontend_questions + assert config.backend_questions + assert config.secondary_backend def test_use_https(): - config_object = read_config() - - assert config_object.is_secure - - with patch.object(CLI, "colored_input", return_value=Config.TRUE) as mock_ci: - config_object._Config__questions_https() - assert not config_object.local_install - assert config_object.is_secure - - with patch.object(CLI, "colored_input", return_value=Config.TRUE) 
as mock_ci: - config_object._Config__questions_installation_type() - assert config_object.local_install - assert not config_object.is_secure - - -@patch("helpers.config.Config._Config__clone_repo", MagicMock(return_value=True)) + config = read_config() + + assert config.is_secure + + with patch.object(CLI, 'colored_input', + return_value=CHOICE_YES) as mock_ci: + config._Config__questions_https() + assert not config.local_install + assert config.is_secure + + with patch.object(CLI, 'colored_input', + return_value=CHOICE_YES) as mock_ci: + config._Config__questions_installation_type() + assert config.local_install + assert not config.is_secure + +def _aws_validation_setup(): + config = read_config() + + assert not config._Config__dict['use_aws'] + assert not config._Config__dict['aws_credentials_valid'] + + return config + +def test_aws_credentials_invalid_with_no_configuration(): + config = _aws_validation_setup() + + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = CHOICE_NO + assert not config._Config__dict['use_aws'] + assert not config._Config__dict['aws_credentials_valid'] + +def test_aws_validation_fails_with_system_exit(): + config = _aws_validation_setup() + + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter( + [CHOICE_YES, '', '', '', CHOICE_YES, '', '', '', '', '', ''] + ) + try: + config._Config__questions_aws() + except SystemExit: + pass + assert not config._Config__dict['aws_credentials_valid'] + +def test_aws_invalid_credentials_continue_without_validation(): + config = _aws_validation_setup() + + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter([CHOICE_YES,'', '', '', CHOICE_NO]) + config._Config__questions_aws() + assert not config._Config__dict['aws_credentials_valid'] + +@patch('helpers.aws_validation.AWSValidation.validate_credentials', + 
new=MockAWSValidation.validate_credentials) +def test_aws_validation_passes_with_valid_credentials(): + config = _aws_validation_setup() + + # correct keys, no validation, should continue without issue + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter( + [ + CHOICE_YES, + 'test_access_key', + 'test_secret_key', + 'test_bucket_name', + CHOICE_NO, + ] + ) + config._Config__questions_aws() + assert not config._Config__dict['aws_credentials_valid'] + + # correct keys in first attempt, choose to validate, continue + # without issue + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + config._Config__dict['aws_credentials_valid'] = False + mock_colored_input.side_effect = iter( + [ + CHOICE_YES, + 'test_access_key', + 'test_secret_key', + 'test_bucket_name', + CHOICE_YES, + ] + ) + config._Config__questions_aws() + assert config._Config__dict['aws_credentials_valid'] + + # correct keys in second attempt, choose to validate, continue + # without issue + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + config._Config__dict['aws_credentials_valid'] = False + mock_colored_input.side_effect = iter( + [ + CHOICE_YES, + '', + '', + '', + CHOICE_YES, + 'test_access_key', + 'test_secret_key', + 'test_bucket_name', + ] + ) + config._Config__questions_aws() + assert config._Config__dict['aws_credentials_valid'] + + # correct keys in third attempt, choose to validate, continue + # without issue + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + config._Config__dict['aws_credentials_valid'] = False + mock_colored_input.side_effect = iter( + [ + CHOICE_YES, + '', + '', + '', + CHOICE_YES, + '', + '', + '', + 'test_access_key', + 'test_secret_key', + 'test_bucket_name', + ] + ) + config._Config__questions_aws() + assert config._Config__dict['aws_credentials_valid'] + +@patch('helpers.config.Config._Config__clone_repo', + MagicMock(return_value=True)) def 
test_proxy_letsencrypt(): - config_object = read_config() + config = read_config() - assert config_object.proxy - assert config_object.use_letsencrypt + assert config.proxy + assert config.use_letsencrypt # Force custom exposed port - config_object._Config__config["exposed_nginx_docker_port"] = "8088" - - with patch("helpers.cli.CLI.colored_input") as mock_colored_input: - mock_colored_input.side_effect = iter([Config.TRUE, - "test@test.com", - Config.TRUE, - Config.DEFAULT_NGINX_PORT]) # Use default options - config_object._Config__questions_reverse_proxy() - assert config_object.proxy - assert config_object.use_letsencrypt - assert config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == Config.DEFAULT_PROXY_PORT - assert config_object.get_config().get("exposed_nginx_docker_port") == Config.DEFAULT_NGINX_PORT + config._Config__dict['exposed_nginx_docker_port'] = '8088' + + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + # Use default options + mock_colored_input.side_effect = iter([CHOICE_YES, + 'test@test.com', + CHOICE_YES, + Config.DEFAULT_NGINX_PORT]) + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert config.proxy + assert config.use_letsencrypt + assert config.block_common_http_ports + assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT + assert dict_['exposed_nginx_docker_port'] == Config.DEFAULT_NGINX_PORT def test_proxy_no_letsencrypt_advanced(): - config_object = read_config() - # Force advanced options - config_object._Config__config["advanced"] = Config.TRUE - assert config_object.advanced_options - assert config_object.proxy - assert config_object.use_letsencrypt - proxy_port = Config.DEFAULT_NGINX_PORT + config = read_config() + # Force advanced options + config._Config__dict['advanced'] = True + assert config.advanced_options + assert config.proxy + assert config.use_letsencrypt + proxy_port = Config.DEFAULT_NGINX_PORT - with 
patch("helpers.cli.CLI.colored_input") as mock_colored_input: - mock_colored_input.side_effect = iter([Config.FALSE, Config.FALSE, proxy_port]) - config_object._Config__questions_reverse_proxy() - assert config_object.proxy - assert not config_object.use_letsencrypt - assert not config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == proxy_port + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter( + [CHOICE_NO, CHOICE_NO, proxy_port]) + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert config.proxy + assert not config.use_letsencrypt + assert not config.block_common_http_ports + assert dict_['nginx_proxy_port'] == proxy_port def test_proxy_no_letsencrypt(): - config_object = read_config() + config = read_config() - assert config_object.proxy - assert config_object.use_letsencrypt + assert config.proxy + assert config.use_letsencrypt - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci: - config_object._Config__questions_reverse_proxy() - assert config_object.proxy - assert not config_object.use_letsencrypt - assert config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == Config.DEFAULT_PROXY_PORT + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert config.proxy + assert not config.use_letsencrypt + assert config.block_common_http_ports + assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT def test_proxy_no_letsencrypt_retains_custom_nginx_proxy_port(): - CUSTOM_PROXY_PORT = 9090 - config_object = read_config(overrides={ - 'advanced': Config.TRUE, - 'use_letsencrypt': Config.FALSE, - 'nginx_proxy_port': str(CUSTOM_PROXY_PORT), + custom_proxy_port = 9090 + config = read_config(overrides={ + 'advanced': True, + 'use_letsencrypt': False, + 
'nginx_proxy_port': str(custom_proxy_port), }) with patch.object( - CLI, "colored_input", + CLI, 'colored_input', new=classmethod(lambda cls, message, color, default: default) ) as mock_ci: - config_object._Config__questions_reverse_proxy() - assert(config_object.get_config().get("nginx_proxy_port") - == str(CUSTOM_PROXY_PORT)) + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert dict_['nginx_proxy_port'] == str(custom_proxy_port) def test_no_proxy_no_ssl(): - config_object = read_config() - assert config_object.is_secure - assert config_object.get_config().get("nginx_proxy_port") == Config.DEFAULT_PROXY_PORT + config = read_config() + dict_ = config.get_dict() + assert config.is_secure + assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT proxy_port = Config.DEFAULT_NGINX_PORT - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci: - config_object._Config__questions_https() - assert not config_object.is_secure + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_https() + assert not config.is_secure - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci_2: - config_object._Config__questions_reverse_proxy() - assert not config_object.proxy - assert not config_object.use_letsencrypt - assert not config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == proxy_port + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci_2: + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert not config.proxy + assert not config.use_letsencrypt + assert not config.block_common_http_ports + assert dict_['nginx_proxy_port'] == proxy_port def test_proxy_no_ssl_advanced(): - config_object = read_config() + config = read_config() # Force advanced options - config_object._Config__config["advanced"] = Config.TRUE - assert config_object.advanced_options - 
assert config_object.is_secure + config._Config__dict['advanced'] = True + assert config.advanced_options + assert config.is_secure - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci: - config_object._Config__questions_https() - assert not config_object.is_secure + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci: + config._Config__questions_https() + assert not config.is_secure # Proxy - not on the same server proxy_port = Config.DEFAULT_NGINX_PORT - with patch("helpers.cli.CLI.colored_input") as mock_colored_input_1: - mock_colored_input_1.side_effect = iter([Config.TRUE, Config.FALSE, proxy_port]) - config_object._Config__questions_reverse_proxy() - assert config_object.proxy - assert not config_object.use_letsencrypt - assert not config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == proxy_port + with patch('helpers.cli.CLI.colored_input') as mock_colored_input_1: + mock_colored_input_1.side_effect = iter( + [CHOICE_YES, CHOICE_NO, proxy_port]) + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert config.proxy + assert not config.use_letsencrypt + assert not config.block_common_http_ports + assert dict_['nginx_proxy_port'] == proxy_port # Proxy - on the same server proxy_port = Config.DEFAULT_PROXY_PORT - with patch("helpers.cli.CLI.colored_input") as mock_colored_input_2: - mock_colored_input_2.side_effect = iter([Config.TRUE, Config.TRUE, proxy_port]) - config_object._Config__questions_reverse_proxy() - assert config_object.proxy - assert not config_object.use_letsencrypt - assert config_object.block_common_http_ports - assert config_object.get_config().get("nginx_proxy_port") == proxy_port + with patch('helpers.cli.CLI.colored_input') as mock_colored_input_2: + mock_colored_input_2.side_effect = iter( + [CHOICE_YES, CHOICE_YES, proxy_port]) + config._Config__questions_reverse_proxy() + dict_ = config.get_dict() + assert 
config.proxy + assert not config.use_letsencrypt + assert config.block_common_http_ports + assert dict_['nginx_proxy_port'] == proxy_port def test_port_allowed(): - config_object = read_config() + config = read_config() # Use let's encrypt by default - assert not config_object._Config__is_port_allowed(Config.DEFAULT_NGINX_PORT) - assert not config_object._Config__is_port_allowed("443") - assert config_object._Config__is_port_allowed(Config.DEFAULT_PROXY_PORT) + assert not config._Config__is_port_allowed(Config.DEFAULT_NGINX_PORT) + assert not config._Config__is_port_allowed('443') + assert config._Config__is_port_allowed(Config.DEFAULT_PROXY_PORT) # Don't use let's encrypt - config_object._Config__config["use_letsencrypt"] = Config.FALSE - config_object._Config__config["block_common_http_ports"] = Config.FALSE - assert config_object._Config__is_port_allowed(Config.DEFAULT_NGINX_PORT) - assert config_object._Config__is_port_allowed("443") + config._Config__dict['use_letsencrypt'] = False + config._Config__dict['block_common_http_ports'] = False + assert config._Config__is_port_allowed(Config.DEFAULT_NGINX_PORT) + assert config._Config__is_port_allowed('443') def test_create_directory(): - config_object = read_config() + config = read_config() destination_path = tempfile.mkdtemp() - with patch("helpers.cli.CLI.colored_input") as mock_colored_input: - mock_colored_input.side_effect = iter([destination_path, Config.TRUE]) - config_object._Config__create_directory() - config = config_object.get_config() - assert config.get("kobodocker_path") == destination_path + with patch('helpers.cli.CLI.colored_input') as mock_colored_input: + mock_colored_input.side_effect = iter([destination_path, CHOICE_YES]) + config._Config__create_directory() + dict_ = config.get_dict() + assert dict_['kobodocker_path'] == destination_path shutil.rmtree(destination_path) @patch('helpers.config.Config.write_config', new=lambda *a, **k: None) def test_maintenance(): - config_object = 
read_config() + config = read_config() # First time with pytest.raises(SystemExit) as pytest_wrapped_e: - config_object.maintenance() + config.maintenance() assert pytest_wrapped_e.type == SystemExit assert pytest_wrapped_e.value.code == 1 @@ -301,135 +446,184 @@ def test_maintenance(): '20190101T0200', # OK 'email@example.com' ]) - config_object._Config__config["date_created"] = time.time() - config_object._Config__first_time = False - config_object.maintenance() - config = config_object.get_config() - expected_str = 'Tuesday, January 01 at 02:00 GMT' - assert config.get('maintenance_date_str') == expected_str + config._Config__dict['date_created'] = time.time() + config._Config__first_time = False + config.maintenance() + dict_ = config.get_dict() + expected_str = 'Tuesday, January 01 ' \ + 'at 02:00 GMT' + assert dict_['maintenance_date_str'] == expected_str def test_exposed_ports(): - config_object = read_config() - with patch.object(CLI, "colored_input", return_value=Config.TRUE) as mock_ci: + config = read_config() + with patch.object(CLI, 'colored_input', + return_value=CHOICE_YES) as mock_ci: # Choose multi servers options - config_object._Config__questions_multi_servers() + config._Config__questions_multi_servers() - with patch("helpers.cli.CLI.colored_input") as mock_ci: + with patch('helpers.cli.CLI.colored_input') as mock_ci: # Choose to customize ports - mock_ci.side_effect = iter([Config.TRUE, "5532", "27117", "6479", "6480"]) - config_object._Config__questions_ports() - - assert config_object._Config__config["postgresql_port"] == "5532" - assert config_object._Config__config["mongo_port"] == "27117" - assert config_object._Config__config["redis_main_port"] == "6479" - assert config_object._Config__config["redis_cache_port"] == "6480" - assert config_object.expose_backend_ports - - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci_1: + mock_ci.side_effect = iter( + [CHOICE_YES, '5532', '27117', '6479', '6480']) + 
config._Config__questions_ports() + + assert config._Config__dict['postgresql_port'] == '5532' + assert config._Config__dict['mongo_port'] == '27117' + assert config._Config__dict['redis_main_port'] == '6479' + assert config._Config__dict['redis_cache_port'] == '6480' + assert config.expose_backend_ports + + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci_1: # Choose to single server - config_object._Config__questions_multi_servers() + config._Config__questions_multi_servers() - with patch.object(CLI, "colored_input", return_value=Config.FALSE) as mock_ci_2: + with patch.object(CLI, 'colored_input', + return_value=CHOICE_NO) as mock_ci_2: # Choose to not expose ports - config_object._Config__questions_ports() + config._Config__questions_ports() - assert config_object._Config__config["postgresql_port"] == "5432" - assert config_object._Config__config["mongo_port"] == "27017" - assert config_object._Config__config["redis_main_port"] == "6379" - assert config_object._Config__config["redis_cache_port"] == "6380" - assert not config_object.expose_backend_ports + assert config._Config__dict['postgresql_port'] == '5432' + assert config._Config__dict['mongo_port'] == '27017' + assert config._Config__dict['redis_main_port'] == '6379' + assert config._Config__dict['redis_cache_port'] == '6380' + assert not config.expose_backend_ports @patch('helpers.config.Config.write_config', new=lambda *a, **k: None) def test_force_secure_mongo(): - config_object = read_config() - config_ = config_object.get_config() + config = read_config() + dict_ = config.get_dict() - with patch("helpers.cli.CLI.colored_input") as mock_ci: + with patch('helpers.cli.CLI.colored_input') as mock_ci: # We need to run it like if user has already run the setup once to - # force MongoDB to "upsert" users. - config_object._Config__first_time = False + # force MongoDB to 'upsert' users. 
+ config._Config__first_time = False # Run with no advanced options + mock_ci.side_effect = iter([ - config_["kobodocker_path"], - Config.TRUE, # Confirm path - config_["advanced"], - config_["local_installation"], - config_["public_domain_name"], - config_["kpi_subdomain"], - config_["kc_subdomain"], - config_["ee_subdomain"], - Config.FALSE, # Do you want to use HTTPS? - config_.get("smtp_host", ""), - config_.get("smtp_port", "25"), - config_.get("smtp_user", ""), - "test@test.com", - config_["super_user_username"], - config_["super_user_password"], - config_["use_backup"] + dict_['kobodocker_path'], + CHOICE_YES, # Confirm path + CHOICE_NO, + CHOICE_NO, + dict_['public_domain_name'], + dict_['kpi_subdomain'], + dict_['kc_subdomain'], + dict_['ee_subdomain'], + CHOICE_NO, # Do you want to use HTTPS? + dict_['smtp_host'], + dict_['smtp_port'], + dict_['smtp_user'], + 'test@test.com', + dict_['super_user_username'], + dict_['super_user_password'], + CHOICE_NO, ]) - new_config = config_object.build() - assert new_config.get("mongo_secured") == Config.TRUE + new_config = config.build() + assert new_config['mongo_secured'] is True @patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file', new=write_trigger_upsert_db_users) def test_secure_mongo_advanced_options(): - config_object = read_config() - with patch("helpers.cli.CLI.colored_input") as mock_ci: + config = read_config() + config._Config__dict['advanced'] = True + + # Try when setup is run for the first time. 
+ config._Config__first_time = True + with patch('helpers.cli.CLI.colored_input') as mock_ci: + mock_ci.side_effect = iter([ + 'root', + 'root_password', + 'mongo_kobo_user', + 'mongo_password', + ]) + config._Config__questions_mongo() + assert not os.path.exists('/tmp/upsert_db_users') + + # Try when setup has been already run once + # If it's an upgrade, users should not see: + # ╔══════════════════════════════════════════════════════╗ + # ║ MongoDB root's and/or user's usernames have changed! ║ + # ╚══════════════════════════════════════════════════════╝ + config._Config__first_time = False + config._Config__dict['mongo_secured'] = False + + with patch('helpers.cli.CLI.colored_input') as mock_ci: mock_ci.side_effect = iter([ - "root", - "root_password", - "mongo_kobo_user", - "mongo_password" + 'root', + 'root_password', + 'mongo_kobo_user', + 'mongo_password', ]) - config_object._Config__questions_mongo() - assert not os.path.exists("/tmp/upsert_db_users") + config._Config__questions_mongo() + assert os.path.exists('/tmp/upsert_db_users') + assert os.path.getsize('/tmp/upsert_db_users') == 0 + os.remove('/tmp/upsert_db_users') + + # Try when setup has been already run once + # If it's NOT an upgrade, Users should see: + # ╔══════════════════════════════════════════════════════╗ + # ║ MongoDB root's and/or user's usernames have changed! 
║ + # ╚══════════════════════════════════════════════════════╝ + config._Config__dict['mongo_secured'] = True + with patch('helpers.cli.CLI.colored_input') as mock_ci: + mock_ci.side_effect = iter([ + 'root', + 'root_passw0rd', + 'kobo_user', + 'mongo_password', + CHOICE_YES, + ]) + config._Config__questions_mongo() + assert os.path.exists('/tmp/upsert_db_users') + assert os.path.getsize('/tmp/upsert_db_users') != 0 + os.remove('/tmp/upsert_db_users') @patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file', new=write_trigger_upsert_db_users) def test_update_mongo_passwords(): - config_object = read_config() - with patch("helpers.cli.CLI.colored_input") as mock_ci: - config_object._Config__first_time = False - config_object._Config__config["mongo_root_username"] = 'root' - config_object._Config__config["mongo_user_username"] = 'user' + config = read_config() + with patch('helpers.cli.CLI.colored_input') as mock_ci: + config._Config__first_time = False + config._Config__dict['mongo_root_username'] = 'root' + config._Config__dict['mongo_user_username'] = 'user' mock_ci.side_effect = iter([ - "root", - "root_password", - "user", - "mongo_password" + 'root', + 'root_password', + 'user', + 'mongo_password' ]) - config_object._Config__questions_mongo() - assert os.path.exists("/tmp/upsert_db_users") - assert os.path.getsize("/tmp/upsert_db_users") == 0 - os.remove("/tmp/upsert_db_users") + config._Config__questions_mongo() + assert os.path.exists('/tmp/upsert_db_users') + assert os.path.getsize('/tmp/upsert_db_users') == 0 + os.remove('/tmp/upsert_db_users') @patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file', new=write_trigger_upsert_db_users) def test_update_mongo_usernames(): - config_object = read_config() - with patch("helpers.cli.CLI.colored_input") as mock_ci: - config_object._Config__first_time = False - config_object._Config__config["mongo_root_username"] = 'root' - config_object._Config__config["mongo_user_username"] = 
'user' + config = read_config() + with patch('helpers.cli.CLI.colored_input') as mock_ci: + config._Config__first_time = False + config._Config__dict['mongo_root_username'] = 'root' + config._Config__dict['mongo_user_username'] = 'user' mock_ci.side_effect = iter([ - "admin", - "root_password", - "another_user", - "mongo_password", - Config.TRUE # Delete users + 'admin', + 'root_password', + 'another_user', + 'mongo_password', + CHOICE_YES # Delete users ]) - config_object._Config__questions_mongo() - assert os.path.exists("/tmp/upsert_db_users") - with open("/tmp/upsert_db_users", "r") as f: + config._Config__questions_mongo() + assert os.path.exists('/tmp/upsert_db_users') + with open('/tmp/upsert_db_users', 'r') as f: content = f.read() - expected_content = "user\tformhub\nroot\tadmin" + expected_content = 'user\tformhub\nroot\tadmin' assert content == expected_content - os.remove("/tmp/upsert_db_users") + os.remove('/tmp/upsert_db_users') @patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file', @@ -442,25 +636,25 @@ def test_update_postgres_password(): When password changes, file must contain `` Users should not be deleted if they already exist. 
""" - config_object = read_config() - with patch("helpers.cli.CLI.colored_input") as mock_ci: - config_object._Config__first_time = False - config_object._Config__config["postgres_user"] = 'user' - config_object._Config__config["postgres_password"] = 'password' + config = read_config() + with patch('helpers.cli.CLI.colored_input') as mock_ci: + config._Config__first_time = False + config._Config__dict['postgres_user'] = 'user' + config._Config__dict['postgres_password'] = 'password' mock_ci.side_effect = iter([ - "kobocat", - "koboform", - "user", - "user_password", - Config.FALSE # Tweak settings + 'kobocat', + 'koboform', + 'user', + 'user_password', + CHOICE_NO, # Tweak settings ]) - config_object._Config__questions_postgres() - assert os.path.exists("/tmp/upsert_db_users") - with open("/tmp/upsert_db_users", "r") as f: + config._Config__questions_postgres() + assert os.path.exists('/tmp/upsert_db_users') + with open('/tmp/upsert_db_users', 'r') as f: content = f.read() - expected_content = "user\tfalse" + expected_content = 'user\tfalse' assert content == expected_content - os.remove("/tmp/upsert_db_users") + os.remove('/tmp/upsert_db_users') @patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file', @@ -472,26 +666,26 @@ def test_update_postgres_username(): When username changes, file must contain `` """ - config_object = read_config() - with patch("helpers.cli.CLI.colored_input") as mock_ci: - config_object._Config__first_time = False - config_object._Config__config["postgres_user"] = 'user' - config_object._Config__config["postgres_password"] = 'password' + config = read_config() + with patch('helpers.cli.CLI.colored_input') as mock_ci: + config._Config__first_time = False + config._Config__dict['postgres_user'] = 'user' + config._Config__dict['postgres_password'] = 'password' mock_ci.side_effect = iter([ - "kobocat", - "koboform", - "another_user", - "password", - Config.TRUE, # Delete user - Config.FALSE # Tweak settings + 'kobocat', + 
'koboform', + 'another_user', + 'password', + CHOICE_YES, # Delete user + CHOICE_NO, # Tweak settings ]) - config_object._Config__questions_postgres() - assert os.path.exists("/tmp/upsert_db_users") - with open("/tmp/upsert_db_users", "r") as f: + config._Config__questions_postgres() + assert os.path.exists('/tmp/upsert_db_users') + with open('/tmp/upsert_db_users', 'r') as f: content = f.read() - expected_content = "user\ttrue" + expected_content = 'user\ttrue' assert content == expected_content - os.remove("/tmp/upsert_db_users") + os.remove('/tmp/upsert_db_users') def test_update_postgres_db_name_from_single_database(): @@ -500,22 +694,68 @@ def test_update_postgres_db_name_from_single_database(): With two databases, KoBoCat has its own database. We ensure that `kc_postgres_db` gets `postgres_db` value. """ - config_object = read_config() - config = config_object.get_config() - old_db_name = "postgres_db_kobo" - config_object._Config__config["postgres_db"] = old_db_name - del config_object._Config__config['kc_postgres_db'] - assert "postgres_db" in config - assert "kc_postgres_db" not in config - config = config_object._Config__get_upgraded_config() - assert config.get("kc_postgres_db") == old_db_name + config = read_config() + dict_ = config.get_dict() + old_db_name = 'postgres_db_kobo' + config._Config__dict['postgres_db'] = old_db_name + del config._Config__dict['kc_postgres_db'] + assert 'postgres_db' in dict_ + assert 'kc_postgres_db' not in dict_ + dict_ = config.get_upgraded_dict() + assert dict_['kc_postgres_db'] == old_db_name def test_new_terminology(): """ Ensure config uses `primary` instead of `master` """ - config_object = read_config() - config_object._Config__config["backend_server_role"] = 'master' - config = config_object._Config__get_upgraded_config() - assert config.get("backend_server_role") == 'primary' + config = read_config() + config._Config__dict['backend_server_role'] = 'master' + dict_ = config.get_upgraded_dict() + assert 
dict_['backend_server_role'] == 'primary' + + +def test_use_boolean(): + """ + Ensure config uses booleans instead of '1' or '2' + """ + config = read_config() + boolean_properties = [ + 'advanced', + 'aws_backup_bucket_deletion_rule_enabled', + 'backup_from_primary', + 'block_common_http_ports', + 'custom_secret_keys', + 'customized_ports', + 'debug', + 'dev_mode', + 'expose_backend_ports', + 'https', + 'local_installation', + 'multi', + 'npm_container', + 'postgres_settings', + 'proxy', + 'raven_settings', + 'review_host', + 'smtp_use_tls', + 'staging_mode', + 'two_databases', + 'use_aws', + 'use_backup', + 'use_letsencrypt', + 'use_private_dns', + 'use_wal_e', + 'uwsgi_settings', + ] + expected_dict = {} + for property_ in boolean_properties: + old_value = str(random.randint(1, 2)) + expected_dict[property_] = True if old_value == '1' else False + config._Config__dict[property_] = old_value + + dict_ = config.get_upgraded_dict() + + for property_ in boolean_properties: + assert dict_[property_] == expected_dict[property_] + diff --git a/tests/test_run.py b/tests/test_run.py index d9d6018..7120b11 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -1,32 +1,25 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals - -try: - from unittest.mock import patch, MagicMock - builtin_open = "builtins.open" -except ImportError: - from mock import patch, MagicMock - builtin_open = "__builtin__.open" +from unittest.mock import patch, MagicMock from helpers.command import Command -from helpers.config import Config from .utils import ( read_config, - run_command, + MockCommand, MockDocker, + MockUpgrading, ) @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + new=MockUpgrading.migrate_single_to_two_databases) @patch('helpers.command.Command.info', 
MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_trivial(): - config_object = read_config() + config = read_config() Command.start() mock_docker = MockDocker() expected_containers = MockDocker.FRONTEND_CONTAINERS + \ @@ -41,15 +34,15 @@ def test_toggle_trivial(): @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + new=MockUpgrading.migrate_single_to_two_databases) @patch('helpers.command.Command.info', MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_no_letsencrypt(): config_object = read_config() - config_object._Config__config['use_letsencrypt'] = Config.FALSE + config_object._Config__dict['use_letsencrypt'] = False Command.start() mock_docker = MockDocker() expected_containers = MockDocker.FRONTEND_CONTAINERS + \ @@ -63,12 +56,12 @@ def test_toggle_no_letsencrypt(): @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + new=MockUpgrading.migrate_single_to_two_databases) @patch('helpers.command.Command.info', MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_frontend(): config_object = read_config() Command.start(frontend_only=True) @@ -84,17 +77,17 @@ def test_toggle_frontend(): @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + 
new=MockUpgrading.migrate_single_to_two_databases) @patch('helpers.command.Command.info', MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_primary_backend(): config_object = read_config() - config_object._Config__config['backend_server_role'] = 'primary' - config_object._Config__config['server_role'] = 'backend' - config_object._Config__config['multi'] = Config.TRUE + config_object._Config__dict['backend_server_role'] = 'primary' + config_object._Config__dict['server_role'] = 'backend' + config_object._Config__dict['multi'] = True Command.start() mock_docker = MockDocker() @@ -108,17 +101,17 @@ def test_toggle_primary_backend(): @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + new=MockUpgrading.migrate_single_to_two_databases) @patch('helpers.command.Command.info', MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_secondary_backend(): config_object = read_config() - config_object._Config__config['backend_server_role'] = 'secondary' - config_object._Config__config['server_role'] = 'backend' - config_object._Config__config['multi'] = Config.TRUE + config_object._Config__dict['backend_server_role'] = 'secondary' + config_object._Config__dict['server_role'] = 'backend' + config_object._Config__dict['multi'] = True mock_docker = MockDocker() Command.start() @@ -132,12 +125,12 @@ def test_toggle_secondary_backend(): @patch('helpers.network.Network.is_port_open', MagicMock(return_value=False)) -@patch('helpers.command.migrate_single_to_two_databases', - MagicMock(return_value=None)) +@patch('helpers.command.Upgrading.migrate_single_to_two_databases', + new=MockUpgrading.migrate_single_to_two_databases) 
@patch('helpers.command.Command.info', MagicMock(return_value=True)) @patch('helpers.cli.CLI.run_command', - new=run_command) + new=MockCommand.run_command) def test_toggle_maintenance(): config_object = read_config() mock_docker = MockDocker() @@ -147,13 +140,13 @@ def test_toggle_maintenance(): MockDocker.LETSENCRYPT assert sorted(mock_docker.ps()) == sorted(expected_containers) - config_object._Config__config['maintenance_enabled'] = True + config_object._Config__dict['maintenance_enabled'] = True Command.start() maintenance_containers = MockDocker.PRIMARY_BACKEND_CONTAINERS + \ MockDocker.MAINTENANCE_CONTAINERS + \ MockDocker.LETSENCRYPT assert sorted(mock_docker.ps()) == sorted(maintenance_containers) - config_object._Config__config['maintenance_enabled'] = False + config_object._Config__dict['maintenance_enabled'] = False Command.start() assert sorted(mock_docker.ps()) == sorted(expected_containers) Command.stop() diff --git a/tests/utils.py b/tests/utils.py index 1998a6f..1ddcc00 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,15 +1,6 @@ # -*- coding: utf-8 -*- -from __future__ import unicode_literals - import json -try: - from unittest.mock import patch, mock_open - builtin_open = "builtins.open" -except ImportError: - from mock import patch, mock_open - builtin_open = "__builtin__.open" - -from six import with_metaclass +from unittest.mock import patch, mock_open from helpers.config import Config from helpers.singleton import Singleton @@ -17,42 +8,74 @@ def read_config(overrides=None): - config_dict = dict(Config.get_config_template()) - config_dict["kobodocker_path"] = "/tmp" + config_dict = dict(Config.get_template()) + config_dict['kobodocker_path'] = '/tmp' if overrides is not None: config_dict.update(overrides) - with patch(builtin_open, mock_open(read_data=json.dumps(config_dict))) as mock_file: - config_object = Config() - config_object.read_config() - assert config_object.get_config().get("kobodocker_path") == 
config_dict.get("kobodocker_path") - return config_object + str_config = json.dumps(config_dict) + # `Config()` constructor calls `read_config()` internally + # We need to mock `open()` twice. + # - Once to read kobo-install config file (i.e. `.run.conf`) + # - Once to read value of `unique_id` (i.e. `/tmp/.uniqid`) + with patch('builtins.open', spec=open) as mock_file: + mock_file.side_effect = iter([ + mock_open(read_data=str_config).return_value, + mock_open(read_data='').return_value, + ]) + config = Config() + # We call `read_config()` another time to be sure to reset the config + # before each test. Thanks to `mock_open`, `Config.get_dict()` always + # returns `config_dict`. + with patch('builtins.open', spec=open) as mock_file: + mock_file.side_effect = iter([ + mock_open(read_data=str_config).return_value, + mock_open(read_data='').return_value, + ]) + config.read_config() -def reset_config(config_object): + dict_ = config.get_dict() + assert config_dict['kobodocker_path'] == dict_['kobodocker_path'] - config_dict = dict(Config.get_config_template()) - config_dict["kobodocker_path"] = "/tmp" - config_object.__config = config_dict + return config -def run_command(command, cwd=None, polling=False): - if 'docker-compose' != command[0]: - raise Exception('Command: `{}` is not implemented!'.format(command[0])) +def reset_config(config): - mock_docker = MockDocker() - return mock_docker.compose(command, cwd) + dict_ = dict(Config.get_template()) + dict_['kobodocker_path'] = '/tmp' + config.__dict = dict_ -def write_trigger_upsert_db_users_mock(*args): +def write_trigger_upsert_db_users(*args): content = args[1] - with open("/tmp/upsert_db_users", "w") as f: + with open('/tmp/upsert_db_users', 'w') as f: f.write(content) -class MockDocker(with_metaclass(Singleton)): +class MockCommand: + """ + Create a mock class for Python2 retro compatibility. 
+ Python2 does not pass the class as the first argument explicitly when + `run_command` (as a standalone method) is used as a mock. + """ + @classmethod + def run_command(cls, command, cwd=None, polling=False): + if 'docker-compose' != command[0]: + message = 'Command: `{}` is not implemented!'.format(command[0]) + raise Exception(message) + + mock_docker = MockDocker() + return mock_docker.compose(command, cwd) + + +class MockDocker(metaclass=Singleton): - PRIMARY_BACKEND_CONTAINERS = ['primary_postgres', 'mongo', 'redis_main', 'redis_cache'] + PRIMARY_BACKEND_CONTAINERS = ['primary_postgres', + 'mongo', + 'redis_main', + 'redis_cache'] SECONDARY_BACKEND_CONTAINERS = ['secondary_postgres'] FRONTEND_CONTAINERS = ['nginx', 'kobocat', 'kpi', 'enketo_express'] MAINTENANCE_CONTAINERS = ['maintenance', 'kobocat', 'kpi', 'enketo_express'] @@ -69,7 +92,9 @@ def compose(self, command, cwd): letsencrypt = cwd == config_object.get_letsencrypt_repo_path() if command[-2] == 'config': - return "\n".join([c for c in self.FRONTEND_CONTAINERS if c != 'nginx']) + return '\n'.join([c + for c in self.FRONTEND_CONTAINERS + if c != 'nginx']) if command[-2] == 'up': if letsencrypt: self.__containers += self.LETSENCRYPT @@ -103,3 +128,22 @@ def compose(self, command, cwd): pass return True + + +class MockUpgrading: + + @staticmethod + def migrate_single_to_two_databases(config): + pass + + +class MockAWSValidation: + + def validate_credentials(self): + if ( + self.access_key == 'test_access_key' + and self.secret_key == 'test_secret_key' + ): + return True + else: + return False diff --git a/tox.ini b/tox.ini index 6cc2a2f..f73f663 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,9 @@ # content of: tox.ini , put in same dir as setup.py [tox] skipsdist=True -envlist = py27,py37,py38 +envlist = py36,py37,py38 [testenv] -deps = -rrequirements_py3_tests.txt +deps = -rrequirements_tests.txt commands = - pytest -vv {posargs} - -[testenv:py27] -deps = -rrequirements_py2_tests.txt -commands = - 
pytest -vv {posargs} + pytest -vv {posargs} --disable-pytest-warnings From c64b07a3f00c2fb916bafdb7a8290d9b14b4ad37 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Tue, 9 Feb 2021 09:51:29 -0500 Subject: [PATCH 04/14] Deactivate SSRF with local installation --- helpers/config.py | 7 ++++++- helpers/template.py | 5 +++++ templates/kobo-env/enketo_express/config.json.tpl | 6 ++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index bf1aab0..3a53066 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -1047,7 +1047,7 @@ def __questions_dev_mode(self): if self.frontend_questions: if self.local_install: - # NGinX different port + # NGINX different port CLI.colored_print('Web server port?', CLI.COLOR_QUESTION) self.__dict['exposed_nginx_docker_port'] = CLI.get_response( r'~^\d+$', self.__dict['exposed_nginx_docker_port']) @@ -1178,6 +1178,11 @@ def __questions_installation_type(self): ] ) if self.local_install: + message = ( + 'WARNING!\n\n' + 'SSRF protection is disabled with local installation' + ) + CLI.framed_print(message, color=CLI.COLOR_WARNING) # Reset previous choices, in case server role is not the same. 
self.__reset(local_install=True, private_dns=True) diff --git a/helpers/template.py b/helpers/template.py index 03c1948..9d508af 100644 --- a/helpers/template.py +++ b/helpers/template.py @@ -290,6 +290,11 @@ def _get_value(property_, true_value='', false_value='#', dict_['redis_password']), 'REDIS_PASSWORD_JS_ENCODED': json.dumps( dict_['redis_password']), + 'ENKETO_ALLOW_PRIVATE_IP_ADDRESS': _get_value( + 'local_installation', + true_value='true', + false_value='false' + ) } @staticmethod diff --git a/templates/kobo-env/enketo_express/config.json.tpl b/templates/kobo-env/enketo_express/config.json.tpl index 00f9047..c792153 100644 --- a/templates/kobo-env/enketo_express/config.json.tpl +++ b/templates/kobo-env/enketo_express/config.json.tpl @@ -5,6 +5,12 @@ "server url": "", "api key": "${ENKETO_API_KEY}" }, + "ip filtering": { + "allowPrivateIPAddress": ${ENKETO_ALLOW_PRIVATE_IP_ADDRESS}, + "allowMetaIPAddress": false, + "allowIPAddressList": [], + "denyAddressList": [] + }, "encryption key": "${ENKETO_ENCRYPTION_KEY}", "less secure encryption key": "${ENKETO_LESS_SECURE_ENCRYPTION_KEY}", "support": "${DEFAULT_FROM_EMAIL}", From 995a0a6c8a89007bfca1d6d157459aa3b109f628 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Tue, 9 Feb 2021 09:52:46 -0500 Subject: [PATCH 05/14] Dev mode: Celery can be skipped, dev settings are loaded on startup --- helpers/config.py | 7 +++++++ helpers/template.py | 2 ++ .../kobo-docker/docker-compose.frontend.override.yml.tpl | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/helpers/config.py b/helpers/config.py index 3a53066..13bc52d 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -418,6 +418,7 @@ def get_template(cls): 'two_databases': True, 'use_aws': False, 'use_backup': False, + 'use_celery': True, 'use_letsencrypt': True, 'use_private_dns': False, 'use_wal_e': False, @@ -1056,6 +1057,12 @@ def __questions_dev_mode(self): default=self.__dict['dev_mode'] ) self.__dict['staging_mode'] = False + if self.dev_mode: + 
self.__dict['use_celery'] = CLI.yes_no_question( + 'Use Celery for background tasks?', + default=self.__dict['use_celery'] + ) + else: self.__dict['staging_mode'] = CLI.yes_no_question( 'Use staging mode?', diff --git a/helpers/template.py b/helpers/template.py index 9d508af..46c4cc9 100644 --- a/helpers/template.py +++ b/helpers/template.py @@ -290,6 +290,8 @@ def _get_value(property_, true_value='', false_value='#', dict_['redis_password']), 'REDIS_PASSWORD_JS_ENCODED': json.dumps( dict_['redis_password']), + 'USE_DEV_MODE': _get_value('dev_mode'), + 'USE_CELERY': _get_value('use_celery', comparison_value=False), 'ENKETO_ALLOW_PRIVATE_IP_ADDRESS': _get_value( 'local_installation', true_value='true', diff --git a/templates/kobo-docker/docker-compose.frontend.override.yml.tpl b/templates/kobo-docker/docker-compose.frontend.override.yml.tpl index 0e935e3..84c135a 100644 --- a/templates/kobo-docker/docker-compose.frontend.override.yml.tpl +++ b/templates/kobo-docker/docker-compose.frontend.override.yml.tpl @@ -16,6 +16,8 @@ services: - KC_UWSGI_CHEAPER_RSS_LIMIT_SOFT=${UWSGI_SOFT_LIMIT} - KC_UWSGI_HARAKIRI=${UWSGI_HARAKIRI} - KC_UWSGI_WORKER_RELOAD_MERCY=${UWSGI_WORKER_RELOAD_MERCY} + ${USE_DEV_MODE}- DJANGO_SETTINGS_MODULE=onadata.settings.dev + ${USE_CELERY}- SKIP_CELERY=True ${USE_EXTRA_HOSTS}extra_hosts: ${USE_FAKE_DNS}- ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP} ${USE_FAKE_DNS}- ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP} @@ -43,6 +45,8 @@ services: - KPI_UWSGI_CHEAPER_RSS_LIMIT_SOFT=${UWSGI_SOFT_LIMIT} - KPI_UWSGI_HARAKIRI=${UWSGI_HARAKIRI} - KPI_UWSGI_WORKER_RELOAD_MERCY=${UWSGI_WORKER_RELOAD_MERCY} + ${USE_CELERY}- SKIP_CELERY=True + ${USE_DEV_MODE}- DJANGO_SETTINGS_MODULE=kobo.settings.dev ${USE_HTTPS}- SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO, https ${USE_NPM_FROM_HOST}- FRONTEND_DEV_MODE=host ${USE_EXTRA_HOSTS}extra_hosts: From bbf5b2a25147615f5d6e550ce66528017304e709 Mon Sep 17 00:00:00 2001 From: Olivier 
Leger Date: Tue, 9 Feb 2021 10:31:55 -0500 Subject: [PATCH 06/14] Reset Celery flag when environment is not local, fix tests --- helpers/config.py | 4 ++++ tests/test_config.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/helpers/config.py b/helpers/config.py index 13bc52d..7fab277 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -1069,6 +1069,7 @@ def __questions_dev_mode(self): default=self.__dict['staging_mode'] ) self.__dict['dev_mode'] = False + self.__dict['use_celery'] = True if self.dev_mode or self.staging_mode: message = ( @@ -1192,6 +1193,8 @@ def __questions_installation_type(self): CLI.framed_print(message, color=CLI.COLOR_WARNING) # Reset previous choices, in case server role is not the same. self.__reset(local_install=True, private_dns=True) + else: + self.__reset(dev=True) def __questions_maintenance(self): if self.first_time: @@ -2038,6 +2041,7 @@ def __reset(self, **kwargs): self.__dict['kc_path'] = '' self.__dict['kpi_path'] = '' self.__dict['debug'] = False + self.__dict['use_celery'] = True if reset_nginx_port: self.__dict[ 'exposed_nginx_docker_port'] = Config.DEFAULT_NGINX_PORT diff --git a/tests/test_config.py b/tests/test_config.py index 7960543..46ad0aa 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -86,6 +86,7 @@ def test_dev_mode(): with patch('helpers.cli.CLI.colored_input') as mock_colored_input: mock_colored_input.side_effect = iter(['8080', CHOICE_YES, + CHOICE_NO, kc_repo_path, kpi_repo_path, CHOICE_YES, @@ -100,6 +101,7 @@ def test_dev_mode(): assert dict_['kpi_path'] == kpi_repo_path and \ dict_['kc_path'] == kc_repo_path assert dict_['npm_container'] is False + assert dict_['use_celery'] is False shutil.rmtree(kc_repo_path) shutil.rmtree(kpi_repo_path) From c4f17ffebca9b9a956ab9bba29a02694bfee24a9 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Wed, 10 Feb 2021 12:54:36 -0500 Subject: [PATCH 07/14] Change kobo-docker branch to "support-kobocat-2.0" --- helpers/config.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index 7fab277..4f23b3e 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -30,7 +30,7 @@ class Config(metaclass=Singleton): DEFAULT_PROXY_PORT = '8080' DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' - KOBO_DOCKER_BRANCH = 'block-publicly-internal-domain' + KOBO_DOCKER_BRANCH = 'support-kobocat-2.0' KOBO_INSTALL_VERSION = '4.4.2' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 From 9e1a8e393d6d08d7ca82f6bd032190ef23771053 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Thu, 18 Feb 2021 10:45:23 -0500 Subject: [PATCH 08/14] Improved logic of Config.__reset() --- helpers/config.py | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/helpers/config.py b/helpers/config.py index 4f23b3e..a56772f 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -197,7 +197,7 @@ def build(self): if self.frontend or self.secondary_backend: self.__questions_private_routes() else: - self.__reset(private_dns=True) + self.__reset(fake_dns=True) if self.frontend_questions: self.__questions_public_routes() @@ -1130,7 +1130,7 @@ def __questions_dev_mode(self): ) else: # Force reset paths - self.__reset(dev=True, reset_nginx_port=self.staging_mode) + self.__reset(production=True, nginx_default=self.staging_mode) def __questions_docker_prefix(self): """ @@ -1177,6 +1177,8 @@ def __questions_installation_type(self): """ Asks for installation type """ + previous_installation_type = self.__dict['local_installation'] + self.__dict['local_installation'] = CLI.yes_no_question( 'What kind of installation do you need?', default=self.__dict['local_installation'], @@ -1191,10 +1193,14 @@ def __questions_installation_type(self): 'SSRF protection is disabled with local installation' ) CLI.framed_print(message, color=CLI.COLOR_WARNING) + + if previous_installation_type != self.__dict['local_installation']: # Reset previous choices, in case server role is not 
the same. - self.__reset(local_install=True, private_dns=True) - else: - self.__reset(dev=True) + self.__reset( + production=not self.local_install, + http=self.local_install, + fake_dns=self.local_install, + ) def __questions_maintenance(self): if self.first_time: @@ -2026,30 +2032,33 @@ def __reset(self, **kwargs): It can be useful, if user changes the type of installation on the same server - Returns: - bool + Kwargs: + production (bool): If `True`, reset config to production mode + http (bool): If `True`, only set values related to http/https config + fake_dns (bool): If `True`, reset config to fake dns on docker-compose files # noqa + nginx_default (bool): If `True`, reset NGINX exposed port to default """ - all = True if not kwargs else False - dev_mode = kwargs.get('dev', False) - local_install = kwargs.get('local_install', False) - private_dns = kwargs.get('private_dns', False) - reset_nginx_port = kwargs.get('reset_nginx_port', False) + all_ = True if not kwargs else False + production = kwargs.get('production', False) + http = kwargs.get('http', False) + fake_dns = kwargs.get('fake_dns', False) + nginx_default = kwargs.get('nginx_default', False) - if dev_mode or all: + if production or all_: self.__dict['dev_mode'] = False self.__dict['staging_mode'] = False self.__dict['kc_path'] = '' self.__dict['kpi_path'] = '' self.__dict['debug'] = False self.__dict['use_celery'] = True - if reset_nginx_port: + if nginx_default: self.__dict[ 'exposed_nginx_docker_port'] = Config.DEFAULT_NGINX_PORT - if private_dns or all: + if fake_dns or all_: self.__dict['use_private_dns'] = False - if local_install or all: + if http or all_: self.__dict['multi'] = False self.__dict['https'] = False self.__dict['proxy'] = False From 85ca50cdf9be567123a45e218885f1e96bf1b635 Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Thu, 18 Feb 2021 10:51:26 -0500 Subject: [PATCH 09/14] Fixed: WAL-e is still True when deactivating backups if it has been previously activated --- 
helpers/config.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/helpers/config.py b/helpers/config.py index a56772f..a33422a 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -1029,11 +1029,9 @@ def __questions_backup(self): # Back to default value self.__dict['backup_from_primary'] = True else: - # Back to default value - self.__dict['backup_from_primary'] = True + self.__reset(no_backups=True) else: - self.__dict['use_backup'] = False - self.__dict['backup_from_primary'] = True # Back to default value + self.__reset(no_backups=True) def __questions_dev_mode(self): """ @@ -2043,6 +2041,7 @@ def __reset(self, **kwargs): http = kwargs.get('http', False) fake_dns = kwargs.get('fake_dns', False) nginx_default = kwargs.get('nginx_default', False) + no_backups = kwargs.get('no_backups', False) if production or all_: self.__dict['dev_mode'] = False @@ -2065,6 +2064,11 @@ def __reset(self, **kwargs): self.__dict['nginx_proxy_port'] = Config.DEFAULT_NGINX_PORT self.__dict['use_letsencrypt'] = False + if no_backups or all_: + self.__dict['backup_from_primary'] = True + self.__dict['use_backup'] = False + self.__dict['use_wal_e'] = False + def __secure_mongo(self): """ Force creations of MongoDB users/passwords when users upgrade from From 9ddf5e5a2f2874c9de51eaf9dcefea77fd62510f Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Thu, 18 Feb 2021 12:40:24 -0500 Subject: [PATCH 10/14] Bump version to 5.0.0-dev --- helpers/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index a33422a..ed61a6c 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -31,7 +31,7 @@ class Config(metaclass=Singleton): DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' KOBO_DOCKER_BRANCH = 'support-kobocat-2.0' - KOBO_INSTALL_VERSION = '4.4.2' + KOBO_INSTALL_VERSION = '5.0.0-dev' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 def __init__(self): From 338e35ce8d80a7c8a07ad2c961e0a57e3cb14c9a Mon 
Sep 17 00:00:00 2001 From: "John N. Milner" Date: Tue, 16 Mar 2021 18:14:49 -0400 Subject: [PATCH 11/14] Use `beta` branch of kobo-docker --- helpers/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index e239500..4eb67a4 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -30,7 +30,7 @@ class Config(metaclass=Singleton): DEFAULT_PROXY_PORT = '8080' DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' - KOBO_DOCKER_BRANCH = 'support-kobocat-2.0' + KOBO_DOCKER_BRANCH = 'beta' KOBO_INSTALL_VERSION = '5.0.0-dev' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 From 8cef3de32262e3de2e5a246df01536a3b7f9b5ba Mon Sep 17 00:00:00 2001 From: Olivier Leger Date: Tue, 15 Jun 2021 17:23:13 -0400 Subject: [PATCH 12/14] Use release 2.021.24 of kobo-docker --- helpers/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/config.py b/helpers/config.py index 4eb67a4..3dd6449 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -30,8 +30,8 @@ class Config(metaclass=Singleton): DEFAULT_PROXY_PORT = '8080' DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' - KOBO_DOCKER_BRANCH = 'beta' - KOBO_INSTALL_VERSION = '5.0.0-dev' + KOBO_DOCKER_BRANCH = '2.021.24' + KOBO_INSTALL_VERSION = '5.0.0' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 def __init__(self): From ab3870babc0f5d43dc4024790fb16fd3c8b2c79e Mon Sep 17 00:00:00 2001 From: "John N. 
Milner" Date: Fri, 25 Jun 2021 21:20:39 -0400 Subject: [PATCH 13/14] Use release 2.021.24a of kobo-docker --- helpers/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index 3dd6449..81a8009 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -30,7 +30,7 @@ class Config(metaclass=Singleton): DEFAULT_PROXY_PORT = '8080' DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' - KOBO_DOCKER_BRANCH = '2.021.24' + KOBO_DOCKER_BRANCH = '2.021.24a' KOBO_INSTALL_VERSION = '5.0.0' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3 From 26269ceac58948777de31dbc16577db0e4b6c107 Mon Sep 17 00:00:00 2001 From: "John N. Milner" Date: Wed, 30 Jun 2021 02:20:14 -0400 Subject: [PATCH 14/14] Use release 2.021.24b of kobo-docker --- helpers/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/config.py b/helpers/config.py index 81a8009..4001839 100644 --- a/helpers/config.py +++ b/helpers/config.py @@ -30,7 +30,7 @@ class Config(metaclass=Singleton): DEFAULT_PROXY_PORT = '8080' DEFAULT_NGINX_PORT = '80' DEFAULT_NGINX_HTTPS_PORT = '443' - KOBO_DOCKER_BRANCH = '2.021.24a' + KOBO_DOCKER_BRANCH = '2.021.24b' KOBO_INSTALL_VERSION = '5.0.0' MAXIMUM_AWS_CREDENTIAL_ATTEMPTS = 3