From 4bcc0db1087af96430ad0b4d03f70b44ed7f24ea Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Fri, 8 Apr 2022 08:21:59 -0700 Subject: [PATCH 01/62] feat: add Pulumi Automation API scripts with only AWS support This change adds Python scripts that use the Pulumi Automation API to stand up MARA like how the bin/start.sh scripts currently do. --- pulumi/python/automation/colorize.py | 45 +++ pulumi/python/automation/env_config_parser.py | 35 +++ pulumi/python/automation/main.py | 268 ++++++++++++++++++ pulumi/python/automation/providers/aws.py | 190 +++++++++++++ .../automation/providers/base_provider.py | 101 +++++++ .../automation/providers/pulumi_project.py | 43 +++ .../python/automation/stack_config_parser.py | 47 +++ 7 files changed, 729 insertions(+) create mode 100644 pulumi/python/automation/colorize.py create mode 100644 pulumi/python/automation/env_config_parser.py create mode 100755 pulumi/python/automation/main.py create mode 100644 pulumi/python/automation/providers/aws.py create mode 100644 pulumi/python/automation/providers/base_provider.py create mode 100644 pulumi/python/automation/providers/pulumi_project.py create mode 100644 pulumi/python/automation/stack_config_parser.py diff --git a/pulumi/python/automation/colorize.py b/pulumi/python/automation/colorize.py new file mode 100644 index 0000000..b32a0f3 --- /dev/null +++ b/pulumi/python/automation/colorize.py @@ -0,0 +1,45 @@ +import collections +import os +import random +import sys +import typing +from importlib.machinery import SourceFileLoader + + +def println_nocolor(text: str, output: typing.TextIO = sys.stdout): + print(text, file=output) + + +if os.environ.get('NO_COLOR'): + PRINTLN_FUNC = println_nocolor +else: + lolcat_fields = ['animate', 'duration', 'force', 'freq', 'mode', 'speed', 'spread', 'os'] + LolCatOptions = collections.namedtuple('LolCatOptions', lolcat_fields) + + if os.environ.get('VIRTUAL_ENV'): + venv = os.environ.get('VIRTUAL_ENV') + lolcat_path = os.path.sep.join([venv, 'bin', 'lolcat']) + if os.path.exists(lolcat_path): + loader = SourceFileLoader('lolcat', lolcat_path) + lolcat = loader.load_module() + + if lolcat: + options = LolCatOptions(animate=False, + duration=12, + freq=0.1, + os=random.randint(0, 256), + mode=lolcat.detect_mode(), + speed=-1.0, + spread=0.5, + force=False) + colorizer = lolcat.LolCat(mode=options.mode, output=sys.stdout) + + def println_color(text: str): + colorizer.println_plain(text, options) + sys.stdout.write('\x1b[0m') + sys.stdout.flush() + + PRINTLN_FUNC = println_color + else: + PRINTLN_FUNC = println_nocolor + diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py new file mode 100644 index 0000000..df09a3d --- /dev/null +++ b/pulumi/python/automation/env_config_parser.py @@ -0,0 +1,35 @@ +import os +from typing import Optional, Mapping +from configparser import ConfigParser + +import stack_config_parser + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +DEFAULT_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi', 'environment'])) + + +class EnvConfigParser(ConfigParser): + _stack_config: Optional[stack_config_parser.PulumiStackConfig] = None + config_path: Optional[str] = None + + def __init__(self) -> None: + super().__init__() + self.optionxform = lambda option: option + + def stack_name(self) -> str: + return self.get(section='main', option='PULUMI_STACK') + + def main_section(self) -> Mapping[str, str]: + return self['main'] + + +def read(config_file_path: 
str = DEFAULT_PATH) -> EnvConfigParser: + config_parser = EnvConfigParser() + config_parser.optionxform = lambda option: option + + with open(config_file_path, 'r') as f: + content = f'[main]{os.linesep}{f.read()}' + config_parser.read_string(content) + config_parser.config_path = config_file_path + + return config_parser diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py new file mode 100755 index 0000000..1af6642 --- /dev/null +++ b/pulumi/python/automation/main.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +import getopt +import importlib +import importlib.util +import logging +import os +import shutil +import sys +import typing + +import yaml + +import colorize +import env_config_parser +from typing import List, Optional +from fart import fart +from providers.base_provider import Provider +from providers.pulumi_project import PulumiProject +from pulumi import automation as auto +from typing import Any, Hashable, Dict, Union + +import stack_config_parser + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +OPERATIONS: List[str] = ['down', 'destroy', 'refresh', 'show-execution', 'up', 'validate', 'list-providers'] +PROVIDERS: typing.Iterable[str] = Provider.list_providers() +PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..'])) +FART_FONT = fart.load_font('standard') + + +def usage(): + usage_text = f"""Modern Application Reference Architecture (MARA) Runner + +USAGE: + main.py [FLAGS] [OPERATION] + +FLAGS: + -d, --debug Enable debug output on all of the commands executed + -h, --help Prints help information + -p, --provider= Specifies the provider used (e.g. {', '.join(PROVIDERS)}) + +OPERATIONS: + down/destroy Destroys all provisioned infrastructure + list-providers Lists all of the supported providers + refresh Refreshes the Pulumi state of all provisioned infrastructure + show-execution Displays the execution order of the Pulumi projects used to provision + up Provisions all configured infrastructure + validate Validates that the environment and configuration is correct +""" + print(usage_text) + + +def provider_instance(provider_name: str) -> Provider: + module = importlib.import_module(name=f'providers.{provider_name}') + return module.INSTANCE + + +def main(): + try: + shortopts = 'hdp:' + longopts = ["help", 'debug', 'provider='] + opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts) + except getopt.GetoptError as err: + print(err) # will print something like "option -a not recognized" + usage() + sys.exit(2) + + provider_name: Optional[str] = None + debug_on = False + + # Parse flags + for opt, value in opts: + if opt in ('-h', '--help'): + usage() + sys.exit(0) + elif opt in ('-p', '--provider'): + if value.lower() != 'none': + provider_name = value.lower() + elif opt in ('-d', '--debug'): + debug_on = True + + # Make sure we got an operation - it is the last string passed as an argument + if len(sys.argv) > 1: + operation = sys.argv[-1] + else: + print(f'No operation specified') + usage() + sys.exit(2) + + if operation not in OPERATIONS: + print(f'Unknown operation specified: {operation}') + usage() + sys.exit(2) + + # Start processing operations, first we process those that do not depend on providers + if operation == 'list-providers': + for provider in PROVIDERS: + print(provider, file=sys.stdout) + sys.exit(0) + + # Now validate providers because everything underneath here depends on them + if not provider_name or provider_name.strip() == '': + print('Provider must be specified') + sys.exit(2) + if provider_name 
not in PROVIDERS: + print(f'Unknown provider specified: {provider_name}') + sys.exit(2) + + provider = provider_instance(provider_name) + + if operation == 'show-execution': + provider.display_execution_order(output=sys.stdout) + sys.exit(0) + + env_config = env_config_parser.read() + stack_config = read_or_prompt_for_stack_config(provider=provider, env_config=env_config) + + validate_with_verbosity = operation == 'validate' or debug_on + try: + validate(provider=provider, env_config=env_config, stack_config=stack_config, + verbose=validate_with_verbosity) + except Exception as e: + logging.exception('Validation failed: %s', e) + sys.exit(3) + + if operation == 'refresh': + refresh(provider=provider, env_config=env_config, stack_config=stack_config) + elif operation == 'up': + up(provider=provider, env_config=env_config, stack_config=stack_config) + elif operation == 'down' or operation == 'destroy': + down(provider=provider, env_config=env_config, stack_config=stack_config) + elif operation != 'validate': + print(f'Unknown operation: {operation}') + sys.exit(2) + + +def read_or_prompt_for_stack_config(provider: Provider, + env_config: env_config_parser.EnvConfigParser) -> stack_config_parser.PulumiStackConfig: + try: + stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) + except FileNotFoundError as e: + print(f' > stack configuration file at path does not exist: {e.filename}') + print(f' creating new configuration based on user input') + + stack_defaults_path = os.path.sep.join([os.path.dirname(e.filename), + 'Pulumi.stackname.yaml.example']) + + stack_defaults: Union[Dict[Hashable, Any], list, None] + with open(stack_defaults_path, 'r') as f: + stack_defaults = yaml.safe_load(stream=f) + + stack_config_values = provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config']) + + with open(e.filename, 'w') as f: + yaml.safe_dump(data=stack_config_values, stream=f) + stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) + + return stack_config + + +def render_header(text: str): + header = fart.render_fart(text=text, font=FART_FONT) + colorize.PRINTLN_FUNC(header) + + +def validate(provider: Provider, + env_config: env_config_parser.EnvConfigParser, + stack_config: stack_config_parser.PulumiStackConfig, + verbose: Optional[bool] = False): + # First, we validate that we have the right tools installed + def check_path(cmd: str, fail_message: str) -> bool: + cmd_path = shutil.which(cmd) + if cmd_path: + if verbose: + print(f' > {cmd} found at path: {cmd_path}') + return True + else: + print(f'{cmd} is not installed - {fail_message}') + return False + + success = True + + # Validate presence of required tools + if not check_path('make', 'it must be installed if you intend to build NGINX Ingress Controller from source'): + success = False + if not check_path('docker', 'it must be installed if you intend to build NGINX Ingress Controller from source'): + success = False + if not check_path('node', 'NodeJS is required to run required Pulumi modules, install in order to continue'): + success = False + + if not success: + sys.exit(3) + + # Next, we validate that the environment file has the required values + try: + provider.validate_env_config(env_config.main_section()) + except Exception as e: + print(f' > environment file at path failed validation: {env_config.config_path}') + raise e + if verbose: + print(f' > environment file validated at path: {env_config.config_path}') + + try: + provider.validate_stack_config(stack_config) + 
except Exception as e: + print(f' > stack configuration file at path failed validation: {stack_config.config_path}') + raise e + if verbose: + print(f' > stack configuration file validated at path: {stack_config.config_path}') + + print(' > configuration is OK') + + +def build_pulumi_stack(pulumi_project: PulumiProject, + stack_name: str, + stack_config: stack_config_parser.PulumiStackConfig) -> auto.Stack: + print(f'project: {pulumi_project.name()} path: {pulumi_project.path()}') + stack = auto.create_or_select_stack(stack_name=stack_name, + opts=auto.LocalWorkspaceOptions( + env_vars={ + 'PULUMI_SKIP_UPDATE_CHECK': 'true' + } + ), + project_name=pulumi_project.name(), + work_dir=pulumi_project.path()) + stack.set_all_config(stack_config.to_pulumi_config_value()) + return stack + + +def refresh(provider: Provider, + env_config: env_config_parser.EnvConfigParser, + stack_config: stack_config_parser.PulumiStackConfig): + for pulumi_project in provider.execution_order(): + render_header(pulumi_project.description) + stack = build_pulumi_stack(pulumi_project=pulumi_project, + stack_name=env_config.stack_name(), + stack_config=stack_config) + stack.refresh_config() + stack.refresh(on_output=print) + + +def up(provider: Provider, + env_config: env_config_parser.EnvConfigParser, + stack_config: stack_config_parser.PulumiStackConfig): + for pulumi_project in provider.execution_order(): + render_header(pulumi_project.description) + stack = build_pulumi_stack(pulumi_project=pulumi_project, + stack_name=env_config.stack_name(), + stack_config=stack_config) + stackUpResult = stack.up(on_output=print) + + if pulumi_project.on_success: + pulumi_project.on_success(stackUpResult.outputs, stack.get_all_config()) + + +def down(provider: Provider, + env_config: env_config_parser.EnvConfigParser, + stack_config: stack_config_parser.PulumiStackConfig): + for pulumi_project in reversed(provider.execution_order()): + render_header(pulumi_project.description) + stack = build_pulumi_stack(pulumi_project=pulumi_project, + stack_name=env_config.stack_name(), + stack_config=stack_config) + stackDownResult = stack.destroy(on_output=print) + + +if __name__ == "__main__": + main() diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py new file mode 100644 index 0000000..a741887 --- /dev/null +++ b/pulumi/python/automation/providers/aws.py @@ -0,0 +1,190 @@ +import json +import os + +from kic_util import external_process +from typing import List, Optional, MutableMapping, Union, Hashable, Dict, Any + +from pulumi import automation as auto + +from .base_provider import PulumiProject, Provider, InvalidConfigurationException + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class AwsProviderException(Exception): + pass + + +class AwsCli: + region: str + profile: str + + def __init__(self, region: Optional[str] = None, profile: Optional[str] = None): + super().__init__() + self.region = region + self.profile = profile + + def base_cmd(self) -> str: + cmd = 'aws ' + if self.region and self.region != '': + cmd += f'--region {self.region} ' + if self.profile and self.profile != '': + cmd += f'--profile {self.profile} ' + return cmd.strip() + + def update_kubeconfig_cmd(self, cluster_name: str) -> str: + """ + Returns the command used to update the kubeconfig with the passed cluster + :param cluster_name: name of the cluster to add to the kubeconfig + :return: command to be executed + """ + return f'{self.base_cmd()} eks update-kubeconfig --name {cluster_name}' + + def 
validate_aws_credentials_cmd(self) -> str:
+        """
+        Returns the command used to verify that AWS has valid credentials
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} sts get-caller-identity'
+
+    def list_azs_cmd(self) -> str:
+        return f"{self.base_cmd()} ec2 describe-availability-zones --filter " \
+               f"'Name=state,Values=available' --zone-ids"
+
+
+class AwsProvider(Provider):
+    def infra_execution_order(self) -> List[PulumiProject]:
+        return [
+            PulumiProject(path='infrastructure/aws/vpc', description='VPC'),
+            PulumiProject(path='infrastructure/aws/eks', description='EKS',
+                          on_success=AwsProvider._update_kubeconfig),
+            PulumiProject(path='infrastructure/aws/ecr', description='ECR')
+        ]
+
+    def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> Union[
+            Dict[Hashable, Any], list, None]:
+        config = {
+            'kubernetes:infra_type': 'AWS'
+        }
+        envcfg = env_config.main_section()
+
+        # AWS region
+        if 'AWS_DEFAULT_REGION' in envcfg:
+            default_region = envcfg['AWS_DEFAULT_REGION']
+        else:
+            default_region = defaults['aws:region']
+
+        aws_region = input(f'AWS region to use [{default_region}]: ').strip() or default_region
+        config['aws:region'] = aws_region
+        print(f"AWS region: {config['aws:region']}")
+
+        # AWS profile
+        if 'AWS_PROFILE' in envcfg:
+            default_profile = envcfg['AWS_PROFILE']
+        else:
+            default_profile = 'none'
+        aws_profile = input(
+            f'AWS profile to use [{default_profile}] (enter "none" for none): ').strip() or default_profile
+        print(f'AWS profile: {aws_profile}')
+
+        if aws_profile != 'none':
+            config['aws:profile'] = aws_profile
+
+        aws_cli = AwsCli(region=aws_region, profile=config.get('aws:profile'))
+
+        # AWS availability zones
+        az_data, _ = external_process.run(aws_cli.list_azs_cmd())
+        zones = []
+        for zone in json.loads(az_data)['AvailabilityZones']:
+            if zone['ZoneType'] == 'availability-zone':
+                zones.append(zone['ZoneName'])
+
+        def validate_selected_azs(selected: List[str]) -> bool:
+            for az in selected:
+                if az not in zones:
+                    print(f'{az} is not a known availability zone')
+                    return False
+            return True
+
+        selected_azs = []
+        while len(selected_azs) == 0 or not validate_selected_azs(selected_azs):
+            default_azs = ', '.join(zones)
+            azs = input(
+                f'AWS availability zones to use with VPC [{default_azs}] (separate with commas): ') or default_azs
+            selected_azs = [x.strip() for x in azs.split(',')]
+
+        config['vpc:azs'] = list(selected_azs)
+        print(f"AWS availability zones: {', '.join(config['vpc:azs'])}")
+
+        # EKS version
+        default_version = defaults['eks:k8s_version'] or '1.22'
+        config['eks:k8s_version'] = input(f'EKS Kubernetes version [{default_version}]: ').strip() or default_version
+        print(f"EKS Kubernetes version: {config['eks:k8s_version']}")
+
+        # EKS instance type
+        default_inst_type = defaults['eks:instance_type'] or 't2.large'
+        config['eks:instance_type'] = input(f'EKS instance type [{default_inst_type}]: ').strip() or default_inst_type
+        print(f"EKS instance type: {config['eks:instance_type']}")
+
+        # Minimum number of compute instances for cluster
+        default_min_size = defaults['eks:min_size'] or 3
+        while 'eks:min_size' not in config:
+            min_size = input('Minimum number of compute instances for EKS cluster '
+                             f'[{default_min_size}]: ').strip() or default_min_size
+            if type(min_size) == int or min_size.isdigit():
+                config['eks:min_size'] = int(min_size)
+        print(f"EKS minimum cluster size: {config['eks:min_size']}")
+
+        # Maximum number of compute instances for cluster
+        default_max_size = defaults['eks:max_size'] or 12
+        while 'eks:max_size' not in config:
+            max_size = input('Maximum number of compute instances for EKS cluster '
+                             f'[{default_max_size}]: ').strip() or default_max_size
+            if type(max_size) == int or max_size.isdigit():
+                config['eks:max_size'] = int(max_size)
+        print(f"EKS maximum cluster size: {config['eks:max_size']}")
+
+        # Desired capacity of compute instances
+        default_desired_capacity = config['eks:min_size']
+        while 'eks:desired_capacity' not in config:
+            desired_capacity = input('Desired number of compute instances for EKS cluster '
+                                     f'[{default_desired_capacity}]: ').strip() or default_desired_capacity
+            if type(desired_capacity) == int or desired_capacity.isdigit():
+                config['eks:desired_capacity'] = int(desired_capacity)
+        print(f"EKS desired cluster size: {config['eks:desired_capacity']}")
+
+        parent_config = super().new_stack_config(env_config, defaults)
+        if 'config' in parent_config:
+            parent_config['config'].update(config)
+        else:
+            parent_config['config'] = config
+
+        return parent_config
+
+    def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, None]):
+        super().validate_stack_config(stack_config)
+        config = stack_config['config']
+
+        if 'aws:region' not in config:
+            raise InvalidConfigurationException('When using the AWS provider, the region must be specified')
+
+        aws_cli = AwsCli(region=config['aws:region'], profile=config.get('aws:profile'))
+        try:
+            _, err = external_process.run(cmd=aws_cli.validate_aws_credentials_cmd())
+        except Exception as e:
+            raise AwsProviderException('Unable to authenticate against AWS') from e
+
+    @staticmethod
+    def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue],
+                           config: MutableMapping[str, auto._config.ConfigValue]):
+        if 'cluster_name' not in stack_outputs:
+            raise AwsProviderException('Cannot find key [cluster_name] in stack output')
+
+        aws_cli = AwsCli(region=config.get('aws:region').value, profile=config.get('aws:profile').value if config.get('aws:profile') else None)
+        cluster_name = stack_outputs['cluster_name'].value
+        cmd = aws_cli.update_kubeconfig_cmd(cluster_name)
+        res, err = external_process.run(cmd)
+        print(res)
+
+
+INSTANCE = AwsProvider()
diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py
new file mode 100644
index 0000000..7163461
--- /dev/null
+++ b/pulumi/python/automation/providers/base_provider.py
@@ -0,0 +1,101 @@
+import abc
+import os
+import pathlib
+import sys
+from typing import List, Mapping, MutableMapping, Iterable, TextIO, Union, Dict, Any, Hashable, Callable, Optional
+
+from pulumi import automation as auto
+
+from .pulumi_project import PulumiProject
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+class InvalidConfigurationException(Exception):
+    pass
+
+
+class Provider:
+    @staticmethod
+    def list_providers() -> Iterable[str]:
+        def is_provider(file: pathlib.Path) -> bool:
+            return file.is_file() and \
+                   not file.stem.endswith('base_provider') and \
+                   not file.stem.endswith('pulumi_project')
+
+        path = pathlib.Path(SCRIPT_DIR)
+        return [os.path.splitext(file.stem)[0] for file in path.iterdir() if is_provider(file)]
+
+    @staticmethod
+    def validate_env_config_required_keys(required_keys: List[str], config: Mapping[str, str]):
+        for key in required_keys:
+            if key not in config.keys():
+                raise InvalidConfigurationException(f'Required configuration key [{key}] not found')
+
+    @abc.abstractmethod
+    def infra_execution_order(self) -> List[PulumiProject]:
+        pass
+
+    def 
new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ + Union[Dict[Hashable, Any], list, None]: + return {} + + def validate_env_config(self, config: Mapping[str, str]): + Provider.validate_env_config_required_keys(['PULUMI_STACK'], config) + + def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, None]): + pass + + def k8s_execution_order(self) -> List[PulumiProject]: + return [ + PulumiProject(path='infrastructure/kubeconfig', description='Kubeconfig'), + PulumiProject(path='utility/kic-image-build', description='KIC Image Build'), + PulumiProject(path='utility/kic-image-push', description='KIC Image Build'), + PulumiProject(path='kubernetes/nginx/ingress-controller', description='Ingress Controller'), + PulumiProject(path='kubernetes/logstore', description='Logstore'), + PulumiProject(path='kubernetes/logagent', description='Log Agent'), + PulumiProject(path='kubernetes/certmgr', description='Cert Manager'), + PulumiProject(path='kubernetes/prometheus', description='Prometheus'), + PulumiProject(path='kubernetes/observability', description='Observability'), + PulumiProject(path='kubernetes/applications/sirius', description='Bank of Sirius') + ] + + def execution_order(self) -> List[PulumiProject]: + return self.infra_execution_order() + self.k8s_execution_order() + + def display_execution_order(self, output: TextIO = sys.stdout): + execution_order = self.execution_order() + last_prefix = '' + + for index, pulumi_project in enumerate(execution_order): + path_parts = pulumi_project.root_path.split(os.path.sep) + project = f'{path_parts[-1]} [{pulumi_project.description}]' + prefix = os.path.sep.join(path_parts[:-1]) + + # First item in the list + if last_prefix != prefix and index == 0: + print(f' ┌── {prefix}', file=output) + print(f' │ ├── {project}', file=output) + # Last item in the list with a new prefix + elif last_prefix != prefix and index == len(execution_order) - 1: + print(f' └── {prefix}', file=output) + print(f' └── {project}', file=output) + # Any other item with a new prefix + elif last_prefix != prefix and index != 0: + print(f' ├── {prefix}', file=output) + + peek = execution_order[index + 1] + splitted = peek.root_path.split(f'{prefix}{os.path.sep}')[0] + # item is not the last item with the prefix + if os.path.sep not in splitted: + print(f' │ ├── {project}', file=output) + # item is the last item with the prefix + else: + print(f' │ └── {project}', file=output) + elif last_prefix == prefix: + print(f' │ ├── {project}', file=output) + elif last_prefix == prefix and index == len(execution_order) - 1: + print(f' │ └── {project}', file=output) + + if last_prefix != prefix: + last_prefix = prefix \ No newline at end of file diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py new file mode 100644 index 0000000..472c2ec --- /dev/null +++ b/pulumi/python/automation/providers/pulumi_project.py @@ -0,0 +1,43 @@ +import os.path +import typing + +import yaml + +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class PulumiConfigException(Exception): + pass + + +class PulumiProject: + path: str + description: str + on_success: typing.Optional[typing.Callable] = None + _config_data: typing.Optional[typing.Mapping[str, str]] = None + + def __init__(self, path: str, description: str, on_success: typing.Optional[typing.Callable] = None) -> None: + super().__init__() + self.root_path = path + self.description = description + self.on_success = 
on_success
+
+    def path(self) -> str:
+        relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.root_path])
+        return os.path.abspath(relative_path)
+
+    def config(self) -> typing.Mapping[str, str]:
+        if not self._config_data:
+            config_path = os.path.sep.join([self.path(), 'Pulumi.yaml'])
+            with open(config_path, 'r') as f:
+                self._config_data = yaml.safe_load(f)
+
+        return self._config_data
+
+    def name(self) -> str:
+        config_data = self.config()
+
+        if 'name' not in config_data.keys():
+            raise PulumiConfigException('Pulumi configuration did not contain required "name" key')
+
+        return config_data['name']
\ No newline at end of file
diff --git a/pulumi/python/automation/stack_config_parser.py b/pulumi/python/automation/stack_config_parser.py
new file mode 100644
index 0000000..dd180dd
--- /dev/null
+++ b/pulumi/python/automation/stack_config_parser.py
@@ -0,0 +1,47 @@
+import json
+import os
+from typing import Optional, MutableMapping
+
+from pulumi.automation import ConfigValue
+
+import yaml
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+DEFAULT_DIR_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi']))
+
+
+class PulumiStackConfig(dict):
+    config_path: Optional[str] = None
+
+    def to_pulumi_config_value(self) -> MutableMapping[str, ConfigValue]:
+        if 'config' not in self:
+            return {}
+
+        config = self.get('config')
+
+        pulumi_config = {}
+        for key, val in config.items():
+            if type(val) in [str, int, float]:
+                pulumi_config[key] = ConfigValue(value=val)
+            else:
+                json_val = json.dumps(val)
+                pulumi_config[key] = ConfigValue(value=json_val)
+
+        return pulumi_config
+
+
+def _stack_config_path(stack_name: str) -> str:
+    return os.path.sep.join([DEFAULT_DIR_PATH, f'Pulumi.{stack_name}.yaml'])
+
+
+def _read(config_file_path: str) -> PulumiStackConfig:
+    with open(config_file_path, 'r') as f:
+        stack_config = PulumiStackConfig()
+        stack_config.config_path = config_file_path
+        stack_config.update(yaml.safe_load(f))
+        return stack_config
+
+
+def read(stack_name: str) -> PulumiStackConfig:
+    stack_config_path = _stack_config_path(stack_name)
+    return _read(stack_config_path)

From 23c1bdce092e112e37956f0268831eca5fcce9d7 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 19 May 2022 11:44:28 -0700
Subject: [PATCH 02/62] feat: integrate Automation API scripts with k8s secrets

This change adds a new Pulumi project named 'secrets' to MARA. This
project is used in conjunction with the Pulumi Automation API to store
secrets using the Kubernetes secret store so the secrets can be used
across Pulumi projects.
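For example (a sketch drawn from the code added in this change; the names
match the Bank of Sirius project updated below), a downstream project
resolves a shared secret through a stack reference to the 'secrets' project:

    secrets_stack_ref = pulumi.StackReference(f'{pulumi_user}/secrets/{stack_name}')
    pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets')
    sirius_secrets = Secret.get(resource_name='pulumi-secret-sirius',
                                id=pulumi_secrets['sirius'],
                                opts=pulumi.ResourceOptions(provider=k8s_provider)).data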
--- pulumi/python/automation/env_config_parser.py | 3 + pulumi/python/automation/main.py | 119 ++++++++++++------ pulumi/python/automation/providers/aws.py | 2 +- .../automation/providers/base_provider.py | 28 +++-- .../automation/providers/pulumi_project.py | 38 +++++- .../python/automation/stack_config_parser.py | 2 + .../applications/sirius/__main__.py | 52 ++++++-- .../python/kubernetes/prometheus/__main__.py | 37 ++++-- pulumi/python/kubernetes/secrets/.gitignore | 1 + pulumi/python/kubernetes/secrets/Pulumi.yaml | 6 + pulumi/python/kubernetes/secrets/__main__.py | 47 +++++++ pulumi/python/requirements.txt | 21 ++++ 12 files changed, 285 insertions(+), 71 deletions(-) create mode 100644 pulumi/python/kubernetes/secrets/.gitignore create mode 100644 pulumi/python/kubernetes/secrets/Pulumi.yaml create mode 100644 pulumi/python/kubernetes/secrets/__main__.py create mode 100644 pulumi/python/requirements.txt diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py index df09a3d..79a9a89 100644 --- a/pulumi/python/automation/env_config_parser.py +++ b/pulumi/python/automation/env_config_parser.py @@ -19,6 +19,9 @@ def __init__(self) -> None: def stack_name(self) -> str: return self.get(section='main', option='PULUMI_STACK') + def no_color(self) -> bool: + return 'NO_COLOR' in self.main_section() + def main_section(self) -> Mapping[str, str]: return self['main'] diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index 1af6642..23e5123 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import getopt +import getpass import importlib import importlib.util import logging @@ -13,6 +14,7 @@ import colorize import env_config_parser from typing import List, Optional +from getpass import getpass from fart import fart from providers.base_provider import Provider from providers.pulumi_project import PulumiProject @@ -22,11 +24,15 @@ import stack_config_parser SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..'])) OPERATIONS: List[str] = ['down', 'destroy', 'refresh', 'show-execution', 'up', 'validate', 'list-providers'] PROVIDERS: typing.Iterable[str] = Provider.list_providers() -PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..'])) +BANNER_TYPES: List[str] = ['fabulous', 'boring'] FART_FONT = fart.load_font('standard') +banner_type = BANNER_TYPES[0] +debug_on = False + def usage(): usage_text = f"""Modern Application Reference Architecture (MARA) Runner @@ -35,9 +41,10 @@ def usage(): main.py [FLAGS] [OPERATION] FLAGS: - -d, --debug Enable debug output on all of the commands executed - -h, --help Prints help information - -p, --provider= Specifies the provider used (e.g. {', '.join(PROVIDERS)}) + -d, --debug Enable debug output on all of the commands executed + -b, --banner-type= Banner type to indicate which project is being executed (e.g. {', '.join(BANNER_TYPES)}) + -h, --help Prints help information + -p, --provider= Specifies the provider used (e.g. 
{', '.join(PROVIDERS)}) OPERATIONS: down/destroy Destroys all provisioned infrastructure @@ -57,8 +64,8 @@ def provider_instance(provider_name: str) -> Provider: def main(): try: - shortopts = 'hdp:' - longopts = ["help", 'debug', 'provider='] + shortopts = 'hdp:b:' + longopts = ["help", 'debug', 'banner-type', 'provider='] opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts) except getopt.GetoptError as err: print(err) # will print something like "option -a not recognized" @@ -66,7 +73,8 @@ def main(): sys.exit(2) provider_name: Optional[str] = None - debug_on = False + + global banner_type, debug_on # Parse flags for opt, value in opts: @@ -78,6 +86,9 @@ def main(): provider_name = value.lower() elif opt in ('-d', '--debug'): debug_on = True + elif opt in ('-b', '--banner-type'): + if value in BANNER_TYPES: + banner_type = value # Make sure we got an operation - it is the last string passed as an argument if len(sys.argv) > 1: @@ -114,7 +125,6 @@ def main(): env_config = env_config_parser.read() stack_config = read_or_prompt_for_stack_config(provider=provider, env_config=env_config) - validate_with_verbosity = operation == 'validate' or debug_on try: validate(provider=provider, env_config=env_config, stack_config=stack_config, @@ -124,12 +134,17 @@ def main(): sys.exit(3) if operation == 'refresh': - refresh(provider=provider, env_config=env_config, stack_config=stack_config) + init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + refresh(provider=provider, env_config=env_config) elif operation == 'up': - up(provider=provider, env_config=env_config, stack_config=stack_config) + init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + up(provider=provider, env_config=env_config) elif operation == 'down' or operation == 'destroy': - down(provider=provider, env_config=env_config, stack_config=stack_config) - elif operation != 'validate': + down(provider=provider, env_config=env_config) + elif operation == 'validate': + init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + # validate was already run above + else: print(f'Unknown operation: {operation}') sys.exit(2) @@ -158,9 +173,13 @@ def read_or_prompt_for_stack_config(provider: Provider, return stack_config -def render_header(text: str): - header = fart.render_fart(text=text, font=FART_FONT) - colorize.PRINTLN_FUNC(header) +def render_header(text: str, env_config: env_config_parser.EnvConfigParser): + if banner_type == 'fabulous': + header = fart.render_fart(text=text, font=FART_FONT) + if not env_config.no_color(): + colorize.PRINTLN_FUNC(header) + else: + print(f'* {text}') def validate(provider: Provider, @@ -211,42 +230,70 @@ def check_path(cmd: str, fail_message: str) -> bool: print(' > configuration is OK') +def init_secrets(env_config: env_config_parser.EnvConfigParser, + pulumi_projects: List[PulumiProject]): + env_vars = { + 'PULUMI_SKIP_UPDATE_CHECK': 'true' + } + env_vars.update(env_config.main_section()) + secrets_work_dir = os.path.sep.join([SCRIPT_DIR, '..', 'kubernetes', 'secrets']) + stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), + opts=auto.LocalWorkspaceOptions( + env_vars=env_vars, + ), + project_name='secrets', + work_dir=secrets_work_dir) + + for project in pulumi_projects: + if not project.config_keys_with_secrets: + continue + for secret_config_key in project.config_keys_with_secrets: + if secret_config_key.key_name not in stack.get_all_config().keys(): + if secret_config_key.default: + prompt = 
f'{secret_config_key.prompt} [{secret_config_key.default}]: ' + else: + prompt = f'{secret_config_key.prompt}: ' + + value = getpass(prompt) + if secret_config_key.default and value.strip() == '': + value = secret_config_key.default + + config_value = auto.ConfigValue(secret=True, value=value) + stack.set_config(secret_config_key.key_name, value=config_value) + + def build_pulumi_stack(pulumi_project: PulumiProject, - stack_name: str, - stack_config: stack_config_parser.PulumiStackConfig) -> auto.Stack: + env_config: env_config_parser.EnvConfigParser) -> auto.Stack: print(f'project: {pulumi_project.name()} path: {pulumi_project.path()}') - stack = auto.create_or_select_stack(stack_name=stack_name, + env_vars = { + 'PULUMI_SKIP_UPDATE_CHECK': 'true' + } + env_vars.update(env_config.main_section()) + stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( - env_vars={ - 'PULUMI_SKIP_UPDATE_CHECK': 'true' - } + env_vars=env_vars, ), project_name=pulumi_project.name(), work_dir=pulumi_project.path()) - stack.set_all_config(stack_config.to_pulumi_config_value()) return stack def refresh(provider: Provider, - env_config: env_config_parser.EnvConfigParser, - stack_config: stack_config_parser.PulumiStackConfig): + env_config: env_config_parser.EnvConfigParser): for pulumi_project in provider.execution_order(): - render_header(pulumi_project.description) + render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, - stack_name=env_config.stack_name(), - stack_config=stack_config) + env_config=env_config) stack.refresh_config() stack.refresh(on_output=print) def up(provider: Provider, - env_config: env_config_parser.EnvConfigParser, - stack_config: stack_config_parser.PulumiStackConfig): + env_config: env_config_parser.EnvConfigParser): for pulumi_project in provider.execution_order(): - render_header(pulumi_project.description) + render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, - stack_name=env_config.stack_name(), - stack_config=stack_config) + env_config=env_config) stackUpResult = stack.up(on_output=print) if pulumi_project.on_success: @@ -254,13 +301,11 @@ def up(provider: Provider, def down(provider: Provider, - env_config: env_config_parser.EnvConfigParser, - stack_config: stack_config_parser.PulumiStackConfig): + env_config: env_config_parser.EnvConfigParser): for pulumi_project in reversed(provider.execution_order()): - render_header(pulumi_project.description) + render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, - stack_name=env_config.stack_name(), - stack_config=stack_config) + env_config=env_config) stackDownResult = stack.destroy(on_output=print) diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py index a741887..95adcde 100644 --- a/pulumi/python/automation/providers/aws.py +++ b/pulumi/python/automation/providers/aws.py @@ -117,7 +117,7 @@ def validate_selected_azs(selected: List[str]) -> bool: print(f"AWS availability zones: {', '.join(config['vpc:azs'])}") # EKS version - default_version = defaults['eks:k8s_version'] or '1.22' + default_version = defaults['eks:k8s_version'] or '1.21' config['eks:k8s_version'] = input(f'EKS Kubernetes version [{default_version}]: ').strip() or default_version print(f"EKS Kubernetes version: {config['eks:k8s_version']}") diff --git 
a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py index 7163461..b905c22 100644 --- a/pulumi/python/automation/providers/base_provider.py +++ b/pulumi/python/automation/providers/base_provider.py @@ -2,11 +2,9 @@ import os import pathlib import sys -from typing import List, Mapping, MutableMapping, Iterable, TextIO, Union, Dict, Any, Hashable, Callable, Optional +from typing import List, Mapping, Iterable, TextIO, Union, Dict, Any, Hashable -from pulumi import automation as auto - -from .pulumi_project import PulumiProject +from .pulumi_project import PulumiProject, SecretConfigKey SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -38,7 +36,8 @@ def infra_execution_order(self) -> List[PulumiProject]: def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ Union[Dict[Hashable, Any], list, None]: - return {} + config = {} + return {'config': config} def validate_env_config(self, config: Mapping[str, str]): Provider.validate_env_config_required_keys(['PULUMI_STACK'], config) @@ -49,15 +48,28 @@ def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, N def k8s_execution_order(self) -> List[PulumiProject]: return [ PulumiProject(path='infrastructure/kubeconfig', description='Kubeconfig'), + PulumiProject(path='kubernetes/secrets', description='Secrets'), PulumiProject(path='utility/kic-image-build', description='KIC Image Build'), PulumiProject(path='utility/kic-image-push', description='KIC Image Build'), PulumiProject(path='kubernetes/nginx/ingress-controller', description='Ingress Controller'), PulumiProject(path='kubernetes/logstore', description='Logstore'), PulumiProject(path='kubernetes/logagent', description='Log Agent'), PulumiProject(path='kubernetes/certmgr', description='Cert Manager'), - PulumiProject(path='kubernetes/prometheus', description='Prometheus'), + PulumiProject(path='kubernetes/prometheus', description='Prometheus', + config_keys_with_secrets=[SecretConfigKey(key_name='prometheus:adminpass', + prompt='Prometheus administrator password')]), PulumiProject(path='kubernetes/observability', description='Observability'), - PulumiProject(path='kubernetes/applications/sirius', description='Bank of Sirius') + PulumiProject(path='kubernetes/applications/sirius', description='Bank of Sirius', + config_keys_with_secrets=[SecretConfigKey(key_name='sirius:accounts_pwd', + prompt='Bank of Sirius Accounts Database password'), + SecretConfigKey(key_name='sirius:ledger_pwd', + prompt='Bank of Sirius Ledger Database password'), + SecretConfigKey(key_name='sirius:demo_login_user', + prompt='Bank of Sirius demo site login username', + default='testuser'), + SecretConfigKey(key_name='sirius:demo_login_pwd', + prompt='Bank of Sirius demo site login password', + default='password')]) ] def execution_order(self) -> List[PulumiProject]: @@ -98,4 +110,4 @@ def display_execution_order(self, output: TextIO = sys.stdout): print(f' │ └── {project}', file=output) if last_prefix != prefix: - last_prefix = prefix \ No newline at end of file + last_prefix = prefix diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py index 472c2ec..5911a28 100644 --- a/pulumi/python/automation/providers/pulumi_project.py +++ b/pulumi/python/automation/providers/pulumi_project.py @@ -1,32 +1,58 @@ import os.path -import typing - +from typing import Optional, Callable, Mapping, List import yaml SCRIPT_DIR = 
os.path.dirname(os.path.abspath(__file__)) class PulumiConfigException(Exception): + """Generic exception thrown when Pulumi configuration errors are encountered""" pass +class SecretConfigKey: + """ + Class representing a secret that the user will be prompted to enter and subsequently stored in the Pulumi + secrets store. + """ + key_name: str + prompt: str + default: Optional[str] + + def __init__(self, key_name: str, prompt: str, default: Optional[str] = None) -> None: + super().__init__() + self.key_name = key_name + self.prompt = prompt + self.default = default + + class PulumiProject: + """ + Class representing a Pulumi project that is associated with a directory and containing properties regarding the + secrets used, description and the operation to run when it is successfully stood up. + """ path: str description: str - on_success: typing.Optional[typing.Callable] = None - _config_data: typing.Optional[typing.Mapping[str, str]] = None + config_keys_with_secrets: List[SecretConfigKey] + on_success: Optional[Callable] = None + _config_data: Optional[Mapping[str, str]] = None - def __init__(self, path: str, description: str, on_success: typing.Optional[typing.Callable] = None) -> None: + def __init__(self, + path: str, + description: str, + config_keys_with_secrets: Optional[List[SecretConfigKey]] = None, + on_success: Optional[Callable] = None) -> None: super().__init__() self.root_path = path self.description = description + self.config_keys_with_secrets = config_keys_with_secrets or [] self.on_success = on_success def path(self) -> str: relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.root_path]) return os.path.abspath(relative_path) - def config(self) -> typing.Mapping[str, str]: + def config(self) -> Mapping[str, str]: if not self._config_data: config_path = os.path.sep.join([self.path(), 'Pulumi.yaml']) with open(config_path, 'r') as f: diff --git a/pulumi/python/automation/stack_config_parser.py b/pulumi/python/automation/stack_config_parser.py index dd180dd..86c8825 100644 --- a/pulumi/python/automation/stack_config_parser.py +++ b/pulumi/python/automation/stack_config_parser.py @@ -23,6 +23,8 @@ def to_pulumi_config_value(self) -> MutableMapping[str, ConfigValue]: for key, val in config.items(): if type(val) in [str, int, float]: pulumi_config[key] = ConfigValue(value=val) + elif type(val) is dict and 'secure' in val: + pulumi_config[key] = ConfigValue(value=val['secure'], secret=True) else: json_val = json.dumps(val) pulumi_config[key] = ConfigValue(value=json_val) diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py index bdc471f..3d5a393 100644 --- a/pulumi/python/kubernetes/applications/sirius/__main__.py +++ b/pulumi/python/kubernetes/applications/sirius/__main__.py @@ -1,9 +1,10 @@ import base64 import os - +from typing import Mapping import pulumi import pulumi_kubernetes as k8s from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs +from pulumi_kubernetes.core.v1 import Secret from Crypto.PublicKey import RSA from pulumi_kubernetes.yaml import ConfigFile from pulumi_kubernetes.yaml import ConfigGroup @@ -18,17 +19,24 @@ def remove_status_field(obj): del obj['status'] -def pulumi_k8_project_name(): +def project_name_from_infrastructure_dir(): script_dir = os.path.dirname(os.path.abspath(__file__)) eks_project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'kubeconfig') return pulumi_config.get_pulumi_project_name(eks_project_path) +def 
project_name_from_kubernetes_dir(dirname: str):
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    project_path = os.path.join(script_dir, '..', '..', dirname)
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
 def pulumi_ingress_project_name():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller')
     return pulumi_config.get_pulumi_project_name(ingress_project_path)
 
+
 def pulumi_repo_ingress_project_name():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
@@ -41,7 +49,16 @@ def sirius_manifests_location():
     return sirius_manifests_path
 
 
-# We will only want to be deploying one type of cerficate issuer
+def extract_password_from_k8s_secrets(secrets: Mapping[str, str], secret_name: str) -> str:
+    if secret_name not in secrets:
+        raise KeyError(f'Secret [{secret_name}] not found in Kubernetes secret store')
+    base64_string = secrets[secret_name]
+    byte_data = base64.b64decode(base64_string)
+    password = str(byte_data, 'utf-8')
+    return password
+
+
+# We will only want to be deploying one type of certificate issuer
 # as part of this application; this can (and should) be changed as
 # needed. For example, if the user is taking advantage of ACME let's encrypt
 # in order to generate certs.
@@ -69,7 +86,7 @@ def add_namespace(obj):
 stack_name = pulumi.get_stack()
 project_name = pulumi.get_project()
-k8_project_name = pulumi_k8_project_name()
+k8_project_name = project_name_from_infrastructure_dir()
 pulumi_user = pulumi_config.get_pulumi_user()
 
 k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
@@ -78,6 +95,11 @@
 k8_stack_ref.get_output('cluster_name').apply(
     lambda s: pulumi.log.info(f'Cluster name: {s}'))
 
+secrets_project_name = project_name_from_kubernetes_dir('secrets')
+secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}"
+secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id)
+pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets')
+
 k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig)
 
 # TODO: Streamline the logic for FQDN/IP into something a bit more sane and scalable #82
@@ -151,7 +173,18 @@ def add_namespace(obj):
 #
 # Note this config is specific to the sirius code!
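+# The passwords below are now resolved from the shared 'secrets' project
+# (via the stack reference created above) rather than from plain stack
+# configuration values.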
config = pulumi.Config('sirius') -accounts_pwd = config.require_secret('accounts_pwd') + +sirius_secrets = Secret.get(resource_name='pulumi-secret-sirius', + id=pulumi_secrets['sirius'], + opts=pulumi.ResourceOptions(provider=k8s_provider)).data +accounts_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'accounts_pwd')) +ledger_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'ledger_pwd')) +demo_login_user = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'demo_login_user')) +demo_login_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'demo_login_pwd')) accounts_admin = config.get('accounts_admin') if not accounts_admin: @@ -236,15 +269,10 @@ def add_namespace(obj): ), data={ "USE_DEMO_DATA": "True", - "DEMO_LOGIN_USERNAME": "testuser", - "DEMO_LOGIN_PASSWORD": "password" + "DEMO_LOGIN_USERNAME": demo_login_user, + "DEMO_LOGIN_PASSWORD": demo_login_pwd }) -# Configuration Values are stored in the configuration: -# ./config/Pulumi.STACKNAME.yaml -config = pulumi.Config('sirius') -ledger_pwd = config.require_secret('ledger_pwd') - ledger_admin = config.get('ledger_admin') if not ledger_admin: ledger_admin = 'admin' diff --git a/pulumi/python/kubernetes/prometheus/__main__.py b/pulumi/python/kubernetes/prometheus/__main__.py index d0c42b0..ca6423a 100644 --- a/pulumi/python/kubernetes/prometheus/__main__.py +++ b/pulumi/python/kubernetes/prometheus/__main__.py @@ -1,8 +1,10 @@ import os - +import base64 +from typing import Mapping import pulumi import pulumi_kubernetes as k8s from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs +from pulumi_kubernetes.core.v1 import Secret from pulumi import Output from pulumi_kubernetes.yaml import ConfigGroup from pulumi import CustomTimeouts @@ -10,27 +12,47 @@ from kic_util import pulumi_config -def project_name_from_project_dir(dirname: str): +def project_name_from_infrastructure_dir(dirname: str): script_dir = os.path.dirname(os.path.abspath(__file__)) project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) return pulumi_config.get_pulumi_project_name(project_path) +def project_name_from_kubernetes_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + def servicemon_manifests_location(): script_dir = os.path.dirname(os.path.abspath(__file__)) servicemon_manifests_path = os.path.join(script_dir, 'manifests', '*.yaml') return servicemon_manifests_path +def extract_adminpass_from_k8s_secrets(secrets: Mapping[str, str]) -> str: + if 'adminpass' not in secrets: + raise 'Secret [adminpass] not found in Kubernetes secret store' + base64_string = secrets['adminpass'] + byte_data = base64.b64decode(base64_string) + password = str(byte_data, 'utf-8') + return password + + stack_name = pulumi.get_stack() project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() -k8_project_name = project_name_from_project_dir('kubeconfig') +k8_project_name = project_name_from_infrastructure_dir('kubeconfig') k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) 
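+# Reference the shared 'secrets' project; its 'pulumi_secrets' output maps
+# each configuration namespace to the id of the Kubernetes Secret that
+# stores that namespace's values.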
+secrets_project_name = project_name_from_kubernetes_dir('secrets') +secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}" +secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id) +pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets') + k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) @@ -60,10 +82,11 @@ def servicemon_manifests_location(): if not helm_timeout: helm_timeout = 300 -# Require an admin password, but do not encrypt it due to the -# issues we experienced with Anthos; this can be adjusted at the -# same time that we fix the Anthos issues. -adminpass = config.require('adminpass') +# Use Prometheus administrator password stored in Kubernetes secrets +prometheus_secrets = Secret.get(resource_name='pulumi-secret-prometheus', + id=pulumi_secrets['prometheus'], + opts=pulumi.ResourceOptions(provider=k8s_provider)).data +adminpass = pulumi.Output.unsecret(prometheus_secrets).apply(extract_adminpass_from_k8s_secrets) prometheus_release_args = ReleaseArgs( chart=chart_name, diff --git a/pulumi/python/kubernetes/secrets/.gitignore b/pulumi/python/kubernetes/secrets/.gitignore new file mode 100644 index 0000000..31cefef --- /dev/null +++ b/pulumi/python/kubernetes/secrets/.gitignore @@ -0,0 +1 @@ +Pulumi.*.yaml \ No newline at end of file diff --git a/pulumi/python/kubernetes/secrets/Pulumi.yaml b/pulumi/python/kubernetes/secrets/Pulumi.yaml new file mode 100644 index 0000000..ad441f8 --- /dev/null +++ b/pulumi/python/kubernetes/secrets/Pulumi.yaml @@ -0,0 +1,6 @@ +name: secrets +runtime: + name: python + options: + virtualenv: ../../venv +description: Adds Kubernetes Secrets diff --git a/pulumi/python/kubernetes/secrets/__main__.py b/pulumi/python/kubernetes/secrets/__main__.py new file mode 100644 index 0000000..440e9c3 --- /dev/null +++ b/pulumi/python/kubernetes/secrets/__main__.py @@ -0,0 +1,47 @@ +import os + +import pulumi +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs + +from kic_util import pulumi_config + +script_dir = os.path.dirname(os.path.abspath(__file__)) + + +def project_name_from_project_dir(dirname: str): + global script_dir + project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() + +k8_project_name = project_name_from_project_dir('kubeconfig') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) + +k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig) +keys = pulumi.runtime.get_config_secret_keys_env() + +config_secrets = {} +for key in keys: + bag_name, config_key = key.split(':') + config_bag = pulumi.config.Config(bag_name) + if bag_name not in config_secrets.keys(): + config_secrets[bag_name] = {} + + config_secrets[bag_name][config_key] = pulumi.Output.unsecret(config_bag.require_secret(config_key)) + +secrets_output = {} +for k, v in config_secrets.items(): + resource_name = f'pulumi-secret-{k}' + secret = Secret(resource_name=resource_name, + args=SecretInitArgs(string_data=v), + opts=pulumi.ResourceOptions(provider=k8s_provider)) + secrets_output[k] = secret.id + +pulumi.export('pulumi_secrets', secrets_output) diff --git 
a/pulumi/python/requirements.txt b/pulumi/python/requirements.txt new file mode 100644 index 0000000..d9202f1 --- /dev/null +++ b/pulumi/python/requirements.txt @@ -0,0 +1,21 @@ +awscli~=1.22.100 +grpcio==1.43.0 +fart~=0.1.5 +lolcat~=1.4 +nodeenv~=1.6.0 +passlib~=1.7.4 +pulumi-aws>=4.37.5 +pulumi-docker==3.1.0 +pulumi-eks>=0.37.1 +pulumi-kubernetes==3.19.1 +pycryptodome~=3.14.0 +PyYAML~=5.4.1 +requests~=2.27.1 +setuptools==62.1.0 +setuptools-git-versioning==1.9.2 +wheel==0.37.1 +yamlreader==3.0.4 +pulumi-digitalocean==4.12.0 +pulumi-linode==3.7.1 +linode-cli~=5.17.2 +pulumi~=3.32.0 \ No newline at end of file From 14c494be6754f52a6d5390884f9b715b05360f8c Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Fri, 20 May 2022 10:39:38 -0700 Subject: [PATCH 03/62] feat: improve authentication error output This change outputs the results of the AWS cli command 'aws sts get-caller-identity' more tersely and without a stacktrace when the command fails. --- pulumi/python/automation/providers/aws.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py index 95adcde..9f3ba5d 100644 --- a/pulumi/python/automation/providers/aws.py +++ b/pulumi/python/automation/providers/aws.py @@ -169,10 +169,10 @@ def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, N raise InvalidConfigurationException('When using the AWS provider, the region must be specified') aws_cli = AwsCli(region=config['aws:region'], profile=config['aws:profile']) - try: - _, err = external_process.run(cmd=aws_cli.validate_aws_credentials_cmd()) - except Exception as e: - raise AwsProviderException('Unable to authenticate against AWS') from e + _, err = external_process.run(cmd=aws_cli.validate_aws_credentials_cmd(), suppress_error=True) + if err: + print(f'AWS authentication error: {err}', file=sys.stderr) + sys.exit(3) @staticmethod def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue], From 27904a626b4e145b5a5241a11bfcbefa44520c32 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 9 Jun 2022 16:27:11 -0700 Subject: [PATCH 04/62] feat: add differing behavior for container push per provider --- .../python/utility/kic-image-push/__main__.py | 90 +++++------ .../utility/kic-image-push/registries/aws.py | 65 ++++++++ .../registries/base_registry.py | 80 +++++++++ .../utility/kic-image-push/registries/do.py | 59 +++++++ .../utility/kic-image-push/repository_push.py | 153 +++++++----------- 5 files changed, 298 insertions(+), 149 deletions(-) create mode 100644 pulumi/python/utility/kic-image-push/registries/aws.py create mode 100644 pulumi/python/utility/kic-image-push/registries/base_registry.py create mode 100644 pulumi/python/utility/kic-image-push/registries/do.py diff --git a/pulumi/python/utility/kic-image-push/__main__.py b/pulumi/python/utility/kic-image-push/__main__.py index dd56935..5fed322 100644 --- a/pulumi/python/utility/kic-image-push/__main__.py +++ b/pulumi/python/utility/kic-image-push/__main__.py @@ -1,18 +1,11 @@ -import base64 +import importlib import os - import pulumi -from pulumi_aws import ecr - +from pulumi import Output from kic_util import pulumi_config -from repository_push import RepositoryPush, RepositoryPushArgs, RepositoryCredentialsArgs +from registries.base_registry import ContainerRegistry - -# Leaving to use EKS since this is tied to AWS.... 
-def aws_project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) - project_path = os.path.join(script_dir, '..', '..', 'infrastructure', 'aws', dirname) - return pulumi_config.get_pulumi_project_name(project_path) +from repository_push import RepositoryPush, RepositoryPushArgs def project_name_from_project_dir(dirname: str): @@ -21,36 +14,6 @@ def project_name_from_project_dir(dirname: str): return pulumi_config.get_pulumi_project_name(project_path) -# Get login credentials for ECR, so that we can use it to store Docker images -def get_ecr_credentials(registry_id: str): - credentials = ecr.get_credentials(registry_id) - token = credentials.authorization_token - decoded = str(base64.b64decode(token), 'utf-8') - parts = decoded.split(':', 2) - if len(parts) != 2: - raise ValueError("Unexpected format for decoded ECR authorization token") - username = pulumi.Output.secret(parts[0]) - password = pulumi.Output.secret(parts[1]) - return RepositoryCredentialsArgs(username=username, password=password) - - -stack_name = pulumi.get_stack() -project_name = pulumi.get_project() -pulumi_user = pulumi_config.get_pulumi_user() - -ecr_project_name = aws_project_name_from_project_dir('ecr') -ecr_stack_ref_id = f"{pulumi_user}/{ecr_project_name}/{stack_name}" -ecr_stack_ref = pulumi.StackReference(ecr_stack_ref_id) -ecr_repository_url = ecr_stack_ref.require_output('repository_url') -ecr_registry_id = ecr_stack_ref.require_output('registry_id') -ecr_credentials = ecr_registry_id.apply(get_ecr_credentials) - -kic_image_build_project_name = project_name_from_project_dir('kic-image-build') -kic_image_build_stack_ref_id = f"{pulumi_user}/{kic_image_build_project_name}/{stack_name}" -kick_image_build_stack_ref = pulumi.StackReference(kic_image_build_stack_ref_id) -ingress_image = kick_image_build_stack_ref.require_output('ingress_image') - - def select_image_name(image): if 'image_name_alias' in image: return image['image_name_alias'] @@ -77,6 +40,16 @@ def select_image_tag(image): return image['image_tag'] +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() +k8s_config = pulumi.Config('kubernetes') + +kic_image_build_project_name = project_name_from_project_dir('kic-image-build') +kic_image_build_stack_ref_id = f"{pulumi_user}/{kic_image_build_project_name}/{stack_name}" +kick_image_build_stack_ref = pulumi.StackReference(kic_image_build_stack_ref_id) +ingress_image = kick_image_build_stack_ref.require_output('ingress_image') + # We default to using the image name alias because it is a more precise definition # of the image type when we build from source. 
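As a concrete illustration of the selector functions above, here is how a hypothetical ingress_image stack output is handled; the dictionary values are invented and the selector is restated in simplified form:

# Invented example of the 'ingress_image' output from the kic-image-build stack
example_image = {
    'image_name': 'nginx/nginx-ingress:2.2.2',
    'image_name_alias': 'nginx/nginx-ingress:2.2.2-debian',
    'image_tag': '2.2.2',
}


def select_image_name(image):  # simplified restatement of the selector above
    # Prefer the alias: it identifies source-built images more precisely
    if 'image_name_alias' in image:
        return image['image_name_alias']
    return image['image_name']


assert select_image_name(example_image) == 'nginx/nginx-ingress:2.2.2-debian'
assert select_image_name({'image_name': 'nginx/nginx-ingress:2.2.2'}) == 'nginx/nginx-ingress:2.2.2'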
image_name = ingress_image.apply(select_image_name)
@@ -84,15 +57,30 @@ def select_image_tag(image):
 image_id = ingress_image.apply(select_image_id)
 image_tag = ingress_image.apply(select_image_tag)

-repo_args = RepositoryPushArgs(repository_url=ecr_repository_url,
-                               credentials=ecr_credentials,
-                               image_id=image_id,
-                               image_name=image_name,
-                               image_tag=image_tag,
-                               image_tag_alias=image_tag_alias)
-# Push the images to the ECR repo
-ecr_repo_push = RepositoryPush(name='ingress-controller-repository-push',
-                               repository_args=repo_args)
+
+def push_to_container_registry(container_registry: ContainerRegistry) -> RepositoryPush:
+    if container_registry.login_to_registry():
+        repo_args = RepositoryPushArgs(repository_url=container_registry.registry_url,
+                                       image_id=image_id,
+                                       image_name=image_name,
+                                       image_tag=image_tag,
+                                       image_tag_alias=image_tag_alias)
+
+        # Push the images to the container registry
+        _repo_push = RepositoryPush(name='ingress-controller-registry-push',
+                                    repository_args=repo_args,
+                                    check_if_id_matches_tag_func=container_registry.check_if_id_matches_tag)
+        return _repo_push
+    else:
+        raise RuntimeError('Unable to log into container registry')
+
+
+# Dynamically determine the infrastructure provider, instantiate the
+# corresponding registry class, then apply the pulumi async closures.
+infra_type = k8s_config.require('infra_type').lower()
+module = importlib.import_module(name=f'registries.{infra_type}')
+container_registry_class = module.CLASS
+repo_push: Output[RepositoryPush] = container_registry_class.instance(stack_name, pulumi_user)\
+    .apply(push_to_container_registry)

-pulumi.export('ecr_repository', ecr_repo_push)
+pulumi.export('container_repo_push', Output.unsecret(repo_push))
diff --git a/pulumi/python/utility/kic-image-push/registries/aws.py b/pulumi/python/utility/kic-image-push/registries/aws.py
new file mode 100644
index 0000000..169295b
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/aws.py
@@ -0,0 +1,65 @@
+import os
+
+import requests
+from typing import List, Any
+
+from pulumi import Output, StackReference, log
+from pulumi_aws import ecr
+from kic_util import pulumi_config
+from registries.base_registry import ContainerRegistry, RegistryCredentials
+
+
+class ElasticContainerRegistry(ContainerRegistry):
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]:
+        super().instance(stack_name, pulumi_user)
+        ecr_project_name = ElasticContainerRegistry.aws_project_name_from_project_dir('ecr')
+        ecr_stack_ref_id = f"{pulumi_user}/{ecr_project_name}/{stack_name}"
+        stack_ref = StackReference(ecr_stack_ref_id)
+        # Async query for credentials from stack reference
+        ecr_registry_id = stack_ref.require_output('registry_id')
+        credentials_output = ecr_registry_id.apply(ElasticContainerRegistry.get_ecr_credentials)
+        # Async query for registry url from stack reference
+        registry_url_output = stack_ref.require_output('registry_url')
+
+        def _make_instance(params: List[Any]) -> ElasticContainerRegistry:
+            return cls(stack_name=stack_name, pulumi_user=pulumi_user, registry_url=params[0], credentials=params[1])
+
+        return Output.all(registry_url_output, credentials_output).apply(_make_instance)
+
+    @staticmethod
+    def aws_project_name_from_project_dir(dirname: str):
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'aws', dirname)
+        return pulumi_config.get_pulumi_project_name(project_path)
+
+    @staticmethod
+    def get_ecr_credentials(registry_id: 
str) -> RegistryCredentials: + credentials = ecr.get_credentials(registry_id) + token = credentials.authorization_token + return ContainerRegistry.decode_credentials(token) + + def _ecr_docker_api_url(self, ) -> str: + registry_url_parts = self.registry_url.split('/') + ecr_host = registry_url_parts[0] + ecr_path = registry_url_parts[1] + return f'https://{ecr_host}/v2/{ecr_path}' + + def check_if_id_matches_tag(self, image_tag: str, new_image_id: str) -> bool: + docker_api_url = self._ecr_docker_api_url() + auth_tuple = (self.credentials.username, self.credentials.password) + + log.debug(f'Querying for latest image id: {docker_api_url}/manifests/{image_tag}') + with requests.get(f'{docker_api_url}/manifests/{image_tag}', auth=auth_tuple) as response: + if response.status_code != 200: + log.warn(f'Unable to query ECR directly for image id') + return False + json_response = response.json() + if 'config' in json_response and 'digest' in json_response['config']: + remote_image_id = json_response['config']['digest'] + return remote_image_id != new_image_id + else: + return True + + +CLASS = ElasticContainerRegistry diff --git a/pulumi/python/utility/kic-image-push/registries/base_registry.py b/pulumi/python/utility/kic-image-push/registries/base_registry.py new file mode 100644 index 0000000..79d7a9c --- /dev/null +++ b/pulumi/python/utility/kic-image-push/registries/base_registry.py @@ -0,0 +1,80 @@ +import base64 +import urllib +from urllib import parse +from typing import Optional, List + +import pulumi.log +from pulumi import Input, Output +import pulumi_docker as docker + +from kic_util import external_process + + +class RegistryCredentials: + username: Input[str] + password: Input[str] + + def __init__(self, + username: Input[str], + password: Input[str]): + self.username = username + self.password = password + + +class ContainerRegistry: + stack_name: str + pulumi_user: str + credentials: Optional[RegistryCredentials] + registry_url: str + + def __init__(self, + stack_name: str, + pulumi_user: str, + registry_url: str, + credentials: Optional[RegistryCredentials]) -> None: + super().__init__() + self.stack_name = stack_name + self.pulumi_user = pulumi_user + self.registry_url = registry_url + self.credentials = credentials + + def format_registry_url_for_docker_login(self): + # We assume that the scheme is https because that's what is used most everywhere + registry_host_url = urllib.parse.urlparse(f'https://{self.registry_url}') + # We strip out the path from the URL because it isn't used when logging into a repository + return f'{registry_host_url.scheme}://{registry_host_url.hostname}' + + def login_to_registry(self) -> Optional[docker.LoginResult]: + registry = docker.Registry(registry=self.format_registry_url_for_docker_login(), + username=self.credentials.username, + password=self.credentials.password) + + docker.login_to_registry(registry=registry, log_resource=None) + pulumi.log.info(f'Logged into container registry: {registry.registry}') + + if not docker.login_results: + return None + if docker.login_results[0]: + return docker.login_results[0] + + def logout_of_registry(self): + docker_cmd = f'docker logout {self.format_registry_url_for_docker_login()}' + res, _ = external_process.run(cmd=docker_cmd) + pulumi.log.info(res) + + def check_if_id_matches_tag(self, image_tag: str, new_image_id: str) -> bool: + return False + + @classmethod + def instance(cls, stack_name: str, pulumi_user: str): + pass + + @staticmethod + def decode_credentials(encoded_token: str) -> 
RegistryCredentials:
+        decoded = str(base64.b64decode(encoded_token), 'utf-8')
+        parts = decoded.split(':', 2)
+        if len(parts) != 2:
+            raise ValueError("Unexpected format for decoded registry authorization token")
+        username = parts[0]
+        password = parts[1]
+        return RegistryCredentials(username=username, password=password)
diff --git a/pulumi/python/utility/kic-image-push/registries/do.py b/pulumi/python/utility/kic-image-push/registries/do.py
new file mode 100644
index 0000000..2dbf62a
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/do.py
@@ -0,0 +1,59 @@
+import json
+import os
+from typing import List, Any
+from pulumi import Output, StackReference, ResourceOptions, log
+from pulumi_digitalocean import ContainerRegistry as DoContainerRegistry, ContainerRegistryDockerCredentials
+
+from kic_util import pulumi_config
+from registries.base_registry import ContainerRegistry, RegistryCredentials
+
+
+class DigitalOceanContainerRegistry(ContainerRegistry):
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]:
+        super().instance(stack_name, pulumi_user)
+        # Pull properties from the Pulumi project that defines the Digital Ocean container registry
+        container_registry_project_name = DigitalOceanContainerRegistry.do_project_name_from_project_dir(
+            'container-registry')
+        container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name}/{stack_name}"
+        stack_ref = StackReference(container_registry_stack_ref_id)
+        container_registry_output = stack_ref.require_output('container_registry')
+        registry_name_output = stack_ref.require_output('container_registry_name')
+
+        def _docker_credentials() -> Output[str]:
+            four_hours = 3_600 * 4
+            registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_docker_credentials',
+                                                                      registry_name=registry_name_output,
+                                                                      expiry_seconds=four_hours,
+                                                                      write=True,
+                                                                      opts=ResourceOptions(delete_before_replace=True))
+            return registry_credentials.docker_credentials
+
+        def _make_instance(params: List[Any]) -> DigitalOceanContainerRegistry:
+            container_registry = params[0]
+            do_docker_creds = params[1]
+            server_url = container_registry['server_url']
+            endpoint = container_registry['endpoint']
+            registry_url = f'{endpoint}/nginx-ingress'
+            _credentials = DigitalOceanContainerRegistry._decode_docker_credentials(server_url, do_docker_creds)
+
+            return cls(stack_name=stack_name, pulumi_user=pulumi_user,
+                       registry_url=registry_url, credentials=_credentials)
+
+        return Output.all(container_registry_output, _docker_credentials()).apply(_make_instance)
+
+    @staticmethod
+    def do_project_name_from_project_dir(dirname: str):
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'digitalocean', dirname)
+        return pulumi_config.get_pulumi_project_name(project_path)
+
+    @staticmethod
+    def _decode_docker_credentials(server_url: str,
+                                   docker_credentials_json: str) -> RegistryCredentials:
+        credential_json = json.loads(docker_credentials_json)
+        auths_json = credential_json['auths']
+        return ContainerRegistry.decode_credentials(auths_json[server_url]['auth'])
+
+
+CLASS = DigitalOceanContainerRegistry
diff --git a/pulumi/python/utility/kic-image-push/repository_push.py b/pulumi/python/utility/kic-image-push/repository_push.py
index a0b39e6..eaf58a9 100644
--- a/pulumi/python/utility/kic-image-push/repository_push.py
+++ b/pulumi/python/utility/kic-image-push/repository_push.py
@@ -1,8 +1,6 @@
 import uuid
-from typing 
import Any, List, Optional -import urllib.parse +from typing import Any, List, Optional, Callable -import requests from pulumi.dynamic import ResourceProvider, Resource, CreateResult, CheckResult, ReadResult, CheckFailure, DiffResult, \ UpdateResult import pulumi @@ -11,31 +9,20 @@ from kic_util.docker_image_name import DockerImageName __all__ = [ - 'RepositoryCredentialsArgs', 'RepositoryPush', 'RepositoryPushArgs' ] -class RepositoryCredentialsArgs: - def __init__(self, - username: pulumi.Input[str], - password: pulumi.Input[str]): - self.username = username - self.password = password - - @pulumi.input_type class RepositoryPushArgs(dict): def __init__(self, repository_url: pulumi.Input[str], - credentials: pulumi.Input[pulumi.InputType['RepositoryCredentialsArgs']], image_id: pulumi.Input[str], image_name: pulumi.Input[str], image_tag: pulumi.Input[str], image_tag_alias: Optional[pulumi.Input[str]] = None): self.repository_url = repository_url - self.credentials = credentials self.image_id = image_id self.image_name = image_name self.image_tag = image_tag @@ -43,8 +30,6 @@ def __init__(self, dict_init = { 'repository_url': self.repository_url, - 'repository_username': self.credentials.username, - 'repository_password': self.credentials.password, 'image_id': self.image_id, 'image_name': self.image_name, 'image_tag': self.image_tag, @@ -58,30 +43,24 @@ def __init__(self, class RepositoryPushProvider(ResourceProvider): resource: Resource + check_if_id_matches_tag_func: Callable[[str, str], bool] REQUIRED_PROPS: List[str] = [ 'repository_url', 'image_id', 'image_name', 'image_tag', - 'repository_username', - 'repository_password' ] - def __init__(self, resource: pulumi.Resource) -> None: + def __init__(self, + resource: pulumi.Resource, + check_if_id_matches_tag_func: Optional[Callable[[str, str], bool]] = None) -> None: self.resource = resource - super().__init__() - - def login_to_ecr_repo(self, repository_url: str, username: str, password: str) -> docker.Registry: - # We assume that the scheme is https because that's what is used most everywhere - repo_host_url = urllib.parse.urlparse(f'https://{repository_url}') - # We strip out the path from the URL because it isn't used when logging into a repository - repo_host = f'{repo_host_url.scheme}://{repo_host_url.hostname}' - registry = docker.Registry(registry=repo_host, - username=username, - password=password) - docker.login_to_registry(registry=registry, log_resource=self.resource) - return registry + if check_if_id_matches_tag_func: + self.check_if_id_matches_tag_func = check_if_id_matches_tag_func + else: + self.check_if_id_matches_tag_func = lambda image_tag, new_image_id: False + super().__init__() def push_image_to_repo(self, repository_url: str, @@ -136,8 +115,6 @@ def check_for_param(param: str): def create(self, props: Any) -> CreateResult: repository_url = props['repository_url'] - repository_username = props['repository_username'] - repository_password = props['repository_password'] image_name = props['image_name'] image_tag = props['image_tag'] @@ -146,28 +123,24 @@ def create(self, props: Any) -> CreateResult: else: image_tag_alias = None - self.login_to_ecr_repo(repository_url=repository_url, - username=repository_username, - password=repository_password) - # Push the KIC tag and tag_alias, so that the KIC image can be easily identified on the repository - ecr_image_name = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=image_name, - image_tag=image_tag) - pulumi.log.info(msg=f'Tagged 
and pushed image [{image_name}] to [{ecr_image_name}]', + repo_image_name = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=image_name, + image_tag=image_tag) + pulumi.log.info(msg=f'Tagged and pushed image [{image_name}] to [{repo_image_name}]', resource=self.resource) - outputs = {'ecr_image_name': str(ecr_image_name), - 'ecr_image_id': props['image_id']} + outputs = {'repo_image_name': str(repo_image_name), + 'repo_image_id': props['image_id']} if image_tag_alias: - ecr_image_name_alias = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=image_name, - image_tag=image_tag_alias) - outputs['ecr_image_name_alias'] = str(ecr_image_name_alias) - pulumi.log.info(msg=f'Tagged and pushed image alias [{image_name}] to [{ecr_image_name_alias}]', + repo_image_name_alias = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=image_name, + image_tag=image_tag_alias) + outputs['repo_image_name_alias'] = str(repo_image_name_alias) + pulumi.log.info(msg=f'Tagged and pushed image alias [{image_name}] to [{repo_image_name_alias}]', resource=self.resource) id_ = str(uuid.uuid4()) @@ -175,28 +148,12 @@ def create(self, props: Any) -> CreateResult: def update(self, _id: str, _olds: Any, _news: Any) -> UpdateResult: repository_url: str = _news['repository_url'] - repository_url_parts = repository_url.split('/') - ecr_host = repository_url_parts[0] - ecr_path = repository_url_parts[1] - ecr_docker_api_url = f'https://{ecr_host}/v2/{ecr_path}' - - def check_if_id_matches_tag_in_ecr(image_tag: str) -> bool: - pulumi.log.debug(f'Querying for latest image id: {ecr_docker_api_url}/manifests/{image_tag}') - with requests.get(f'{ecr_docker_api_url}/manifests/{image_tag}', - auth=(_news['repository_username'], _news['repository_password'])) as response: - json_response = response.json() - if 'config' in json_response and 'digest' in json_response['config']: - remote_image_id = json_response['config']['digest'] - return remote_image_id != _news['image_id'] - else: - return True - - image_tag_outdated = check_if_id_matches_tag_in_ecr(_news['image_tag']) + image_tag_outdated = self.check_if_id_matches_tag_func(_news['image_tag'], _news['image_id']) has_tag_alias = 'image_tag_alias' in _news and _news['image_tag_alias'] if has_tag_alias: - image_tag_alias_outdated = check_if_id_matches_tag_in_ecr(_news['image_tag_alias']) + image_tag_alias_outdated = self.check_if_id_matches_tag_func(_news['image_tag_alias'], _news['image_id']) else: image_tag_alias_outdated = False @@ -205,37 +162,33 @@ def check_if_id_matches_tag_in_ecr(image_tag: str) -> bool: pulumi.log.info(msg=f"Tags [{_news['image_tag']}] and [{_news['image_tag_alias']}] " f"are up to date", resource=self.resource) else: - pulumi.log.info(msg=f"Tag [{_news['image_tag']}] is up to date", resource=self.resource) + pulumi.log.info(msg=f"Tag [{_news['image_tag']}] on remote registry is up to date", resource=self.resource) return UpdateResult() outputs = { - 'ecr_image_id': _news['image_id'] + 'repo_image_id': _news['image_id'] } - self.login_to_ecr_repo(repository_url=repository_url, - username=_news['repository_username'], - password=_news['repository_password']) - if image_tag_outdated: - ecr_image_name = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=_news['image_name'], - image_tag=_news['image_tag']) - pulumi.log.info(msg=f"Tagged and pushed image [{_news['image_name']}] to [{ecr_image_name}]", + 
repo_image_name = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=_news['image_name'], + image_tag=_news['image_tag']) + pulumi.log.info(msg=f"Tagged and pushed image [{_news['image_name']}] to [{repo_image_name}]", resource=self.resource) - outputs['ecr_image_name'] = str(ecr_image_name) + outputs['repo_image_name'] = str(repo_image_name) else: pulumi.log.info(msg=f"Tag [{_news['image_tag']}] is up to date", resource=self.resource) if has_tag_alias and image_tag_alias_outdated: - ecr_image_name_alias = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=_news['image_name'], - image_tag=_news['image_tag_alias']) - pulumi.log.info(msg=f"Tagged and pushed image alias [{_news['image_name']}] to [{ecr_image_name_alias}]", + repo_image_name_alias = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=_news['image_name'], + image_tag=_news['image_tag_alias']) + pulumi.log.info(msg=f"Tagged and pushed image alias [{_news['image_name']}] to [{repo_image_name_alias}]", resource=self.resource) - outputs['ecr_image_name_alias'] = str(ecr_image_name_alias) + outputs['repo_image_name_alias'] = str(repo_image_name_alias) elif has_tag_alias: pulumi.log.info(msg=f"Tag alias [{_news['image_tag_alias']}] is up to date", resource=self.resource) @@ -246,11 +199,12 @@ class RepositoryPush(Resource): def __init__(self, name: str, repository_args: pulumi.InputType['RepositoryPushArgs'], + check_if_id_matches_tag_func: Callable[[str, str], bool] = None, opts: Optional[pulumi.ResourceOptions] = None) -> None: props = dict() props.update(repository_args) - def build_ecr_image_alias(args): + def build_repo_image_alias(args): repository_url = args[0] image_tag = args[1] @@ -259,18 +213,21 @@ def build_ecr_image_alias(args): else: return f'{repository_url}:{image_tag}' - if 'ecr_image_name' not in props: - props['ecr_image_name'] = pulumi.Output.concat(repository_args.repository_url, - ':', - repository_args.image_tag) - if 'ecr_image_name_alias' not in props and repository_args.image_tag_alias: - ecr_image_alias_args = pulumi.Output.all(repository_args.repository_url, - repository_args.image_tag_alias) - props['ecr_image_name_alias'] = ecr_image_alias_args.apply(build_ecr_image_alias) - if 'ecr_image_id' not in props: - props['ecr_image_id'] = repository_args.image_id + if 'repo_image_name' not in props: + props['repo_image_name'] = pulumi.Output.concat(repository_args.repository_url, + ':', + repository_args.image_tag) + if 'repo_image_name_alias' not in props and repository_args.image_tag_alias: + repo_image_alias_args = pulumi.Output.all(repository_args.repository_url, + repository_args.image_tag_alias) + props['repo_image_name_alias'] = repo_image_alias_args.apply(build_repo_image_alias) + if 'repo_image_id' not in props: + props['repo_image_id'] = repository_args.image_id if not opts: opts = pulumi.ResourceOptions() - super().__init__(name=name, opts=opts, props=props, provider=RepositoryPushProvider(resource=self)) + provider = RepositoryPushProvider(resource=self, + check_if_id_matches_tag_func=check_if_id_matches_tag_func) + + super().__init__(name=name, opts=opts, props=props, provider=provider) From bfb719e6213da38929c208e9485072fecd08bac2 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 9 Jun 2022 16:29:04 -0700 Subject: [PATCH 05/62] docs: update examples to reflect changes to secrets --- config/pulumi/Pulumi.stackname.yaml.example | 33 +++++++++++++++++++++ 1 file changed, 33 
insertions(+) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index bfcef90..af9d271 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -16,6 +16,39 @@ ################################################################################ config: + ############################################################################ + # Bank of Sirius (Sample Application) Settings + ############################################################################ + + # These parameters define the name of the database and the database credentials + # used by the Bank of Sirius ledger application. + # + # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius + # deployment if no password is provided. + # sirius:ledger_pwd: Password # Required + sirius:ledger_admin: admin + sirius:ledger_db: postgresdb + + # This optional parameter supplies a hostname for the Bank of Sirius Ingress + # controller. If not set, the FQDN of the LB is used. + #sirius:hostname: demo.example.com + + # These parameters define the name of the database and the database credentials + # used by the Bank of Sirius accounts application. + # + # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius + # deployment if no password is provided. + #sirius:accounts_pwd: Password # Required + sirius:accounts_admin: admin + sirius:accounts_db: postgresdb + + # Prometheus Configuration + sirius:chart_version: 2.3.5 + # Chart version for the Pulumi chart for prometheus + sirius:helm_repo_name: prometheus-community + # Name of the repo to pull the prometheus chart from + sirius:helm_repo_url: https://prometheus-community.github.io/helm-charts + ############################################################################ # AWS Access Settings ############################################################################ From 8ada5092fbaa69d218b63ec98ff5e1f6d8b76029 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 9 Jun 2022 16:30:37 -0700 Subject: [PATCH 06/62] feat: add support for Digital Ocean to automation API scripts --- pulumi/python/automation/env_config_parser.py | 33 ++-- pulumi/python/automation/main.py | 83 +++++---- pulumi/python/automation/providers/aws.py | 43 +++-- .../automation/providers/base_provider.py | 27 ++- pulumi/python/automation/providers/do.py | 159 ++++++++++++++++++ 5 files changed, 270 insertions(+), 75 deletions(-) create mode 100644 pulumi/python/automation/providers/do.py diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py index 79a9a89..a2ec327 100644 --- a/pulumi/python/automation/env_config_parser.py +++ b/pulumi/python/automation/env_config_parser.py @@ -7,32 +7,39 @@ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) DEFAULT_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi', 'environment'])) +DEFAULT_ENV_VARS = { + 'PULUMI_SKIP_UPDATE_CHECK': 'true' +} -class EnvConfigParser(ConfigParser): + +class EnvConfig(dict): _stack_config: Optional[stack_config_parser.PulumiStackConfig] = None config_path: Optional[str] = None - def __init__(self) -> None: + def __init__(self, + env_vars: Mapping[str, str], + file_vars: Mapping[str, str], + stack_config: Optional[stack_config_parser.PulumiStackConfig] = None, + config_path: Optional[str] = None) -> None: super().__init__() - self.optionxform = lambda option: option + 
self.update(DEFAULT_ENV_VARS) + self.update(env_vars) + self.update(file_vars) + self._stack_config = stack_config + self.config_path = config_path def stack_name(self) -> str: - return self.get(section='main', option='PULUMI_STACK') + return self.get('PULUMI_STACK') def no_color(self) -> bool: - return 'NO_COLOR' in self.main_section() - - def main_section(self) -> Mapping[str, str]: - return self['main'] - + return self.get('NO_COLOR') is not None -def read(config_file_path: str = DEFAULT_PATH) -> EnvConfigParser: - config_parser = EnvConfigParser() +def read(config_file_path: str = DEFAULT_PATH) -> EnvConfig: + config_parser = ConfigParser() config_parser.optionxform = lambda option: option with open(config_file_path, 'r') as f: content = f'[main]{os.linesep}{f.read()}' config_parser.read_string(content) - config_parser.config_path = config_file_path - return config_parser + return EnvConfig(env_vars=os.environ, file_vars=config_parser['main'], config_path=config_file_path) diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index 23e5123..eb59e9d 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -117,7 +117,7 @@ def main(): print(f'Unknown provider specified: {provider_name}') sys.exit(2) - provider = provider_instance(provider_name) + provider = provider_instance(provider_name.lower()) if operation == 'show-execution': provider.display_execution_order(output=sys.stdout) @@ -134,23 +134,31 @@ def main(): sys.exit(3) if operation == 'refresh': - init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) - refresh(provider=provider, env_config=env_config) + pulumi_cmd = refresh elif operation == 'up': - init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) - up(provider=provider, env_config=env_config) + pulumi_cmd = up elif operation == 'down' or operation == 'destroy': - down(provider=provider, env_config=env_config) + pulumi_cmd = down elif operation == 'validate': init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + pulumi_cmd = None # validate was already run above else: print(f'Unknown operation: {operation}') sys.exit(2) + if pulumi_cmd: + init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + try: + pulumi_cmd(provider=provider, env_config=env_config) + except Exception as e: + logging.error('Error running Pulumi operation with provider [%s] for stack [%s]', + provider_name, env_config.stack_name()) + raise e + def read_or_prompt_for_stack_config(provider: Provider, - env_config: env_config_parser.EnvConfigParser) -> stack_config_parser.PulumiStackConfig: + env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig: try: stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) except FileNotFoundError as e: @@ -164,8 +172,9 @@ def read_or_prompt_for_stack_config(provider: Provider, with open(stack_defaults_path, 'r') as f: stack_defaults = yaml.safe_load(stream=f) - stack_config_values = provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config']) - + stack_config_values = { + 'config': provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config']) + } with open(e.filename, 'w') as f: yaml.safe_dump(data=stack_config_values, stream=f) stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) @@ -173,7 +182,7 @@ def read_or_prompt_for_stack_config(provider: Provider, return stack_config -def render_header(text: 
str, env_config: env_config_parser.EnvConfigParser): +def render_header(text: str, env_config: env_config_parser.EnvConfig): if banner_type == 'fabulous': header = fart.render_fart(text=text, font=FART_FONT) if not env_config.no_color(): @@ -183,7 +192,7 @@ def render_header(text: str, env_config: env_config_parser.EnvConfigParser): def validate(provider: Provider, - env_config: env_config_parser.EnvConfigParser, + env_config: env_config_parser.EnvConfig, stack_config: stack_config_parser.PulumiStackConfig, verbose: Optional[bool] = False): # First, we validate that we have the right tools installed @@ -210,9 +219,17 @@ def check_path(cmd: str, fail_message: str) -> bool: if not success: sys.exit(3) + if 'kubernetes:infra_type' in stack_config['config']: + previous_provider = stack_config['config']['kubernetes:infra_type'] + if previous_provider.lower() != provider.infra_type().lower(): + print(f'Stack has already been used with the provider [{previous_provider}], so it cannot ' + f'be run with the specified provider [{provider.infra_type()}]. Destroy all resources ' + 'and remove the kubernetes:infra_type key from the stack configuration.', file=sys.stderr) + sys.exit(3) + # Next, we validate that the environment file has the required values try: - provider.validate_env_config(env_config.main_section()) + provider.validate_env_config(env_config) except Exception as e: print(f' > environment file at path failed validation: {env_config.config_path}') raise e @@ -220,7 +237,7 @@ def check_path(cmd: str, fail_message: str) -> bool: print(f' > environment file validated at path: {env_config.config_path}') try: - provider.validate_stack_config(stack_config) + provider.validate_stack_config(stack_config, env_config) except Exception as e: print(f' > stack configuration file at path failed validation: {stack_config.config_path}') raise e @@ -230,16 +247,12 @@ def check_path(cmd: str, fail_message: str) -> bool: print(' > configuration is OK') -def init_secrets(env_config: env_config_parser.EnvConfigParser, +def init_secrets(env_config: env_config_parser.EnvConfig, pulumi_projects: List[PulumiProject]): - env_vars = { - 'PULUMI_SKIP_UPDATE_CHECK': 'true' - } - env_vars.update(env_config.main_section()) secrets_work_dir = os.path.sep.join([SCRIPT_DIR, '..', 'kubernetes', 'secrets']) stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( - env_vars=env_vars, + env_vars=env_config, ), project_name='secrets', work_dir=secrets_work_dir) @@ -263,15 +276,11 @@ def init_secrets(env_config: env_config_parser.EnvConfigParser, def build_pulumi_stack(pulumi_project: PulumiProject, - env_config: env_config_parser.EnvConfigParser) -> auto.Stack: + env_config: env_config_parser.EnvConfig) -> auto.Stack: print(f'project: {pulumi_project.name()} path: {pulumi_project.path()}') - env_vars = { - 'PULUMI_SKIP_UPDATE_CHECK': 'true' - } - env_vars.update(env_config.main_section()) stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( - env_vars=env_vars, + env_vars=env_config, ), project_name=pulumi_project.name(), work_dir=pulumi_project.path()) @@ -279,34 +288,44 @@ def build_pulumi_stack(pulumi_project: PulumiProject, def refresh(provider: Provider, - env_config: env_config_parser.EnvConfigParser): + env_config: env_config_parser.EnvConfig): for pulumi_project in provider.execution_order(): render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, 
env_config=env_config) stack.refresh_config() - stack.refresh(on_output=print) + stack.refresh(color=pulumi_color_settings(env_config), + on_output=print) def up(provider: Provider, - env_config: env_config_parser.EnvConfigParser): + env_config: env_config_parser.EnvConfig): for pulumi_project in provider.execution_order(): render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) - stackUpResult = stack.up(on_output=print) + stackUpResult = stack.up(color=pulumi_color_settings(env_config), + on_output=print) if pulumi_project.on_success: - pulumi_project.on_success(stackUpResult.outputs, stack.get_all_config()) + pulumi_project.on_success(stackUpResult.outputs, stack.get_all_config(), env_config) def down(provider: Provider, - env_config: env_config_parser.EnvConfigParser): + env_config: env_config_parser.EnvConfig): for pulumi_project in reversed(provider.execution_order()): render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) - stackDownResult = stack.destroy(on_output=print) + stackDownResult = stack.destroy(color=pulumi_color_settings(env_config), + on_output=print) + + +def pulumi_color_settings(env_config: env_config_parser.EnvConfig): + if env_config.no_color(): + return 'never' + else: + return 'auto' if __name__ == "__main__": diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py index 9f3ba5d..80a1593 100644 --- a/pulumi/python/automation/providers/aws.py +++ b/pulumi/python/automation/providers/aws.py @@ -1,8 +1,9 @@ import json import os +import sys from kic_util import external_process -from typing import List, Optional, MutableMapping, Union, Hashable, Dict, Any +from typing import List, Optional, MutableMapping, Union, Hashable, Dict, Any, Mapping from pulumi import automation as auto @@ -40,7 +41,7 @@ def update_kubeconfig_cmd(self, cluster_name: str) -> str: """ return f'{self.base_cmd()} eks update-kubeconfig --name {cluster_name}' - def validate_aws_credentials_cmd(self) -> str: + def validate_credentials_cmd(self) -> str: """ Returns the command used to verify that AWS has valid credentials :return: command to be executed @@ -53,6 +54,9 @@ def list_azs_cmd(self) -> str: class AwsProvider(Provider): + def infra_type(self) -> str: + return 'AWS' + def infra_execution_order(self) -> List[PulumiProject]: return [ PulumiProject(path='infrastructure/aws/vpc', description='VPC'), @@ -63,14 +67,11 @@ def infra_execution_order(self) -> List[PulumiProject]: def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> Union[ Dict[Hashable, Any], list, None]: - config = { - 'kubernetes:infra_type': 'AWS' - } - envcfg = env_config.main_section() + config = super().new_stack_config(env_config, defaults) # AWS region - if 'AWS_DEFAULT_REGION' in envcfg: - default_region = envcfg['AWS_DEFAULT_REGION'] + if 'AWS_DEFAULT_REGION' in env_config: + default_region = env_config['AWS_DEFAULT_REGION'] else: default_region = defaults['aws:region'] @@ -79,8 +80,8 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list print(f"AWS region: {config['aws:region']}") # AWS profile - if 'AWS_PROFILE' in envcfg: - default_profile = envcfg['AWS_PROFILE'] + if 'AWS_PROFILE' in env_config: + default_profile = env_config['AWS_PROFILE'] else: default_profile = 'none' aws_profile = input( @@ -153,30 +154,28 @@ def 
validate_selected_azs(selected: List[str]) -> bool: config['eks:desired_capacity'] = int(desired_capacity) print(f"EKS maximum cluster size: {config['eks:desired_capacity']}") - parent_config = super().new_stack_config(env_config, defaults) - if 'config' in parent_config: - parent_config['config'].update(config) - else: - parent_config['config'] = config - - return parent_config + return config - def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, None]): - super().validate_stack_config(stack_config) + def validate_stack_config(self, + stack_config: Union[Dict[Hashable, Any], list, None], + env_config: Mapping[str, str]): + super().validate_stack_config(stack_config=stack_config, env_config=env_config) config = stack_config['config'] if 'aws:region' not in config: - raise InvalidConfigurationException('When using the AWS provider, the region must be specified') + raise InvalidConfigurationException('When using the AWS provider, the region [aws:region] ' + 'must be specified') aws_cli = AwsCli(region=config['aws:region'], profile=config['aws:profile']) - _, err = external_process.run(cmd=aws_cli.validate_aws_credentials_cmd(), suppress_error=True) + _, err = external_process.run(cmd=aws_cli.validate_credentials_cmd(), suppress_error=True) if err: print(f'AWS authentication error: {err}', file=sys.stderr) sys.exit(3) @staticmethod def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue], - config: MutableMapping[str, auto._config.ConfigValue]): + config: MutableMapping[str, auto._config.ConfigValue], + _env_config: Mapping[str, str]): if 'cluster_name' not in stack_outputs: raise AwsProviderException('Cannot find key [cluster_name] in stack output') diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py index b905c22..544623c 100644 --- a/pulumi/python/automation/providers/base_provider.py +++ b/pulumi/python/automation/providers/base_provider.py @@ -30,19 +30,30 @@ def validate_env_config_required_keys(required_keys: List[str], config: Mapping[ if key not in config.keys(): raise InvalidConfigurationException(f'Required configuration key [{key}] not found') + @abc.abstractmethod + def infra_type(self) -> str: + """ + :return string representing the type of underlying infrastructure used to stand up Kubernetes + """ + pass + @abc.abstractmethod def infra_execution_order(self) -> List[PulumiProject]: pass - def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ - Union[Dict[Hashable, Any], list, None]: - config = {} - return {'config': config} + def new_stack_config(self, env_config: Mapping[str, str], + defaults: Union[Dict[Hashable, Any], list, None]) -> Union[Dict[Hashable, Any], list, None]: + config = { + 'kubernetes:infra_type': self.infra_type() + } + return config - def validate_env_config(self, config: Mapping[str, str]): - Provider.validate_env_config_required_keys(['PULUMI_STACK'], config) + def validate_env_config(self, env_config: Mapping[str, str]): + Provider.validate_env_config_required_keys(['PULUMI_STACK'], env_config) - def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, None]): + def validate_stack_config(self, + stack_config: Union[Dict[Hashable, Any], list, None], + env_config: Mapping[str, str]): pass def k8s_execution_order(self) -> List[PulumiProject]: @@ -50,7 +61,7 @@ def k8s_execution_order(self) -> List[PulumiProject]: PulumiProject(path='infrastructure/kubeconfig', 
description='Kubeconfig'), PulumiProject(path='kubernetes/secrets', description='Secrets'), PulumiProject(path='utility/kic-image-build', description='KIC Image Build'), - PulumiProject(path='utility/kic-image-push', description='KIC Image Build'), + PulumiProject(path='utility/kic-image-push', description='KIC Image Push'), PulumiProject(path='kubernetes/nginx/ingress-controller', description='Ingress Controller'), PulumiProject(path='kubernetes/logstore', description='Logstore'), PulumiProject(path='kubernetes/logagent', description='Log Agent'), diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py new file mode 100644 index 0000000..baf5e58 --- /dev/null +++ b/pulumi/python/automation/providers/do.py @@ -0,0 +1,159 @@ +import json +import sys +from typing import List, Dict, Hashable, Any, Union, MutableMapping, Optional, Mapping + +import yaml +from pulumi import automation as auto +from kic_util import external_process + +from .base_provider import PulumiProject, Provider, InvalidConfigurationException + + +class DigitalOceanProviderException(Exception): + pass + + +class DoctlCli: + access_token: str + region: Optional[str] + + def __init__(self, access_token: str, region: Optional[str] = None): + self.access_token = access_token + self.region = region + + def base_cmd(self) -> str: + cmd = 'doctl' + cmd += f' --access-token "{self.access_token}" ' + return cmd.strip() + + def validate_credentials_cmd(self) -> str: + return f'{self.base_cmd()} account get' + + def save_kubernetes_cluster_cmd(self, cluster_name: str) -> str: + return f'{self.base_cmd()} kubernetes cluster config save {cluster_name}' + + def add_container_registry_support_to_kubernetes(self, cluster_name: str) -> str: + return f'{self.base_cmd()} kubernetes cluster registry add {cluster_name}' + + def get_kubernetes_versions_json(self) -> str: + return f'{self.base_cmd()} kubernetes options versions --output json' + + def get_registry_name(self) -> str: + return f'{self.base_cmd()} registry get --format Name --no-header' + + +class DigitalOceanProvider(Provider): + def infra_type(self) -> str: + return 'DO' + + def infra_execution_order(self) -> List[PulumiProject]: + return [ + PulumiProject(path='infrastructure/digitalocean/container-registry', description='DO Container Registry'), + PulumiProject(path='infrastructure/digitalocean/domk8s', description='DO Kubernetes', + on_success=DigitalOceanProvider._after_k8s_stand_up), + ] + + def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ + Union[Dict[Hashable, Any], list, None]: + config = super().new_stack_config(env_config, defaults) + + if 'DIGITALOCEAN_TOKEN' not in env_config: + config['digitalocean:token'] = input("Digital Ocean API token (this is stored in plain-text - " + "alternatively this can be specified as the environment variable " + "DIGITALOCEAN_TOKEN): ") + + config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + + return config + + def validate_stack_config(self, + stack_config: Union[Dict[Hashable, Any], list, None], + env_config: Mapping[str, str]): + super().validate_stack_config(stack_config=stack_config, env_config=env_config) + token = DigitalOceanProvider.token(stack_config=stack_config, env_config=env_config) + do_cli = DoctlCli(access_token=token) + _, err = external_process.run(cmd=do_cli.validate_credentials_cmd()) + if err: + print(f'Digital Ocean authentication error: {err}', file=sys.stderr) + sys.exit(3) + + @staticmethod 
+ def _after_k8s_stand_up(stack_outputs: MutableMapping[str, auto._output.OutputValue], + config: MutableMapping[str, auto._config.ConfigValue], + env_config: Mapping[str, str]): + DigitalOceanProvider._update_kubeconfig(stack_outputs, config, env_config) + # DigitalOceanProvider._add_container_registry_support(stack_outputs, config, env_config) + + @staticmethod + def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue], + config: MutableMapping[str, auto._config.ConfigValue], + env_config: Mapping[str, str]): + if 'cluster_name' not in stack_outputs: + raise DigitalOceanProviderException('Cannot find key [cluster_name] in stack output') + + kubeconfig = yaml.safe_load(stack_outputs['kubeconfig'].value) + full_cluster_name = kubeconfig['clusters'][0]['name'] + + res, _ = external_process.run('kubectl config get-clusters') + clusters = filter(lambda cluster: cluster != 'NAME', res.splitlines()) + + if full_cluster_name in clusters: + print(f'Local kubectl configuration already has credentials for cluster {full_cluster_name}') + else: + print(f'Adding credentials for cluster {full_cluster_name} to local kubectl configuration') + cluster_name = stack_outputs['cluster_name'].value + token = DigitalOceanProvider.token(stack_config=config, env_config=env_config) + do_cli = DoctlCli(access_token=token) + + res, _ = external_process.run(do_cli.save_kubernetes_cluster_cmd(cluster_name)) + if res: + print(res) + + @staticmethod + def _add_container_registry_support(stack_outputs: MutableMapping[str, auto._output.OutputValue], + config: MutableMapping[str, auto._config.ConfigValue], + env_config: Mapping[str, str]): + if 'cluster_name' not in stack_outputs: + raise DigitalOceanProviderException('Cannot find key [cluster_name] in stack output') + + cluster_name = stack_outputs['cluster_name'].value + token = DigitalOceanProvider.token(stack_config=config, env_config=env_config) + do_cli = DoctlCli(access_token=token) + + res, _ = external_process.run(cmd='kubectl get secrets --output=name') + secrets = res.splitlines() + + res, _ = external_process.run(cmd=do_cli.get_registry_name()) + registry_name = res.strip() + + if f'secret/{registry_name}' in secrets: + print('Container registry secrets have already been added to Kubernetes cluster') + else: + print('Adding container registry support (via secrets) to Kubernetes cluster') + res, _ = external_process.run(do_cli.add_container_registry_support_to_kubernetes(cluster_name)) + if res: + print(res) + + @staticmethod + def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._config.ConfigValue]], + env_config: Mapping[str, str]) -> str: + # Token is in an environment variable or the environment variable file + if 'DIGITALOCEAN_TOKEN' in env_config: + return env_config['DIGITALOCEAN_TOKEN'] + + # We were given a reference to a StackConfigParser object + if 'config' in stack_config and 'digitalocean:token' in stack_config['config']: + return stack_config['config']['digitalocean:token'] + + # We were given a reference to a Pulumi Stack configuration + if 'digitalocean:token' in stack_config: + return stack_config['digitalocean:token'].value + + # Otherwise + msg = 'When using the Digital Ocean provider, an API token must be specified - ' \ + 'this token can be specified with the Pulumi config parameter digitalocean:token ' \ + 'or the environment variable DIGITALOCEAN_TOKEN' + raise InvalidConfigurationException(msg) + + +INSTANCE = DigitalOceanProvider() From 8e36a6cb728ba95b8ea3ec2a863c574ee7d33f97 Mon Sep 17 
00:00:00 2001
From: Elijah Zupancic
Date: Thu, 9 Jun 2022 16:32:52 -0700
Subject: [PATCH 07/62] feat: add support for Digital Ocean Container Registry

---
 .../container-registry/Pulumi.yaml            |  7 +++
 .../container-registry/__main__.py            | 48 +++++++++++++++
 .../digitalocean/domk8s/__main__.py           | 58 ++++++++++++++-----
 .../applications/sirius/Pulumi.yaml           |  2 +-
 .../applications/sirius/config/.gitignore     |  1 -
 .../config/Pulumi.stackname.yaml.example      | 29 ----------
 .../nginx/ingress-controller/__main__.py      | 12 ++--
 7 files changed, 106 insertions(+), 51 deletions(-)
 create mode 100644 pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml
 create mode 100644 pulumi/python/infrastructure/digitalocean/container-registry/__main__.py
 delete mode 100644 pulumi/python/kubernetes/applications/sirius/config/.gitignore
 delete mode 100644 pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example

diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml
new file mode 100644
index 0000000..9039d8e
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: container-registry
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new Digital Ocean Container Registry
diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py
new file mode 100644
index 0000000..cbef949
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py
@@ -0,0 +1,48 @@
+import os
+
+import pulumi
+import pulumi_digitalocean as docean
+
+from kic_util import external_process
+
+config = pulumi.Config('digitalocean')
+# valid values: starter, basic, professional
+subscription_tier = config.get('container_registry_subscription_tier')
+if not subscription_tier:
+    subscription_tier = 'basic'
+region = config.get('region')
+if not region:
+    region = 'sfo3'
+
+
+def token():
+    if config.get('token'):
+        return config.get('token')
+    if config.get_secret('token'):
+        return config.get_secret('token')
+    if 'DIGITALOCEAN_TOKEN' in os.environ:
+        return os.environ['DIGITALOCEAN_TOKEN']
+    raise RuntimeError('No valid token for Digital Ocean found')
+
+
+stack_name = pulumi.get_stack()
+
+# Digital Ocean allows only a single container registry per user, so we use doctl to check
+# whether a registry already exists and, if so, use it. An external command is needed because
+# Pulumi does not support checking whether a resource created outside of Pulumi already exists
+# and branching on the result.
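The adopt-or-create logic that follows relies on Pulumi's resource-adoption idiom: the classmethod Resource.get(resource_name, id=...) reads an already-existing cloud resource into the stack's state instead of creating a new one. A minimal sketch of that idiom, valid only inside a running Pulumi program and using an invented registry name:

import pulumi
import pulumi_digitalocean as docean

existing_name = 'example-registry'  # e.g. discovered out-of-band via doctl; empty if none exists
if existing_name:
    # Adopt: the registry enters Pulumi state, but Pulumi will not create or replace it
    registry = docean.ContainerRegistry.get(existing_name, id=existing_name)
else:
    registry = docean.ContainerRegistry('shared-global-container-registry',
                                        subscription_tier_slug='basic')
pulumi.export('registry_name', registry.name)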
+registry_name_query_cmd = f'doctl --access-token {token()} registry get --format Name --no-header --output text' +registry_name, err = external_process.run(cmd=registry_name_query_cmd, suppress_error=True) +registry_name = registry_name.strip() +if not err and registry_name and not registry_name.startswith('shared-global-container-registry-'): + pulumi.log.info(f'Using already existing global Digital Ocean container registry: {registry_name}') + container_registry = docean.ContainerRegistry.get(registry_name, id=registry_name) +else: + pulumi.log.info('Creating new global Digital Ocean container registry') + container_registry = docean.ContainerRegistry('shared-global-container-registry', + subscription_tier_slug=subscription_tier, + region=region) + +pulumi.export('container_registry_id', container_registry.id) +pulumi.export('container_registry_name', container_registry.name) +pulumi.export('container_registry', container_registry) diff --git a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py index 30521c2..d4ac9cb 100644 --- a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py @@ -1,6 +1,11 @@ +import os + import pulumi -import pulumi_digitalocean as docean +from pulumi import StackReference +from pulumi_digitalocean import KubernetesCluster, KubernetesClusterNodePoolArgs, ContainerRegistryDockerCredentials from kic_util import pulumi_config +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs # Configuration details for the K8 cluster config = pulumi.Config('domk8s') @@ -15,27 +20,52 @@ node_count = 3 k8s_version = config.get('k8s_version') if not k8s_version: - k8s_version = 'latest' + k8s_version = '1.22.8-do.1' stack_name = pulumi.get_stack() project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() + +def container_registry_project_name(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', 'container-registry') + return pulumi_config.get_pulumi_project_name(project_path) + + # Derive our names for the cluster and the pool -resource_name = "do-" + stack_name + "-cluster" -pool_name = "do-" + stack_name + "-pool" +resource_name = f'do-{stack_name}-cluster' +pool_name = f'do-{stack_name}-pool' # Create a digital ocean cluster -cluster = docean.KubernetesCluster(resource_name=resource_name, - region=region, - version=k8s_version, - node_pool=docean.KubernetesClusterNodePoolArgs( - name=pool_name, - size=instance_size, - node_count=node_count, - )) +cluster = KubernetesCluster(resource_name=resource_name, + region=region, + version=k8s_version, + node_pool=KubernetesClusterNodePoolArgs( + name=pool_name, + size=instance_size, + node_count=node_count, + )) + +# Insert Digital Ocean Container Registry Secrets into the cluster +kubeconfig = cluster.kube_configs[0].raw_config +container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name()}/{stack_name}" +stack_ref = StackReference(container_registry_stack_ref_id) +container_registry_output = stack_ref.require_output('container_registry') +registry_name_output = stack_ref.require_output('container_registry_name') + +registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_k8s_docker_credentials', + registry_name=registry_name_output, + write=False) +docker_credentials = registry_credentials.docker_credentials + +k8s_provider = 
k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig) +secret = Secret(resource_name='shared-global-container-registry', + args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials}, + type='kubernetes.io/dockerconfigjson'), + opts=pulumi.ResourceOptions(provider=k8s_provider)) # Export the clusters' kubeconfig -pulumi.export("cluster_name", resource_name) +pulumi.export("cluster_name", cluster.name) pulumi.export("cluster_id", cluster.id) -pulumi.export("kubeconfig", pulumi.Output.unsecret(cluster.kube_configs[0].raw_config)) +pulumi.export("kubeconfig", pulumi.Output.unsecret(kubeconfig)) diff --git a/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml b/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml index de0bf82..e192821 100644 --- a/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml +++ b/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml @@ -3,5 +3,5 @@ runtime: name: python options: virtualenv: ../../../venv -config: ./config +config: ../../../../../config/pulumi description: Creates the Bank of Sirius App diff --git a/pulumi/python/kubernetes/applications/sirius/config/.gitignore b/pulumi/python/kubernetes/applications/sirius/config/.gitignore deleted file mode 100644 index 2a61605..0000000 --- a/pulumi/python/kubernetes/applications/sirius/config/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.yaml \ No newline at end of file diff --git a/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example b/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example deleted file mode 100644 index b5a09be..0000000 --- a/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example +++ /dev/null @@ -1,29 +0,0 @@ -config: - # These parameters define the name of the database and the database credentials - # used by the Bank of Sirius ledger application. - # - # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius - # deployment if no password is provided. - # sirius:ledger_pwd: Password # Required - sirius:ledger_admin: admin - sirius:ledger_db: postgresdb - - # This optional parameter supplies a hostname for the Bank of Sirius Ingress - # controller. If not set, the FQDN of the LB is used. - #sirius:hostname: demo.example.com - - # These parameters define the name of the database and the database credentials - # used by the Bank of Sirius accounts application. - # - # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius - # deployment if no password is provided. 
- #sirius:accounts_pwd: Password # Required - sirius:accounts_admin: admin - sirius:accounts_db: postgresdb - - # Prometheus Configuration - sirius:chart_version: 2.3.5 - # Chart version for the Pulumi chart for prometheus - sirius:helm_repo_name: prometheus-community - # Name of the repo to pull the prometheus chart from - sirius:helm_repo_url: https://prometheus-community.github.io/helm-charts diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 8a05e07..66292b7 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -62,7 +62,7 @@ def find_image_tag(repository: dict) -> typing.Optional[str]: return None -def build_chart_values(repository: dict) -> helm.ChartOpts: +def build_chart_values(repo_push: dict) -> helm.ChartOpts: values: Dict[str, Dict[str, typing.Any]] = { 'controller': { 'healthStatus': True, @@ -117,12 +117,12 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: "opentracing": True } - image_tag = find_image_tag(repository) + image_tag = find_image_tag(repo_push) if not image_tag: pulumi.log.debug('No image_tag or image_tag_alias found') - if 'repository_url' in repository and image_tag: - repository_url = repository['repository_url'] + if 'repository_url' in repo_push and image_tag: + repository_url = repo_push['repository_url'] if 'image' not in values['controller']: values['controller']['image'] = {} @@ -156,7 +156,7 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: image_push_project_name = project_name_from_project_dir('kic-image-push') image_push_ref_id = f"{pulumi_user}/{image_push_project_name}/{stack_name}" image_push_ref = pulumi.StackReference(image_push_ref_id) -ecr_repository = image_push_ref.get_output('ecr_repository') +container_repo_push = image_push_ref.get_output('container_repo_push') k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) @@ -168,7 +168,7 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: }, opts=pulumi.ResourceOptions(provider=k8s_provider)) -chart_values = ecr_repository.apply(build_chart_values) +chart_values = container_repo_push.apply(build_chart_values) kic_release_args = ReleaseArgs( chart=chart_name, From 30e9937982ea611222123f19a9bd39655fde2b6b Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 12:56:36 -0700 Subject: [PATCH 08/62] chore: upgrade default helm chart version --- config/pulumi/Pulumi.stackname.yaml.example | 2 +- pulumi/python/kubernetes/nginx/ingress-controller/__main__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index af9d271..a1de8ff 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -127,7 +127,7 @@ config: # Chart name for the helm chart for kic kic-helm:chart_name: nginx-ingress # Chart version for the helm chart for kic - kic-helm:chart_version: 0.13.1 + kic-helm:chart_version: 0.13.2 # Name of the repo to pull the kic chart from kic-helm:helm_repo_name: nginx-stable # URL of the chart repo to pull kic from diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 66292b7..1ad7cce 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ 
b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -17,7 +17,7 @@ chart_name = 'nginx-ingress' chart_version = config.get('chart_version') if not chart_version: - chart_version = '0.13.0' + chart_version = '0.13.2' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'nginx-stable' From 25fff6b20f52603d809525c8b1a94f4b484391ce Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 12:56:52 -0700 Subject: [PATCH 09/62] chore: upgrade example version for ingress image name --- config/pulumi/Pulumi.stackname.yaml.example | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index a1de8ff..3b22f16 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -151,12 +151,12 @@ config: # https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/ # # The following are all valid image names: - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.0 - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.0-ot - # kic:image_name: docker.io/nginx/nginx-ingress:2.2.0 - # kic:image_name: nginx/nginx-ingress:2.2.0 - # kic:image_name: nginx/nginx-ingress:2.2.0-alpine - kic:image_name: nginx/nginx-ingress:2.2.0 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2-ot + # kic:image_name: docker.io/nginx/nginx-ingress:2.2.2 + # kic:image_name: nginx/nginx-ingress:2.2.2 + # kic:image_name: nginx/nginx-ingress:2.2.2-alpine + kic:image_name: nginx/nginx-ingress:2.2.2 ############################################################################ From a9c996a5cbbd82828a00206184555ae84edeb832 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:00:55 -0700 Subject: [PATCH 10/62] refactor: break headers into separate file --- pulumi/python/automation/headers.py | 15 +++++++++++++++ pulumi/python/automation/main.py | 24 +++++++----------------- 2 files changed, 22 insertions(+), 17 deletions(-) create mode 100644 pulumi/python/automation/headers.py diff --git a/pulumi/python/automation/headers.py b/pulumi/python/automation/headers.py new file mode 100644 index 0000000..bee3733 --- /dev/null +++ b/pulumi/python/automation/headers.py @@ -0,0 +1,15 @@ +import colorize +import env_config_parser +from fart import fart + +FART_FONT = fart.load_font('standard') +banner_type = 'fabulous' + + +def render_header(text: str, env_config: env_config_parser.EnvConfig): + if banner_type == 'fabulous': + header = fart.render_fart(text=text, font=FART_FONT) + if not env_config.no_color(): + colorize.PRINTLN_FUNC(header) + else: + print(f'* {text}') diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index eb59e9d..a933c18 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -11,11 +11,11 @@ import yaml -import colorize import env_config_parser +import headers from typing import List, Optional from getpass import getpass -from fart import fart + from providers.base_provider import Provider from providers.pulumi_project import PulumiProject from pulumi import automation as auto @@ -28,7 +28,6 @@ OPERATIONS: List[str] = ['down', 'destroy', 'refresh', 'show-execution', 'up', 'validate', 'list-providers'] PROVIDERS: typing.Iterable[str] = 
Provider.list_providers() BANNER_TYPES: List[str] = ['fabulous', 'boring'] -FART_FONT = fart.load_font('standard') banner_type = BANNER_TYPES[0] debug_on = False @@ -74,7 +73,7 @@ def main(): provider_name: Optional[str] = None - global banner_type, debug_on + global debug_on # Parse flags for opt, value in opts: @@ -88,7 +87,7 @@ def main(): debug_on = True elif opt in ('-b', '--banner-type'): if value in BANNER_TYPES: - banner_type = value + headers.banner_type = value # Make sure we got an operation - it is the last string passed as an argument if len(sys.argv) > 1: @@ -182,15 +181,6 @@ def read_or_prompt_for_stack_config(provider: Provider, return stack_config -def render_header(text: str, env_config: env_config_parser.EnvConfig): - if banner_type == 'fabulous': - header = fart.render_fart(text=text, font=FART_FONT) - if not env_config.no_color(): - colorize.PRINTLN_FUNC(header) - else: - print(f'* {text}') - - def validate(provider: Provider, env_config: env_config_parser.EnvConfig, stack_config: stack_config_parser.PulumiStackConfig, @@ -290,7 +280,7 @@ def build_pulumi_stack(pulumi_project: PulumiProject, def refresh(provider: Provider, env_config: env_config_parser.EnvConfig): for pulumi_project in provider.execution_order(): - render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack.refresh_config() @@ -301,7 +291,7 @@ def refresh(provider: Provider, def up(provider: Provider, env_config: env_config_parser.EnvConfig): for pulumi_project in provider.execution_order(): - render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stackUpResult = stack.up(color=pulumi_color_settings(env_config), @@ -314,7 +304,7 @@ def up(provider: Provider, def down(provider: Provider, env_config: env_config_parser.EnvConfig): for pulumi_project in reversed(provider.execution_order()): - render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stackDownResult = stack.destroy(color=pulumi_color_settings(env_config), From 5410bc77e8d1cf3f37482c1a0ae7cfd30124b8e3 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:07:40 -0700 Subject: [PATCH 11/62] refactor: normalize PulumiProject path property naming The naming of the property "root_path" conflicted with the initialization parameter "path" AS WELL AS the method "path()". This change normalizes the property names such that they do not ambiguously overlap. 
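For illustration, the resulting shape of the class is roughly the
following (a sketch based on the renamed members shown in the diff
below, not the full implementation):

    class PulumiProject:
        def __init__(self, path: str, description: str) -> None:
            # Stored relative path; formerly the 'root_path' property
            self.path = path
            self.description = description

        def abspath(self) -> str:
            # Absolute path resolution; formerly the ambiguous 'path()' method
            relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.path])
            return os.path.abspath(relative_path)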
--- pulumi/python/automation/main.py | 4 ++-- pulumi/python/automation/providers/base_provider.py | 4 ++-- pulumi/python/automation/providers/pulumi_project.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index a933c18..d0f3c84 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -267,13 +267,13 @@ def init_secrets(env_config: env_config_parser.EnvConfig, def build_pulumi_stack(pulumi_project: PulumiProject, env_config: env_config_parser.EnvConfig) -> auto.Stack: - print(f'project: {pulumi_project.name()} path: {pulumi_project.path()}') + print(f'project: {pulumi_project.name()} path: {pulumi_project.abspath()}') stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( env_vars=env_config, ), project_name=pulumi_project.name(), - work_dir=pulumi_project.path()) + work_dir=pulumi_project.abspath()) return stack diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py index 544623c..b5caa03 100644 --- a/pulumi/python/automation/providers/base_provider.py +++ b/pulumi/python/automation/providers/base_provider.py @@ -91,7 +91,7 @@ def display_execution_order(self, output: TextIO = sys.stdout): last_prefix = '' for index, pulumi_project in enumerate(execution_order): - path_parts = pulumi_project.root_path.split(os.path.sep) + path_parts = pulumi_project.path.split(os.path.sep) project = f'{path_parts[-1]} [{pulumi_project.description}]' prefix = os.path.sep.join(path_parts[:-1]) @@ -108,7 +108,7 @@ def display_execution_order(self, output: TextIO = sys.stdout): print(f' ├── {prefix}', file=output) peek = execution_order[index + 1] - splitted = peek.root_path.split(f'{prefix}{os.path.sep}')[0] + splitted = peek.path.split(f'{prefix}{os.path.sep}')[0] # item is not the last item with the prefix if os.path.sep not in splitted: print(f' │ ├── {project}', file=output) diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py index 5911a28..24a6a80 100644 --- a/pulumi/python/automation/providers/pulumi_project.py +++ b/pulumi/python/automation/providers/pulumi_project.py @@ -43,18 +43,18 @@ def __init__(self, config_keys_with_secrets: Optional[List[SecretConfigKey]] = None, on_success: Optional[Callable] = None) -> None: super().__init__() - self.root_path = path + self.path = path self.description = description self.config_keys_with_secrets = config_keys_with_secrets or [] self.on_success = on_success - def path(self) -> str: - relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.root_path]) + def abspath(self) -> str: + relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.path]) return os.path.abspath(relative_path) def config(self) -> Mapping[str, str]: if not self._config_data: - config_path = os.path.sep.join([self.path(), 'Pulumi.yaml']) + config_path = os.path.sep.join([self.abspath(), 'Pulumi.yaml']) with open(config_path, 'r') as f: self._config_data = yaml.safe_load(f) From 4ce3b4ef5fbd0c5cb8683d295995ee444335481d Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:12:37 -0700 Subject: [PATCH 12/62] refactor: on_success closure params to single class We want to be able to easily add new references to the state that can be processed for on_success events. 
As such, the three variables passed to those closures have been
refactored into a single type which references the original three
variables. This will make adding new variables easier.
---
 pulumi/python/automation/main.py              |  7 +++++--
 pulumi/python/automation/providers/aws.py     | 16 +++++++---------
 pulumi/python/automation/providers/do.py      | 13 ++++++-------
 .../automation/providers/pulumi_project.py    | 16 +++++++++++++++-
 4 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
index d0f3c84..b5d93fd 100755
--- a/pulumi/python/automation/main.py
+++ b/pulumi/python/automation/main.py
@@ -17,7 +17,7 @@ from getpass import getpass
 
 from providers.base_provider import Provider
-from providers.pulumi_project import PulumiProject
+from providers.pulumi_project import PulumiProject, PulumiProjectEventParams
 from pulumi import automation as auto
 from typing import Any, Hashable, Dict, Union
 
@@ -298,7 +298,10 @@ def up(provider: Provider,
                            on_output=print)
 
         if pulumi_project.on_success:
-            pulumi_project.on_success(stackUpResult.outputs, stack.get_all_config(), env_config)
+            params = PulumiProjectEventParams(stack_outputs=stack_up_result.outputs,
+                                              config=stack.get_all_config(),
+                                              env_config=env_config)
+            pulumi_project.on_success(params)
 
 
 def down(provider: Provider,
diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py
index 80a1593..f341406 100644
--- a/pulumi/python/automation/providers/aws.py
+++ b/pulumi/python/automation/providers/aws.py
@@ -3,11 +3,10 @@
 import sys
 
 from kic_util import external_process
-from typing import List, Optional, MutableMapping, Union, Hashable, Dict, Any, Mapping
-
-from pulumi import automation as auto
+from typing import List, Optional, Union, Hashable, Dict, Any, Mapping
 
 from .base_provider import PulumiProject, Provider, InvalidConfigurationException
+from .pulumi_project import PulumiProjectEventParams
 
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
 
@@ -173,14 +172,13 @@ def validate_stack_config(self,
             sys.exit(3)
 
     @staticmethod
-    def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue],
-                           config: MutableMapping[str, auto._config.ConfigValue],
-                           _env_config: Mapping[str, str]):
-        if 'cluster_name' not in stack_outputs:
+    def _update_kubeconfig(params: PulumiProjectEventParams):
+        if 'cluster_name' not in params.stack_outputs:
             raise AwsProviderException('Cannot find key [cluster_name] in stack output')
 
-        aws_cli = AwsCli(region=config.get('aws:region').value, profile=config.get('aws:profile').value)
-        cluster_name = stack_outputs['cluster_name'].value
+        aws_cli = AwsCli(region=params.config.get('aws:region').value,
+                         profile=params.config.get('aws:profile').value)
+        cluster_name = params.stack_outputs['cluster_name'].value
         cmd = aws_cli.update_kubeconfig_cmd(cluster_name)
         res, err = external_process.run(cmd)
         print(res)
diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py
index baf5e58..4c2254f 100644
--- a/pulumi/python/automation/providers/do.py
+++ b/pulumi/python/automation/providers/do.py
@@ -7,6 +7,7 @@
 from kic_util import external_process
 
 from .base_provider import PulumiProject, Provider, InvalidConfigurationException
+from .pulumi_project import PulumiProjectEventParams
 
 
 class DigitalOceanProviderException(Exception):
@@ -85,13 +86,11 @@ def _after_k8s_stand_up(stack_outputs: MutableMapping[str, auto._output.OutputVa
         # 
DigitalOceanProvider._add_container_registry_support(stack_outputs, config, env_config)
 
     @staticmethod
-    def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputValue],
-                           config: MutableMapping[str, auto._config.ConfigValue],
-                           env_config: Mapping[str, str]):
-        if 'cluster_name' not in stack_outputs:
+    def _update_kubeconfig(params: PulumiProjectEventParams):
+        if 'cluster_name' not in params.stack_outputs:
             raise DigitalOceanProviderException('Cannot find key [cluster_name] in stack output')
 
-        kubeconfig = yaml.safe_load(stack_outputs['kubeconfig'].value)
+        kubeconfig = yaml.safe_load(params.stack_outputs['kubeconfig'].value)
         full_cluster_name = kubeconfig['clusters'][0]['name']
 
         res, _ = external_process.run('kubectl config get-clusters')
@@ -101,8 +100,8 @@ def _update_kubeconfig(stack_outputs: MutableMapping[str, auto._output.OutputVal
             print(f'Local kubectl configuration already has credentials for cluster {full_cluster_name}')
         else:
             print(f'Adding credentials for cluster {full_cluster_name} to local kubectl configuration')
-            cluster_name = stack_outputs['cluster_name'].value
-            token = DigitalOceanProvider.token(stack_config=config, env_config=env_config)
+            cluster_name = params.stack_outputs['cluster_name'].value
+            token = DigitalOceanProvider.token(stack_config=params.config, env_config=params.env_config)
             do_cli = DoctlCli(access_token=token)
 
             res, _ = external_process.run(do_cli.save_kubernetes_cluster_cmd(cluster_name))
diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py
index 24a6a80..a68fd5a 100644
--- a/pulumi/python/automation/providers/pulumi_project.py
+++ b/pulumi/python/automation/providers/pulumi_project.py
@@ -66,4 +66,18 @@ def name(self) -> str:
         if 'name' not in config_data.keys():
             raise PulumiConfigException('Pulumi configuration did not contain required "name" key')
 
-        return config_data['name']
\ No newline at end of file
+        return config_data['name']
+
+
+class PulumiProjectEventParams:
+    stack_outputs: MutableMapping[str, auto._output.OutputValue]
+    config: MutableMapping[str, auto._config.ConfigValue]
+    env_config: Mapping[str, str]
+
+    def __init__(self,
+                 stack_outputs: MutableMapping[str, auto._output.OutputValue],
+                 config: MutableMapping[str, auto._config.ConfigValue],
+                 env_config: Mapping[str, str]) -> None:
+        self.stack_outputs = stack_outputs
+        self.config = config
+        self.env_config = env_config

From 13fc758d187f759fe916644c29f2ed60097d291c Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 16 Jun 2022 13:19:12 -0700
Subject: [PATCH 13/62] refactor: separate namespace creation from ingress
 controller

When using container registry credentials with the NGINX Ingress
Controller, the credential secrets for the registry must be created in
the same namespace as the ingress controller. Breaking namespace
creation out into a separate step allows us to layer in additional
logic (such as adding credentials) after the namespace has been created
and before the ingress controller has been deployed.
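A minimal sketch of the resulting Kubernetes execution order (project
paths taken from the diff below; the comment marks the gap this change
opens up for provider-specific steps):

    k8s_execution_order = [
        # ... earlier projects ...
        PulumiProject(path='kubernetes/nginx/ingress-controller-namespace',
                      description='K8S Ingress NS'),
        # provider-specific projects (e.g. registry credentials) slot in here
        PulumiProject(path='kubernetes/nginx/ingress-controller',
                      description='Ingress Controller'),
        # ... later projects ...
    ]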
--- .../automation/providers/base_provider.py | 2 + .../ingress-controller-namespace/Pulumi.yaml | 7 ++++ .../ingress-controller-namespace/__main__.py | 38 +++++++++++++++++ .../nginx/ingress-controller/__main__.py | 42 ++++++++++++------- 4 files changed, 74 insertions(+), 15 deletions(-) create mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml create mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py index b5caa03..89c7ca1 100644 --- a/pulumi/python/automation/providers/base_provider.py +++ b/pulumi/python/automation/providers/base_provider.py @@ -62,6 +62,8 @@ def k8s_execution_order(self) -> List[PulumiProject]: PulumiProject(path='kubernetes/secrets', description='Secrets'), PulumiProject(path='utility/kic-image-build', description='KIC Image Build'), PulumiProject(path='utility/kic-image-push', description='KIC Image Push'), + PulumiProject(path='kubernetes/nginx/ingress-controller-namespace', + description='K8S Ingress NS'), PulumiProject(path='kubernetes/nginx/ingress-controller', description='Ingress Controller'), PulumiProject(path='kubernetes/logstore', description='Logstore'), PulumiProject(path='kubernetes/logagent', description='Log Agent'), diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml new file mode 100644 index 0000000..e3d81e2 --- /dev/null +++ b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml @@ -0,0 +1,7 @@ +name: ingress-controller-namespace +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Creates the NGINX Kubernetes Ingress Controller Namespace diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py new file mode 100644 index 0000000..388cb7c --- /dev/null +++ b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py @@ -0,0 +1,38 @@ +import os + +import pulumi +import pulumi_kubernetes as k8s + +from kic_util import pulumi_config + + +def infrastructure_project_name_from_project_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() + +k8_project_name = infrastructure_project_name_from_project_dir('kubeconfig') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) +cluster_name = k8_stack_ref.require_output('cluster_name').apply(lambda c: str(c)) + +k8s_provider = k8s.Provider(resource_name=f'ingress-controller', + kubeconfig=kubeconfig) + +namespace_name = 'nginx-ingress' + +ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', + metadata={'name': namespace_name, + 'labels': { + 'prometheus': 'scrape'} + }, + opts=pulumi.ResourceOptions(provider=k8s_provider)) + +pulumi.export('ingress_namespace', ns) +pulumi.export('ingress_namespace_name', namespace_name) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py 
b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 1ad7cce..d6ec6b9 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -24,6 +24,9 @@ helm_repo_url = config.get('helm_repo_url') if not helm_repo_url: helm_repo_url = 'https://helm.nginx.com/stable' + +pulumi.log.info(f'NGINX Ingress Controller will be deployed with the Helm Chart [{chart_name}@{chart_version}]') + # # Allow the user to set timeout per helm chart; otherwise # we default to 5 minutes. @@ -33,18 +36,21 @@ helm_timeout = 300 -def aws_project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) +def infrastructure_project_name_from_project_dir(dirname: str): project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) return pulumi_config.get_pulumi_project_name(project_path) -def project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) +def project_name_from_utility_dir(dirname: str): project_path = os.path.join(script_dir, '..', '..', '..', 'utility', dirname) return pulumi_config.get_pulumi_project_name(project_path) +def project_name_from_same_parent(directory: str): + project_path = os.path.join(script_dir, '..', directory) + return pulumi_config.get_pulumi_project_name(project_path) + + def find_image_tag(repository: dict) -> typing.Optional[str]: """ Inspect the repository dictionary as returned from a stack reference for a valid image_tag_alias or image_tag. @@ -128,7 +134,7 @@ def build_chart_values(repo_push: dict) -> helm.ChartOpts: values['controller']['image'] = {} if repository_url and image_tag: - pulumi.log.info(f"Using ingress controller image: {repository_url}:{image_tag}") + pulumi.log.info(f"Using Ingress Controller image: {repository_url}:{image_tag}") values['controller']['image'].update({ 'repository': repository_url, 'tag': image_tag @@ -147,26 +153,32 @@ def build_chart_values(repo_push: dict) -> helm.ChartOpts: project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() -k8_project_name = aws_project_name_from_project_dir('kubeconfig') +k8_project_name = infrastructure_project_name_from_project_dir('kubeconfig') k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" -k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +k8_stack_ref = StackReference(k8_stack_ref_id) kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) cluster_name = k8_stack_ref.require_output('cluster_name').apply(lambda c: str(c)) -image_push_project_name = project_name_from_project_dir('kic-image-push') +namespace_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('ingress-controller-namespace')}/{stack_name}" +ns_stack_ref = StackReference(namespace_stack_ref_id) +ns_name_output = ns_stack_ref.require_output('ingress_namespace_name') + +image_push_project_name = project_name_from_utility_dir('kic-image-push') image_push_ref_id = f"{pulumi_user}/{image_push_project_name}/{stack_name}" -image_push_ref = pulumi.StackReference(image_push_ref_id) +image_push_ref = StackReference(image_push_ref_id) container_repo_push = image_push_ref.get_output('container_repo_push') k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) -ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', - metadata={'name': 'nginx-ingress', - 'labels': { - 'prometheus': 'scrape'} - }, - opts=pulumi.ResourceOptions(provider=k8s_provider)) + 
+def namespace_by_name(name):
+    return k8s.core.v1.Namespace.get(resource_name=name,
+                                     id=name,
+                                     opts=pulumi.ResourceOptions(provider=k8s_provider))
+
+
+ns = ns_name_output.apply(namespace_by_name)
 
 chart_values = container_repo_push.apply(build_chart_values)
 

From c29e3866cb79472699c9f2b4ef40197d5b6d4cf2 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 16 Jun 2022 13:31:14 -0700
Subject: [PATCH 14/62] refactor: add DO Registry credentials to k8s secrets
 via a project

This change adds a new Pulumi project that gets the authentication
credentials for a Digital Ocean Container Registry, encodes them as a
Kubernetes secret, and then stores the secret in the running cluster's
nginx-ingress namespace.
---
 pulumi/python/automation/providers/do.py      | 68 ++++++++-----------
 .../automation/providers/pulumi_project.py    |  4 +-
 .../Pulumi.yaml                               |  7 ++
 .../__main__.py                               | 57 ++++++++++++++++
 .../digitalocean/domk8s/__main__.py           | 23 +------
 .../nginx/ingress-controller/__main__.py      | 10 ++-
 6 files changed, 107 insertions(+), 62 deletions(-)
 create mode 100644 pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml
 create mode 100644 pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py

diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py
index 4c2254f..5690b31 100644
--- a/pulumi/python/automation/providers/do.py
+++ b/pulumi/python/automation/providers/do.py
@@ -33,15 +33,9 @@ def validate_credentials_cmd(self) -> str:
     def save_kubernetes_cluster_cmd(self, cluster_name: str) -> str:
         return f'{self.base_cmd()} kubernetes cluster config save {cluster_name}'
 
-    def add_container_registry_support_to_kubernetes(self, cluster_name: str) -> str:
-        return f'{self.base_cmd()} kubernetes cluster registry add {cluster_name}'
-
     def get_kubernetes_versions_json(self) -> str:
         return f'{self.base_cmd()} kubernetes options versions --output json'
 
-    def get_registry_name(self) -> str:
-        return f'{self.base_cmd()} registry get --format Name --no-header'
-
 
 class DigitalOceanProvider(Provider):
     def infra_type(self) -> str:
@@ -51,9 +45,37 @@ def infra_execution_order(self) -> List[PulumiProject]:
         return [
             PulumiProject(path='infrastructure/digitalocean/container-registry', description='DO Container Registry'),
             PulumiProject(path='infrastructure/digitalocean/domk8s', description='DO Kubernetes',
-                          on_success=DigitalOceanProvider._after_k8s_stand_up),
+                          on_success=DigitalOceanProvider._update_kubeconfig),
         ]
 
+    def k8s_execution_order(self) -> List[PulumiProject]:
+        # The default Kubernetes Pulumi project instantiation order must be modified because
+        # the Digital Ocean Container Registry login credentials *must* be added under the
+        # Ingress Controller's namespace. As such, we insert a Digital Ocean-specific
+        # Pulumi project that gets the credentials and adds them to the Kubernetes cluster
+        # under the appropriate namespace. 
+ original_order = super().k8s_execution_order() + + def find_position_of_project_by_path(path: str) -> int: + for index, project in enumerate(original_order): + if project.path == path: + return index + return -1 + + namespace_project_path = 'kubernetes/nginx/ingress-controller-namespace' + namespace_project_position = find_position_of_project_by_path(namespace_project_path) + + if namespace_project_position < 0: + raise ValueError('Could not find project that creates the nginx-ingress namespace at ' + f'path {namespace_project_path}') + + add_credentials_project = PulumiProject(path='infrastructure/digitalocean/add-container-registry-credentials', + description='Registry Credentials') + new_order = original_order.copy() + new_order.insert(namespace_project_position + 1, add_credentials_project) + + return new_order + def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ Union[Dict[Hashable, Any], list, None]: config = super().new_stack_config(env_config, defaults) @@ -78,13 +100,6 @@ def validate_stack_config(self, print(f'Digital Ocean authentication error: {err}', file=sys.stderr) sys.exit(3) - @staticmethod - def _after_k8s_stand_up(stack_outputs: MutableMapping[str, auto._output.OutputValue], - config: MutableMapping[str, auto._config.ConfigValue], - env_config: Mapping[str, str]): - DigitalOceanProvider._update_kubeconfig(stack_outputs, config, env_config) - # DigitalOceanProvider._add_container_registry_support(stack_outputs, config, env_config) - @staticmethod def _update_kubeconfig(params: PulumiProjectEventParams): if 'cluster_name' not in params.stack_outputs: @@ -108,31 +123,6 @@ def _update_kubeconfig(params: PulumiProjectEventParams): if res: print(res) - @staticmethod - def _add_container_registry_support(stack_outputs: MutableMapping[str, auto._output.OutputValue], - config: MutableMapping[str, auto._config.ConfigValue], - env_config: Mapping[str, str]): - if 'cluster_name' not in stack_outputs: - raise DigitalOceanProviderException('Cannot find key [cluster_name] in stack output') - - cluster_name = stack_outputs['cluster_name'].value - token = DigitalOceanProvider.token(stack_config=config, env_config=env_config) - do_cli = DoctlCli(access_token=token) - - res, _ = external_process.run(cmd='kubectl get secrets --output=name') - secrets = res.splitlines() - - res, _ = external_process.run(cmd=do_cli.get_registry_name()) - registry_name = res.strip() - - if f'secret/{registry_name}' in secrets: - print('Container registry secrets have already been added to Kubernetes cluster') - else: - print('Adding container registry support (via secrets) to Kubernetes cluster') - res, _ = external_process.run(do_cli.add_container_registry_support_to_kubernetes(cluster_name)) - if res: - print(res) - @staticmethod def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._config.ConfigValue]], env_config: Mapping[str, str]) -> str: diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py index a68fd5a..7b00ae1 100644 --- a/pulumi/python/automation/providers/pulumi_project.py +++ b/pulumi/python/automation/providers/pulumi_project.py @@ -1,6 +1,8 @@ import os.path -from typing import Optional, Callable, Mapping, List +from typing import Optional, Callable, Mapping, List, MutableMapping import yaml +from pulumi import automation as auto + SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git 
a/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml new file mode 100644 index 0000000..e0350ab --- /dev/null +++ b/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml @@ -0,0 +1,7 @@ +name: add-container-registry-credentials +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Adds container registry login credentials to the k8s cluster diff --git a/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py b/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py new file mode 100644 index 0000000..d94686f --- /dev/null +++ b/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py @@ -0,0 +1,57 @@ +import os + +import pulumi +from pulumi import StackReference +from pulumi_digitalocean import ContainerRegistryDockerCredentials +from kic_util import pulumi_config +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() +script_dir = os.path.dirname(os.path.abspath(__file__)) + + +def project_name_from_same_parent(directory: str): + project_path = os.path.join(script_dir, '..', directory) + return pulumi_config.get_pulumi_project_name(project_path) + + +def project_name_of_namespace_project(): + project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller-namespace') + return pulumi_config.get_pulumi_project_name(project_path) + + +k8_project_name = project_name_from_same_parent('domk8s') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) + +container_registry_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('container-registry')}/{stack_name}" +cr_stack_ref = StackReference(container_registry_stack_ref_id) +container_registry_output = cr_stack_ref.require_output('container_registry') +registry_name_output = cr_stack_ref.require_output('container_registry_name') + +namespace_stack_ref_id = f"{pulumi_user}/{project_name_of_namespace_project()}/{stack_name}" +ns_stack_ref = StackReference(namespace_stack_ref_id) +namespace_name_output = ns_stack_ref.require_output('ingress_namespace_name') + +fifty_years_in_seconds = 1_576_800_000 +registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_k8s_docker_credentials', + expiry_seconds=fifty_years_in_seconds, + registry_name=registry_name_output, + write=False) +docker_credentials = registry_credentials.docker_credentials + +k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig) + +secret = Secret(resource_name='ingress-controller-registry-secret', + args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials}, + type='kubernetes.io/dockerconfigjson', + metadata={'namespace': namespace_name_output, + 'name': 'ingress-controller-registry'}), + opts=pulumi.ResourceOptions(provider=k8s_provider)) + +pulumi.export('ingress-controller-registry-secret', secret) diff --git a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py index d4ac9cb..6da2fd5 100644 
--- a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py @@ -1,12 +1,9 @@ import os import pulumi -from pulumi import StackReference -from pulumi_digitalocean import KubernetesCluster, KubernetesClusterNodePoolArgs, ContainerRegistryDockerCredentials -from kic_util import pulumi_config -import pulumi_kubernetes as k8s -from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs +from pulumi_digitalocean import KubernetesCluster, KubernetesClusterNodePoolArgs +from kic_util import pulumi_config # Configuration details for the K8 cluster config = pulumi.Config('domk8s') instance_size = config.get('instance_size') @@ -47,23 +44,7 @@ def container_registry_project_name(): node_count=node_count, )) -# Insert Digital Ocean Container Registry Secrets into the cluster kubeconfig = cluster.kube_configs[0].raw_config -container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name()}/{stack_name}" -stack_ref = StackReference(container_registry_stack_ref_id) -container_registry_output = stack_ref.require_output('container_registry') -registry_name_output = stack_ref.require_output('container_registry_name') - -registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_k8s_docker_credentials', - registry_name=registry_name_output, - write=False) -docker_credentials = registry_credentials.docker_credentials - -k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig) -secret = Secret(resource_name='shared-global-container-registry', - args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials}, - type='kubernetes.io/dockerconfigjson'), - opts=pulumi.ResourceOptions(provider=k8s_provider)) # Export the clusters' kubeconfig pulumi.export("cluster_name", cluster.name) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index d6ec6b9..bfae293 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -3,7 +3,7 @@ from typing import Dict import pulumi -from pulumi import Output +from pulumi import Output, StackReference import pulumi_kubernetes as k8s from pulumi_kubernetes.core.v1 import Service import pulumi_kubernetes.helm.v3 as helm @@ -11,6 +11,8 @@ from kic_util import pulumi_config +script_dir = os.path.dirname(os.path.abspath(__file__)) + config = pulumi.Config('kic-helm') chart_name = config.get('chart_name') if not chart_name: @@ -84,6 +86,12 @@ def build_chart_values(repo_push: dict) -> helm.ChartOpts: '$upstream_bytes_sent $upstream_response_time $upstream_status $request_id ' } }, + 'serviceAccount': { + # This references the name of the secret used to pull the ingress container image + # from a remote repository. When using EKS on AWS, authentication to ECR happens + # via a different mechanism, so this value is ignored. 
+ 'imagePullSecretName': 'ingress-controller-registry', + }, 'service': { 'annotations': { 'co.elastic.logs/module': 'nginx' From 4539e0ad50dc9fa5b5073aebd8ab24c90ced5a0f Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:38:31 -0700 Subject: [PATCH 15/62] refactor: make pulumi color settings a method on EnvConfig --- pulumi/python/automation/env_config_parser.py | 7 +++++++ pulumi/python/automation/main.py | 17 +++++------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py index a2ec327..f9fe9b5 100644 --- a/pulumi/python/automation/env_config_parser.py +++ b/pulumi/python/automation/env_config_parser.py @@ -34,6 +34,13 @@ def stack_name(self) -> str: def no_color(self) -> bool: return self.get('NO_COLOR') is not None + def pulumi_color_settings(self): + if self.no_color(): + return 'never' + else: + return 'auto' + + def read(config_file_path: str = DEFAULT_PATH) -> EnvConfig: config_parser = ConfigParser() config_parser.optionxform = lambda option: option diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index b5d93fd..915e1a6 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -284,7 +284,7 @@ def refresh(provider: Provider, stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack.refresh_config() - stack.refresh(color=pulumi_color_settings(env_config), + stack.refresh(color=env_config.pulumi_color_settings(), on_output=print) @@ -294,8 +294,8 @@ def up(provider: Provider, headers.render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) - stackUpResult = stack.up(color=pulumi_color_settings(env_config), - on_output=print) + stack_up_result = stack.up(color=env_config.pulumi_color_settings(), + on_output=print) if pulumi_project.on_success: params = PulumiProjectEventParams(stack_outputs=stack_up_result.outputs, @@ -310,15 +310,8 @@ def down(provider: Provider, headers.render_header(text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) - stackDownResult = stack.destroy(color=pulumi_color_settings(env_config), - on_output=print) - - -def pulumi_color_settings(env_config: env_config_parser.EnvConfig): - if env_config.no_color(): - return 'never' - else: - return 'auto' + stack_down_result = stack.destroy(color=env_config.pulumi_color_settings(), + on_output=print) if __name__ == "__main__": From bad95f96dfa4ed034a19e5138cf7496510c710e2 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:41:09 -0700 Subject: [PATCH 16/62] refactor: add container registry implementation name method --- pulumi/python/utility/kic-image-push/registries/aws.py | 3 +++ .../utility/kic-image-push/registries/base_registry.py | 3 +++ pulumi/python/utility/kic-image-push/registries/do.py | 5 ++++- 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pulumi/python/utility/kic-image-push/registries/aws.py b/pulumi/python/utility/kic-image-push/registries/aws.py index 169295b..d2fa3a9 100644 --- a/pulumi/python/utility/kic-image-push/registries/aws.py +++ b/pulumi/python/utility/kic-image-push/registries/aws.py @@ -39,6 +39,9 @@ def get_ecr_credentials(registry_id: str) -> RegistryCredentials: token = credentials.authorization_token return ContainerRegistry.decode_credentials(token) + def 
registry_implementation_name(self) -> str:
+        return 'AWS Elastic Container Registry (ECR)'
+
     def _ecr_docker_api_url(self, ) -> str:
         registry_url_parts = self.registry_url.split('/')
         ecr_host = registry_url_parts[0]
diff --git a/pulumi/python/utility/kic-image-push/registries/base_registry.py b/pulumi/python/utility/kic-image-push/registries/base_registry.py
index 79d7a9c..ba77252 100644
--- a/pulumi/python/utility/kic-image-push/registries/base_registry.py
+++ b/pulumi/python/utility/kic-image-push/registries/base_registry.py
@@ -65,6 +65,9 @@ def logout_of_registry(self):
     def check_if_id_matches_tag(self, image_tag: str, new_image_id: str) -> bool:
         return False
 
+    def registry_implementation_name(self) -> str:
+        raise NotImplementedError()
+
     @classmethod
     def instance(cls, stack_name: str, pulumi_user: str):
         pass
diff --git a/pulumi/python/utility/kic-image-push/registries/do.py b/pulumi/python/utility/kic-image-push/registries/do.py
index 2dbf62a..c7f4fdb 100644
--- a/pulumi/python/utility/kic-image-push/registries/do.py
+++ b/pulumi/python/utility/kic-image-push/registries/do.py
@@ -41,7 +41,10 @@ def _make_instance(params: List[Any]) -> DigitalOceanContainerRegistry:
                        registry_url=registry_url, credentials=_credentials)
 
         return Output.all(container_registry_output, _docker_credentials()).apply(_make_instance)
-
+
+    def registry_implementation_name(self) -> str:
+        return 'Digital Ocean Container Registry'
+
     @staticmethod
     def do_project_name_from_project_dir(dirname: str):
         script_dir = os.path.dirname(os.path.abspath(__file__))

From 1dca5134e40bf5d7d4446aab2cd917e2e2ad3237 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 16 Jun 2022 13:45:26 -0700
Subject: [PATCH 17/62] fix: AWS registry not being referenced using
 'repository'

AWS ECR refers to itself as a repository and not a registry; we aim to
keep that naming consistent when referring directly to ECR nouns. This
change fixes a bug where we were over-eager in using the word
'registry' instead of the noun 'repository' that is hardcoded in the
ECR stack reference.
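Sketched against the ECR project's stack outputs, the corrected lookup
is simply (output key name taken from the diff below):

    # ECR's own noun is 'repository', so the stack reference exports
    # 'repository_url' rather than 'registry_url'
    repository_url_output = stack_ref.require_output('repository_url')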
--- pulumi/python/utility/kic-image-push/__main__.py | 4 ++++ pulumi/python/utility/kic-image-push/registries/aws.py | 8 +++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/pulumi/python/utility/kic-image-push/__main__.py b/pulumi/python/utility/kic-image-push/__main__.py index 5fed322..f377bdb 100644 --- a/pulumi/python/utility/kic-image-push/__main__.py +++ b/pulumi/python/utility/kic-image-push/__main__.py @@ -70,6 +70,10 @@ def push_to_container_registry(container_registry: ContainerRegistry) -> Reposit _repo_push = RepositoryPush(name='ingress-controller-registry-push', repository_args=repo_args, check_if_id_matches_tag_func=container_registry.check_if_id_matches_tag) + + pulumi.info('Pushing NGINX Ingress Controller container image to ' + f'{container_registry.registry_implementation_name()}') + return _repo_push else: raise 'Unable to log into container registry' diff --git a/pulumi/python/utility/kic-image-push/registries/aws.py b/pulumi/python/utility/kic-image-push/registries/aws.py index d2fa3a9..ea32aa3 100644 --- a/pulumi/python/utility/kic-image-push/registries/aws.py +++ b/pulumi/python/utility/kic-image-push/registries/aws.py @@ -19,13 +19,15 @@ def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry # Async query for credentials from stack reference ecr_registry_id = stack_ref.require_output('registry_id') credentials_output = ecr_registry_id.apply(ElasticContainerRegistry.get_ecr_credentials) - # Async query for registry url from stack reference - registry_url_output = stack_ref.require_output('registry_url') + # Async query for repository url from stack reference + # Note that AWS ECR refers to itself as a repository and not a registry, we aim to keep + # that naming consistent when referring directly to ECR nouns + repository_url_output = stack_ref.require_output('repository_url') def _make_instance(params: List[Any]) -> ElasticContainerRegistry: return cls(stack_name=stack_name, pulumi_user=pulumi_user, registry_url=params[0], credentials=params[1]) - return Output.all(registry_url_output, credentials_output).apply(_make_instance) + return Output.all(repository_url_output, credentials_output).apply(_make_instance) @staticmethod def aws_project_name_from_project_dir(dirname: str): From 028a1ade4309f07e563886da2662fbaa571d44f3 Mon Sep 17 00:00:00 2001 From: Javier Evans Date: Fri, 3 Jun 2022 16:31:10 -0700 Subject: [PATCH 18/62] bugfix: change pipenv install to pipenv sync to avoid updating deps at build time (#157) (cherry picked from commit 01ef1ff7832af0be839c8fda35e4551fc798cb97) --- bin/setup_venv.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index 6f78052..499268c 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -183,7 +183,8 @@ pip3 install pipenv # Install certain utility packages like `nodeenv` and `wheel` that aid # in the installation of other build tools and dependencies # required by the other python packages. -PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv install --dev +# `pipenv sync` uses only the information in the `Pipfile.lock` ensuring repeatable builds +PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv sync --dev # Install node.js into virtual environment so that it can be used by Python # modules that make call outs to it. 
@@ -194,7 +195,8 @@ else fi # Install general package requirements -PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv install +# `pipenv sync` uses only the information in the `Pipfile.lock` ensuring repeatable builds +PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv sync # Install local common utilities module pip3 install "${script_dir}/../pulumi/python/utility/kic-pulumi-utils" From 8ddf29a8d2e404d65ab5951b07818cb640df5e64 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Tue, 7 Jun 2022 15:13:19 -0600 Subject: [PATCH 19/62] chore: deprecated convenience scripts and projects (#159) * chore: remove non-functional kubevip project * chore: deprecation of resources as discussed in #155 (cherry picked from commit b4ff5618a330ef38041345f452176bee0c36a2e6) --- bin/kubernetes-extras.sh | 13 ++++ bin/testcap.sh | 10 +++ pulumi/python/tools/README.md | 4 ++ pulumi/python/tools/kubevip/Pulumi.yaml | 7 -- pulumi/python/tools/kubevip/__main__.py | 63 ----------------- .../manifests/kube-vip-cloud-controller.yaml | 70 ------------------- 6 files changed, 27 insertions(+), 140 deletions(-) delete mode 100644 pulumi/python/tools/kubevip/Pulumi.yaml delete mode 100644 pulumi/python/tools/kubevip/__main__.py delete mode 100644 pulumi/python/tools/kubevip/manifests/kube-vip-cloud-controller.yaml diff --git a/bin/kubernetes-extras.sh b/bin/kubernetes-extras.sh index 3c60ff7..94b8ba2 100755 --- a/bin/kubernetes-extras.sh +++ b/bin/kubernetes-extras.sh @@ -11,6 +11,19 @@ export PULUMI_SKIP_CONFIRMATIONS=true script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +echo " " +echo "IMPORTANT NOTICE!" +echo "====================================================================================================" +echo " This script and the associated Pulumi projects are deprecated and will be removed in a future " +echo " release as they are outside of the scope of the MARA project." +echo " " +echo " The MARA team no longer tests or updates these scripts, so please review before running if you " +echo " decide that you want to use them." +echo " " +echo " For more information, please see Discussion #155 in the repository (nginx.com/mara)" +echo "====================================================================================================" +sleep 5 + # Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based # projects. diff --git a/bin/testcap.sh b/bin/testcap.sh index 6eeb0ca..d8a49a3 100755 --- a/bin/testcap.sh +++ b/bin/testcap.sh @@ -30,6 +30,16 @@ cleanitup() { fi } +echo " " +echo "IMPORTANT NOTICE!" +echo "====================================================================================================" +echo " This script is deprecated and will be removed in a future release. " +echo " " +echo " This script may not function properly in your environment; run at your own risk. 
" +echo " " +echo " For more information, please see Discussion #155 in the repository (nginx.com/mara)" +echo "====================================================================================================" +sleep 5 echo " " echo "This script will perform testing on the current kubernetes installation using the currently active kubernetes" diff --git a/pulumi/python/tools/README.md b/pulumi/python/tools/README.md index 306437f..ba27c04 100644 --- a/pulumi/python/tools/README.md +++ b/pulumi/python/tools/README.md @@ -2,6 +2,10 @@ `/pulumi/python/tools` +## Deprecation Notice +These tools are no longer supported by the MARA team and will be removed in a future release. They *should* work +correctly, but this is not guaranteed. Any use is at your own risk. + ## Purpose This directory holds common tools that *may* be required by kubernetes installations that do not meet the minimum diff --git a/pulumi/python/tools/kubevip/Pulumi.yaml b/pulumi/python/tools/kubevip/Pulumi.yaml deleted file mode 100644 index 2073760..0000000 --- a/pulumi/python/tools/kubevip/Pulumi.yaml +++ /dev/null @@ -1,7 +0,0 @@ -name: kubevip -runtime: - name: python - options: - virtualenv: ../../venv -config: ../common/config -description: Deploys kube-vip diff --git a/pulumi/python/tools/kubevip/__main__.py b/pulumi/python/tools/kubevip/__main__.py deleted file mode 100644 index f73158c..0000000 --- a/pulumi/python/tools/kubevip/__main__.py +++ /dev/null @@ -1,63 +0,0 @@ -import pulumi -import ipaddress -import os -import pulumi_kubernetes as k8s -from pulumi_kubernetes.yaml import ConfigFile -from kic_util import pulumi_config - - -def pulumi_kube_project_name(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - kube_project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', 'kubeconfig') - return pulumi_config.get_pulumi_project_name(kube_project_path) - - -def pulumi_ingress_project_name(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - ingress_project_path = os.path.join(script_dir, '..', 'nginx', 'ingress-controller') - return pulumi_config.get_pulumi_project_name(ingress_project_path) - - -# Where are our manifests? 
-def k8_manifest_location(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - k8_manifest_path = os.path.join(script_dir, 'manifests', 'kube-vip-cloud-controller.yaml') - return k8_manifest_path - - -stack_name = pulumi.get_stack() -project_name = pulumi.get_project() -kube_project_name = pulumi_kube_project_name() -pulumi_user = pulumi_config.get_pulumi_user() - -kube_stack_ref_id = f"{pulumi_user}/{kube_project_name}/{stack_name}" -kube_stack_ref = pulumi.StackReference(kube_stack_ref_id) -kubeconfig = kube_stack_ref.get_output('kubeconfig').apply(lambda c: str(c)) -kube_stack_ref.get_output('cluster_name').apply( - lambda s: pulumi.log.info(f'Cluster name: {s}')) - -k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) - -config = pulumi.Config('kubevip') -thecidr = config.require('thecidr') - -thenet = ipaddress.IPv4Network(thecidr, strict=False) -therange = str(thenet[0]) + "-" + str(thenet[-1]) - -k8_manifest = k8_manifest_location() - -kubevip = ConfigFile( - "kubevip", - file=k8_manifest) - -# Create a config map -kube_system_kubevip_config_map = k8s.core.v1.ConfigMap("kube_systemKubevipConfigMap", - api_version="v1", - data={ - "cidr-global": thecidr - }, - kind="ConfigMap", - metadata=k8s.meta.v1.ObjectMetaArgs( - name="kubevip", - namespace="kube-system", - )) diff --git a/pulumi/python/tools/kubevip/manifests/kube-vip-cloud-controller.yaml b/pulumi/python/tools/kubevip/manifests/kube-vip-cloud-controller.yaml deleted file mode 100644 index ad1a90d..0000000 --- a/pulumi/python/tools/kubevip/manifests/kube-vip-cloud-controller.yaml +++ /dev/null @@ -1,70 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-vip-cloud-controller - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - name: system:kube-vip-cloud-controller-role -rules: - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "create", "update", "list", "put"] - - apiGroups: [""] - resources: ["configmaps", "endpoints","events","services/status", "leases"] - verbs: ["*"] - - apiGroups: [""] - resources: ["nodes", "services"] - verbs: ["list","get","watch","update"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:kube-vip-cloud-controller-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:kube-vip-cloud-controller-role -subjects: -- kind: ServiceAccount - name: kube-vip-cloud-controller - namespace: kube-system ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: kube-vip-cloud-provider - namespace: kube-system -spec: - serviceName: kube-vip-cloud-provider - podManagementPolicy: OrderedReady - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: kube-vip - component: kube-vip-cloud-provider - template: - metadata: - labels: - app: kube-vip - component: kube-vip-cloud-provider - spec: - containers: - - command: - - /kube-vip-cloud-provider - - --leader-elect-resource-name=kube-vip-cloud-controller - image: kubevip/kube-vip-cloud-provider:0.1 - name: kube-vip-cloud-provider - imagePullPolicy: Always - resources: {} - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 - serviceAccountName: kube-vip-cloud-controller From eadc250f59c5f8f3661a4c036aa83609251a7981 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Thu, 
9 Jun 2022 10:24:45 -0600 Subject: [PATCH 20/62] fix: typo in find command was causing pulumi stacks to not be deleted (#160) (cherry picked from commit 0619d5dc14b68ae33c8bd1a2f2dd677f444197a2) --- extras/jenkins/AWS/Jenkinsfile | 2 +- extras/jenkins/DigitalOcean/Jenkinsfile | 2 +- extras/jenkins/K3S/Jenkinsfile | 2 +- extras/jenkins/Linode/Jenkinsfile | 2 +- extras/jenkins/MicroK8s/Jenkinsfile | 2 +- extras/jenkins/Minikube/Jenkinsfile | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/extras/jenkins/AWS/Jenkinsfile b/extras/jenkins/AWS/Jenkinsfile index 8e17961..05a66fa 100644 --- a/extras/jenkins/AWS/Jenkinsfile +++ b/extras/jenkins/AWS/Jenkinsfile @@ -188,7 +188,7 @@ pipeline { sh ''' $WORKSPACE/bin/destroy.sh - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; + find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \; ''' } } diff --git a/extras/jenkins/DigitalOcean/Jenkinsfile b/extras/jenkins/DigitalOcean/Jenkinsfile index 5ce6771..4ef4eb1 100644 --- a/extras/jenkins/DigitalOcean/Jenkinsfile +++ b/extras/jenkins/DigitalOcean/Jenkinsfile @@ -197,7 +197,7 @@ pipeline { # Destroy our partial build... $WORKSPACE/bin/destroy.sh || true # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; ''' } } diff --git a/extras/jenkins/K3S/Jenkinsfile b/extras/jenkins/K3S/Jenkinsfile index fa19151..52ae855 100644 --- a/extras/jenkins/K3S/Jenkinsfile +++ b/extras/jenkins/K3S/Jenkinsfile @@ -228,7 +228,7 @@ pipeline { /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; ''' } } diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile index 01e3e60..0c1010b 100644 --- a/extras/jenkins/Linode/Jenkinsfile +++ b/extras/jenkins/Linode/Jenkinsfile @@ -183,7 +183,7 @@ pipeline { # Destroy our partial build... $WORKSPACE/bin/destroy.sh || true # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. 
- find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; ''' } } diff --git a/extras/jenkins/MicroK8s/Jenkinsfile b/extras/jenkins/MicroK8s/Jenkinsfile index 85ac3fc..61ec03d 100644 --- a/extras/jenkins/MicroK8s/Jenkinsfile +++ b/extras/jenkins/MicroK8s/Jenkinsfile @@ -240,7 +240,7 @@ pipeline { # True if it’s not there… snap remove microk8s || true # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; ''' } } diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile index 465f7cd..8df84f1 100644 --- a/extras/jenkins/Minikube/Jenkinsfile +++ b/extras/jenkins/Minikube/Jenkinsfile @@ -249,7 +249,7 @@ _EOF_ # True if it's not there minikube delete || true # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; ''' } } From 1c5ba818a023652eccd38142e67e45db08f60d76 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Wed, 15 Jun 2022 10:10:52 -0700 Subject: [PATCH 21/62] chore: jenkins fixes and general cleanup of jenkinsfiles (#161) * fix: typo in find command was causing pulumi stacks to not be deleted * fix: formatting and find syntax in jenkins (esc for Groovy) * fix: formatting and find syntax in jenkins (esc for Groovy) * chore: clean up the comments a bit (cherry picked from commit cf655d0392f5c9756ac327437fe44f24a29efb1f) --- extras/jenkins/AWS/Jenkinsfile | 23 +++---- extras/jenkins/DigitalOcean/Jenkinsfile | 23 +++---- extras/jenkins/K3S/Jenkinsfile | 3 +- extras/jenkins/Linode/Jenkinsfile | 23 +++---- extras/jenkins/MicroK8s/Jenkinsfile | 90 ++++++++++--------------- extras/jenkins/Minikube/Jenkinsfile | 74 +++++++++----------- pulumi/python/Pipfile.lock | 33 ++++----- 7 files changed, 115 insertions(+), 154 deletions(-) diff --git a/extras/jenkins/AWS/Jenkinsfile b/extras/jenkins/AWS/Jenkinsfile index 05a66fa..7df39ac 100644 --- a/extras/jenkins/AWS/Jenkinsfile +++ b/extras/jenkins/AWS/Jenkinsfile @@ -78,7 +78,7 @@ pipeline { DEBIAN_FRONTEND=noninteractive apt -y upgrade # Make sure our deps are installed DEBIAN_FRONTEND=noninteractive apt -y install figlet openjdk-11-jdk make docker.io - ''' + ''' } } @@ -95,7 +95,7 @@ pipeline { sh ''' # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. 
find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; - ''' + ''' } } @@ -108,7 +108,7 @@ pipeline { sh ''' $WORKSPACE/bin/setup_venv.sh - ''' + ''' } } @@ -122,7 +122,7 @@ pipeline { sh ''' $WORKSPACE/bin/aws_write_creds.sh - ''' + ''' } } @@ -156,7 +156,7 @@ pipeline { $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:profile "${AWS_PROFILE}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:region "${AWS_DEFAULT_REGION}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} - ''' + ''' } } @@ -171,7 +171,7 @@ pipeline { sh ''' echo "${NGINX_JWT}" > $WORKSPACE/extras/jwt.token $WORKSPACE/bin/start_aws.sh - ''' + ''' } } @@ -188,8 +188,8 @@ pipeline { sh ''' $WORKSPACE/bin/destroy.sh - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \; - ''' + find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; + ''' } } @@ -204,10 +204,9 @@ pipeline { */ sh ''' - # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; + # Destroy our partial build... + $WORKSPACE/bin/destroy.sh || true + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } } diff --git a/extras/jenkins/DigitalOcean/Jenkinsfile b/extras/jenkins/DigitalOcean/Jenkinsfile index 4ef4eb1..4fa5a8f 100644 --- a/extras/jenkins/DigitalOcean/Jenkinsfile +++ b/extras/jenkins/DigitalOcean/Jenkinsfile @@ -76,7 +76,7 @@ pipeline { doctl auth init -t $DIGITALOCEAN_TOKEN # Fix perms for the snap.... snap connect doctl:kube-config - ''' + ''' } } @@ -140,7 +140,7 @@ pipeline { $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set digitalocean:token "${DO_TOKEN}" --plaintext -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set domk8s:k8s_version "latest" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - ''' + ''' } } @@ -176,15 +176,9 @@ pipeline { find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; ''' } - /* - * Clean up the environment; this includes running the destroy script to remove our pulumi resources and - * destroy the deployed infrastructure in Digital Ocean. - * - * After that completes, we remove the pulumi stack from the project with the find command; this is because - * we need to delete the stack in each project it's been instantiated in. - */} - } +} + post { failure { @@ -194,11 +188,10 @@ pipeline { */ sh ''' - # Destroy our partial build... 
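A note on the pattern being corrected here: find's -execdir runs the given command once per match, from the directory containing the match, which is why the stack gets removed from every project that instantiated it. The sketch below is a rough Python equivalent, included for illustration only and not part of any patch; the WORKSPACE and BUILD_NUMBER environment variables and the virtualenv layout are assumptions carried over from these pipelines, and the stack name is an example value.

import os
import pathlib
import subprocess

# Assumed values, mirroring the Jenkins environment above (illustrative only).
workspace = pathlib.Path(os.environ.get('WORKSPACE', os.getcwd())).resolve()
stack_name = f"marajenkaws{os.environ.get('BUILD_NUMBER', '0')}"

# Use the virtualenv's pulumi binary rather than whatever is on $PATH --
# invoking a bare `pulumi` that was not on the agent's PATH is the bug
# these patches fix.
pulumi_bin = workspace / 'pulumi' / 'python' / 'venv' / 'bin' / 'pulumi'

# Equivalent of:
#   find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml \
#     -execdir pulumi stack rm $STACK --force --yes \;
for project_file in workspace.rglob('Pulumi.yaml'):
    depth = len(project_file.relative_to(workspace).parts)
    if 2 <= depth <= 7:
        # -execdir runs the command from the directory containing the match.
        subprocess.run([str(pulumi_bin), 'stack', 'rm', stack_name, '--force', '--yes'],
                       cwd=project_file.parent, check=False)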
- $WORKSPACE/bin/destroy.sh || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; + # Destroy our partial build... + $WORKSPACE/bin/destroy.sh || true + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; ''' } } - } +} \ No newline at end of file diff --git a/extras/jenkins/K3S/Jenkinsfile b/extras/jenkins/K3S/Jenkinsfile index 52ae855..e92cdc1 100644 --- a/extras/jenkins/K3S/Jenkinsfile +++ b/extras/jenkins/K3S/Jenkinsfile @@ -227,8 +227,7 @@ pipeline { # Reset our K3S Environment /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; ''' } } diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile index 0c1010b..d237908 100644 --- a/extras/jenkins/Linode/Jenkinsfile +++ b/extras/jenkins/Linode/Jenkinsfile @@ -69,7 +69,7 @@ pipeline { # Create the directory for the kubeconfig mkdir -p $HOME/.kube || true chmod 777 $HOME/.kube || true - ''' + ''' } } @@ -77,15 +77,15 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. + * This is currently empty since we are building a new executor for each run. However, maintaining + * here for anyone who wants to add cleanup steps for their environment * * Other cleanup related functions can be placed here as well. */ sh ''' - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; + # Just return... + true ''' } @@ -132,7 +132,7 @@ pipeline { $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:token "${LINODE_TOKEN}" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} - ''' + ''' } } @@ -165,7 +165,7 @@ pipeline { sh ''' PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } } @@ -180,11 +180,10 @@ pipeline { */ sh ''' - # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. 
- find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; - ''' + # Destroy our partial build... + $WORKSPACE/bin/destroy.sh || true + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; + ''' } } } diff --git a/extras/jenkins/MicroK8s/Jenkinsfile b/extras/jenkins/MicroK8s/Jenkinsfile index 61ec03d..1b04a6c 100644 --- a/extras/jenkins/MicroK8s/Jenkinsfile +++ b/extras/jenkins/MicroK8s/Jenkinsfile @@ -71,7 +71,7 @@ pipeline { DEBIAN_FRONTEND=noninteractive apt -y install figlet openjdk-11-jdk make docker.io # Make sure our kubeconfig dir exists… mkdir $HOME/.kube || true - ''' + ''' } } @@ -79,26 +79,17 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. + * This is currently empty since we are building a new executor for each run. However, maintaining + * here for anyone who wants to add cleanup steps for their environment * - * This function also tries to remove both K3S and Microk8s if they are found on the host; this is because we - * will be installing Microk8s and we want to both make sure we are removing any previous installations as well as - * ensuring this Jenkins Agent does not already have a Microk8s installation on it. + * Other cleanup related functions can be placed here as well. */ - sh ''' - # Reset our K3S Environment - /usr/local/bin/k3s-killall.sh || true - /usr/local/bin/k3s-uninstall.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; - ''' - } + sh ''' + # Just return... 
+ true + ''' + } } stage('Microk8s Setup') { @@ -115,7 +106,7 @@ pipeline { snap install microk8s --classic --channel=1.23/stable microk8s.enable storage dns helm3 microk8s.enable metallb 192.168.100.100/30 - ''' + ''' } } @@ -129,7 +120,7 @@ pipeline { sh ''' microk8s.config > $HOME/.kube/config - ''' + ''' } } @@ -142,7 +133,7 @@ pipeline { sh ''' $WORKSPACE/bin/setup_venv.sh - ''' + ''' } } @@ -159,21 +150,21 @@ pipeline { */ sh ''' - echo "PULUMI_STACK=marajenk${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius - $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - ''' + echo "PULUMI_STACK=marajenkmk8s${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/config + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmk8ss${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + 
$WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + ''' } } @@ -188,7 +179,7 @@ pipeline { sh ''' echo $NGINX_JWT > $WORKSPACE/extras/jwt.token $WORKSPACE/bin/start_kube.sh - ''' + ''' } } @@ -209,14 +200,8 @@ pipeline { microk8s reset --destroy-storage || true # True if it’s not there… sudo snap remove microk8s || true - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; - # This is a hack to allow additional commands to be issued following cleanup. This is needed because the VMs - # that currently run as agents for K3S and Microk8s deployments need to be rebooted following some number of - # runs due to zombie processes and other issues. Long term we want to deploy these VM's via IaaC so the only - # exist for the lifetime of the project. We do it this way in order to provide some flexibility for the - # jenkins configuration. - ${POSTRUN_CMD- true} - ''' + find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmk8s${BUILD_NUMBER} --force --yes \\; + ''' } } @@ -233,14 +218,13 @@ pipeline { */ sh ''' - # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; + # Destroy our partial build... + $WORKSPACE/bin/destroy.sh || true + # Reset our Microk8s Environment; true if it’s not there + microk8s reset --destroy-storage || true + # True if it’s not there… + snap remove microk8s || true + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmk8s${BUILD_NUMBER} --force --yes \\; ''' } } diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile index 8df84f1..a5a783c 100644 --- a/extras/jenkins/Minikube/Jenkinsfile +++ b/extras/jenkins/Minikube/Jenkinsfile @@ -71,7 +71,7 @@ pipeline { DEBIAN_FRONTEND=noninteractive apt -y install figlet openjdk-11-jdk make docker.io conntrack expect # Make sure our kubeconfig dir exists… mkdir $HOME/.kube || true - ''' + ''' } } @@ -79,26 +79,17 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. + * This is currently empty since we are building a new executor for each run. 
However, maintaining + * here for anyone who wants to add cleanup steps for their environment * - * This function also tries to remove both K3S and Microk8s if they are found on the host; this is because we - * will be installing Microk8s and we want to both make sure we are removing any previous installations as well as - * ensuring this Jenkins Agent does not already have a Microk8s installation on it. + * Other cleanup related functions can be placed here as well. */ - sh ''' - # Reset our K3S Environment - /usr/local/bin/k3s-killall.sh || true - /usr/local/bin/k3s-uninstall.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; - ''' - } + sh ''' + # Just return... + true + ''' + } } stage('Minikube Setup') { @@ -166,20 +157,20 @@ _EOF_ */ sh ''' - echo "PULUMI_STACK=marajenk${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius - $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} + echo "PULUMI_STACK=marajenkmkube${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/config + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set 
kic-helm:fqdn "marajenkmkubes${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} ''' } } @@ -216,7 +207,7 @@ _EOF_ microk8s reset --destroy-storage || true # True if it’s not there… snap remove microk8s || true - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmkube${BUILD_NUMBER} --force --yes \\; # This is a hack to allow additional commands to be issued following cleanup. This is needed because the VMs # that currently run as agents for K3S and Microk8s deployments need to be rebooted following some number of # runs due to zombie processes and other issues. Long term we want to deploy these VM's via IaaC so the only @@ -240,17 +231,12 @@ _EOF_ */ sh ''' - # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true - # True if it's not there - minikube delete || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \; - ''' + # Destroy our partial build... 
+ $WORKSPACE/bin/destroy.sh || true + # True if it's not there + minikube delete || true + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmkube${BUILD_NUMBER} --force --yes \\; + ''' } } } diff --git a/pulumi/python/Pipfile.lock b/pulumi/python/Pipfile.lock index 595d881..ab6af08 100644 --- a/pulumi/python/Pipfile.lock +++ b/pulumi/python/Pipfile.lock @@ -1,11 +1,11 @@ { "_meta": { "hash": { - "sha256": "82438b823c498fd15f01ec4746b9d77eb7845e90d0320c4e8c6a27ba878eb11e" + "sha256": "26ad2e064332a5855c06569e11375440a111043957d9186d9098a3e1a0122ae4" }, "pipfile-spec": 6, "requires": { - "python_version": "3.10" + "python_version": "3.9" }, "sources": [ { @@ -49,10 +49,11 @@ }, "certifi": { "hashes": [ - "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872", - "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569" + "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7", + "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a" ], - "version": "==2021.10.8" + "markers": "python_version >= '3.6'", + "version": "==2022.5.18.1" }, "charset-normalizer": { "hashes": [ @@ -72,11 +73,11 @@ }, "dill": { "hashes": [ - "sha256:7e40e4a70304fd9ceab3535d36e58791d9c4a776b38ec7f7ec9afc8d3dca4d4f", - "sha256:9f9734205146b2b353ab3fec9af0070237b6ddae78452af83d2fca84d739e675" + "sha256:33501d03270bbe410c72639b350e941882a8b0fd55357580fbc873fba0c59302", + "sha256:d75e41f3eff1eee599d738e76ba8f4ad98ea229db8b085318aa2b3333a208c86" ], - "markers": "python_version >= '2.7' and python_version != '3.0'", - "version": "==0.3.4" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'", + "version": "==0.3.5.1" }, "docutils": { "hashes": [ @@ -84,7 +85,7 @@ "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827", "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.15.2" }, "fart": { @@ -352,7 +353,7 @@ "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.8.2" }, "pyyaml": { @@ -424,11 +425,11 @@ }, "setuptools": { "hashes": [ - "sha256:28c79c24d83c42a5e6d6cc711e5e9a6c1b89326229feaa5807fc277040658600", - "sha256:588ffd1dc6e20e9f4f7057aa9873fcdc26e0270362602735d32476bad67d82c5" + "sha256:68e45d17c9281ba25dc0104eadd2647172b3472d9e01f911efa57965e8d51a36", + "sha256:a43bdedf853c670e5fed28e5623403bad2f73cf02f9a2774e91def6bda8265a7" ], "markers": "python_version >= '3.7'", - "version": "==62.3.1" + "version": "==62.3.2" }, "setuptools-git-versioning": { "hashes": [ @@ -443,7 +444,7 @@ "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.16.0" }, "terminaltables": { @@ -459,7 +460,7 @@ 
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.10.2" }, "urllib3": { From cab200ddeab5d064a2e4a0ca7c9bb302f952da92 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:53:04 -0700 Subject: [PATCH 22/62] refactor: improve naming and fix typos --- bin/setup_venv.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index 499268c..a5dc943 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -248,7 +248,7 @@ fi # The two fixes here are to hardcode (For now) to a known good version (1.23.6) and force the script to # always download this version. # -# TODO: Figure out a way to not hardocde the kubectl version +# TODO: Figure out a way to not hardcode the kubectl version # TODO: Should not always download if the versions match; need a version check # # @@ -284,10 +284,10 @@ if [[ -x "${VIRTUAL_ENV}/bin/pulumi" ]] && [[ "$(PULUMI_SKIP_UPDATE_CHECK=true " echo "Pulumi version ${PULUMI_VERSION} is already installed" else PULUMI_TARBALL_URL="https://get.pulumi.com/releases/sdk/pulumi-v${PULUMI_VERSION}-${OS}-${ARCH/amd64/x64}.tar.gz" - PULUMI_TARBALL_DESTTARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) - ${download_cmd} "${PULUMI_TARBALL_URL}" >"${PULUMI_TARBALL_DESTTARBALL_DEST}" + PULUMI_TARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) + ${download_cmd} "${PULUMI_TARBALL_URL}" > "${PULUMI_TARBALL_DEST}" [ $? -eq 0 ] && echo "Pulumi downloaded successfully" || echo "Failed to download Pulumi" - tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DESTTARBALL_DEST}" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DEST}" [ $? -eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" - rm "${PULUMI_TARBALL_DESTTARBALL_DEST}" + rm "${PULUMI_TARBALL_DEST}" fi From b349cb211f0b7443fc4807e3990c66f2ce724cd0 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 16 Jun 2022 13:53:18 -0700 Subject: [PATCH 23/62] feat: install Digital Ocean CLI tool --- bin/setup_venv.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index a5dc943..8946f7d 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -291,3 +291,15 @@ else [ $? -eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" rm "${PULUMI_TARBALL_DEST}" fi + +if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then + echo "Downloading Digital Ocean CLI" + DOCTL_VERSION="1.75.0" + DOCTL_TARBALL_URL="https://github.com/digitalocean/doctl/releases/download/v${DOCTL_VERSION}/doctl-${DOCTL_VERSION}-${OS}-${ARCH}.tar.gz" + DOCTL_TARBALL_DEST=$(mktemp -t doctl.tar.gz.XXXXXXXXXX) + ${download_cmd} "${DOCTL_TARBALL_URL}" > "${DOCTL_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Digital Ocean CLI downloaded successfully" || echo "Failed to download Digital Ocean CLI" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}" + [ $? 
-eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI" + rm "${DOCTL_TARBALL_DEST}" +fi \ No newline at end of file From 17257552fe2de278e28e7c850d8b011adc2576fc Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Wed, 15 Jun 2022 10:10:52 -0700 Subject: [PATCH 24/62] feat: prompt user for parameters when starting up DO --- config/pulumi/Pulumi.stackname.yaml.example | 14 ++--- pulumi/python/automation/providers/do.py | 54 +++++++++++++++++++ .../container-registry/__main__.py | 2 +- .../digitalocean/domk8s/__main__.py | 2 +- 4 files changed, 63 insertions(+), 9 deletions(-) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index 3b22f16..e3485ca 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -321,18 +321,18 @@ config: # within that project. ############################################################################ - # Digital Ocean Managed Kubernetes + # Digital Ocean Managed Kubernetes and Container Registry ############################################################################ # This is the Kubernetes version to install using Digital Ocean K8s. - domk8s:k8s_version: latest - # Version of Kubernetes to use - domk8s:instance_type: s-2vcpu-4gb + digitalocean:k8s_version: 1.22.8-do.1 # This is the default instance type used by Digital Ocean K8s. - domk8s:node_count: 3 + digitalocean:instance_size: s-2vcpu-4gb # The desired node count of the Digital Ocean K8s cluster. - domk8s:region: sfo3 + digitalocean:node_count: 3 # The region to deploy the cluster - + digitalocean:region: sfo3 + # Subscription tier for container registry + digitalocean:container_registry_subscription_tier: starter ############################################################################ # Linode Kubernetes Engine diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index 5690b31..e27e13f 100644 --- a/pulumi/python/automation/providers/do.py +++ b/pulumi/python/automation/providers/do.py @@ -36,6 +36,12 @@ def save_kubernetes_cluster_cmd(self, cluster_name: str) -> str: def get_kubernetes_versions_json(self) -> str: return f'{self.base_cmd()} kubernetes options versions --output json' + def get_kubernetes_regions_json(self) -> str: + return f'{self.base_cmd()} kubernetes options regions --output json' + + def get_kubernetes_instance_sizes_json(self) -> str: + return f'{self.base_cmd()} kubernetes options sizes --output json' + class DigitalOceanProvider(Provider): def infra_type(self) -> str: @@ -87,6 +93,54 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + token = DigitalOceanProvider.token(stack_config={ 'config': config }, env_config=env_config) + do_cli = DoctlCli(access_token=token) + + # Kubernetes versions + k8s_versions_json_str, _ = external_process.run(do_cli.get_kubernetes_versions_json()) + k8s_versions_json = json.loads(k8s_versions_json_str) + k8s_version_slugs = [version['slug'] for version in k8s_versions_json] + + print('Supported Kubernetes versions:') + for slug in k8s_version_slugs: + print(f' {slug}') + default_version = defaults['digitalocean:k8s_version'] or k8s_version_slugs[0] + config['digitalocean:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version + print(f"Kubernetes version: 
{config['digitalocean:k8s_version']}") + + # Kubernetes regions + k8s_regions_json_str, _ = external_process.run(do_cli.get_kubernetes_regions_json()) + k8s_regions_json = json.loads(k8s_regions_json_str) + default_region = defaults['digitalocean:region'] or k8s_regions_json[-1]['slug'] + + print('Supported Regions:') + for item in k8s_regions_json: + print(f" {item['name']}: {item['slug']}") + config['digitalocean:region'] = input(f'Region [{default_region}]: ').strip() or default_region + print(f"Region: {config['digitalocean:region']}") + + # Kubernetes instance size + k8s_sizes_json_str, _ = external_process.run(do_cli.get_kubernetes_instance_sizes_json()) + k8s_sizes_json = json.loads(k8s_sizes_json_str) + k8s_sizes_slugs = [size['slug'] for size in k8s_sizes_json] + default_size = defaults['digitalocean:instance_size'] or 's-2vcpu-4gb' + + print('Supported Instance Sizes:') + for slug in k8s_sizes_slugs: + print(f' {slug}') + + config['digitalocean:instance_size'] = input(f'Instance size [{default_size}]: ').strip() or default_size + print(f"Instance size: {config['digitalocean:instance_size']}") + + # Kubernetes instance count + default_node_count = defaults['digitalocean:node_count'] or 3 + while 'digitalocean:node_count' not in config: + node_count = input('Node count for Kubernetes cluster ' + f'[{default_node_count}]: ').strip() or default_node_count + if type(node_count) == int or node_count.isdigit(): + config['digitalocean:node_count'] = int(node_count) + print(f"Node count: {config['digitalocean:node_count']}") + return config def validate_stack_config(self, diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py index cbef949..20cb64b 100644 --- a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py @@ -9,7 +9,7 @@ # valid values: starter, basic, professional subscription_tier = config.get('container_registry_subscription_tier') if not subscription_tier: - subscription_tier = 'basic' + subscription_tier = 'starter' region = config.get('region') if not region: region = 'sfo3' diff --git a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py index 6da2fd5..f94a37f 100644 --- a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py @@ -5,7 +5,7 @@ from kic_util import pulumi_config # Configuration details for the K8 cluster -config = pulumi.Config('domk8s') +config = pulumi.Config('digitalocean') instance_size = config.get('instance_size') if not instance_size: instance_size = 's-2vcpu-4gb' From 3eb6d3e6f351aba5446de0ad524ec6af12cf2929 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Tue, 21 Jun 2022 13:57:01 -0700 Subject: [PATCH 25/62] feat: add dns record support to Digital Ocean provider --- pulumi/python/automation/providers/do.py | 16 +++++++- .../digitalocean/dns-record/Pulumi.yaml | 7 ++++ .../digitalocean/dns-record/__main__.py | 40 +++++++++++++++++++ .../nginx/ingress-controller/__main__.py | 18 ++++++++- 4 files changed, 78 insertions(+), 3 deletions(-) create mode 100644 pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml create mode 100644 pulumi/python/infrastructure/digitalocean/dns-record/__main__.py diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index 
e27e13f..351693f 100644
--- a/pulumi/python/automation/providers/do.py
+++ b/pulumi/python/automation/providers/do.py
@@ -61,13 +61,15 @@ def k8s_execution_order(self) -> List[PulumiProject]:
         # Pulumi project that gets the credentials and adds them to the Kubernetes cluster
         # under the appropriate namespace.
         original_order = super().k8s_execution_order()
+        new_order = original_order.copy()
 
         def find_position_of_project_by_path(path: str) -> int:
-            for index, project in enumerate(original_order):
+            for index, project in enumerate(new_order):
                 if project.path == path:
                     return index
             return -1
 
+        # Add container registry credentials project after ingress controller namespace project
         namespace_project_path = 'kubernetes/nginx/ingress-controller-namespace'
         namespace_project_position = find_position_of_project_by_path(namespace_project_path)
 
@@ -77,9 +79,19 @@ def find_position_of_project_by_path(path: str) -> int:
         add_credentials_project = PulumiProject(path='infrastructure/digitalocean/add-container-registry-credentials',
                                                 description='Registry Credentials')
-        new_order = original_order.copy()
         new_order.insert(namespace_project_position + 1, add_credentials_project)
 
+        # Add DNS record project after ingress controller project
+        ingress_controller_project_path = 'kubernetes/nginx/ingress-controller'
+        ingress_controller_project_position = find_position_of_project_by_path(ingress_controller_project_path)
+
+        if ingress_controller_project_position < 0:
+            raise ValueError('Could not find project that creates the nginx ingress controller at '
+                             f'path {ingress_controller_project_path}')
+
+        dns_record_project = PulumiProject(path='infrastructure/digitalocean/dns-record', description='DNS Record')
+        new_order.insert(ingress_controller_project_position + 1, dns_record_project)
+
         return new_order
 
     def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \
diff --git a/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml
new file mode 100644
index 0000000..de744a2
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: dns-record
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new DNS record for Ingress Controller
diff --git a/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py
new file mode 100644
index 0000000..3b428e1
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py
@@ -0,0 +1,40 @@
+import os
+
+import pulumi
+from pulumi import StackReference
+import pulumi_digitalocean as docean
+
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+script_dir = os.path.dirname(os.path.abspath(__file__))
+
+
+def project_name_of_ingress_controller_project():
+    project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def extract_ip_address(lb_ingress):
+    return lb_ingress['load_balancer']['ingress'][0]['ip']
+
+
+namespace_stack_ref_id = f"{pulumi_user}/{project_name_of_ingress_controller_project()}/{stack_name}"
+ns_stack_ref = StackReference(namespace_stack_ref_id)
+ip = ns_stack_ref.require_output('lb_ingress').apply(extract_ip_address)
+
+config =
pulumi.Config('kic-helm') +fqdn = config.require('fqdn') + +ingress_domain = docean.Domain.get(resource_name='ingress-domain', id=fqdn, name=fqdn) +ingress_a_record = docean.DnsRecord(resource_name='ingress-a-record', + name='@', + domain=ingress_domain.id, + type="A", + ttl=1800, + value=ip) + +pulumi.export('ingress_domain', ingress_domain) +pulumi.export('ingress_a_record', ingress_a_record) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index bfae293..13e2cb5 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -225,7 +225,23 @@ def namespace_by_name(name): ingress_service = srv.status -pulumi.export('lb_ingress_hostname', pulumi.Output.unsecret(ingress_service.load_balancer.ingress[0].hostname)) + +def ingress_hostname(_ingress_service): + # Attempt to get the hostname as returned from the helm chart + if 'load_balancer' in _ingress_service: + load_balancer = _ingress_service['load_balancer'] + if 'ingress' in load_balancer and len(load_balancer['ingress']) > 0: + first_ingress = load_balancer['ingress'][0] + if 'hostname' in first_ingress: + return first_ingress['hostname'] + + # If we can't get the hostname, then use the FQDN coded in the config file + fqdn = config.require('fqdn') + return fqdn + + +pulumi.export('lb_ingress_hostname', pulumi.Output.unsecret(ingress_service).apply(ingress_hostname)) +pulumi.export('lb_ingress', pulumi.Output.unsecret(ingress_service)) # Print out our status pulumi.export("kic_status", pstatus) pulumi.export('nginx_plus', pulumi.Output.unsecret(chart_values['controller']['nginxplus'])) From 8907fd35bc737edff043759f33decb31e149a0e4 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Wed, 22 Jun 2022 09:48:22 -0700 Subject: [PATCH 26/62] refactor: change name of container registry credentials project --- pulumi/python/automation/providers/do.py | 2 +- .../Pulumi.yaml | 2 +- .../__main__.py | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename pulumi/python/infrastructure/digitalocean/{add-container-registry-credentials => container-registry-credentials}/Pulumi.yaml (81%) rename pulumi/python/infrastructure/digitalocean/{add-container-registry-credentials => container-registry-credentials}/__main__.py (100%) diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index 351693f..5b7f67b 100644 --- a/pulumi/python/automation/providers/do.py +++ b/pulumi/python/automation/providers/do.py @@ -77,7 +77,7 @@ def find_position_of_project_by_path(path: str) -> int: raise ValueError('Could not find project that creates the nginx-ingress namespace at ' f'path {namespace_project_path}') - add_credentials_project = PulumiProject(path='infrastructure/digitalocean/add-container-registry-credentials', + add_credentials_project = PulumiProject(path='infrastructure/digitalocean/container-registry-credentials', description='Registry Credentials') new_order.insert(namespace_project_position + 1, add_credentials_project) diff --git a/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml similarity index 81% rename from pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml rename to pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml index e0350ab..44fec00 
100644
--- a/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/Pulumi.yaml
+++ b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml
@@ -1,4 +1,4 @@
-name: add-container-registry-credentials
+name: container-registry-credentials
 runtime:
   name: python
   options:
diff --git a/pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/__main__.py
similarity index 100%
rename from pulumi/python/infrastructure/digitalocean/add-container-registry-credentials/__main__.py
rename to pulumi/python/infrastructure/digitalocean/container-registry-credentials/__main__.py

From e3fdb6e9ff11e4b7fdcb2c78bb40f9e3038a7397 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:14:39 -0700
Subject: [PATCH 27/62] docs: small comment addition and doc change

Reference to the DO CLI is removed from the documentation because it is
installed as part of the setup_venv.sh script.
---
 bin/setup_venv.sh       | 1 +
 docs/getting_started.md | 7 +------
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh
index 8946f7d..d629d1f 100755
--- a/bin/setup_venv.sh
+++ b/bin/setup_venv.sh
@@ -292,6 +292,7 @@ else
   rm "${PULUMI_TARBALL_DEST}"
 fi
 
+# Digital Ocean CLI
 if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then
   echo "Downloading Digital Ocean CLI"
   DOCTL_VERSION="1.75.0"
diff --git a/docs/getting_started.md b/docs/getting_started.md
index bd17367..9ecc17c 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -172,12 +172,7 @@ kubernetes extras functionality. For more details on those, please see the READM
 
 ### Digital Ocean
 
-If you are using Digital Ocean as your infrastructure provider
-[configuring Pulumi for Digital Ocean](https://www.pulumi.com/registry/packages/digitalocean/) is necessary. The first
-step is to install the [`doctl`](https://docs.digitalocean.com/reference/doctl/how-to/install/) utility to interact with
-your Digital Ocean account.
-
-Next, you will need to create a
+You will need to create a
 [Digital Ocean Personal API Token](https://docs.digitalocean.com/reference/api/create-personal-access-token/) for
 authentication to Digital Ocean. When you run the script [`./bin/start.sh`](../bin/start.sh) and select a Digital Ocean
 deployment, your token will be added to the `./config/Pulumi/Pulumi.<stackname>.yaml`.
This is the main configuration

From c68592801ec19be0cc5c613f5e137f12b75d08a3 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:15:35 -0700
Subject: [PATCH 28/62] chore: double Helm timeout for Prometheus install

---
 pulumi/python/kubernetes/prometheus/__main__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pulumi/python/kubernetes/prometheus/__main__.py b/pulumi/python/kubernetes/prometheus/__main__.py
index ca6423a..c29d27c 100644
--- a/pulumi/python/kubernetes/prometheus/__main__.py
+++ b/pulumi/python/kubernetes/prometheus/__main__.py
@@ -80,7 +80,7 @@ def extract_adminpass_from_k8s_secrets(secrets: Mapping[str, str]) -> str:
 #
 helm_timeout = config.get_int('helm_timeout')
 if not helm_timeout:
-    helm_timeout = 300
+    helm_timeout = 600
 
 # Use Prometheus administrator password stored in Kubernetes secrets
 prometheus_secrets = Secret.get(resource_name='pulumi-secret-prometheus',

From a83c758242017a267dba0f811a7a6dcadb4ae62b Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:19:31 -0700
Subject: [PATCH 29/62] feat: allow adding new clusters to the kubectl config

Allow for adding and merging new clusters into the user's kubectl config.
The AWS and Digital Ocean CLIs do this automatically; however, not all SDKs
and CLI tools do. Here we add code that provides the same functionality
regardless of the underlying infrastructure provider.
---
 .../automation/providers/base_provider.py     |   3 +-
 .../automation/providers/update_kubeconfig.py | 466 ++++++++++++++++++
 2 files changed, 468 insertions(+), 1 deletion(-)
 create mode 100644 pulumi/python/automation/providers/update_kubeconfig.py

diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py
index 89c7ca1..26a2caf 100644
--- a/pulumi/python/automation/providers/base_provider.py
+++ b/pulumi/python/automation/providers/base_provider.py
@@ -19,7 +19,8 @@ def list_providers() -> Iterable[str]:
     def is_provider(file: pathlib.Path) -> bool:
         return file.is_file() and \
                not file.stem.endswith('base_provider') and \
-               not file.stem.endswith('pulumi_project')
+               not file.stem.endswith('pulumi_project') and \
+               not file.stem.endswith('update_kubeconfig')
 
     path = pathlib.Path(SCRIPT_DIR)
     return [os.path.splitext(file.stem)[0] for file in path.iterdir() if is_provider(file)]
diff --git a/pulumi/python/automation/providers/update_kubeconfig.py b/pulumi/python/automation/providers/update_kubeconfig.py
new file mode 100644
index 0000000..1531c7e
--- /dev/null
+++ b/pulumi/python/automation/providers/update_kubeconfig.py
@@ -0,0 +1,466 @@
+# This code is derived from code within the AWS SDK licensed under the
+# Apache 2.0 License.
+#
+# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# Copyright 2022 F5, Inc. All Rights Reserved.
+#
+# This file is licensed under the Apache License, Version 2.0 (the "License").
+# You may not use this file except in compliance with the License.
+#
+# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, either express or implied. See the License for
+# the specific language governing permissions and limitations under
+# the License.
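+
+# Usage sketch (illustrative only; the cluster name and the source file below
+# are assumptions for the example, not values used elsewhere in this module):
+#
+#     import os
+#     import yaml
+#
+#     with open('new-cluster-kubeconfig.yaml') as f:
+#         new_kubeconfig = yaml.safe_load(f)
+#
+#     update_kubeconfig(cluster_name='my-cluster', env=os.environ,
+#                       kubeconfig=new_kubeconfig)
+#
+# The first cluster, user, and context entries of the given kubeconfig are
+# merged into the file selected from $KUBECONFIG (defaulting to
+# ~/.kube/config); an existing entry for the same cluster is updated in place.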
+
+import os
+import logging
+import errno
+import sys
+from collections import OrderedDict
+from typing import Mapping, Any
+import yaml
+
+DEFAULT_PATH = os.path.expanduser("~/.kube/config")
+LOG = logging.getLogger(__name__)
+
+
+def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]):
+    cluster = kubeconfig['clusters'][0]
+    user = kubeconfig['users'][0]
+    alias = kubeconfig['contexts'][0]['name']
+
+    config_selector = KubeconfigSelector(env_variable=env.get('KUBECONFIG', ''),
+                                         path_in=None)
+    config = config_selector.choose_kubeconfig(cluster_name)
+
+    appender = KubeconfigAppender()
+    new_context_dict = appender.insert_cluster_user_pair(config=config,
+                                                         cluster=cluster,
+                                                         user=user,
+                                                         alias=alias)
+
+    writer = KubeconfigWriter()
+    writer.write_kubeconfig(config)
+
+    if config.has_cluster(cluster_name):
+        uni_print("Updated context {0} in {1}\n".format(
+            new_context_dict["name"], config.path
+        ))
+    else:
+        uni_print("Added new context {0} to {1}\n".format(
+            new_context_dict["name"], config.path
+        ))
+
+
+class KubeconfigError(RuntimeError):
+    """ Base class for all kubeconfig errors."""
+
+
+class KubeconfigCorruptedError(KubeconfigError):
+    """ Raised when a kubeconfig cannot be parsed."""
+
+
+class KubeconfigInaccessableError(KubeconfigError):
+    """ Raised when a kubeconfig cannot be opened for read/writing."""
+
+
+class SafeOrderedDumper(yaml.SafeDumper):
+    """ Safely dump an OrderedDict as yaml."""
+
+
+def _ordered_representer(dumper, data):
+    return dumper.represent_mapping(
+        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+        data.items())
+
+
+SafeOrderedDumper.add_representer(OrderedDict, _ordered_representer)
+
+
+def ordered_yaml_dump(to_dump, stream=None):
+    """
+    Dump an OrderedDict object to yaml.
+
+    :param to_dump: The OrderedDict to dump
+    :type to_dump: OrderedDict
+
+    :param stream: The file to dump to
+    If not given or if None, only return the value
+    :type stream: file
+    """
+    return yaml.dump(to_dump, stream,
+                     SafeOrderedDumper, default_flow_style=False)
+
+
+class SafeOrderedLoader(yaml.SafeLoader):
+    """ Safely load a yaml file into an OrderedDict."""
+
+
+def _ordered_constructor(loader, node):
+    loader.flatten_mapping(node)
+    return OrderedDict(loader.construct_pairs(node))
+
+
+SafeOrderedLoader.add_constructor(
+    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+    _ordered_constructor)
+
+
+def ordered_yaml_load(stream):
+    """ Load an OrderedDict object from a yaml stream."""
+    return yaml.load(stream, SafeOrderedLoader)
+
+
+def _get_new_kubeconfig_content():
+    return OrderedDict([
+        ("apiVersion", "v1"),
+        ("clusters", []),
+        ("contexts", []),
+        ("current-context", ""),
+        ("kind", "Config"),
+        ("preferences", OrderedDict()),
+        ("users", [])
+    ])
+
+
+class KubeconfigSelector(object):
+
+    def __init__(self, env_variable, path_in, validator=None, loader=None):
+        """
+        Parse KUBECONFIG into a list of absolute paths.
+ Also replace the empty list with DEFAULT_PATH + + :param env_variable: KUBECONFIG as a long string + :type env_variable: string + + :param path_in: The path passed in through the CLI + :type path_in: string or None + """ + if validator is None: + validator = KubeconfigValidator() + self._validator = validator + + if loader is None: + loader = KubeconfigLoader(validator) + self._loader = loader + + if path_in is not None: + # Override environment variable + self._paths = [self._expand_path(path_in)] + else: + # Get the list of paths from the environment variable + if env_variable == "": + env_variable = DEFAULT_PATH + self._paths = [self._expand_path(element) + for element in env_variable.split(os.pathsep) + if len(element.strip()) > 0] + if len(self._paths) == 0: + self._paths = [DEFAULT_PATH] + + def choose_kubeconfig(self, cluster_name): + """ + Choose which kubeconfig file to read from. + If name is already an entry in one of the $KUBECONFIG files, + choose that one. + Otherwise choose the first file. + + :param cluster_name: The name of the cluster which is going to be added + :type cluster_name: String + + :return: a chosen Kubeconfig based on above rules + :rtype: Kubeconfig + """ + # Search for an existing entry to update + for candidate_path in self._paths: + try: + loaded_config = self._loader.load_kubeconfig(candidate_path) + + if loaded_config.has_cluster(cluster_name): + LOG.debug("Found entry to update at {0}".format( + candidate_path + )) + return loaded_config + except KubeconfigError as e: + LOG.warning("Passing {0}:{1}".format(candidate_path, e)) + + # No entry was found, use the first file in KUBECONFIG + # + # Note: This could raise KubeconfigErrors if paths[0] is corrupted + return self._loader.load_kubeconfig(self._paths[0]) + + def _expand_path(self, path): + """ A helper to expand a path to a full absolute path. """ + return os.path.abspath(os.path.expanduser(path)) + + +class Kubeconfig(object): + def __init__(self, path, content=None): + self.path = path + if content is None: + content = _get_new_kubeconfig_content() + self.content = content + + def dump_content(self): + """ Return the stored content in yaml format. """ + return ordered_yaml_dump(self.content) + + def has_cluster(self, name): + """ + Return true if this kubeconfig contains an entry + For the passed cluster name. 
+ """ + if 'clusters' not in self.content: + return False + return name in [cluster['name'] + for cluster in self.content['clusters']] + + +class KubeconfigValidator(object): + def __init__(self): + # Validation_content is an empty Kubeconfig + # It is used as a way to know what types different entries should be + self._validation_content = Kubeconfig(None, None).content + + def validate_config(self, config): + """ + Raises KubeconfigCorruptedError if the passed content is invalid + + :param config: The config to validate + :type config: Kubeconfig + """ + if not isinstance(config, Kubeconfig): + raise KubeconfigCorruptedError("Internal error: " + "Not a Kubeconfig object.") + self._validate_config_types(config) + self._validate_list_entry_types(config) + + def _validate_config_types(self, config): + """ + Raises KubeconfigCorruptedError if any of the entries in config + are the wrong type + + :param config: The config to validate + :type config: Kubeconfig + """ + if not isinstance(config.content, dict): + raise KubeconfigCorruptedError("Content not a dictionary.") + for key, value in self._validation_content.items(): + if (key in config.content and + config.content[key] is not None and + not isinstance(config.content[key], type(value))): + raise KubeconfigCorruptedError( + "{0} is wrong type:{1} " + "(Should be {2})".format( + key, + type(config.content[key]), + type(value) + ) + ) + + def _validate_list_entry_types(self, config): + """ + Raises KubeconfigCorruptedError if any lists in config contain objects + which are not dictionaries + + :param config: The config to validate + :type config: Kubeconfig + """ + for key, value in self._validation_content.items(): + if (key in config.content and + type(config.content[key]) == list): + for element in config.content[key]: + if not isinstance(element, OrderedDict): + raise KubeconfigCorruptedError( + "Entry in {0} not a dictionary.".format(key)) + + +class KubeconfigLoader(object): + def __init__(self, validator=None): + if validator is None: + validator = KubeconfigValidator() + self._validator = validator + + def load_kubeconfig(self, path): + """ + Loads the kubeconfig found at the given path. + If no file is found at the given path, + Generate a new kubeconfig to write back. + If the kubeconfig is valid, loads the content from it. + If the kubeconfig is invalid, throw the relevant exception. + + :param path: The path to load a kubeconfig from + :type path: string + + :raises KubeconfigInaccessableError: if the kubeconfig can't be opened + :raises KubeconfigCorruptedError: if the kubeconfig is invalid + + :return: The loaded kubeconfig + :rtype: Kubeconfig + """ + try: + with open(path, "r") as stream: + loaded_content = ordered_yaml_load(stream) + except IOError as e: + if e.errno == errno.ENOENT: + loaded_content = None + else: + raise KubeconfigInaccessableError( + "Can't open kubeconfig for reading: {0}".format(e)) + except yaml.YAMLError as e: + raise KubeconfigCorruptedError( + "YamlError while loading kubeconfig: {0}".format(e)) + + loaded_config = Kubeconfig(path, loaded_content) + self._validator.validate_config(loaded_config) + + return loaded_config + + +class KubeconfigWriter(object): + def write_kubeconfig(self, config): + """ + Write config to disk. + OK if the file doesn't exist. 
+
+        :param config: The kubeconfig to write
+        :type config: Kubeconfig
+
+        :raises KubeconfigInaccessableError: if the kubeconfig
+        can't be opened for writing
+        """
+        directory = os.path.dirname(config.path)
+
+        try:
+            os.makedirs(directory)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise KubeconfigInaccessableError(
+                    "Can't create directory for writing: {0}".format(e))
+        try:
+            with os.fdopen(
+                    os.open(
+                        config.path,
+                        os.O_CREAT | os.O_RDWR | os.O_TRUNC,
+                        0o600),
+                    "w+") as stream:
+                ordered_yaml_dump(config.content, stream)
+        except (IOError, OSError) as e:
+            raise KubeconfigInaccessableError(
+                "Can't open kubeconfig for writing: {0}".format(e))
+
+
+class KubeconfigAppender(object):
+    def insert_entry(self, config, key, entry):
+        """
+        Insert entry into the array at content[key]
+        Overwrite an existing entry if they share the same name
+
+        :param config: The kubeconfig to insert an entry into
+        :type config: Kubeconfig
+        """
+        if key not in config.content:
+            config.content[key] = []
+        array = config.content[key]
+        if not isinstance(array, list):
+            raise KubeconfigError("Tried to insert into {0}, "
+                                  "which is a {1} "
+                                  "not a {2}".format(key,
+                                                     type(array),
+                                                     list))
+        found = False
+        for counter, existing_entry in enumerate(array):
+            if "name" in existing_entry and\
+                    "name" in entry and\
+                    existing_entry["name"] == entry["name"]:
+                array[counter] = entry
+                found = True
+
+        if not found:
+            array.append(entry)
+
+        config.content[key] = array
+        return config
+
+    def _make_context(self, cluster, user, alias=None):
+        """ Generate a context to associate cluster and user with a given alias."""
+        return OrderedDict([
+            ("context", OrderedDict([
+                ("cluster", cluster["name"]),
+                ("user", user["name"])
+            ])),
+            ("name", alias or user["name"])
+        ])
+
+    def insert_cluster_user_pair(self, config, cluster, user, alias=None):
+        """
+        Insert the passed cluster entry and user entry,
+        then make a context to associate them
+        and set current-context to be the new context.
+        Returns the new context
+
+        :param config: the Kubeconfig to insert the pair into
+        :type config: Kubeconfig
+
+        :param cluster: the cluster entry
+        :type cluster: OrderedDict
+
+        :param user: the user entry
+        :type user: OrderedDict
+
+        :param alias: the alias for the context; defaults to the user entry name
+        :type alias: str
+
+        :return: The generated context
+        :rtype: OrderedDict
+        """
+        context = self._make_context(cluster, user, alias=alias)
+        self.insert_entry(config, "clusters", cluster)
+        self.insert_entry(config, "users", user)
+        self.insert_entry(config, "contexts", context)
+
+        config.content["current-context"] = context["name"]
+
+        return context
+
+
+def uni_print(statement, out_file=None):
+    """
+    This function is used to properly write unicode to a file, usually
+    stdout or stderr. It ensures that the proper encoding is used if the
+    statement is not a string type.
+    """
+    if out_file is None:
+        out_file = sys.stdout
+    try:
+        # Otherwise we assume that out_file is a
+        # text writer type that accepts str/unicode instead
+        # of bytes.
+        out_file.write(statement)
+    except UnicodeEncodeError:
+        # Some file like objects like cStringIO will
+        # try to decode as ascii on python2.
+        #
+        # This can also fail if our encoding associated
+        # with the text writer cannot encode the unicode
+        # ``statement`` we've been given. This commonly
+        # happens on windows where we have some S3 key
+        # previously encoded with utf-8 that can't be
+        # encoded using whatever codepage the user has
+        # configured in their console.
+ # + # At this point we've already failed to do what's + # been requested. We now try to make a best effort + # attempt at printing the statement to the outfile. + # We're using 'ascii' as the default because if the + # stream doesn't give us any encoding information + # we want to pick an encoding that has the highest + # chance of printing successfully. + new_encoding = getattr(out_file, 'encoding', 'ascii') + # When the output of the aws command is being piped, + # ``sys.stdout.encoding`` is ``None``. + if new_encoding is None: + new_encoding = 'ascii' + new_statement = statement.encode( + new_encoding, 'replace').decode(new_encoding) + out_file.write(new_statement) + out_file.flush() From 3f03e5690ce18c157be5b8f0d69554cb50724070 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Mon, 11 Jul 2022 15:29:43 -0700 Subject: [PATCH 30/62] refactor: add method to insert project in exec order Add a method to allow for the insertion of a project anywhere in the execution order of a provider. --- .../automation/providers/base_provider.py | 19 ++++++++++ pulumi/python/automation/providers/do.py | 35 ++++++------------- 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py index 26a2caf..d3bd399 100644 --- a/pulumi/python/automation/providers/base_provider.py +++ b/pulumi/python/automation/providers/base_provider.py @@ -125,3 +125,22 @@ def display_execution_order(self, output: TextIO = sys.stdout): if last_prefix != prefix: last_prefix = prefix + + @staticmethod + def _find_position_of_project_by_path(path: str, k8s_execution_order: List[PulumiProject]) -> int: + for index, project in enumerate(k8s_execution_order): + if project.path == path: + return index + return -1 + + @staticmethod + def _insert_project(project_path_to_insert_after: str, + project: PulumiProject, + k8s_execution_order: List[PulumiProject]): + project_position = Provider._find_position_of_project_by_path(project_path_to_insert_after, + k8s_execution_order) + + if project_position < 0: + raise ValueError(f'Could not find project at path {project_path_to_insert_after}') + + k8s_execution_order.insert(project_position + 1, project) \ No newline at end of file diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index 5b7f67b..2c60f78 100644 --- a/pulumi/python/automation/providers/do.py +++ b/pulumi/python/automation/providers/do.py @@ -63,34 +63,18 @@ def k8s_execution_order(self) -> List[PulumiProject]: original_order = super().k8s_execution_order() new_order = original_order.copy() - def find_position_of_project_by_path(path: str) -> int: - for index, project in enumerate(new_order): - if project.path == path: - return index - return -1 - # Add container registry credentials project after ingress controller namespace project - namespace_project_path = 'kubernetes/nginx/ingress-controller-namespace' - namespace_project_position = find_position_of_project_by_path(namespace_project_path) - - if namespace_project_position < 0: - raise ValueError('Could not find project that creates the nginx-ingress namespace at ' - f'path {namespace_project_path}') - add_credentials_project = PulumiProject(path='infrastructure/digitalocean/container-registry-credentials', description='Registry Credentials') - new_order.insert(namespace_project_position + 1, add_credentials_project) + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller-namespace', 
+ project=add_credentials_project, + k8s_execution_order=new_order) # Add DNS record project after ingress controller project - ingress_controller_project_path = 'kubernetes/nginx/ingress-controller' - ingress_controller_project_position = find_position_of_project_by_path(ingress_controller_project_path) - - if namespace_project_position < 0: - raise ValueError('Could not find project that creates the nginx ingress controller at ' - f'path {ingress_controller_project_path}') - dns_record_project = PulumiProject(path='infrastructure/digitalocean/dns-record', description='DNS Record') - new_order.insert(ingress_controller_project_position + 1, dns_record_project) + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller', + project=dns_record_project, + k8s_execution_order=new_order) return new_order @@ -103,11 +87,12 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list "alternatively this can be specified as the environment variable " "DIGITALOCEAN_TOKEN): ") - config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') - - token = DigitalOceanProvider.token(stack_config={ 'config': config }, env_config=env_config) + token = DigitalOceanProvider.token(stack_config={'config': config}, env_config=env_config) do_cli = DoctlCli(access_token=token) + # FQDN + config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + # Kubernetes versions k8s_versions_json_str, _ = external_process.run(do_cli.get_kubernetes_versions_json()) k8s_versions_json = json.loads(k8s_versions_json_str) From 7e69688bafe7be255303733aad46e8700186563e Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Mon, 11 Jul 2022 15:30:59 -0700 Subject: [PATCH 31/62] chore: simplify function name --- pulumi/python/utility/kic-image-push/registries/do.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pulumi/python/utility/kic-image-push/registries/do.py b/pulumi/python/utility/kic-image-push/registries/do.py index c7f4fdb..6659fb9 100644 --- a/pulumi/python/utility/kic-image-push/registries/do.py +++ b/pulumi/python/utility/kic-image-push/registries/do.py @@ -1,8 +1,8 @@ import json import os from typing import List, Any -from pulumi import Output, StackReference, ResourceOptions, log -from pulumi_digitalocean import ContainerRegistry as DoContainerRegistry, ContainerRegistryDockerCredentials +from pulumi import Output, StackReference, ResourceOptions +from pulumi_digitalocean import ContainerRegistryDockerCredentials from kic_util import pulumi_config from registries.base_registry import ContainerRegistry, RegistryCredentials @@ -13,7 +13,7 @@ class DigitalOceanContainerRegistry(ContainerRegistry): def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]: super().instance(stack_name, pulumi_user) # Pull properties from the Pulumi project that defines the Digital Ocean repository - container_registry_project_name = DigitalOceanContainerRegistry.do_project_name_from_project_dir( + container_registry_project_name = DigitalOceanContainerRegistry.project_name_from_do_dir( 'container-registry') container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name}/{stack_name}" stack_ref = StackReference(container_registry_stack_ref_id) @@ -46,7 +46,7 @@ def registry_implementation_name(self) -> str: return 'Digital Ocean Container Registry' @staticmethod - def do_project_name_from_project_dir(dirname: str): + def project_name_from_do_dir(dirname: 
str):
         script_dir = os.path.dirname(os.path.abspath(__file__))
         project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'digitalocean', dirname)
         return pulumi_config.get_pulumi_project_name(project_path)

From 7045ecda7179fd5b8b7ac26785c8bc4dfd35473d Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:31:37 -0700
Subject: [PATCH 32/62] feat: add check for empty configuration file

---
 pulumi/python/automation/stack_config_parser.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/pulumi/python/automation/stack_config_parser.py b/pulumi/python/automation/stack_config_parser.py
index 86c8825..b367d07 100644
--- a/pulumi/python/automation/stack_config_parser.py
+++ b/pulumi/python/automation/stack_config_parser.py
@@ -10,6 +10,14 @@
 DEFAULT_DIR_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi']))
 
 
+class EmptyConfigurationException(RuntimeError):
+    filename: str
+
+    def __init__(self, filename: str, *args: object) -> None:
+        super().__init__(*args)
+        self.filename = filename
+
+
 class PulumiStackConfig(dict):
     config_path: Optional[str] = None
 
@@ -37,6 +45,10 @@ def _stack_config_path(stack_name: str) -> str:
 
 
 def _read(config_file_path: str) -> PulumiStackConfig:
+    # Reject empty config files so that the caller can prompt for new values
+    if os.path.getsize(config_file_path) == 0:
+        raise EmptyConfigurationException(filename=config_file_path)
+
     with open(config_file_path, 'r') as f:
         stack_config = PulumiStackConfig()
         stack_config.config_path = config_file_path

From 5ec7fa21fc04b1b47fbbb4537822ef67aca795bf Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:32:45 -0700
Subject: [PATCH 33/62] refactor: break apart read and prompt operations

---
 pulumi/python/automation/main.py | 42 ++++++++++++++++++------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
index 915e1a6..0c859b2 100755
--- a/pulumi/python/automation/main.py
+++ b/pulumi/python/automation/main.py
@@ -123,7 +123,7 @@ def main():
         sys.exit(0)
 
     env_config = env_config_parser.read()
-    stack_config = read_or_prompt_for_stack_config(provider=provider, env_config=env_config)
+    stack_config = read_stack_config(provider=provider, env_config=env_config)
     validate_with_verbosity = operation == 'validate' or debug_on
     try:
         validate(provider=provider, env_config=env_config, stack_config=stack_config,
@@ -156,28 +156,38 @@ def main():
         raise e
 
 
-def read_or_prompt_for_stack_config(provider: Provider,
-                                    env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig:
+def read_stack_config(provider: Provider,
+                      env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig:
     try:
         stack_config = stack_config_parser.read(stack_name=env_config.stack_name())
     except FileNotFoundError as e:
-        print(f' > stack configuration file at path does not exist: {e.filename}')
-        print(f'   creating new configuration based on user input')
+        print(f' > stack configuration file does not exist: {e.filename}')
+        stack_config = prompt_for_stack_config(provider, env_config, e.filename)
+    except stack_config_parser.EmptyConfigurationException as e:
+        print(f' > stack configuration file is empty: {e.filename}')
+        stack_config = prompt_for_stack_config(provider, env_config, e.filename)
 
-        stack_defaults_path = os.path.sep.join([os.path.dirname(e.filename),
-                                                'Pulumi.stackname.yaml.example'])
+    return stack_config
 
-        stack_defaults: Union[Dict[Hashable, Any], list, None]
-        with open(stack_defaults_path, 'r') as f:
-            stack_defaults = yaml.safe_load(stream=f)
-        stack_config_values = {
-            'config': provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config'])
-        }
-        with open(e.filename, 'w') as f:
-            yaml.safe_dump(data=stack_config_values, stream=f)
-        stack_config = stack_config_parser.read(stack_name=env_config.stack_name())
+
+def prompt_for_stack_config(provider: Provider,
+                            env_config: env_config_parser.EnvConfig,
+                            filename: str) -> stack_config_parser.PulumiStackConfig:
+    print('   creating new configuration based on user input')
+
+    stack_defaults_path = os.path.sep.join([os.path.dirname(filename),
+                                            'Pulumi.stackname.yaml.example'])
+
+    stack_defaults: Union[Dict[Hashable, Any], list, None]
+    with open(stack_defaults_path, 'r') as f:
+        stack_defaults = yaml.safe_load(stream=f)
+    stack_config_values = {
+        'config': provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config'])
+    }
+    with open(filename, 'w') as f:
+        yaml.safe_dump(data=stack_config_values, stream=f)
+    stack_config = stack_config_parser.read(stack_name=env_config.stack_name())
 
     return stack_config

From b198edc3b59a29ec09d17b09f5ccfdf2ba90807e Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:33:19 -0700
Subject: [PATCH 34/62] chore: change encoding to ascii because it is correct

---
 .../utility/kic-image-push/registries/base_registry.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pulumi/python/utility/kic-image-push/registries/base_registry.py b/pulumi/python/utility/kic-image-push/registries/base_registry.py
index ba77252..7171cd5 100644
--- a/pulumi/python/utility/kic-image-push/registries/base_registry.py
+++ b/pulumi/python/utility/kic-image-push/registries/base_registry.py
@@ -1,10 +1,10 @@
 import base64
 import urllib
 from urllib import parse
-from typing import Optional, List
+from typing import Optional
 
 import pulumi.log
-from pulumi import Input, Output
+from pulumi import Input
 import pulumi_docker as docker
 
 from kic_util import external_process
@@ -74,7 +74,7 @@ def instance(cls, stack_name: str, pulumi_user: str):
 
     @staticmethod
     def decode_credentials(encoded_token: str) -> RegistryCredentials:
-        decoded = str(base64.b64decode(encoded_token), 'utf-8')
+        decoded = str(base64.b64decode(encoded_token), 'ascii')
         parts = decoded.split(':', 2)
         if len(parts) != 2:
             raise ValueError("Unexpected format for decoded ECR authorization token")

From c3d3e56cdeb0f5967fce21a153c3000dec911bc6 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Mon, 11 Jul 2022 15:35:16 -0700
Subject: [PATCH 35/62] feat: add automation API support for LKE and Harbor on
 Linode

This adds support for Linode using the Automation API scripts. In order
to provide that support, the Harbor Container Registry was added to the
requirements. Now, before starting LKE, an instance of Harbor will be
started on a Linode compute instance.
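
For reference, the Linode values that the provider prompts for end up in
the stack configuration looking roughly like this (all values below are
illustrative; the Harbor passwords are collected separately as secrets):

  config:
    kic-helm:fqdn: mara.example.com
    linode:k8s_version: 1.22
    linode:instance_type: g6-standard-8
    linode:node_count: 3
    linode:region: us-central
    linode:k8s_ha: true
    linode:soa_email: admin@example.com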
--- config/pulumi/Pulumi.stackname.yaml.example | 13 +- pulumi/python/automation/providers/linode.py | 178 ++++++++++++++++++ .../Pulumi.yaml | 7 + .../__main__.py | 81 ++++++++ .../linode/harbor-configuration/Pulumi.yaml | 7 + .../linode/harbor-configuration/__main__.py | 96 ++++++++++ .../infrastructure/linode/harbor/Pulumi.yaml | 7 + .../infrastructure/linode/harbor/__main__.py | 154 +++++++++++++++ .../infrastructure/linode/lke/__main__.py | 50 +++-- .../nginx/ingress-controller/__main__.py | 18 +- .../utility/kic-image-push/registries/lke.py | 44 +++++ 11 files changed, 614 insertions(+), 41 deletions(-) create mode 100644 pulumi/python/automation/providers/linode.py create mode 100644 pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml create mode 100644 pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py create mode 100644 pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml create mode 100644 pulumi/python/infrastructure/linode/harbor-configuration/__main__.py create mode 100644 pulumi/python/infrastructure/linode/harbor/Pulumi.yaml create mode 100644 pulumi/python/infrastructure/linode/harbor/__main__.py create mode 100644 pulumi/python/utility/kic-image-push/registries/lke.py diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index e3485ca..b4718d3 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -338,11 +338,12 @@ config: # Linode Kubernetes Engine ############################################################################ # This is the Kubernetes version to install using Linode K8s. - lke:k8s_version: 1.22 - # Version of Kubernetes to use - lke:instance_type: g6-standard-8 - # This is the default instance type used Linode K8s. - lke:node_count: 3 + linode:k8s_version: 1.22 + # This is the default instance type used Linode Kubernetes + linode:instance_type: g6-standard-8 # The desired node count of the Linode K8s cluster. 
- lke:region: us-central + linode:node_count: 3 # The region to deploy the cluster + linode:region: us-central + # Flag to enable or disable HA mode for the Kubernetes cluster + linode:k8s_ha: true diff --git a/pulumi/python/automation/providers/linode.py b/pulumi/python/automation/providers/linode.py new file mode 100644 index 0000000..57a7f16 --- /dev/null +++ b/pulumi/python/automation/providers/linode.py @@ -0,0 +1,178 @@ +import base64 +from typing import List, Union, Dict, Hashable, Any, Mapping, MutableMapping + +import yaml +from pulumi import automation as auto + +from kic_util import external_process + +from .base_provider import PulumiProject, Provider, InvalidConfigurationException +from .pulumi_project import PulumiProjectEventParams, SecretConfigKey + +from .update_kubeconfig import update_kubeconfig + + +class LinodeProviderException(Exception): + pass + + +class LinodeCli: + def base_cmd(self) -> str: + return 'linode-cli' + + def get_regions(self) -> str: + return f'{self.base_cmd()} regions list --suppress-warnings' + + def get_k8s_versions(self) -> str: + return f'{self.base_cmd()} lke versions-list --suppress-warnings' + + def get_instance_sizes(self) -> str: + return f'{self.base_cmd()} linodes types --suppress-warnings' + + +class LinodeProvider(Provider): + def infra_type(self) -> str: + return 'LKE' + + def infra_execution_order(self) -> List[PulumiProject]: + return [ + PulumiProject(path='infrastructure/linode/lke', description='LKE', + on_success=LinodeProvider._update_kubeconfig), + ] + + def k8s_execution_order(self) -> List[PulumiProject]: + original_order = super().k8s_execution_order() + new_order = original_order.copy() + + harbor_secrets = [SecretConfigKey(key_name='linode:harbor_password', + prompt='Harbor administrator password'), + SecretConfigKey(key_name='linode:harbor_db_password', + prompt='Harbor database password'), + SecretConfigKey(key_name='linode:harbor_sudo_user_password', + prompt='Harbor instance sudo user password')] + harbor_project = PulumiProject(path='infrastructure/linode/harbor', + description='Harbor', + config_keys_with_secrets=harbor_secrets) + + Provider._insert_project(project_path_to_insert_after='kubernetes/secrets', + project=harbor_project, + k8s_execution_order=new_order) + + # Add container registry credentials project after ingress controller namespace project + # Harbor is configured some time after it is stood up in order to give it time to + # instantiate. 
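+        # Three projects are inserted into the standard execution order
+        # below: the Harbor registry itself (after kubernetes/secrets),
+        # the registry credentials (after the ingress controller namespace)
+        # and the Harbor configuration (after the KIC image build).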
+ add_credentials_project = PulumiProject(path='infrastructure/linode/container-registry-credentials', + description='Registry Credentials') + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller-namespace', + project=add_credentials_project, + k8s_execution_order=new_order) + + # Add project that configures Harbor for use in the cluster + harbor_config_project = PulumiProject(path='infrastructure/linode/harbor-configuration', + description='Harbor Config') + Provider._insert_project(project_path_to_insert_after='utility/kic-image-build', + project=harbor_config_project, + k8s_execution_order=new_order) + + return new_order + + def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ + Union[Dict[Hashable, Any], list, None]: + config = super().new_stack_config(env_config, defaults) + + if 'LINODE_TOKEN' not in env_config: + config['linode:token'] = input('Linode API token (this is stored in plain-text - ' + 'alternatively this can be specified as the environment variable ' + 'LINODE_TOKEN): ') + + token = LinodeProvider.token(stack_config={'config': config}, env_config=env_config) + linode_cli = LinodeCli() + + cli_env = {} + cli_env.update(env_config) + cli_env['LINODE_CLI_TOKEN'] = token + + # FQDN + config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + print(f"FQDN: {config['kic-helm:fqdn']}") + + # SOA Email + config['linode:soa_email'] = input(f'DNS Start of Authority (SOA) email address for container registry domain: ').strip() + print(f"SOA email address: {config['linode:soa_email']}") + + # Kubernetes versions + k8s_version_list, _ = external_process.run(cmd=linode_cli.get_k8s_versions(), + env=cli_env) + print(f'Supported Kubernetes versions:\n{k8s_version_list}') + default_version = defaults['linode:k8s_version'] or '1.22' + config['linode:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version + print(f"Kubernetes version: {config['linode:k8s_version']}") + + # Region + regions_list, _ = external_process.run(cmd=linode_cli.get_regions(), + env=cli_env) + print(f'Supported regions:\n{regions_list}') + default_region = defaults['linode:region'] or 'us-central' + config['linode:region'] = input(f'Region [{default_region}]: ').strip() or default_region + print(f"Region: {config['linode:region']}") + + # Instance Type + instance_type_list, _ = external_process.run(cmd=linode_cli.get_instance_sizes(), + env=cli_env) + print(f'Supported instance types:\n{instance_type_list}') + default_type = defaults['linode:instance_type'] or 'g6-standard-8' + config['linode:instance_type'] = input(f'Instance type [{default_type}]: ').strip() or default_type + print(f"Instance type: {config['linode:instance_type']}") + + # Node Count + default_node_count = defaults['linode:node_count'] or 3 + while 'linode:node_count' not in config: + node_count = input('Node count for Kubernetes cluster ' + f'[{default_node_count}]: ').strip() or default_node_count + if type(node_count) == int or node_count.isdigit(): + config['linode:node_count'] = int(node_count) + print(f"Node count: {config['linode:node_count']}") + + # HA Enabled + k8s_ha_input = input('Enable Kubernetes HA mode [Y]: ').strip().lower() + k8s_ha = k8s_ha_input in ['', 'y', 'yes', 't', 'true', '1'] + config['linode:k8s_ha'] = k8s_ha + print(f'HA mode enabled: {k8s_ha}') + + return config + + @staticmethod + def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, 
auto._config.ConfigValue]],
+              env_config: Mapping[str, str]) -> str:
+        # Token is in an environment variable or the environment variable file
+        if 'LINODE_TOKEN' in env_config:
+            return env_config['LINODE_TOKEN']
+
+        # We were given a reference to a StackConfigParser object
+        if 'config' in stack_config and 'linode:token' in stack_config['config']:
+            return stack_config['config']['linode:token']
+
+        # We were given a reference to a Pulumi Stack configuration
+        if 'linode:token' in stack_config:
+            return stack_config['linode:token'].value
+
+        # Otherwise
+        msg = 'When using the Linode provider, an API token must be specified - ' \
+              'this token can be specified with the Pulumi config parameter linode:token ' \
+              'or the environment variable LINODE_TOKEN'
+        raise InvalidConfigurationException(msg)
+
+    @staticmethod
+    def _update_kubeconfig(params: PulumiProjectEventParams):
+        if 'cluster_name' not in params.stack_outputs:
+            raise LinodeProviderException('Cannot find key [cluster_name] in stack output')
+
+        cluster_name = params.stack_outputs['cluster_name'].value
+        kubeconfig_encoded = params.stack_outputs['kubeconfig'].value
+        kubeconfig_bytes = base64.b64decode(kubeconfig_encoded)
+        kubeconfig = yaml.safe_load(kubeconfig_bytes)
+
+        update_kubeconfig(env=params.env_config, cluster_name=cluster_name, kubeconfig=kubeconfig)
+
+
+INSTANCE = LinodeProvider()
diff --git a/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml b/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml
new file mode 100644
index 0000000..44fec00
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: container-registry-credentials
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Adds container registry login credentials to the k8s cluster
diff --git a/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py b/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py
new file mode 100644
index 0000000..3242091
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py
@@ -0,0 +1,81 @@
+import json
+import os
+import base64
+from typing import List
+
+import pulumi
+from pulumi import StackReference
+from kic_util import pulumi_config
+import pulumi_kubernetes as k8s
+from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs
+
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+script_dir = os.path.dirname(os.path.abspath(__file__))
+
+
+def project_name_from_kubeconfig():
+    project_path = os.path.join(script_dir, '..', '..', 'kubeconfig')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def project_name_from_same_parent(directory: str):
+    project_path = os.path.join(script_dir, '..', directory)
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def project_name_of_namespace_project():
+    project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller-namespace')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+k8_project_name = project_name_from_kubeconfig()
+k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
+k8_stack_ref = pulumi.StackReference(k8_stack_ref_id)
+kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c))
+
+container_registry_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('harbor')}/{stack_name}"
+harbor_stack_ref = StackReference(container_registry_stack_ref_id)
+harbor_hostname_output = harbor_stack_ref.require_output('harbor_hostname')
+harbor_user_output = harbor_stack_ref.require_output('harbor_user')
+harbor_password_output = harbor_stack_ref.require_output('harbor_password')
+
+namespace_stack_ref_id = f"{pulumi_user}/{project_name_of_namespace_project()}/{stack_name}"
+ns_stack_ref = StackReference(namespace_stack_ref_id)
+namespace_name_output = ns_stack_ref.require_output('ingress_namespace_name')
+
+
+def build_docker_credentials(params: List[str]):
+    registry_host = params[0]
+    username = params[1]
+    password = params[2]
+    auth_string = f'{username}:{password}'
+    # b64encode is used (rather than encodebytes) so that no newlines are
+    # embedded in the credential value
+    auth_base64 = str(base64.b64encode(auth_string.encode('ascii')), 'ascii')
+
+    data = {
+        'auths': {
+            registry_host: {
+                'auth': auth_base64
+            }
+        }
+    }
+
+    return json.dumps(data)
+
+
+docker_credentials = pulumi.Output.all(harbor_hostname_output,
+                                       harbor_user_output,
+                                       harbor_password_output).apply(build_docker_credentials)
+
+k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig)
+
+secret = Secret(resource_name='ingress-controller-registry-secret',
+                args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials},
+                                    type='kubernetes.io/dockerconfigjson',
+                                    metadata={'namespace': namespace_name_output,
+                                              'name': 'ingress-controller-registry'}),
+                opts=pulumi.ResourceOptions(provider=k8s_provider))
+
+pulumi.export('ingress-controller-registry-secret', secret)
diff --git a/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml b/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml
new file mode 100644
index 0000000..4f6722b
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: harbor-configuration
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Configures Harbor Container Registry
diff --git a/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py b/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py
new file mode 100644
index 0000000..38b42e3
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py
@@ -0,0 +1,96 @@
+import base64
+import json
+import urllib.request
+import urllib.error
+import os
+import time
+from typing import List
+
+import pulumi
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+
+
+def project_name_from_harbor_dir():
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    project_path = os.path.join(script_dir, '..', 'harbor')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+harbor_project_name = project_name_from_harbor_dir()
+stack_ref_id = f"{pulumi_user}/{harbor_project_name}/{stack_name}"
+stack_ref = pulumi.StackReference(stack_ref_id)
+harbor_hostname_output = stack_ref.require_output('harbor_hostname')
+harbor_user_output = stack_ref.require_output('harbor_user')
+harbor_password_output = stack_ref.require_output('harbor_password')
+
+
+def configure_harbor(params: List[str]) -> bool:
+    hostname = params[0]
+    user = params[1]
+    password = params[2]
+    base_url = f'https://{hostname}/api/v2.0'
+    base64creds = str(base64.b64encode(f'{user}:{password}'.encode('ascii')), 'ascii')
+    max_retries = 12
+    retries = 0
+    timeout = 1000
+
+    def harbor_is_up() -> bool:
+        url = f'{base_url}/health'
+        request = urllib.request.Request(url=url, method='GET')
+        request.add_header(key='Authorization', val=f'Basic {base64creds}')
+
+        try:
+            with urllib.request.urlopen(url=request, timeout=timeout) as context:
+                if context.getcode() != 200:
+                    return False
+
+                health_check = json.load(context)
+                components = health_check['components']
+                for component in components:
+                    if component['status'] != 'healthy':
+                        pulumi.log.info(f"Harbor component [{component['name']}] is not healthy")
+                        return False
+
+                return True
+        except urllib.error.URLError as e:
+            # Don't retry for name resolution failures
+            if e.errno == -3:
+                raise e
+
+            pulumi.log.info(f'Unable to connect to Harbor [try {retries+1} of {max_retries}]: {e}')
+            return False
+
+    def modify_default_project_registry():
+        url = f'{base_url}/projects/library/metadatas/public'
+        request = urllib.request.Request(url=url, method='PUT')
+        request.add_header(key='Authorization', val=f'Basic {base64creds}')
+        request.add_header(key='Content-Type', val='application/json')
+        body = {
+            'public': 'false'
+        }
+        body_json = json.dumps(body)
+        request.data = body_json.encode('utf-8')
+        urllib.request.urlopen(url=request, timeout=timeout)
+
+    while not harbor_is_up():
+        retries += 1
+        timeout = 1000 * retries
+        time.sleep(timeout)
+
+        if retries >= max_retries:
+            raise RuntimeError(f'Harbor has not come up after {retries} retries')
+
+    pulumi.log.info('Harbor is up, modifying default registry')
+    modify_default_project_registry()
+
+    return True
+
+
+harbor_is_alive = pulumi.Output.all(harbor_hostname_output, harbor_user_output, harbor_password_output)\
+    .apply(configure_harbor)
+
+pulumi.export('harbor_is_alive', harbor_is_alive)
diff --git a/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml b/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml
new file mode 100644
index 0000000..28dabbc
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: harbor
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new Harbor Container Registry
diff --git a/pulumi/python/infrastructure/linode/harbor/__main__.py b/pulumi/python/infrastructure/linode/harbor/__main__.py
new file mode 100644
index 0000000..3aa914a
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor/__main__.py
@@ -0,0 +1,154 @@
+import base64
+import os
+from typing import Mapping
+from collections import namedtuple
+
+import pulumi
+import pulumi_linode as linode
+import pulumi_kubernetes as k8s
+from pulumi_kubernetes.core.v1 import Secret
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+
+# Configuration details for the K8 cluster
+config = pulumi.Config('linode')
+
+api_token = config.get('token') or \
+            config.get_secret('token') or \
+            os.getenv('LINODE_TOKEN') or \
+            os.getenv('LINODE_CLI_TOKEN')
+
+# For whatever reason, the Linode provider does not pick up the token from the
+# stack configuration nor from the environment variables, so we do that work
+# here.
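+#
+# Note that an explicit provider like this one only applies to resources
+# that receive it through pulumi.ResourceOptions(provider=...), as the
+# Instance resource below does.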
+provider = linode.Provider(resource_name='linode_provider', token=api_token) + +instance_type = config.get('harbor_instance_type') or 'g6-nanode-1' +region = config.require('region') + +# harbor_api_token = linode.Token(resource_name='harbor_token', +# scopes='domains:read_write', +# expiry=None, +# label='Token used by Harbor to create DNS records', +# opts=pulumi.ResourceOptions(provider=provider)) + +# This is the internal Linode ID used to refer to the StackScript +# (https://www.linode.com/products/stackscripts/) that backs the +# Harbor marketplace image. +harbor_stackscript_id = 912262 +# Valid options are: linode/ubuntu20.04 and linode/debian11 + +harbor_os_image = 'linode/ubuntu20.04' + + +def project_name_from_kubernetes_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +def project_name_from_infrastructure_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +k8_project_name = project_name_from_infrastructure_dir('kubeconfig') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) +k8s_provider = k8s.Provider(resource_name=f'lke-provider', + kubeconfig=kubeconfig) + +secrets_project_name = project_name_from_kubernetes_dir('secrets') +secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}" +secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id) +pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets') + +harbor_k8s_secrets = Secret.get(resource_name='pulumi-secret-linode', + id=pulumi_secrets['linode'], + opts=pulumi.ResourceOptions(provider=k8s_provider)).data + +HarborSecrets = namedtuple('HarborSecrets', + ['harbor_password', 'harbor_db_password', 'harbor_sudo_user_password']) + + +def extract_secrets(secrets: Mapping[str, str]) -> HarborSecrets: + def decode_k8s_secret(key: str): + base64_string = secrets[key] + byte_data = base64.b64decode(base64_string) + password = str(byte_data, 'utf-8') + return password + + return HarborSecrets(harbor_password=decode_k8s_secret('harbor_password'), + harbor_db_password=decode_k8s_secret('harbor_db_password'), + harbor_sudo_user_password=decode_k8s_secret('harbor_sudo_user_password')) + + +def build_stackscript_data(params) -> Mapping[str, str]: + # token: linode.Token = params[0] + secrets: HarborSecrets = params[0] + + # Read a public key into memory if specified in the config + pubkey_path = config.get('harbor_ssh_key_path') + if pubkey_path and os.path.exists(pubkey_path): + with open(pubkey_path, 'r') as fp: + pubkey = fp.readline() + else: + pubkey = None + + return { + # The Harbor admin password + 'harbor_password': secrets.harbor_password, + # The Harbor database password + 'harbor_db_password': secrets.harbor_db_password, + # Admin Email for the Harbor server + 'soa_email_address': config.require('soa_email'), + # The subdomain for the Linode's DNS record (Requires API token) + 'subdomain': 'registry', + # The limited sudo user to be created for the Linode + 'username': 'harbor', + # The password for the limited sudo user + 'password': secrets.harbor_sudo_user_password, + # The SSH Public Key that will be used to access the 
Linode
+        'pubkey': pubkey,
+        # Disable root access over SSH? (Yes/No)
+        'disable_root': 'Yes'
+    }
+
+
+harbor_user = 'admin'
+harbor_secrets = pulumi.Output.unsecret(harbor_k8s_secrets).apply(extract_secrets)
+stackscript_data = pulumi.Output.all(harbor_secrets).apply(build_stackscript_data)
+
+instance = linode.Instance(resource_name='harbor',
+                           region=region,
+                           image=harbor_os_image,
+                           stackscript_id=harbor_stackscript_id,
+                           stackscript_data=stackscript_data,
+                           type=instance_type,
+                           private_ip=False,
+                           opts=pulumi.ResourceOptions(provider=provider))
+
+
+def build_hostname(ip_address: str) -> str:
+    ip_parts = ip_address.split(sep='.')
+    hostname = ''
+    for i, part in enumerate(ip_parts):
+        hostname += part
+        if i != len(ip_parts) - 1:
+            hostname += '-'
+
+    hostname += '.ip.linodeusercontent.com'
+    return hostname
+
+
+harbor_hostname = instance.ip_address.apply(build_hostname)
+
+pulumi.export('harbor_instance', instance)
+pulumi.export('harbor_hostname', harbor_hostname)
+pulumi.export('harbor_user', pulumi.Output.secret(harbor_user))
+pulumi.export('harbor_password', pulumi.Output.secret(harbor_secrets.harbor_password))
diff --git a/pulumi/python/infrastructure/linode/lke/__main__.py b/pulumi/python/infrastructure/linode/lke/__main__.py
index f9b7d53..886d09c 100644
--- a/pulumi/python/infrastructure/linode/lke/__main__.py
+++ b/pulumi/python/infrastructure/linode/lke/__main__.py
@@ -1,44 +1,42 @@
+import os
 import pulumi
 import pulumi_linode as linode
-from kic_util import pulumi_config
 
 # Configuration details for the K8 cluster
-config = pulumi.Config('lke')
-instance_size = config.get('instance_size')
-if not instance_size:
-    instance_size = 'g6-standard-4'
-region = config.get('region')
-if not region:
-    region = 'us-west'
-node_count = config.get('node_count')
-if not node_count:
-    node_count = 3
-k8s_version = config.get('k8s_version')
-if not k8s_version:
-    k8s_version = '1.22'
-k8s_ha = config.get('k8s_ha')
-if not k8s_ha:
-    k8s_ha = True
+config = pulumi.Config('linode')
 
-stack_name = pulumi.get_stack()
-project_name = pulumi.get_project()
-pulumi_user = pulumi_config.get_pulumi_user()
+api_token = config.get('token') or \
+            config.get_secret('token') or \
+            os.getenv('LINODE_TOKEN') or \
+            os.getenv('LINODE_CLI_TOKEN')
 
-# Derive our names for the cluster and the pool
-resource_name = "lke-" + stack_name + "-cluster"
+# For whatever reason, the Linode provider does not pick up the token from the
+# stack configuration nor from the environment variables, so we do that work
+# here.
+provider = linode.Provider(resource_name='linode_provider', token=api_token) + +instance_type = config.require('instance_type') +region = config.require('region') +node_count = config.require_int('node_count') +k8s_version = config.require('k8s_version') +k8s_ha = config.require_bool('k8s_ha') + +stack = pulumi.get_stack() +resource_name = f'lke-{stack}-cluster' # Create a linode cluster -cluster = linode.LkeCluster(resource_name, +cluster = linode.LkeCluster(resource_name=resource_name, k8s_version=k8s_version, control_plane=linode.LkeClusterControlPlaneArgs( high_availability=k8s_ha), - label=resource_name, + label=f'MARA [{stack}]', pools=[linode.LkeClusterPoolArgs( count=node_count, - type=instance_size, + type=instance_type, )], region=region, - tags=["mara"]) + tags=["mara"], + opts=pulumi.ResourceOptions(provider=provider)) # Export the clusters' kubeconfig pulumi.export("cluster_name", resource_name) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 13e2cb5..03d9608 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -1,12 +1,10 @@ import os -import typing -from typing import Dict +from typing import Dict, Mapping, Any, Optional import pulumi from pulumi import Output, StackReference import pulumi_kubernetes as k8s from pulumi_kubernetes.core.v1 import Service -import pulumi_kubernetes.helm.v3 as helm from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs from kic_util import pulumi_config @@ -53,7 +51,7 @@ def project_name_from_same_parent(directory: str): return pulumi_config.get_pulumi_project_name(project_path) -def find_image_tag(repository: dict) -> typing.Optional[str]: +def find_image_tag(repository: dict) -> Optional[str]: """ Inspect the repository dictionary as returned from a stack reference for a valid image_tag_alias or image_tag. 
If found, return the image_tag_alias or image_tag if found, otherwise return None @@ -70,8 +68,8 @@ def find_image_tag(repository: dict) -> typing.Optional[str]: return None -def build_chart_values(repo_push: dict) -> helm.ChartOpts: - values: Dict[str, Dict[str, typing.Any]] = { +def build_chart_values(repo_push: dict) -> Mapping[str, Any]: + values: Dict[str, Dict[str, Any]] = { 'controller': { 'healthStatus': True, 'appprotect': { @@ -216,12 +214,14 @@ def namespace_by_name(name): # Force update if required force_update=True) -kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns])) +kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns], + provider=k8s_provider)) pstatus = kic_chart.status -srv = Service.get("nginx-ingress", - Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress")) +srv = Service.get(resource_name="nginx-ingress", + id=Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress"), + opts=pulumi.ResourceOptions(provider=k8s_provider)) ingress_service = srv.status diff --git a/pulumi/python/utility/kic-image-push/registries/lke.py b/pulumi/python/utility/kic-image-push/registries/lke.py new file mode 100644 index 0000000..c0fa2fa --- /dev/null +++ b/pulumi/python/utility/kic-image-push/registries/lke.py @@ -0,0 +1,44 @@ +import json +import os +from typing import List, Any +from pulumi import Output, StackReference, ResourceOptions, log + +from kic_util import pulumi_config +from registries.base_registry import ContainerRegistry, RegistryCredentials + + +class LinodeHarborRegistry(ContainerRegistry): + @classmethod + def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]: + super().instance(stack_name, pulumi_user) + # Pull properties from the Pulumi project that defines the Linode Harbor repository + container_registry_project_name = LinodeHarborRegistry.project_name_from_linode_dir('harbor') + container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name}/{stack_name}" + stack_ref = StackReference(container_registry_stack_ref_id) + harbor_hostname_output = stack_ref.require_output('harbor_hostname') + harbor_user_output = stack_ref.require_output('harbor_user') + harbor_password_output = stack_ref.require_output('harbor_password') + + def _make_instance(params: List[Any]) -> LinodeHarborRegistry: + hostname = params[0] + username = params[1] + password = params[2] + + registry_url = f'{hostname}/library/ingress-controller' + credentials = RegistryCredentials(username=username, password=password) + + return cls(stack_name=stack_name, pulumi_user=pulumi_user, registry_url=registry_url, credentials=credentials) + + return Output.all(harbor_hostname_output, harbor_user_output, harbor_password_output).apply(_make_instance) + + @staticmethod + def project_name_from_linode_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'linode', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + def registry_implementation_name(self) -> str: + return 'Harbor' + + +CLASS = LinodeHarborRegistry From ef2ffb2dc8663ab8c21c2c21f53a9cb4d228bba9 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Mon, 11 Jul 2022 15:45:10 -0700 Subject: [PATCH 36/62] refactor: remove bash provision and destroy scripts --- bin/destroy.sh | 101 ----------- bin/destroy_aws.sh | 162 ------------------ bin/destroy_do.sh | 127 -------------- 
bin/destroy_kube.sh | 131 --------------- bin/destroy_lke.sh | 154 ----------------- bin/start.sh | 140 ---------------- bin/start_aws.sh | 396 -------------------------------------------- bin/start_do.sh | 380 ------------------------------------------ bin/start_kube.sh | 363 ---------------------------------------- bin/start_lke.sh | 388 ------------------------------------------- 10 files changed, 2342 deletions(-) delete mode 100755 bin/destroy.sh delete mode 100755 bin/destroy_aws.sh delete mode 100755 bin/destroy_do.sh delete mode 100755 bin/destroy_kube.sh delete mode 100755 bin/destroy_lke.sh delete mode 100755 bin/start.sh delete mode 100755 bin/start_aws.sh delete mode 100755 bin/start_do.sh delete mode 100755 bin/start_kube.sh delete mode 100755 bin/start_lke.sh diff --git a/bin/destroy.sh b/bin/destroy.sh deleted file mode 100755 index 4f38632..0000000 --- a/bin/destroy.sh +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based -# projects. -# -if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then - echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." - echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." - echo " " - exit 1 -else - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" -fi - -if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -echo " " -echo "Notice! This shell script will read the config/environment file to determine which pulumi stack to destroy." -echo "Based on the type of stack it will either run the ./bin/destroy_kube.sh or the ./bin/destroy_aws.sh script." -echo "If this is not what you want to do, please abort the script by typing ctrl-c and running the appropriate " -echo "script manually." -echo " " - -# Sleep so we are seen... 
-sleep 5 - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -# -# Determine what destroy script we need to run -# -if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config>/dev/null 2>&1; then - INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)" - if [ $INFRA == 'AWS' ]; then - echo "Destroying an AWS based stack; if this is not right please type ctrl-c to abort this script." - sleep 5 - ${script_dir}/destroy_aws.sh - exit 0 - elif [ $INFRA == 'kubeconfig' ]; then - echo "Destroying a kubeconfig based stack; if this is not right please type ctrl-c to abort this script." - sleep 5 - ${script_dir}/destroy_kube.sh - exit 0 - elif [ $INFRA == 'DO' ]; then - echo "Destroying a Digital Ocean based stack; if this is not right please type ctrl-c to abort this script." - sleep 5 - ${script_dir}/destroy_do.sh - exit 0 - elif [ $INFRA == 'LKE' ]; then - echo "Destroying a Linode LKE based stack; if this is not right please type ctrl-c to abort this script." - sleep 5 - ${script_dir}/destroy_lke.sh - exit 0 - else - print "No infrastructure set in config file; aborting!" - exit 1 - fi -else - print "No infrastructure set in config file; aborting!" - exit 2 -fi diff --git a/bin/destroy_aws.sh b/bin/destroy_aws.sh deleted file mode 100755 index d439b44..0000000 --- a/bin/destroy_aws.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -function validate_aws_credentials() { - pulumi_aws_profile="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get aws:profile || true)" - if [ "${pulumi_aws_profile}" != "" ]; then - profile_arg="--profile ${pulumi_aws_profile}" - elif [[ -n "${AWS_PROFILE+x}" ]]; then - profile_arg="--profile ${AWS_PROFILE}" - else - profile_arg="" - fi - - echo "Validating AWS credentials" - if ! 
"${script_dir}/../pulumi/python/venv/bin/aws" ${profile_arg} sts get-caller-identity > /dev/null; then - echo >&2 "AWS credentials have expired or are not valid" - exit 2 - fi -} - - -APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) -NGINX=(kubernetes/nginx/ingress-controller utility/kic-image-build utility/kic-image-push) -AWSINFRA=(ecr eks vpc) - -if command -v aws > /dev/null; then - validate_aws_credentials -fi - -# -# This is a temporary process until we complete the directory reorg and move the start/stop -# process into more solid code. -# - -# Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi -done - -# Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi -done - -# TODO: figure out a more elegant way to do the CRD removal for prometheus #83 -# This is a hack for now to remove the CRD's for prometheus-kube-stack -# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -set +o errexit # don't abort on nonzero exit status for these commands -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 -set -o errexit # abort on nonzero exit status - -# Destroy NGINX components -for project_dir in "${NGINX[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" - fi -done - -# Clean up the kubeconfig project -for project_dir in "kubeconfig" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" - fi -done - -# Destroy the infrastructure -for project_dir in "${AWSINFRA[@]}" ; do - echo 
"$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/aws/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/aws/${project_dir} --emoji --stack ${PULUMI_STACK}" - echo "Destroying aws/${project_dir}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/aws/${project_dir}" - fi -done \ No newline at end of file diff --git a/bin/destroy_do.sh b/bin/destroy_do.sh deleted file mode 100755 index e2f5113..0000000 --- a/bin/destroy_do.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - - -APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) -NGINX=(kubernetes/nginx/ingress-controller-repo-only) -INFRA=(kubeconfig digitalocean/domk8s) - -# -# This is a temporary process until we complete the directory reorg and move the start/stop -# process into more solid code. 
diff --git a/bin/destroy_do.sh b/bin/destroy_do.sh
deleted file mode 100755
index e2f5113..0000000
--- a/bin/destroy_do.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v pulumi > /dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 > /dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v node > /dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-APPLICATIONS=(sirius)
-KUBERNETES=(observability logagent logstore certmgr prometheus)
-NGINX=(kubernetes/nginx/ingress-controller-repo-only)
-INFRA=(kubeconfig digitalocean/domk8s)
-
-#
-# This is a temporary process until we complete the directory reorg and move the start/stop
-# process into more solid code.
-#
-
-# Destroy the application(s)
-for project_dir in "${APPLICATIONS[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}"
-  fi
-done
-
-# Destroy other K8s resources
-for project_dir in "${KUBERNETES[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}"
-  fi
-done
-
-# TODO: figure out a more elegant way to do the CRD removal for prometheus #83
-# This is a hack for now to remove the CRDs for prometheus-kube-stack
-# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart
-kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1
-
-# Destroy NGINX components
-for project_dir in "${NGINX[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}"
-  fi
-done
-
-# Clean up the kubeconfig project
-for project_dir in "${INFRA[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
-  fi
-done
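Note that destroy_do.sh (and destroy_kube.sh below) run the kubectl delete crd commands under set -o errexit without any guard, so a single failure (a CRD that was never installed, or an unreachable cluster) aborts the whole teardown. destroy_aws.sh toggles errexit off around the block, and destroy_lke.sh appends || true for exactly this reason (its own comment notes the script "was bombing out"). A minimal sketch of the tolerant form, looping over the CRD names from the hunk above:

    for crd in alertmanagerconfigs alertmanagers podmonitors probes \
               prometheuses prometheusrules servicemonitors thanosrulers; do
      # ignore failures so errexit does not abort the teardown
      kubectl delete crd "${crd}.monitoring.coreos.com" > /dev/null 2>&1 || true
    done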
diff --git a/bin/destroy_kube.sh b/bin/destroy_kube.sh
deleted file mode 100755
index c90c5e7..0000000
--- a/bin/destroy_kube.sh
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v pulumi > /dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 > /dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v node > /dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-APPLICATIONS=(sirius)
-KUBERNETES=(observability logagent logstore certmgr prometheus)
-NGINX=(kubernetes/nginx/ingress-controller-repo-only)
-INFRA=(kubeconfig digitalocean/domk8s)
-
-#
-# This is a temporary process until we complete the directory reorg and move the start/stop
-# process into more solid code.
-#
-
-# Destroy the application(s)
-for project_dir in "${APPLICATIONS[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}"
-  fi
-done
-
-# Destroy other K8s resources
-for project_dir in "${KUBERNETES[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}"
-  fi
-done
-
-# TODO: figure out a more elegant way to do the CRD removal for prometheus #83
-# This is a hack for now to remove the CRDs for prometheus-kube-stack
-# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart
-kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1
-kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1
-
-# Destroy NGINX components
-for project_dir in "${NGINX[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}"
-  fi
-done
-
-# Clean up the kubeconfig project
-for project_dir in "${INFRA[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
-  fi
-done
-
-
-
-
diff --git a/bin/destroy_lke.sh b/bin/destroy_lke.sh
deleted file mode 100755
index af3e400..0000000
--- a/bin/destroy_lke.sh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v pulumi > /dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 > /dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node > /dev/null; then
-  if [ -x "${script_dir}/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/venv/bin] to PATH"
-    export PATH="${script_dir}/venv/bin:$PATH"
-
-    if ! command -v node > /dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive > /dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-APPLICATIONS=(sirius)
-KUBERNETES=(observability logagent logstore certmgr prometheus)
-NGINX=(ingress-controller-repo-only)
-LINODE=(lke)
-KUBECONFIG=(kubeconfig)
-
-#
-# This is a temporary process until we complete the directory reorg and move the start/stop
-# process into more solid code.
-#
-
-# Destroy the application(s)
-for project_dir in "${APPLICATIONS[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}"
-  fi
-done
-
-# Destroy other K8s resources
-for project_dir in "${KUBERNETES[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}"
-  fi
-done
-
-# TODO: figure out a more elegant way to do the CRD removal for prometheus #83
-# This is a hack for now to remove the CRDs for prometheus-kube-stack
-# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart
-# This was bombing out if K8s was not responding; hence the || true...
-kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 || true
-
-# Destroy NGINX components
-for project_dir in "${NGINX[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir}"
-  fi
-done
-
-#
-# We need to do a cleanup of Kubernetes, making sure that we get rid of our PVs so they don't hang around
-#
-for NAMESPACE in $(kubectl get namespaces) ; do
-  # Change to a namespace
-  kubectl config set-context --current --namespace=$NAMESPACE
-  # Delete all pods
-  kubectl delete pod --all
-  # Delete all volume claims
-  kubectl delete pvc --all
-  # Delete all persistent volumes
-  kubectl delete pv --all
-done
-
-# Clean up the kubeconfig project
-for project_dir in "${KUBECONFIG[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
-  fi
-done
-
-# Clean up the linode project
-for project_dir in "${LINODE[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/linode/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/linode/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/linode/${project_dir}"
-  fi
-done
diff --git a/bin/start.sh b/bin/start.sh
deleted file mode 100755
index 92e9c62..0000000
--- a/bin/start.sh
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-# Unset virtual environment if defined....
-unset VIRTUAL_ENV
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based
-# projects.
-#
-if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then
-  echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process."
-  echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
-  echo " "
-  exit 1
-else
-  echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-  export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-fi
-
-if ! command -v pulumi >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-    if ! command -v pulumi >/dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 >/dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-echo " "
-echo "NOTICE! This shell script will call the appropriate helper script depending on your answer to the next question."
-echo " "
-echo "This script currently supports standing up AWS, Linode, and Digital Ocean Kubernetes deployments, provided "
-echo "the correct credentials are supplied. It also supports the use of a kubeconfig file with a defined cluster name"
-echo "and context, which must be provided by the user."
-echo " "
-echo "Please read the documentation for more details."
-echo " "
-# Sleep so we are seen...
-sleep 5
-
-if [ -s "${script_dir}/../config/pulumi/environment" ] && grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then
-  source "${script_dir}/../config/pulumi/environment"
-  echo "Environment data found for stack: ${PULUMI_STACK}"
-  while true; do
-    read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn
-    case $yn in
-    [Yy]*) # We have an environment file and they want to keep it....
-      if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-        INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)"
-        if [ "$INFRA" == 'AWS' ]; then
-          exec ${script_dir}/start_aws.sh
-          exit 0
-        elif [ "$INFRA" == 'kubeconfig' ]; then
-          exec ${script_dir}/start_kube.sh
-          exit 0
-        elif [ "$INFRA" == 'DO' ]; then
-          exec ${script_dir}/start_do.sh
-          exit 0
-        elif [ "$INFRA" == 'LKE' ]; then
-          exec ${script_dir}/start_lke.sh
-          exit 0
-        else
-          echo "Corrupt or non-existent configuration file; please restart this script and answer no to delete and reconfigure."
-          exit 1
-        fi
-      else
-        echo "Corrupt or non-existent configuration file; please restart this script and answer no to delete and reconfigure."
-        exit 1
-      fi
-      break
-      ;;
-    [Nn]*) # They want to remove and reconfigure
-      rm -f ${script_dir}/../config/pulumi/environment
-      break
-      ;;
-    *) echo "Please answer yes or no." ;;
-    esac
-  done
-fi
-
-while true; do
-  read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode? " infra
-  case $infra in
-  [Aa]*)
-    echo "Calling AWS startup script"
-    exec ${script_dir}/start_aws.sh
-    exit 0
-    break
-    ;;
-  [Kk]*)
-    echo "Calling kubeconfig startup script"
-    exec ${script_dir}/start_kube.sh
-    exit 0
-    break
-    ;;
-  [Dd]*)
-    echo "Calling Digital Ocean startup script"
-    exec ${script_dir}/start_do.sh
-    exit 0
-    break
-    ;;
-  [Ll]*)
-    echo "Calling Linode startup script"
-    exec ${script_dir}/start_lke.sh
-    exit 0
-    break
-    ;;
-  *) echo "Please answer a, d, k, or l." ;;
-  esac
-done
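One readability note on the branches in start.sh above: each pairs exec with a following exit 0 and break, but exec replaces the current shell process with the helper script, so control never returns and those trailing lines are unreachable. A two-line illustration of the semantics:

    exec echo "this process is now echo"
    echo "never printed - the shell that would have run this no longer exists"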
diff --git a/bin/start_aws.sh b/bin/start_aws.sh
deleted file mode 100755
index dbf9489..0000000
--- a/bin/start_aws.sh
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v pulumi >/dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 >/dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v node >/dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v git >/dev/null; then
-  echo >&2 "git must be installed to continue"
-  exit 1
-fi
-
-if ! command -v make >/dev/null; then
-  echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-if ! command -v docker >/dev/null; then
-  echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then
-  touch "${script_dir}/../config/pulumi/environment"
-fi
-
-if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then
-  read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK
-  echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment"
-fi
-
-# Do we have the submodule source....
-#
-# Note: We had been checking for .git, but this is not guaranteed to be
-# there if we build the docker image or use a tarball. So now we look
-# for the src subdirectory which should always be there.
-#
-if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then
-  echo "Submodule source found"
-else
-  # Error out with instructions.
-  echo "Bank of Sirius submodule not found"
-  echo " "
-  echo "Please run:"
-  echo "    git submodule update --init --recursive --remote"
-  echo "Inside your git directory and re-run this script"
-  echo ""
-  echo >&2 "Unable to find submodule - exiting"
-  exit 3
-fi
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-# Create the stack if it does not already exist
-# We skip over the tools directory, because that uses a unique stack for setup of the
-# kubernetes components for installations without them.
-find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \;
-
-if [[ -z "${AWS_PROFILE+x}" ]]; then
-  echo "AWS_PROFILE not set"
-  if ! grep --quiet '^AWS_PROFILE=.*' "${script_dir}/../config/pulumi/environment"; then
-    read -r -e -p "Enter the name of the AWS Profile to use in all projects (leave blank for default): " AWS_PROFILE
-    if [[ -z "${AWS_PROFILE}" ]]; then
-      AWS_PROFILE=default
-    fi
-    echo "AWS_PROFILE=${AWS_PROFILE}" >>"${script_dir}/../config/pulumi/environment"
-    source "${script_dir}/../config/pulumi/environment"
-    find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set aws:profile "${AWS_PROFILE}" \;
-  fi
-else
-  echo "Using AWS_PROFILE from environment: ${AWS_PROFILE}"
-fi
-
-# Check for default region in environment; set if not found
-# The region is set by checking the following in the order below:
-#  * AWS_DEFAULT_REGION environment variable
-#  * config/environment values of AWS_DEFAULT_REGION
-#  * prompt the user for a region
-
-if [[ -z "${AWS_DEFAULT_REGION+x}" ]]; then
-  echo "AWS_DEFAULT_REGION not set"
-  if ! grep --quiet '^AWS_DEFAULT_REGION=.*' "${script_dir}/../config/pulumi/environment"; then
-    # First, check the config file for our current profile. If there
-    # is no AWS command we assume that there is no config file, which
-    # may not always be a valid assumption.
-    if ! command -v aws >/dev/null; then
-      AWS_CLI_DEFAULT_REGION="us-east-1"
-    elif aws configure get region --profile "${AWS_PROFILE}" >/dev/null; then
-      AWS_CLI_DEFAULT_REGION="$(aws configure get region --profile "${AWS_PROFILE}")"
-    else
-      AWS_CLI_DEFAULT_REGION="us-east-1"
-    fi
-
-    read -r -e -p "Enter the name of the AWS Region to use in all projects [${AWS_CLI_DEFAULT_REGION}]: " AWS_DEFAULT_REGION
-    echo "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-${AWS_CLI_DEFAULT_REGION}}" >>"${script_dir}/../config/pulumi/environment"
-    source "${script_dir}/../config/pulumi/environment"
-    find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set aws:region "${AWS_DEFAULT_REGION}" \;
-  fi
-else
-  echo "Using AWS_DEFAULT_REGION from environment/config: ${AWS_DEFAULT_REGION}"
-  pulumi config set aws:region -C "${script_dir}/../pulumi/python/config" "${AWS_DEFAULT_REGION}"
-fi
-
-function createpw() {
-  PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16)
-  echo $PWORD
-}
-
-# The Bank of Sirius configuration file is stored in the ./sirius/config
-# directory. This is because we cannot pull secrets from different project
-# directories.
-#
-# This work-around is expected to be obsoleted by the work described in
-# https://github.com/pulumi/pulumi/issues/4604, specifically around issue
-# https://github.com/pulumi/pulumi/issues/2307
-#
-# Check for secrets being set
-#
-echo "Checking for required secrets"
-
-# Sirius Accounts Database
-if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  ACCOUNTS_PW=$(createpw)
-  pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW
-fi
-
-# Sirius Ledger Database
-if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  LEDGER_PW=$(createpw)
-  pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW
-fi
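Because both passwords are set with pulumi config set --secret, they are stored encrypted in the sirius project's per-stack configuration file. To read one back later, Pulumi can decrypt it on demand, for example:

    pulumi config get sirius:accounts_pwd -C "${script_dir}/../pulumi/python/kubernetes/applications/sirius"
    # or list all config for the project, decrypting secret values:
    pulumi config -C "${script_dir}/../pulumi/python/kubernetes/applications/sirius" --show-secrets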
-# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted)
-# This is for the deployment that is set up as part of the prometheus operator driven prometheus-kube-stack.
-#
-if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Existing password found for grafana admin user"
-else
-  echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard"
-  echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text"
-  echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard."
-  pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config
-fi
-
-#
-# Show colorful fun headers if the right utils are installed and NO_COLOR is not set
-#
-function header() {
-  if [ -z ${NO_COLOR+x} ]; then
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat
-  else
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1"
-  fi
-}
-
-function add_kube_config() {
-  pulumi_region="$(pulumi ${pulumi_args} config get aws:region -C ${script_dir}/../pulumi/python/config)"
-  if [ "${pulumi_region}" != "" ]; then
-    region_arg="--region ${pulumi_region}"
-  else
-    region_arg=""
-  fi
-  pulumi_aws_profile="$(pulumi ${pulumi_args} config get aws:profile -C ${script_dir}/../pulumi/python/config)"
-  if [ "${pulumi_aws_profile}" != "" ]; then
-    echo "Using AWS profile [${pulumi_aws_profile}] from Pulumi configuration"
-    profile_arg="--profile ${pulumi_aws_profile}"
-  elif [[ -n "${AWS_PROFILE+x}" ]]; then
-    echo "Using AWS profile [${AWS_PROFILE}] from environment"
-    profile_arg="--profile ${AWS_PROFILE}"
-  else
-    profile_arg=""
-  fi
-
-  cluster_name="$(pulumi ${pulumi_args} stack output cluster_name -C ${script_dir}/../pulumi/python/infrastructure/aws/eks)"
-
-  echo "adding ${cluster_name} cluster to local kubeconfig"
-  "${script_dir}"/../pulumi/python/venv/bin/aws ${profile_arg} ${region_arg} eks update-kubeconfig --name ${cluster_name}
-}
-
-function validate_aws_credentials() {
-  pulumi_aws_profile="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get aws:profile || true)"
-  if [ "${pulumi_aws_profile}" != "" ]; then
-    profile_arg="--profile ${pulumi_aws_profile}"
-  elif [[ -n "${AWS_PROFILE+x}" ]]; then
-    profile_arg="--profile ${AWS_PROFILE}"
-  else
-    profile_arg=""
-  fi
-
-  echo "Validating AWS credentials"
-  if ! aws ${profile_arg} sts get-caller-identity >/dev/null; then
-    echo >&2 "AWS credentials have expired or are not valid"
-    exit 2
-  fi
-}
-
-function retry() {
-  local -r -i max_attempts="$1"
-  shift
-  local -i attempt_num=1
-  until "$@"; do
-    if ((attempt_num == max_attempts)); then
-      echo "Attempt ${attempt_num} failed and there are no more attempts left!"
-      return 1
-    else
-      echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..."
-      sleep $((attempt_num++))
-    fi
-  done
-}
-
-if command -v aws >/dev/null; then
-  validate_aws_credentials
-fi
-
-#
-# Set the headers to respect the NO_COLOR variable
-#
-if [ -z ${NO_COLOR+x} ]; then
-  pulumi_args="--emoji --stack ${PULUMI_STACK}"
-else
-  pulumi_args="--color never --stack ${PULUMI_STACK}"
-fi
-
-# We automatically set the infra type to AWS, since this is a script specific to AWS
-# TODO: combined file should query and manage this
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config AWS
-# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the
-# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own
-# configuration because of the encryption needed for the passwords.
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius AWS
-
-header "Version Info"
-echo "Version and Account Information"
-echo "====================================================================="
-echo "Pulumi version is: $(pulumi version)"
-echo "Pulumi user is: $(pulumi whoami)"
-echo "Python version is: $(python --version)"
-echo "Kubectl version information: "
-echo "$(kubectl version -o json)"
-echo "Python module information: "
-echo "$(pip list)"
-echo "====================================================================="
-echo " "
-
-header "AWS VPC"
-cd "${script_dir}/../pulumi/python/infrastructure/aws/vpc"
-pulumi $pulumi_args up
-
-header "AWS EKS"
-cd "${script_dir}/../pulumi/python/infrastructure/aws/eks"
-pulumi $pulumi_args up
-
-# pulumi stack output cluster_name
-add_kube_config
-
-if command -v kubectl >/dev/null; then
-  echo "Attempting to connect to newly created Kubernetes cluster"
-  retry 30 kubectl version >/dev/null
-fi
-
-# Display the server information
-echo "Kubernetes client/server version information:"
-kubectl version -o json
-echo " "
-
-#
-# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this
-# should abstract away for us. This way we just call the kubeconfig project to get the needed information and
-# let the infrastructure specific parts do their own thing (as long as they work with this module)
-#
-header "Kubeconfig"
-cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig"
-pulumi $pulumi_args up
-
-header "AWS ECR"
-cd "${script_dir}/../pulumi/python/infrastructure/aws/ecr"
-pulumi $pulumi_args up
-
-header "IC Image Build"
-cd "${script_dir}/../pulumi/python/utility/kic-image-build"
-pulumi $pulumi_args up
-
-header "IC Image Push"
-# If we are on MacOS and the user keychain is locked, we need to prompt the
-# user to unlock it so that `docker login` will work correctly.
-if command -v security >/dev/null && [[ "$(uname -s)" == "Darwin" ]]; then
-  if ! security show-keychain-info 2>/dev/null; then
-    echo "Enter your system credentials in order to access the system keychain for storing secrets securely with Docker."
-    security unlock-keychain
-  fi
-fi
-cd "${script_dir}/../pulumi/python/utility/kic-image-push"
-pulumi $pulumi_args up
-
-header "Deploying IC"
-cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller"
-pulumi $pulumi_args up
-
-header "Logstore"
-cd "${script_dir}/../pulumi/python/kubernetes/logstore"
-pulumi $pulumi_args up
-
-header "Logagent"
-cd "${script_dir}/../pulumi/python/kubernetes/logagent"
-pulumi $pulumi_args up
-
-header "Cert Manager"
-cd "${script_dir}/../pulumi/python/kubernetes/certmgr"
-pulumi $pulumi_args up
-
-header "Prometheus"
-cd "${script_dir}/../pulumi/python/kubernetes/prometheus"
-pulumi $pulumi_args up
-
-header "Observability"
-cd "${script_dir}/../pulumi/python/kubernetes/observability"
-pulumi $pulumi_args up
-
-header "Bank of Sirius"
-cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-
-pulumi $pulumi_args up
-app_url="$(pulumi ${pulumi_args} stack output --json | python3 "${script_dir}"/../pulumi/python/kubernetes/applications/sirius/verify.py)"
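The bring-up above is the mirror image of the destroy scripts: the same cd-into-project, pulumi up step repeated in dependency order, since later projects consume earlier projects' stack outputs. A compressed sketch of the pattern under that assumption (project list abbreviated for illustration):

    for project in infrastructure/aws/vpc infrastructure/aws/eks infrastructure/kubeconfig \
                   infrastructure/aws/ecr utility/kic-image-build utility/kic-image-push \
                   kubernetes/nginx/ingress-controller; do
      header "${project}"
      cd "${script_dir}/../pulumi/python/${project}"
      pulumi ${pulumi_args} up
    done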
-
-header "Finished!"
-echo "The startup process has finished successfully"
-echo " "
-echo "Next Steps:"
-echo " "
-echo "1. The application can now be accessed at: ${app_url}."
-echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools."
-echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment."
-echo " "
-echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the"
-echo "following commands:"
-echo " "
-echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config"
-echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-echo "K8s LoadBalancer IP: kubectl get services --namespace nginx-ingress"
-echo " "
-echo "Please see the documentation in the GitHub repository for more information"
diff --git a/bin/start_do.sh b/bin/start_do.sh
deleted file mode 100755
index 6e8537e..0000000
--- a/bin/start_do.sh
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v pulumi >/dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 >/dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v node >/dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v git >/dev/null; then
-  echo >&2 "git must be installed to continue"
-  exit 1
-fi
-
-if ! command -v make >/dev/null; then
-  echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-if ! command -v docker >/dev/null; then
-  echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-    echo >&2 "Unable to login to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then
-  touch "${script_dir}/../config/pulumi/environment"
-fi
-
-if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then
-  read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK
-  echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment"
-fi
-
-# Do we have the submodule source....
-#
-# Note: We had been checking for .git, but this is not guaranteed to be
-# there if we build the docker image or use a tarball. So now we look
-# for the src subdirectory which should always be there.
-#
-if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then
-  echo "Submodule source found"
-else
-  # Error out with instructions.
-  echo "Bank of Sirius submodule not found"
-  echo " "
-  echo "Please run:"
-  echo "    git submodule update --init --recursive --remote"
-  echo "Inside your git directory and re-run this script"
-  echo ""
-  echo >&2 "Unable to find submodule - exiting"
-  exit 3
-fi
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-# Create the stack if it does not already exist
-# We skip over the tools directory, because that uses a unique stack for setup of the
-# kubernetes components for installations without them.
-find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \;
-
-if [[ -z "${DIGITALOCEAN_TOKEN+x}" ]]; then
-  echo "DIGITALOCEAN_TOKEN not set"
-  if ! grep --quiet '^DIGITALOCEAN_TOKEN=.*' "${script_dir}/../config/pulumi/environment"; then
-    read -r -e -p "Enter the Digital Ocean Token to use in all projects: " DIGITALOCEAN_TOKEN
-    if [[ -z "${DIGITALOCEAN_TOKEN}" ]]; then
-      echo "No Digital Ocean token found - exiting"
-      exit 4
-    fi
-    echo "DIGITALOCEAN_TOKEN=${DIGITALOCEAN_TOKEN}" >>"${script_dir}/../config/pulumi/environment"
-    source "${script_dir}/../config/pulumi/environment"
-    find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext digitalocean:token "${DIGITALOCEAN_TOKEN}" \;
-  fi
-else
-  echo "Using DIGITALOCEAN_TOKEN from environment: ${DIGITALOCEAN_TOKEN}"
-  find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext digitalocean:token "${DIGITALOCEAN_TOKEN}" \;
-fi
-
-function createpw() {
-  PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16)
-  echo $PWORD
-}
-
-# The Bank of Sirius configuration file is stored in the ./sirius/config
-# directory. This is because we cannot pull secrets from different project
-# directories.
-#
-# This work-around is expected to be obsoleted by the work described in
-# https://github.com/pulumi/pulumi/issues/4604, specifically around issue
-# https://github.com/pulumi/pulumi/issues/2307
-#
-# Check for secrets being set
-#
-echo "Checking for required secrets"
-
-# Sirius Accounts Database
-if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  ACCOUNTS_PW=$(createpw)
-  pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW
-fi
-
-# Sirius Ledger Database
-if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  LEDGER_PW=$(createpw)
-  pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW
-fi
-
-# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted)
-# This is for the deployment that is set up as part of the prometheus operator driven prometheus-kube-stack.
-#
-if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Existing password found for grafana admin user"
-else
-  echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard"
-  echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text"
-  echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard."
-  pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config
-fi
-
-# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82
-#
-# This version of the code forces you to add a hostname which is used to generate the cert when the application is
-# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed
-# cert and to access the application.
-#
-
-echo " "
-echo "NOTICE! Currently we do not automatically pull the hostname of the K8s LoadBalancer with this deployment; instead"
-echo "you will need to create an FQDN and map the assigned IP address to your FQDN in order to use the deployment. "
-echo "You can then add this mapping to DNS, or locally to your host file"
-echo " "
-echo "See https://networkdynamics.com/2017/05/the-benefits-of-testing-your-website-with-a-local-hosts-file/ for details"
-echo "on how this can be accomplished. "
-echo " "
-echo "This will be streamlined in a future release of MARA."
-echo " "
-
-# So we can see...
-sleep 5
-
-if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Hostname found for deployment"
-else
-  echo "Create an FQDN for your deployment"
-  pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config
-fi
-
-# Show colorful fun headers if the right utils are installed and NO_COLOR is not set
-#
-function header() {
-  if [ -z ${NO_COLOR+x} ]; then
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat
-  else
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1"
-  fi
-}
-
-function add_kube_config() {
-  echo "adding ${cluster_name} cluster to local kubeconfig"
-  doctl kubernetes cluster config save ${cluster_name}
-}
-
-function validate_do_credentials() {
-  pulumi_do_token="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get digitalocean:token)"
-  echo "Validating Digital Ocean credentials"
-  if ! doctl account get >/dev/null; then
-    echo >&2 "Digital Ocean credentials have expired or are not valid"
-    exit 2
-  fi
-}
-
-function retry() {
-  local -r -i max_attempts="$1"
-  shift
-  local -i attempt_num=1
-  until "$@"; do
-    if ((attempt_num == max_attempts)); then
-      echo "Attempt ${attempt_num} failed and there are no more attempts left!"
-      return 1
-    else
-      echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..."
-      sleep $((attempt_num++))
-    fi
-  done
-}
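retry() re-runs a command up to a budget of attempts, sleeping a growing number of seconds between tries (the sleep reuses the incrementing attempt counter, so the waits grow 1s, 2s, 3s, ...). Usage is positional: the first argument is the attempt budget and the remainder is the command, as in the cluster readiness check this script performs later:

    retry 30 kubectl version >/dev/null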
-
-#
-# This deploy only works with the NGINX registries.
-#
-echo " "
-echo "NOTICE! Currently the deployment for Digital Ocean only supports pulling images from the registry! A JWT is "
-echo "required in order to access the NGINX Plus repository. This should be placed in a file named jwt.token in the"
-echo "extras directory in the project root."
-echo " "
-echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more "
-echo "details and examples."
-echo " "
-
-# Make sure we see it
-sleep 5
-
-#
-# TODO: Integrate this into the mainline along with logic to work with/without #80
-#
-# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not
-# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secret. That
-# secret is not a valid secret, but it is created to make the logic easier to read/code.
-#
-if [[ -s "${script_dir}/../extras/jwt.token" ]]; then
-  JWT=$(cat ${script_dir}/../extras/jwt.token)
-  echo "Loading JWT into nginx-ingress/regcred"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-else
-  # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81
-  echo "No JWT found; writing placeholder manifest"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-fi
-
-if command -v doctl >/dev/null; then
-  validate_do_credentials
-fi
-
-#
-# Set the headers to respect the NO_COLOR variable
-#
-if [ -z ${NO_COLOR+x} ]; then
-  pulumi_args="--emoji --stack ${PULUMI_STACK}"
-else
-  pulumi_args="--color never --stack ${PULUMI_STACK}"
-fi
-
-# We automatically set the infra type to DO, since this is a script specific to DO
-# TODO: combined file should query and manage this
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config DO
-# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the
-# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own
-# configuration because of the encryption needed for the passwords.
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius DO
-
-header "Version Info"
-echo "Version and Account Information"
-echo "====================================================================="
-echo "Pulumi version is: $(pulumi version)"
-echo "Pulumi user is: $(pulumi whoami)"
-echo "Python version is: $(python --version)"
-echo "Kubectl version information: "
-echo "$(kubectl version -o json)"
-echo "Python module information: "
-echo "$(pip list)"
-echo "====================================================================="
-echo " "
-
-header "DO Kubernetes"
-cd "${script_dir}/../pulumi/python/infrastructure/digitalocean/domk8s"
-pulumi $pulumi_args up
-
-# pulumi stack output cluster_name
-cluster_name=$(pulumi stack output cluster_id -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/digitalocean/domk8s)
-add_kube_config
-
-if command -v kubectl >/dev/null; then
-  echo "Attempting to connect to newly created Kubernetes cluster"
-  retry 30 kubectl version >/dev/null
-fi
-
-# Display the server information
-echo "Kubernetes client/server version information:"
-kubectl version -o json
-echo " "
-
-#
-# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this
-# should abstract away for us. This way we just call the kubeconfig project to get the needed information and
-# let the infrastructure specific parts do their own thing (as long as they work with this module)
-#
-header "Kubeconfig"
-cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig"
-pulumi $pulumi_args up
-
-header "Deploying IC"
-cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only"
-pulumi $pulumi_args up
-
-header "Logstore"
-cd "${script_dir}/../pulumi/python/kubernetes/logstore"
-pulumi $pulumi_args up
-
-header "Logagent"
-cd "${script_dir}/../pulumi/python/kubernetes/logagent"
-pulumi $pulumi_args up
-
-header "Cert Manager"
-cd "${script_dir}/../pulumi/python/kubernetes/certmgr"
-pulumi $pulumi_args up
-
-header "Prometheus"
-cd "${script_dir}/../pulumi/python/kubernetes/prometheus"
-pulumi $pulumi_args up
-
-header "Observability"
-cd "${script_dir}/../pulumi/python/kubernetes/observability"
-pulumi $pulumi_args up
-
-header "Bank of Sirius"
-cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-pulumi $pulumi_args up
-
-header "Finished!"
-THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve")
-THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve")
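The FQDN-versus-IP wrinkle called out in TODO #82 comes down to what the provider's load balancer publishes: the Digital Ocean service exposes .status.loadBalancer.ingress[*].ip (queried above), while other load balancers (AWS ELBs, for example) publish .hostname instead. A sketch that reads both fields and keeps whichever is non-empty (the lb_* variable names are illustrative):

    lb_ip="$(kubectl get service kic-nginx-ingress --namespace nginx-ingress \
      --output=jsonpath='{.status.loadBalancer.ingress[*].ip}')"
    lb_hostname="$(kubectl get service kic-nginx-ingress --namespace nginx-ingress \
      --output=jsonpath='{.status.loadBalancer.ingress[*].hostname}')"
    lb_address="${lb_ip:-${lb_hostname}}"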
-echo " " -echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" -echo "following commands:" -echo " " -echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" -echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" -echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" -echo " " -echo "Please see the documentation in the github repository for more information" diff --git a/bin/start_kube.sh b/bin/start_kube.sh deleted file mode 100755 index 5da8786..0000000 --- a/bin/start_kube.sh +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 -fi - -if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" -fi - -if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" -fi - -# Do we have the submodule source.... -# -# Note: We had been checking for .git, but this is not guaranteed to be -# there if we build the docker image or use a tarball. So now we look -# for the src subdirectory which should always be there. 
-# -if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" -else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -# Create the stack if it does not already exist -# Do not change the tools directory of add-ons. -find "${script_dir}/../pulumi" -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; - -# Show colorful fun headers if the right utils are installed and NO_COLOR is not set -# -function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi -} - -function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done -} - -function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD -} - -# -# This deploy only works with the NGINX registries. -# -echo " " -echo "NOTICE! Currently the deployment via kubeconfig only supports pulling images from the registry! A JWT is " -echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory" -echo "in the project root, in a file named jwt.token" -echo " " -echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " -echo "details and examples." -echo " " - -# Make sure we see it -sleep 5 - -# -# TODO: Integrate this into the mainline along with logic to work with/without #80 -# -# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not -# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secert. That -# secret is not a valid secret, but it is created to make the logic easier to read/code. 
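-#
-# For illustration only (the exact field ordering may differ): the kubectl dry-run invocations
-# below emit a manifest shaped roughly like the following, which is written into the ingress
-# controller project's manifests directory for use when that project is stood up:
-#
-#   apiVersion: v1
-#   kind: Secret
-#   type: kubernetes.io/dockerconfigjson
-#   metadata:
-#     name: regcred
-#     namespace: nginx-ingress
-#   data:
-#     .dockerconfigjson: <base64-encoded registry credential>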
-#
-if [[ -s "${script_dir}/../extras/jwt.token" ]]; then
-  JWT=$(cat ${script_dir}/../extras/jwt.token)
-  echo "Loading JWT into nginx-ingress/regcred"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-else
-  # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81
-  echo "No JWT found; writing placeholder manifest"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-fi
-
-# Check for stack info....
-# TODO: Move these to use kubeconfig for the Pulumi main config (which redirects up) instead of aws/vpc #80
-#
-
-# We automatically set this to a kubeconfig type for infra type
-# TODO: combined file should query and manage this #80
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config kubeconfig
-# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the
-# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own
-# configuration because of the encryption needed for the passwords.
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius kubeconfig
-
-# Inform the user of what we are doing
-
-echo " "
-echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to"
-echo "connect to Kubernetes properly. If you have multiple Kubernetes contexts (or custom contexts)"
-echo "you may need to remove them and replace them with a simple ~/.kube/config file. This will be "
-echo "addressed in a future release."
-echo " "
-
-# Sleep so that this is seen...
-sleep 5
-
-if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Kubeconfig file found"
-else
-  echo "Provide an absolute path to your kubeconfig file"
-  pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config
-fi
-
-# Cluster name
-if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Cluster name found"
-else
-  echo "Provide your cluster name"
-  pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config
-fi
-
-# Connect to the cluster
-if command -v kubectl >/dev/null; then
-  echo "Attempting to connect to Kubernetes cluster"
-  retry 30 kubectl version >/dev/null
-fi
-
-# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82
-#
-# This version of the code forces you to add a hostname which is used to generate the cert when the application is
-# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed
-# cert and to access the application.
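-#
-# For example (illustrative values only), once the IP address and FQDN are known, a local
-# hosts-file entry pairing them would look like:
-#
-#   203.0.113.10   mara.example.com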
-#
-if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Hostname found for deployment"
-else
-  echo "Create an FQDN for your deployment"
-  pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config
-fi
-
-# The Bank of Sirius configuration file is stored in the ./sirius/config
-# directory. This is because we cannot pull secrets from different project
-# directories.
-#
-# This work-around is expected to be obsoleted by the work described in
-# https://github.com/pulumi/pulumi/issues/4604, specifically around issue
-# https://github.com/pulumi/pulumi/issues/2307
-#
-# Check for secrets being set
-#
-echo "Checking for required secrets"
-
-# Sirius Accounts Database
-if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  ACCOUNTS_PW=$(createpw)
-  pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW
-fi
-
-# Sirius Ledger Database
-if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  LEDGER_PW=$(createpw)
-  pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW
-fi
-
-# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted)
-# This is for the deployment that is set up as part of the prometheus-operator-driven prometheus-kube-stack.
-#
-if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Existing password found for grafana admin user"
-else
-  echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard"
-  echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text"
-  echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard."
-  pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config
-fi
-
-#
-# TODO: Allow startup scripts to prompt and accept additional config values #97
-# The helm timeout for all of the projects is set to the default of 300 seconds (5 minutes).
-# However, since this code path is most commonly going to be used to deploy locally we need to bump
-# that value up. A fix down the road will add this as a prompt, but for now we are going to double this
-# value for all helm deploys.
-#
-
-pulumi config set kic-helm:helm_timeout 600 -C ${script_dir}/../pulumi/python/config
-pulumi config set logagent:helm_timeout 600 -C ${script_dir}/../pulumi/python/config
-pulumi config set logstore:helm_timeout 600 -C ${script_dir}/../pulumi/python/config
-pulumi config set certmgr:helm_timeout 600 -C ${script_dir}/../pulumi/python/config
-pulumi config set prometheus:helm_timeout 600 -C ${script_dir}/../pulumi/python/config
-
-#
-# Set the headers to respect the NO_COLOR variable
-#
-if [ -z ${NO_COLOR+x} ]; then
-  pulumi_args="--emoji --stack ${PULUMI_STACK}"
-else
-  pulumi_args="--color never --stack ${PULUMI_STACK}"
-fi
-
-#
-# Note that this is somewhat different from the other startup scripts, because at the point we run this
-# here we know that we have a server so we can get the version. The other builds do not have server info
-# at this point in time.
-#
-header "Version Info"
-echo "Version and Account Information"
-echo "====================================================================="
-echo "Pulumi version is: $(pulumi version)"
-echo "Pulumi user is: $(pulumi whoami)"
-echo "Python version is: $(python --version)"
-echo "Kubectl version information: "
-echo "$(kubectl version -o json)"
-echo "Python module information: "
-echo "$(pip list)"
-echo "====================================================================="
-echo " "
-
-header "Kubeconfig"
-cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig"
-pulumi $pulumi_args up
-
-# TODO: This is using a different project than the AWS deploy; we need to collapse those #80
-header "Deploying IC"
-cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only"
-pulumi $pulumi_args up
-
-header "Logstore"
-cd "${script_dir}/../pulumi/python/kubernetes/logstore"
-pulumi $pulumi_args up
-
-header "Logagent"
-cd "${script_dir}/../pulumi/python/kubernetes/logagent"
-pulumi $pulumi_args up
-
-header "Cert Manager"
-cd "${script_dir}/../pulumi/python/kubernetes/certmgr"
-pulumi $pulumi_args up
-
-header "Prometheus"
-cd "${script_dir}/../pulumi/python/kubernetes/prometheus"
-pulumi $pulumi_args up
-
-header "Observability"
-cd "${script_dir}/../pulumi/python/kubernetes/observability"
-pulumi $pulumi_args up
-
-header "Bank of Sirius"
-cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-pulumi $pulumi_args up
-
-header "Finished!!"
-THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve")
-THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve")
-
-echo " "
-echo "The startup process has finished successfully"
-echo " "
-echo " "
-echo "Next Steps:"
-echo " "
-echo "1. Map the IP address ($THE_IP) of your Ingress Controller to your FQDN ($THE_FQDN)."
-echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools."
-echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment."
-echo " "
-echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the"
-echo "following commands:"
-echo " "
-echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config"
-echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-echo "K8s LoadBalancer IP: kubectl get services --namespace nginx-ingress"
-echo " "
-echo "Please see the documentation in the GitHub repository for more information"
diff --git a/bin/start_lke.sh b/bin/start_lke.sh
deleted file mode 100755
index 774f67c..0000000
--- a/bin/start_lke.sh
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Don't pollute console output with upgrade notifications
-export PULUMI_SKIP_UPDATE_CHECK=true
-# Run Pulumi non-interactively
-export PULUMI_SKIP_CONFIRMATIONS=true
-
-script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-
-if ! command -v pulumi >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v pulumi >/dev/null; then
-      echo >&2 "Pulumi must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "Pulumi must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v python3 >/dev/null; then
-  echo >&2 "Python 3 must be installed to continue"
-  exit 1
-fi
-
-if ! command -v node >/dev/null; then
-  if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-    echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH"
-    export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-    if ! command -v node >/dev/null; then
-      echo >&2 "NodeJS must be installed to continue"
-      exit 1
-    fi
-  else
-    echo >&2 "NodeJS must be installed to continue"
-    exit 1
-  fi
-fi
-
-if ! command -v git >/dev/null; then
-  echo >&2 "git must be installed to continue"
-  exit 1
-fi
-
-if ! command -v make >/dev/null; then
-  echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-if ! command -v docker >/dev/null; then
-  echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source."
-fi
-
-# Check to see if the user is logged into Pulumi
-if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-  pulumi login
-
-  if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
-    echo >&2 "Unable to log in to Pulumi - exiting"
-    exit 2
-  fi
-fi
-
-if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then
-  touch "${script_dir}/../config/pulumi/environment"
-fi
-
-if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then
-  read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK
-  echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment"
-fi
-
-# Do we have the submodule source....
-#
-# Note: We had been checking for .git, but this is not guaranteed to be
-# there if we build the docker image or use a tarball. So now we look
-# for the src subdirectory which should always be there.
-#
-if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then
-  echo "Submodule source found"
-else
-  # Error out with instructions.
-  echo "Bank of Sirius submodule not found"
-  echo " "
-  echo "Please run:"
-  echo "    git submodule update --init --recursive --remote"
-  echo "Inside your git directory and re-run this script"
-  echo ""
-  echo >&2 "Unable to find submodule - exiting"
-  exit 3
-fi
-
-function createpw() {
-  PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16)
-  echo $PWORD
-}
-
-source "${script_dir}/../config/pulumi/environment"
-echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
-
-# Create the stack if it does not already exist
-# We skip over the tools directory because it uses a unique stack to set up the
-# Kubernetes components for installations that do not already have them.
-find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \;
-
-if [[ -z "${LINODE_TOKEN+x}" ]]; then
-  echo "LINODE_TOKEN not set"
-  if ! grep --quiet '^LINODE_TOKEN=.*' "${script_dir}/../config/pulumi/environment"; then
-    read -r -e -p "Enter the Linode Token to use in all projects: " LINODE_TOKEN
-    if [[ -z "${LINODE_TOKEN}" ]]; then
-      echo "No Linode Token found - exiting"
-      exit 4
-    fi
-    echo "LINODE_TOKEN=${LINODE_TOKEN}" >>"${script_dir}/../config/pulumi/environment"
-    source "${script_dir}/../config/pulumi/environment"
-    find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext linode:token "${LINODE_TOKEN}" \;
-  fi
-else
-  echo "Using LINODE_TOKEN from environment: ${LINODE_TOKEN}"
-  find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext linode:token "${LINODE_TOKEN}" \;
-fi
-
-# The Bank of Sirius configuration file is stored in the ./sirius/config
-# directory. This is because we cannot pull secrets from different project
-# directories.
-#
-# This work-around is expected to be obsoleted by the work described in
-# https://github.com/pulumi/pulumi/issues/4604, specifically around issue
-# https://github.com/pulumi/pulumi/issues/2307
-#
-# Check for secrets being set
-#
-echo "Checking for required secrets"
-
-# Sirius Accounts Database
-if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  ACCOUNTS_PW=$(createpw)
-  pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW
-fi
-
-# Sirius Ledger Database
-if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-  true
-else
-  LEDGER_PW=$(createpw)
-  pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW
-fi
-
-# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted)
-# This is for the deployment that is set up as part of the prometheus-operator-driven prometheus-kube-stack.
-#
-if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Existing password found for grafana admin user"
-else
-  echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard"
-  echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text"
-  echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard."
-  pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config
-fi
-
-# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82
-#
-# This version of the code forces you to add a hostname which is used to generate the cert when the application is
-# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed
-# cert and to access the application.
-#
-
-echo " "
-echo "NOTICE! Currently we do not automatically pull the hostname of the K8s LoadBalancer with this deployment; instead"
-echo "you will need to create an FQDN and map the assigned IP address to your FQDN in order to use the deployment. "
-echo "You can then add this mapping to DNS, or locally to your hosts file"
-echo " "
-echo "See https://networkdynamics.com/2017/05/the-benefits-of-testing-your-website-with-a-local-hosts-file/ for details"
-echo "on how this can be accomplished. "
-echo " "
-echo "This will be streamlined in a future release of MARA."
-echo " "
-
-# So we can see...
-sleep 5
-
-if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-  echo "Hostname found for deployment"
-else
-  echo "Create an FQDN for your deployment"
-  pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config
-fi
-
-# Show colorful fun headers if the right utils are installed and NO_COLOR is not set
-#
-function header() {
-  if [ -z ${NO_COLOR+x} ]; then
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat
-  else
-    "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1"
-  fi
-}
-
-#
-# The initial version of this tried to manage the Kubernetes configuration file, but for some reason
-# Linode is a bit touchy about this.
-#
-# So, now we just back up the existing file and slide ours in place. This will be streamlined/addressed as
-# part of the rewrite...
-#
-function add_kube_config() {
-  echo "adding ${cluster_name} cluster to local kubeconfig"
-  mv $HOME/.kube/config $HOME/.kube/config.mara.backup || true
-  pulumi stack output kubeconfig -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/kubeconfig --show-secrets >$HOME/.kube/config
-}
-
-function validate_lke_credentials() {
-  pulumi_lke_token="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get linode:token)"
-  echo "Validating Linode credentials"
-  if ! linode_cli account view >/dev/null; then
-    echo >&2 "Linode credentials have expired or are not valid"
-    exit 2
-  fi
-}
-
-function retry() {
-  local -r -i max_attempts="$1"
-  shift
-  local -i attempt_num=1
-  until "$@"; do
-    if ((attempt_num == max_attempts)); then
-      echo "Attempt ${attempt_num} failed and there are no more attempts left!"
-      return 1
-    else
-      echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..."
-      sleep $((attempt_num++))
-    fi
-  done
-}
-
-#
-# This deploy only works with the NGINX registries.
-#
-echo " "
-echo "NOTICE! Currently the deployment for Linode LKE only supports pulling images from the registry! A JWT is "
-echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory"
-echo "in the project root, in a file named jwt.token"
-echo " "
-echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more "
-echo "details and examples."
-echo " "
-
-# Make sure we see it
-sleep 5
-
-#
-# TODO: Integrate this into the mainline along with logic to work with/without #80
-#
-# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not
-# deploying NGINX Plus (and does not have a JWT) we create a placeholder credential that is used to create a secret. That
-# secret is not a valid secret, but it is created to make the logic easier to read/code.
-#
-if [[ -s "${script_dir}/../extras/jwt.token" ]]; then
-  JWT=$(cat ${script_dir}/../extras/jwt.token)
-  echo "Loading JWT into nginx-ingress/regcred"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-else
-  # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81
-  echo "No JWT found; writing placeholder manifest"
-  ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml
-fi
-
-if command -v linode_cli >/dev/null; then
-  validate_lke_credentials
-fi
-
-#
-# Set the headers to respect the NO_COLOR variable
-#
-if [ -z ${NO_COLOR+x} ]; then
-  pulumi_args="--emoji --stack ${PULUMI_STACK}"
-else
-  pulumi_args="--color never --stack ${PULUMI_STACK}"
-fi
-
-# We automatically set this to LKE for the infra type, since this is a script specific to LKE
-# TODO: combined file should query and manage this
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config LKE
-# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the
-# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own
-# configuration because of the encryption needed for the passwords.
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius LKE
-
-header "Version Info"
-echo "Version and Account Information"
-echo "====================================================================="
-echo "Pulumi version is: $(pulumi version)"
-echo "Pulumi user is: $(pulumi whoami)"
-echo "Python version is: $(python --version)"
-echo "Kubectl version information: "
-echo "$(kubectl version -o json)"
-echo "Python module information: "
-echo "$(pip list)"
-echo "====================================================================="
-echo " "
-
-header "Linode LKE"
-cd "${script_dir}/../pulumi/python/infrastructure/linode/lke"
-pulumi $pulumi_args up
-
-#
-# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this
-# should abstract things away for us. This way we just call the kubeconfig project to get the needed information and
-# let the infrastructure specific parts do their own thing (as long as they work with this module)
-#
-header "Kubeconfig"
-cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig"
-pulumi $pulumi_args up
-
-# pulumi stack output cluster_name
-cluster_name=$(pulumi stack output cluster_id -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/linode/lke)
-add_kube_config
-
-# Display the server information
-echo "Kubernetes client/server version information:"
-kubectl version -o json
-echo " "
-
-if command -v kubectl >/dev/null; then
-  echo "Attempting to connect to newly created Kubernetes cluster"
-  retry 30 kubectl version >/dev/null
-fi
-
-header "Deploying IC"
-cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only"
-pulumi $pulumi_args up
-
-header "Logstore"
-cd "${script_dir}/../pulumi/python/kubernetes/logstore"
-pulumi $pulumi_args up
-
-header "Logagent"
-cd "${script_dir}/../pulumi/python/kubernetes/logagent"
-pulumi $pulumi_args up
-
-header "Cert Manager"
-cd "${script_dir}/../pulumi/python/kubernetes/certmgr"
-pulumi $pulumi_args up
-
-header "Prometheus"
-cd "${script_dir}/../pulumi/python/kubernetes/prometheus"
-pulumi $pulumi_args up
-
-header "Observability"
-cd "${script_dir}/../pulumi/python/kubernetes/observability"
-pulumi $pulumi_args up
-
-header "Bank of Sirius"
-cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius"
-pulumi $pulumi_args up
-
-header "Finished!"
-THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve")
-THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve")
-
-echo " "
-echo "The startup process has finished successfully"
-echo " "
-echo " "
-echo "Next Steps:"
-echo " "
-echo "1. Map the IP address ($THE_IP) of your Ingress Controller to your FQDN ($THE_FQDN)."
-echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools."
-echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment."
-echo " " -echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" -echo "following commands:" -echo " " -echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" -echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" -echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" -echo " " -echo "Please see the documentation in the github repository for more information" From 9d96a9a3faecbb08fbaceaa056b535cd3e2f87e4 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Wed, 13 Jul 2022 15:48:07 -0700 Subject: [PATCH 37/62] fix: typo in function parameter --- pulumi/python/automation/providers/linode.py | 2 +- pulumi/python/automation/providers/update_kubeconfig.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pulumi/python/automation/providers/linode.py b/pulumi/python/automation/providers/linode.py index 57a7f16..2737d1a 100644 --- a/pulumi/python/automation/providers/linode.py +++ b/pulumi/python/automation/providers/linode.py @@ -172,7 +172,7 @@ def _update_kubeconfig(params: PulumiProjectEventParams): kubeconfig_bytes = base64.b64decode(kubeconfig_encoded) kubeconfig = yaml.safe_load(kubeconfig_bytes) - update_kubeconfig(env=params.env_config, cluser_name=cluster_name, kubeconfig=kubeconfig) + update_kubeconfig(env=params.env_config, cluster_name=cluster_name, kubeconfig=kubeconfig) INSTANCE = LinodeProvider() diff --git a/pulumi/python/automation/providers/update_kubeconfig.py b/pulumi/python/automation/providers/update_kubeconfig.py index 1531c7e..bd57d18 100644 --- a/pulumi/python/automation/providers/update_kubeconfig.py +++ b/pulumi/python/automation/providers/update_kubeconfig.py @@ -24,14 +24,14 @@ LOG = logging.getLogger(__name__) -def update_kubeconfig(cluser_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]): +def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]): cluster = kubeconfig['clusters'][0] user = kubeconfig['users'][0] alias = kubeconfig['contexts'][0]['name'] config_selector = KubeconfigSelector(env_variable=env.get('KUBECONFIG', ''), path_in=None) - config = config_selector.choose_kubeconfig(cluser_name) + config = config_selector.choose_kubeconfig(cluster_name) appender = KubeconfigAppender() new_context_dict = appender.insert_cluster_user_pair(config=config, @@ -42,7 +42,7 @@ def update_kubeconfig(cluser_name: str, env: Mapping[str, str], kubeconfig: Mapp writer = KubeconfigWriter() writer.write_kubeconfig(config) - if config.has_cluster(cluser_name): + if config.has_cluster(cluster_name): uni_print("Updated context {0} in {1}\n".format( new_context_dict["name"], config.path )) From 82de0877642336a26a73ce124ab40110aa73f62b Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 14 Jul 2022 14:33:51 -0700 Subject: [PATCH 38/62] docs: add additional code comments --- pulumi/python/automation/colorize.py | 24 ++++- pulumi/python/automation/env_config_parser.py | 24 +++++ pulumi/python/automation/headers.py | 8 ++ pulumi/python/automation/main.py | 94 ++++++++++++++++++- pulumi/python/automation/providers/aws.py | 16 +++- .../automation/providers/base_provider.py | 21 ++++- pulumi/python/automation/providers/do.py | 35 +++++++ pulumi/python/automation/providers/linode.py | 12 +++ .../automation/providers/pulumi_project.py | 8 +- .../automation/providers/update_kubeconfig.py | 15 +++ 
 .../python/automation/stack_config_parser.py  | 16 ++++
 11 files changed, 264 insertions(+), 9 deletions(-)

diff --git a/pulumi/python/automation/colorize.py b/pulumi/python/automation/colorize.py
index b32a0f3..ec47baf 100644
--- a/pulumi/python/automation/colorize.py
+++ b/pulumi/python/automation/colorize.py
@@ -1,3 +1,10 @@
+"""
+This file provides two functions println_nocolor and println_color - println_color will be redirected to
+println_nocolor if the execution environment does not support color output. If the environment does support
+color output, then the string specified for println_color will be rendered in rainbow colors using the lolcat
+library.
+"""
+
 import collections
 import os
 import random
@@ -7,6 +14,10 @@
 
 
 def println_nocolor(text: str, output: typing.TextIO = sys.stdout):
+    """Prints a new line to the console without using color
+    :param text: text to print
+    :param output: output destination
+    """
     print(text, file=output)
 
 
@@ -16,6 +27,8 @@ def println_nocolor(text: str, output: typing.TextIO = sys.stdout):
     lolcat_fields = ['animate', 'duration', 'force', 'freq', 'mode', 'speed', 'spread', 'os']
     LolCatOptions = collections.namedtuple('LolCatOptions', lolcat_fields)
 
+    # Unfortunately, we resort to the hack below to load the lolcat code because it was written
+    # as a stand-alone executable rather than as an easily consumable library.
     if os.environ.get('VIRTUAL_ENV'):
         venv = os.environ.get('VIRTUAL_ENV')
         lolcat_path = os.path.sep.join([venv, 'bin', 'lolcat'])
@@ -34,10 +47,15 @@ def println_nocolor(text: str, output: typing.TextIO = sys.stdout):
                                 force=False)
         colorizer = lolcat.LolCat(mode=options.mode, output=sys.stdout)
 
-        def println_color(text: str):
+        def println_color(text: str, output: typing.TextIO = sys.stdout):
+            """Prints a new line to the console using rainbow colors
+            :param text: text to print
+            :param output: output destination
+            """
+            colorizer = lolcat.LolCat(mode=options.mode, output=output)
             colorizer.println_plain(text, options)
-            sys.stdout.write('\x1b[0m')
-            sys.stdout.flush()
+            output.write('\x1b[0m')
+            output.flush()
 
         PRINTLN_FUNC = println_color
     else:
diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py
index f9fe9b5..7381719 100644
--- a/pulumi/python/automation/env_config_parser.py
+++ b/pulumi/python/automation/env_config_parser.py
@@ -1,18 +1,29 @@
+"""
+This file defines a data structure containing the environment variables that have been written to a file
+(`config/pulumi/environment`). The values stored there are used to specify the environment when executing
+operations using the Pulumi Automation API.
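+
+A minimal usage sketch (illustrative only; `read` and the accessors shown are defined in this
+file):
+
+    env = read()                        # config/pulumi/environment merged with os.environ
+    print(env.stack_name())             # value of PULUMI_STACK
+    print(env.pulumi_color_settings())  # 'never' when NO_COLOR is set; otherwise auto-detect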
+""" + import os from typing import Optional, Mapping from configparser import ConfigParser import stack_config_parser +# Directory in which script is located SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +# Default path to the MARA environment file DEFAULT_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi', 'environment'])) +# Default environment variables set for all Pulumi executions invoked by the Automation API DEFAULT_ENV_VARS = { 'PULUMI_SKIP_UPDATE_CHECK': 'true' } class EnvConfig(dict): + """Object containing environment variables used when executing operations with the Pulumi Automation API""" + _stack_config: Optional[stack_config_parser.PulumiStackConfig] = None config_path: Optional[str] = None @@ -29,12 +40,15 @@ def __init__(self, self.config_path = config_path def stack_name(self) -> str: + """Returns the stack name used in the environment""" return self.get('PULUMI_STACK') def no_color(self) -> bool: + """Returns a flag if color in the console is supported""" return self.get('NO_COLOR') is not None def pulumi_color_settings(self): + """Returns a string indicating if console colors should be auto-detected or just disabled""" if self.no_color(): return 'never' else: @@ -42,11 +56,21 @@ def pulumi_color_settings(self): def read(config_file_path: str = DEFAULT_PATH) -> EnvConfig: + """Reads the contents of the specified file path into a new instance of `EnvConfig`. + :param config_file_path: path to environment variable file + :return: new instance of EnvConfig + """ config_parser = ConfigParser() config_parser.optionxform = lambda option: option with open(config_file_path, 'r') as f: + # The Python configparser library is used to parse the file because it supports the KEY=VALUE syntax of the + # environment file. However, there is one exception; it requires the presence of a [main] section using the + # ini format style. In order avoid having to add a "[main]" string to the environment file, we spoof the + # presence of that section with this line below. It just prepends the string "[main]" before the contents of + # the environment file. content = f'[main]{os.linesep}{f.read()}' + config_parser.read_string(content) return EnvConfig(env_vars=os.environ, file_vars=config_parser['main'], config_path=config_file_path) diff --git a/pulumi/python/automation/headers.py b/pulumi/python/automation/headers.py index bee3733..123342b 100644 --- a/pulumi/python/automation/headers.py +++ b/pulumi/python/automation/headers.py @@ -1,3 +1,7 @@ +""" +This file defines the functions needed to render headers that are displayed before each Pulumi project is executed. +These headers provide a useful visual distinction between each step taken to set up an environment. 
+""" import colorize import env_config_parser from fart import fart @@ -7,6 +11,10 @@ def render_header(text: str, env_config: env_config_parser.EnvConfig): + """Renders the given text to a header displayed in the console - this header could be large ascii art + :param text: header text to render + :param env_config: reference to environment configuration + """ if banner_type == 'fabulous': header = fart.render_fart(text=text, font=FART_FONT) if not env_config.no_color(): diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index 0c859b2..24a5904 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -1,4 +1,18 @@ #!/usr/bin/env python3 + +""" +This file is the entrypoint for the Modern Application Reference Architecture (MARA) Runner. + +This Python script ties together all of the different Pulumi projects needed to setup a +Kubernetes environment on a given infrastructure provider (like AWS), configures it, +installed required services on the Kubernetes environment, and deploys an application to +Kubernetes. + +The runner functions as a simple CLI application that can be run just like any other program +as long as the virtual environment for it (python-venv) is set up. This environment can be +set up using the bin/setup_venv.sh script. +""" + import getopt import getpass import importlib @@ -23,13 +37,22 @@ import stack_config_parser +# Directory in which script is located SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) +# Root directory of the MARA project PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..'])) +# Allowed operations - if operation is not in this list, the runner will reject it OPERATIONS: List[str] = ['down', 'destroy', 'refresh', 'show-execution', 'up', 'validate', 'list-providers'] +# List of available infrastructure providers - if provider is not in this list, the runner will reject it PROVIDERS: typing.Iterable[str] = Provider.list_providers() +# Types of headings available to show the difference between Pulumi projects +# fabulous: a large rainbow covered banner +# boring: a single line of text uncolored BANNER_TYPES: List[str] = ['fabulous', 'boring'] +# We default to a fabulous banner of course banner_type = BANNER_TYPES[0] +# Debug flag that will trigger additional output debug_on = False @@ -57,14 +80,20 @@ def usage(): def provider_instance(provider_name: str) -> Provider: + """Dynamically instantiates an infrastructure provider + :param provider_name: name of infrastructure provider + :return: instance of infrastructure provider + """ module = importlib.import_module(name=f'providers.{provider_name}') return module.INSTANCE def main(): + """Entrypoint to application""" + try: - shortopts = 'hdp:b:' - longopts = ["help", 'debug', 'banner-type', 'provider='] + shortopts = 'hdp:b:' # single character options available + longopts = ["help", 'debug', 'banner-type', 'provider='] # long form options opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts) except getopt.GetoptError as err: print(err) # will print something like "option -a not recognized" @@ -75,7 +104,7 @@ def main(): global debug_on - # Parse flags + # First, we parse the flags given to the CLI runner for opt, value in opts: if opt in ('-h', '--help'): usage() @@ -89,6 +118,8 @@ def main(): if value in BANNER_TYPES: headers.banner_type = value + # Next, we validate to make sure the input to the runner was correct + # Make sure we got an operation - it is the last string passed as an argument if len(sys.argv) > 1: 
         operation = sys.argv[-1]
@@ -118,10 +149,16 @@
 
     provider = provider_instance(provider_name.lower())
 
+    # We execute the operation requested - different operations have different prerequisites, so they are matched
+    # differently. For example, show-execution does not require reading the configuration files, so we check for a
+    # match for it right away and, if matched, run it and exit.
+
     if operation == 'show-execution':
         provider.display_execution_order(output=sys.stdout)
         sys.exit(0)
 
+    # For the other operations, we need the configuration files parsed, so we do the parsing upfront.
+
     env_config = env_config_parser.read()
     stack_config = read_stack_config(provider=provider, env_config=env_config)
     validate_with_verbosity = operation == 'validate' or debug_on
@@ -146,6 +183,9 @@
         print(f'Unknown operation: {operation}')
         sys.exit(2)
 
+    # Lastly, if the operation involves the execution of a Pulumi command, we make sure that secrets have been
+    # instantiated before invoking Pulumi via the Automation API. This is required because certain Pulumi
+    # projects need to pull secrets in order to be stood up.
     if pulumi_cmd:
         init_secrets(env_config=env_config, pulumi_projects=provider.execution_order())
         try:
@@ -158,6 +198,11 @@
 
 def read_stack_config(provider: Provider,
                       env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig:
+    """Load and parse the Pulumi stack configuration file. In MARA, this is a globally shared file.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    :return: data structure containing stack configuration
+    """
     try:
         stack_config = stack_config_parser.read(stack_name=env_config.stack_name())
     except FileNotFoundError as e:
@@ -173,6 +218,12 @@
 def prompt_for_stack_config(provider: Provider,
                             env_config: env_config_parser.EnvConfig,
                             filename: str) -> stack_config_parser.PulumiStackConfig:
+    """Prompts user via tty for required configuration values when the stack config is empty or missing.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    :param filename: location to write stack config file to
+    :return: data structure containing stack configuration
+    """
     print(f'  creating new configuration based on user input')
 
     stack_defaults_path = os.path.sep.join([os.path.dirname(filename),
@@ -195,6 +246,13 @@
 def validate(provider: Provider,
              env_config: env_config_parser.EnvConfig,
             stack_config: stack_config_parser.PulumiStackConfig,
            verbose: Optional[bool] = False):
+    """Validates that the runtime environment for MARA is correct. Will validate that external tools are present and
+    configurations are correct. If validation fails, an exception will be raised.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    :param stack_config: reference to stack configuration
+    :param verbose: flag to enable verbose output mode
+    """
     # First, we validate that we have the right tools installed
     def check_path(cmd: str, fail_message: str) -> bool:
         cmd_path = shutil.which(cmd)
@@ -249,6 +307,15 @@
 
 def init_secrets(env_config: env_config_parser.EnvConfig,
                  pulumi_projects: List[PulumiProject]):
+    """Goes through a list of Pulumi projects and prompts the user for secrets required by each project that have not
+    already been stored.
    Each secret is encrypted using Pulumi's secret management and stored in the stack
+    configuration for the Pulumi project kubernetes/secrets and *not* in the global stack configuration. When the
+    secrets Pulumi project is stood up, it adds the secrets that were encrypted in its stack configuration to the
+    running Kubernetes cluster as a Kubernetes Secret. This approach is taken because Pulumi does not support sharing
+    secrets across projects.
+    :param env_config: reference to environment configuration
+    :param pulumi_projects: list of Pulumi projects to instantiate secrets for
+    """
     secrets_work_dir = os.path.sep.join([SCRIPT_DIR, '..', 'kubernetes', 'secrets'])
     stack = auto.create_or_select_stack(stack_name=env_config.stack_name(),
                                         opts=auto.LocalWorkspaceOptions(
@@ -277,6 +344,12 @@
 
 def build_pulumi_stack(pulumi_project: PulumiProject,
                        env_config: env_config_parser.EnvConfig) -> auto.Stack:
+    """Uses the Pulumi Automation API to do a `pulumi stack init` for the given project. If the stack already exists,
+    it will select it as the stack to use.
+    :param pulumi_project: reference to Pulumi project
+    :param env_config: reference to environment configuration
+    :return: reference to a new or existing stack
+    """
     print(f'project: {pulumi_project.name()} path: {pulumi_project.abspath()}')
     stack = auto.create_or_select_stack(stack_name=env_config.stack_name(),
                                         opts=auto.LocalWorkspaceOptions(
@@ -289,6 +362,10 @@
 
 def refresh(provider: Provider,
             env_config: env_config_parser.EnvConfig):
+    """Execute `pulumi refresh` for each project of the given provider using the Pulumi Automation API.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    """
     for pulumi_project in provider.execution_order():
         headers.render_header(text=pulumi_project.description, env_config=env_config)
         stack = build_pulumi_stack(pulumi_project=pulumi_project,
@@ -300,6 +377,10 @@
 
 def up(provider: Provider,
        env_config: env_config_parser.EnvConfig):
+    """Execute `pulumi up` for each project of the given provider using the Pulumi Automation API.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    """
     for pulumi_project in provider.execution_order():
         headers.render_header(text=pulumi_project.description, env_config=env_config)
         stack = build_pulumi_stack(pulumi_project=pulumi_project,
@@ -307,6 +388,9 @@
         stack_up_result = stack.up(color=env_config.pulumi_color_settings(),
                                    on_output=print)
 
+        # If the project is instantiated without problems, then the on_success event
+        # as specified in the provider is run. This event is often used to do additional
+        # configuration, clean up, or to run external tools after a project is stood up.
         if pulumi_project.on_success:
             params = PulumiProjectEventParams(stack_outputs=stack_up_result.outputs,
                                               config=stack.get_all_config(),
@@ -316,6 +400,10 @@
 
 def down(provider: Provider,
         env_config: env_config_parser.EnvConfig):
+    """Execute `pulumi down` for each project of the given provider using the Pulumi Automation API.
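+    Projects are torn down in the reverse of the order in which they were stood up.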
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    """
     for pulumi_project in reversed(provider.execution_order()):
         headers.render_header(text=pulumi_project.description, env_config=env_config)
         stack = build_pulumi_stack(pulumi_project=pulumi_project,
diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py
index f341406..2931b2b 100644
--- a/pulumi/python/automation/providers/aws.py
+++ b/pulumi/python/automation/providers/aws.py
@@ -1,3 +1,7 @@
+"""
+File containing the AWS infrastructure provider for the MARA runner.
+"""
+
 import json
 import os
 import sys
@@ -16,6 +20,7 @@ class AwsProviderException(Exception):
 
 
 class AwsCli:
+    """AWS CLI execution helper class"""
     region: str
     profile: str
 
@@ -25,6 +30,9 @@ def __init__(self, region: Optional[str] = None, profile: Optional[str] = None):
         self.profile = profile
 
     def base_cmd(self) -> str:
+        """
+        :return: the base command and any required flags
+        """
        cmd = 'aws '
        if self.region and self.region != '':
            cmd += f'--region {self.region} '
@@ -34,7 +42,7 @@
 
     def update_kubeconfig_cmd(self, cluster_name: str) -> str:
         """
-        Returns the command used to update the kubeconfig with the passed cluster
+        Returns the command used to update the kubeconfig with the passed cluster name
         :param cluster_name: name of the cluster to add to the kubeconfig
         :return: command to be executed
         """
@@ -48,11 +56,17 @@
         return f'{self.base_cmd()} sts get-caller-identity'
 
     def list_azs_cmd(self) -> str:
+        """
+        Returns the command that lists the AWS availability zones to which the current user
+        can provision.
+        :return: command to be executed
+        """
         return f"{self.base_cmd()} ec2 describe-availability-zones --filter " \
                f"'Name=state,Values=available' --zone-ids"
 
 
 class AwsProvider(Provider):
+    """AWS infrastructure provider"""
     def infra_type(self) -> str:
         return 'AWS'
diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py
index d3bd399..ab29cb7 100644
--- a/pulumi/python/automation/providers/base_provider.py
+++ b/pulumi/python/automation/providers/base_provider.py
@@ -1,3 +1,7 @@
+"""
+This file provides the super class for all infrastructure providers.
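+
+Provider modules are discovered and loaded dynamically by the runner. A minimal sketch of that
+mechanism (mirroring provider_instance() in main.py - each provider module exposes a module-level
+INSTANCE singleton):
+
+    import importlib
+    module = importlib.import_module('providers.aws')
+    provider = module.INSTANCE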
+""" + import abc import os import pathlib @@ -6,6 +10,7 @@ from .pulumi_project import PulumiProject, SecretConfigKey +# Directory in which script is located SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -14,9 +19,13 @@ class InvalidConfigurationException(Exception): class Provider: + """Super class for all infrastructure providers""" @staticmethod def list_providers() -> Iterable[str]: + """returns an iterable of the providers available derived from the files in the providers directory + :return all the usable providers""" def is_provider(file: pathlib.Path) -> bool: + # Filter out the non-provider files return file.is_file() and \ not file.stem.endswith('base_provider') and \ not file.stem.endswith('pulumi_project') and \ @@ -27,6 +36,8 @@ def is_provider(file: pathlib.Path) -> bool: @staticmethod def validate_env_config_required_keys(required_keys: List[str], config: Mapping[str, str]): + """Validates that the required environment variables as defined by file or runtime environment are present""" + for key in required_keys: if key not in config.keys(): raise InvalidConfigurationException(f'Required configuration key [{key}] not found') @@ -40,24 +51,29 @@ def infra_type(self) -> str: @abc.abstractmethod def infra_execution_order(self) -> List[PulumiProject]: + """Pulumi infrastructure (not Kubernetes) projects to be executed in sequential order""" pass def new_stack_config(self, env_config: Mapping[str, str], defaults: Union[Dict[Hashable, Any], list, None]) -> Union[Dict[Hashable, Any], list, None]: + """Creates a new Pulumi stack configuration""" config = { 'kubernetes:infra_type': self.infra_type() } return config def validate_env_config(self, env_config: Mapping[str, str]): + """Validates that the passed environment variables are correct""" Provider.validate_env_config_required_keys(['PULUMI_STACK'], env_config) def validate_stack_config(self, stack_config: Union[Dict[Hashable, Any], list, None], env_config: Mapping[str, str]): + """Validates that the passed stack configuration is correct""" pass def k8s_execution_order(self) -> List[PulumiProject]: + """Pulumi Kubernetes projects to be executed in sequential order""" return [ PulumiProject(path='infrastructure/kubeconfig', description='Kubeconfig'), PulumiProject(path='kubernetes/secrets', description='Secrets'), @@ -87,9 +103,12 @@ def k8s_execution_order(self) -> List[PulumiProject]: ] def execution_order(self) -> List[PulumiProject]: + """Full list of Pulumi projects to be executed in sequential order (including both infrastructure and + Kubernetes""" return self.infra_execution_order() + self.k8s_execution_order() def display_execution_order(self, output: TextIO = sys.stdout): + """Writes the execution order of Pulumi projects in a visual tree to an output stream""" execution_order = self.execution_order() last_prefix = '' @@ -143,4 +162,4 @@ def _insert_project(project_path_to_insert_after: str, if project_position < 0: raise ValueError(f'Could not find project at path {project_path_to_insert_after}') - k8s_execution_order.insert(project_position + 1, project) \ No newline at end of file + k8s_execution_order.insert(project_position + 1, project) diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index 2c60f78..f77441e 100644 --- a/pulumi/python/automation/providers/do.py +++ b/pulumi/python/automation/providers/do.py @@ -1,3 +1,7 @@ +""" +File containing the Digital Ocean infrastructure provider for the MARA runner. 
+""" + import json import sys from typing import List, Dict, Hashable, Any, Union, MutableMapping, Optional, Mapping @@ -15,6 +19,7 @@ class DigitalOceanProviderException(Exception): class DoctlCli: + """Digital Ocean CLI execution helper class""" access_token: str region: Optional[str] @@ -23,27 +28,52 @@ def __init__(self, access_token: str, region: Optional[str] = None): self.region = region def base_cmd(self) -> str: + """ + :return: returns the base command and any required flags + """ cmd = 'doctl' cmd += f' --access-token "{self.access_token}" ' return cmd.strip() def validate_credentials_cmd(self) -> str: + """ + Returns the command that validates if the doctl command can authenticate correctly. + :return: command to be executed + """ return f'{self.base_cmd()} account get' def save_kubernetes_cluster_cmd(self, cluster_name: str) -> str: + """ + Returns the command used to update the kubeconfig with the passed cluster name + :param cluster_name: name of the cluster to add to the kubeconfig + :return: command to be executed + """ return f'{self.base_cmd()} kubernetes cluster config save {cluster_name}' def get_kubernetes_versions_json(self) -> str: + """ + Returns the command that lists the Kubernetes versions available. + :return: command to be executed + """ return f'{self.base_cmd()} kubernetes options versions --output json' def get_kubernetes_regions_json(self) -> str: + """ + Returns the command that lists the regions available to run Kubernetes. + :return: command to be executed + """ return f'{self.base_cmd()} kubernetes options regions --output json' def get_kubernetes_instance_sizes_json(self) -> str: + """ + Returns the command that lists the instance sizes available for Kubernetes nodes. + :return: command to be executed + """ return f'{self.base_cmd()} kubernetes options sizes --output json' class DigitalOceanProvider(Provider): + """Digital Ocean infrastructure provider""" def infra_type(self) -> str: return 'DO' @@ -177,6 +207,11 @@ def _update_kubeconfig(params: PulumiProjectEventParams): @staticmethod def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._config.ConfigValue]], env_config: Mapping[str, str]) -> str: + """Looks into multiple configuration sources for a valid Digital Ocean authentication token. + :param stack_config: reference to stack configuration + :param env_config: reference to environment configuration + :return: authentication token + """ # Token is in an environment variable or the environment variable file if 'DIGITALOCEAN_TOKEN' in env_config: return env_config['DIGITALOCEAN_TOKEN'] diff --git a/pulumi/python/automation/providers/linode.py b/pulumi/python/automation/providers/linode.py index 2737d1a..11eb166 100644 --- a/pulumi/python/automation/providers/linode.py +++ b/pulumi/python/automation/providers/linode.py @@ -1,3 +1,7 @@ +""" +File containing the Linode infrastructure provider for the MARA runner. +""" + import base64 from typing import List, Union, Dict, Hashable, Any, Mapping, MutableMapping @@ -17,6 +21,8 @@ class LinodeProviderException(Exception): class LinodeCli: + """Linode CLI execution helper class""" + def base_cmd(self) -> str: return 'linode-cli' @@ -144,6 +150,12 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list @staticmethod def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._config.ConfigValue]], env_config: Mapping[str, str]) -> str: + """Looks into multiple configuration sources for a valid Linode authentication token. 
+        :param stack_config: reference to stack configuration
+        :param env_config: reference to environment configuration
+        :return: authentication token
+        """
+
        # Token is in an environment variable or the environment variable file
        if 'LINODE_TOKEN' in env_config:
            return env_config['LINODE_TOKEN']
diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py
index 7b00ae1..538dfe6 100644
--- a/pulumi/python/automation/providers/pulumi_project.py
+++ b/pulumi/python/automation/providers/pulumi_project.py
@@ -1,9 +1,14 @@
+"""
+This file contains classes related to modeling Pulumi projects as discrete directories that
+are invoked individually in sequence by the Pulumi Automation API.
+"""
+
 import os.path
 from typing import Optional, Callable, Mapping, List, MutableMapping
 
 import yaml
 from pulumi import automation as auto
 
-
+# Directory in which script is located
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -72,6 +77,7 @@ def name(self) -> str:
 
 
 class PulumiProjectEventParams:
+    """Object containing the state passed to an on_success event after the successful stand up of a Pulumi project."""
     stack_outputs: MutableMapping[str, auto._output.OutputValue]
     config: MutableMapping[str, auto._config.ConfigValue]
     env_config: Mapping[str, str]
diff --git a/pulumi/python/automation/providers/update_kubeconfig.py b/pulumi/python/automation/providers/update_kubeconfig.py
index bd57d18..6201d45 100644
--- a/pulumi/python/automation/providers/update_kubeconfig.py
+++ b/pulumi/python/automation/providers/update_kubeconfig.py
@@ -12,6 +12,11 @@
 # the specific language governing permissions and limitations under
 # the License.
 
+"""
+This file contains functions that allow for merging in a new kubeconfig into the existing
+kubectl config files contained in a user's home directory or path specified by KUBECONFIG.
+"""
+
 import os
 import logging
 import errno
@@ -20,11 +25,18 @@
 from typing import Mapping, Any
 import yaml
 
+# Default path to user's kubectl config files
 DEFAULT_PATH = os.path.expanduser("~/.kube/config")
 LOG = logging.getLogger(__name__)
 
 
 def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]):
+    """Merge the passed kubeconfig for the given cluster into the existing kubectl config files.
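+
+    The target config file is chosen from the KUBECONFIG entry in env when set, falling back to
+    ~/.kube/config; an existing entry for the cluster is updated in place rather than duplicated.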
+    :param cluster_name: name of the cluster associated with the kubeconfig
+    :param env: map of environment variables from which KUBECONFIG is read
+    :param kubeconfig: contents of the kubeconfig
+    """
+
     cluster = kubeconfig['clusters'][0]
     user = kubeconfig['users'][0]
     alias = kubeconfig['contexts'][0]['name']
@@ -52,6 +64,9 @@ def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Map
         ))
 
 
+# Everything after this line is sourced from the AWS SDK
+
+
 class KubeconfigError(RuntimeError):
     """ Base class for all kubeconfig errors."""
 
diff --git a/pulumi/python/automation/stack_config_parser.py b/pulumi/python/automation/stack_config_parser.py
index b367d07..48f5af8 100644
--- a/pulumi/python/automation/stack_config_parser.py
+++ b/pulumi/python/automation/stack_config_parser.py
@@ -6,7 +6,9 @@
 
 import yaml
 
+# Directory in which script is located
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+# Default path to the directory containing the global MARA Pulumi stack configuration file
 DEFAULT_DIR_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi']))
 
 
@@ -19,6 +21,10 @@ def __init__(self, filename: str, *args: object) -> None:
 
 
 class PulumiStackConfig(dict):
+    """Object containing the configuration parameters used by Pulumi to stand up projects. When this file is loaded by
+    Pulumi within the context of a project execution, it is *not* loaded into this object. This object is used only by
+    the MARA runner for the Pulumi Automation API."""
+
     config_path: Optional[str] = None
 
    def to_pulumi_config_value(self) -> MutableMapping[str, ConfigValue]:
@@ -41,10 +47,14 @@ def to_pulumi_config_value(self) -> MutableMapping[str, ConfigValue]:
 
 
 def _stack_config_path(stack_name: str) -> str:
+    """Returns the path to the stack configuration file on the file system"""
     return os.path.sep.join([DEFAULT_DIR_PATH, f'Pulumi.{stack_name}.yaml'])
 
 
 def _read(config_file_path: str) -> PulumiStackConfig:
+    """Reads the stack configuration file from the specified path, parses it, and loads it into the PulumiStackConfig
+    data structure."""
+
     # Return empty config for empty config files
     if os.path.getsize(config_file_path) == 0:
         raise EmptyConfigurationException(filename=config_file_path)
@@ -57,5 +67,11 @@ def _read(config_file_path: str) -> PulumiStackConfig:
 
 
 def read(stack_name: str) -> PulumiStackConfig:
+    """Generates the configuration file path based on the stack name, reads the stack configuration file, parses it,
+    and loads it into the PulumiStackConfig data structure.
+ + :param stack_name: stack name to read configuration for + :return: new instance of PulumiStackConfig + """ stack_config_path = _stack_config_path(stack_name) return _read(stack_config_path) From 138157c9b6887ce04850ffbcb6127cba59128b83 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Thu, 14 Jul 2022 17:09:34 -0700 Subject: [PATCH 39/62] feat: migrate to python logging for output --- pulumi/python/automation/colorize.py | 1 - pulumi/python/automation/headers.py | 7 ++ pulumi/python/automation/main.py | 93 +++++++++++++------ .../automation/providers/update_kubeconfig.py | 54 +---------- 4 files changed, 76 insertions(+), 79 deletions(-) diff --git a/pulumi/python/automation/colorize.py b/pulumi/python/automation/colorize.py index ec47baf..bd3e78a 100644 --- a/pulumi/python/automation/colorize.py +++ b/pulumi/python/automation/colorize.py @@ -45,7 +45,6 @@ def println_nocolor(text: str, output: typing.TextIO = sys.stdout): speed=-1.0, spread=0.5, force=False) - colorizer = lolcat.LolCat(mode=options.mode, output=sys.stdout) def println_color(text: str, output: typing.TextIO = sys.stdout): """Prints a new line to the console using rainbow colors diff --git a/pulumi/python/automation/headers.py b/pulumi/python/automation/headers.py index 123342b..12339f7 100644 --- a/pulumi/python/automation/headers.py +++ b/pulumi/python/automation/headers.py @@ -2,10 +2,13 @@ This file defines the functions needed to render headers that are displayed before each Pulumi project is executed. These headers provide a useful visual distinction between each step taken to set up an environment. """ +import logging + import colorize import env_config_parser from fart import fart +LOG = logging.getLogger('runner') FART_FONT = fart.load_font('standard') banner_type = 'fabulous' @@ -15,9 +18,13 @@ def render_header(text: str, env_config: env_config_parser.EnvConfig): :param text: header text to render :param env_config: reference to environment configuration """ + global banner_type + if banner_type == 'fabulous': header = fart.render_fart(text=text, font=FART_FONT) if not env_config.no_color(): colorize.PRINTLN_FUNC(header) + elif banner_type == 'log': + LOG.info('[%s] started', text) else: print(f'* {text}') diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index 24a5904..c0376f1 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -48,7 +48,11 @@ # Types of headings available to show the difference between Pulumi projects # fabulous: a large rainbow covered banner # boring: a single line of text uncolored -BANNER_TYPES: List[str] = ['fabulous', 'boring'] +# log: writes the header to the same logger as Pulumi output +BANNER_TYPES: List[str] = ['fabulous', 'boring', 'log'] +# Logger instance +PULUMI_LOG = logging.getLogger('pulumi') +RUNNER_LOG = logging.getLogger('runner') # We default to a fabulous banner of course banner_type = BANNER_TYPES[0] @@ -76,7 +80,7 @@ def usage(): up Provisions all configured infrastructure validate Validates that the environment and configuration is correct """ - print(usage_text) + print(usage_text, file=sys.stdout) def provider_instance(provider_name: str) -> Provider: @@ -96,7 +100,7 @@ def main(): longopts = ["help", 'debug', 'banner-type', 'provider='] # long form options opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts) except getopt.GetoptError as err: - print(err) # will print something like "option -a not recognized" + RUNNER_LOG.error(err) usage() sys.exit(2) @@ -124,12 +128,12 @@ def main(): 
if len(sys.argv) > 1: operation = sys.argv[-1] else: - print(f'No operation specified') + RUNNER_LOG.error('No operation specified') usage() sys.exit(2) if operation not in OPERATIONS: - print(f'Unknown operation specified: {operation}') + RUNNER_LOG.error('Unknown operation specified: %s', operation) usage() sys.exit(2) @@ -141,13 +145,16 @@ def main(): # Now validate providers because everything underneath here depends on them if not provider_name or provider_name.strip() == '': - print('Provider must be specified') + RUNNER_LOG.error('No provider specified - provider is a required argument') sys.exit(2) if provider_name not in PROVIDERS: - print(f'Unknown provider specified: {provider_name}') + RUNNER_LOG.error('Unknown provider specified: %s', provider_name) sys.exit(2) + setup_loggers() + provider = provider_instance(provider_name.lower()) + RUNNER_LOG.debug('Using [%s] infrastructure provider', provider.infra_type()) # We execute the operation requested - different operations have different pre-requirements, so they are matched # differently. Like show-execution does not require reading the configuration files, so we just look for a match @@ -166,7 +173,7 @@ def main(): validate(provider=provider, env_config=env_config, stack_config=stack_config, verbose=validate_with_verbosity) except Exception as e: - logging.exception('Validation failed: %s', e) + RUNNER_LOG.error('Validation failed: %s', e) sys.exit(3) if operation == 'refresh': @@ -180,7 +187,7 @@ def main(): pulumi_cmd = None # validate was already run above else: - print(f'Unknown operation: {operation}') + RUNNER_LOG.error('Unknown operation: %s', operation) sys.exit(2) # Lastly, if the operation involves the execution of a Pulumi command, we make sure that secrets have been @@ -196,6 +203,32 @@ def main(): raise e +def setup_loggers(): + """Configures two loggers: 1) For the MARA Runner itself 2) For Pulumi output""" + global debug_on + + if debug_on: + level = logging.DEBUG + else: + level = logging.INFO + + # Pulumi output goes to STDOUT + PULUMI_LOG.setLevel(level=level) + pulumi_ch = logging.StreamHandler(stream=sys.stdout) + pulumi_ch.setLevel(level=level) + formatter = logging.Formatter('%(message)s') + pulumi_ch.setFormatter(formatter) + PULUMI_LOG.addHandler(pulumi_ch) + + # Runner output goes to STDERR + RUNNER_LOG.setLevel(level=level) + runner_ch = logging.StreamHandler(stream=sys.stderr) + runner_ch.setLevel(level=level) + formatter = logging.Formatter('%(message)s') + runner_ch.setFormatter(formatter) + RUNNER_LOG.addHandler(runner_ch) + + def read_stack_config(provider: Provider, env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig: """Load and parse the Pulumi stack configuration file. In MARA, this is a globally shared file. 
@@ -205,11 +238,12 @@ def read_stack_config(provider: Provider, """ try: stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) + RUNNER_LOG.debug('stack configuration file read') except FileNotFoundError as e: - print(f' > stack configuration file does not exist: {e.filename}') + RUNNER_LOG.info('stack configuration file [%s] does not exist', e.filename) stack_config = prompt_for_stack_config(provider, env_config, e.filename) except stack_config_parser.EmptyConfigurationException as e: - print(f' > stack configuration file is empty: {e.filename}') + RUNNER_LOG.info('stack configuration file [%s] is empty', e.filename) stack_config = prompt_for_stack_config(provider, env_config, e.filename) return stack_config @@ -224,7 +258,7 @@ def prompt_for_stack_config(provider: Provider, :param filename: location to write stack config file to :return: data structure containing stack configuration """ - print(f' creating new configuration based on user input') + RUNNER_LOG.info('creating new configuration based on user input') stack_defaults_path = os.path.sep.join([os.path.dirname(filename), 'Pulumi.stackname.yaml.example']) @@ -257,11 +291,10 @@ def validate(provider: Provider, def check_path(cmd: str, fail_message: str) -> bool: cmd_path = shutil.which(cmd) if cmd_path: - if verbose: - print(f' > {cmd} found at path: {cmd_path}') + RUNNER_LOG.debug('[%s] found at path: %s', cmd, cmd_path) return True else: - print(f'{cmd} is not installed - {fail_message}') + RUNNER_LOG.error('[%s] is not installed - %s', cmd, fail_message) return False success = True @@ -280,29 +313,30 @@ def check_path(cmd: str, fail_message: str) -> bool: if 'kubernetes:infra_type' in stack_config['config']: previous_provider = stack_config['config']['kubernetes:infra_type'] if previous_provider.lower() != provider.infra_type().lower(): - print(f'Stack has already been used with the provider [{previous_provider}], so it cannot ' - f'be run with the specified provider [{provider.infra_type()}]. Destroy all resources ' - 'and remove the kubernetes:infra_type key from the stack configuration.', file=sys.stderr) + RUNNER_LOG.error('Stack has already been used with the provider [%s], so it cannot ' + 'be run with the specified provider [%s]. 
Destroy all resources ' + 'and remove the kubernetes:infra_type key from the stack configuration.', + previous_provider, provider.infra_type()) sys.exit(3) # Next, we validate that the environment file has the required values try: provider.validate_env_config(env_config) except Exception as e: - print(f' > environment file at path failed validation: {env_config.config_path}') + RUNNER_LOG.error('environment file [%s] failed validation', env_config.config_path) raise e if verbose: - print(f' > environment file validated at path: {env_config.config_path}') + RUNNER_LOG.debug('environment file [%s] passed validation', env_config.config_path) try: provider.validate_stack_config(stack_config, env_config) except Exception as e: - print(f' > stack configuration file at path failed validation: {stack_config.config_path}') + RUNNER_LOG.error('stack configuration file [%s] at path failed validation', stack_config.config_path) raise e if verbose: - print(f' > stack configuration file validated at path: {stack_config.config_path}') + RUNNER_LOG.debug('stack configuration file [%s] passed validation', stack_config.config_path) - print(' > configuration is OK') + RUNNER_LOG.debug('all configuration is OK') def init_secrets(env_config: env_config_parser.EnvConfig, @@ -350,7 +384,7 @@ def build_pulumi_stack(pulumi_project: PulumiProject, :param env_config: reference to environment configuration :return: reference to a new or existing stack """ - print(f'project: {pulumi_project.name()} path: {pulumi_project.abspath()}') + RUNNER_LOG.info('Project [%s] selected: %s', pulumi_project.name(), pulumi_project.abspath()) stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( env_vars=env_config, @@ -372,7 +406,7 @@ def refresh(provider: Provider, env_config=env_config) stack.refresh_config() stack.refresh(color=env_config.pulumi_color_settings(), - on_output=print) + on_output=write_pulumi_output) def up(provider: Provider, @@ -386,7 +420,7 @@ def up(provider: Provider, stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack_up_result = stack.up(color=env_config.pulumi_color_settings(), - on_output=print) + on_output=write_pulumi_output) # If the project is instantiated without problems, then the on_success event # as specified in the provider is run. 
This event is often used to do additional @@ -409,7 +443,12 @@ def down(provider: Provider, stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack_down_result = stack.destroy(color=env_config.pulumi_color_settings(), - on_output=print) + on_output=write_pulumi_output) + + +def write_pulumi_output(text: str): + """Handles output from Pulumi invocations via the Automation API""" + PULUMI_LOG.info(text) if __name__ == "__main__": diff --git a/pulumi/python/automation/providers/update_kubeconfig.py b/pulumi/python/automation/providers/update_kubeconfig.py index 6201d45..e88f0c1 100644 --- a/pulumi/python/automation/providers/update_kubeconfig.py +++ b/pulumi/python/automation/providers/update_kubeconfig.py @@ -20,14 +20,13 @@ import os import logging import errno -import sys from collections import OrderedDict from typing import Mapping, Any import yaml # Default path to user's kubectl config files DEFAULT_PATH = os.path.expanduser("~/.kube/config") -LOG = logging.getLogger(__name__) +LOG = logging.getLogger('runner') def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]): @@ -55,13 +54,9 @@ def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Map writer.write_kubeconfig(config) if config.has_cluster(cluster_name): - uni_print("Updated context {0} in {1}\n".format( - new_context_dict["name"], config.path - )) + LOG.info('Updated context %s in %s', new_context_dict["name"], config.path) else: - uni_print("Added new context {0} to {1}\n".format( - new_context_dict["name"], config.path - )) + LOG.info('Added new context %s to %s', new_context_dict["name"], config.path) # Everything after this line is sourced from the AWS SDK @@ -436,46 +431,3 @@ def insert_cluster_user_pair(self, config, cluster, user, alias=None): config.content["current-context"] = context["name"] return context - - -def uni_print(statement, out_file=None): - """ - This function is used to properly write unicode to a file, usually - stdout or stdderr. It ensures that the proper encoding is used if the - statement is not a string type. - """ - if out_file is None: - out_file = sys.stdout - try: - # Otherwise we assume that out_file is a - # text writer type that accepts str/unicode instead - # of bytes. - out_file.write(statement) - except UnicodeEncodeError: - # Some file like objects like cStringIO will - # try to decode as ascii on python2. - # - # This can also fail if our encoding associated - # with the text writer cannot encode the unicode - # ``statement`` we've been given. This commonly - # happens on windows where we have some S3 key - # previously encoded with utf-8 that can't be - # encoded using whatever codepage the user has - # configured in their console. - # - # At this point we've already failed to do what's - # been requested. We now try to make a best effort - # attempt at printing the statement to the outfile. - # We're using 'ascii' as the default because if the - # stream doesn't give us any encoding information - # we want to pick an encoding that has the highest - # chance of printing successfully. - new_encoding = getattr(out_file, 'encoding', 'ascii') - # When the output of the aws command is being piped, - # ``sys.stdout.encoding`` is ``None``. 
-        if new_encoding is None:
-            new_encoding = 'ascii'
-        new_statement = statement.encode(
-            new_encoding, 'replace').decode(new_encoding)
-        out_file.write(new_statement)
-        out_file.flush()

From c23714065a62a6bf82859e73cc7799b8bf832ce5 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Fri, 15 Jul 2022 16:57:53 -0700
Subject: [PATCH 40/62] docs: add MARA Runner design document

---
 pulumi/python/automation/DESIGN.md | 202 +++++++++++++++++++++++++++++
 1 file changed, 202 insertions(+)
 create mode 100644 pulumi/python/automation/DESIGN.md

diff --git a/pulumi/python/automation/DESIGN.md b/pulumi/python/automation/DESIGN.md
new file mode 100644
index 0000000..5dc8385
--- /dev/null
+++ b/pulumi/python/automation/DESIGN.md
@@ -0,0 +1,202 @@
+# MARA Runner Design
+
+## Problem
+
+When creating an infrastructure as code deployment in Pulumi, it is common to have infrastructure
+that depends on the presence of other infrastructure. If there are only a few layers of dependencies,
+it is manageable. However, once you pass three layers of dependencies, it becomes quite difficult
+to manage the complexity of your deployment. This also results in deployment plans that are almost
+incomprehensible.
+
+This is the problem that was faced when using Pulumi to build MARA. Multiple infrastructure services
+must be instantiated in order to get a working Kubernetes environment. Moreover, once the Kubernetes
+cluster is present, it needs additional components that form a web of dependencies. For example, if
+we use AWS, a full deployment looks something like the following:
+```
+ ┌── infrastructure/aws
+ │   ├── vpc [VPC]
+ │   ├── eks [EKS]
+ │   ├── ecr [ECR]
+ ├── infrastructure
+ │   └── kubeconfig [Kubeconfig]
+ ├── kubernetes
+ │   └── secrets [Secrets]
+ ├── utility
+ │   ├── kic-image-build [KIC Image Build]
+ │   ├── kic-image-push [KIC Image Push]
+ ├── kubernetes/nginx
+ │   ├── ingress-controller-namespace [K8S Ingress NS]
+ │   ├── ingress-controller [Ingress Controller]
+ ├── kubernetes
+ │   ├── logstore [Logstore]
+ │   ├── logagent [Log Agent]
+ │   ├── certmgr [Cert Manager]
+ │   ├── prometheus [Prometheus]
+ │   ├── observability [Observability]
+ └── kubernetes/applications
+     └── application
+```
+
+EKS cannot be instantiated until the VPC is configured. The Ingress Controller image cannot be
+pushed until a container registry is available. The application cannot be started until log
+management, certificate management, and observability services have been instantiated. A
+non-trivial Kubernetes deployment is truly a web of dependencies!
+
+The above example shows the dependencies for a single infrastructure provider (AWS) that is hosting
+a Kubernetes environment and a container registry. However, if the infrastructure provider is
+changed, then the content and order of the dependencies also change. As such, this introduces a
+conditional element that needs to be managed.
+
+## Solution
+
+The approach taken in MARA to mitigate the Pulumi dependency problem is to break apart Pulumi
+deployments (projects) into bite-sized pieces that each do one thing. Pulumi projects pass state
+to each other by executing sequentially and using
+[stack references](https://www.pulumi.com/learn/building-with-pulumi/stack-references/).
+
+Initially, sequential execution was implemented through a bash script that would run `pulumi up`
+across a series of directories in a set order. Each directory was a Pulumi project. If a given
+project had dependent state on another project, it would use a stack reference to pull state out
+of the previously executed project it depended on. When additional infrastructure providers
+were added, they were supported by different bash scripts that were conditionally called.
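+
+For example, a downstream project can pull an output exported by a previously executed project
+with a stack reference. The sketch below is illustrative only; the stack and output names are
+assumptions made for the example, not actual MARA code:
+
+```
+import pulumi
+
+# Reference the stack of a previously executed project (the name is illustrative)
+ref = pulumi.StackReference('my-org/infrastructure-eks/my-stack')
+# Read an output that the upstream project exported with pulumi.export(...)
+cluster_name = ref.get_output('cluster_name')
+```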
+
+This bash script approach proved to be unmanageable: it lacked flexibility and configurability,
+and it made adding new infrastructure providers difficult. For example, if the content and/or
+ordering of infrastructure deployed to Kubernetes needs to change based on the infrastructure
+provider, then this is difficult or impossible with the bash script approach. Moreover, if you
+want to read configuration and change what or how things are deployed, this also becomes
+difficult using just bash scripting. Lastly, due to differences in execution environments such
+as Linux and macOS, it is difficult to write portable bash scripts.
+
+When Pulumi released the [Automation API](https://www.pulumi.com/docs/guides/automation-api/),
+it presented an opportunity to resolve the shortcomings mentioned above. Using the Automation
+API, the MARA Runner was created to provide a framework for gluing together multiple Pulumi
+projects such that they can all be deployed as one single unit of execution while still
+allowing for piecemeal deployment using `pulumi up`.
+
+The MARA Runner is a CLI program written in Python that provides the following:
+
+ * Selection of an infrastructure provider
+ * Configuration of all Pulumi projects through shared configuration files
+ * Propagation of Pulumi operations such as up, refresh, and destroy across all projects
+ * Visualization of which Pulumi projects will be executed for a given infrastructure provider
+
+## Terms
+
+The following terms are used repeatedly in the MARA Runner. For clarity, they are defined below.
+
+### Pulumi Project
+
+A Pulumi [Project](https://www.pulumi.com/docs/intro/concepts/project/) is a folder/directory
+that contains a `Pulumi.yaml` file. It is a stand-alone single unit of execution. The execution
+of multiple projects is tied together by the MARA Runner.
+
+### Infrastructure Provider
+
+Within the context of the MARA Runner, the term infrastructure provider (or provider for short)
+refers to whatever will be hosting a Kubernetes environment and a container registry.
+Infrastructure providers are implemented as subclasses of the
+[Provider](providers/base_provider.py) class. They contain a collection of references to the
+directories of Pulumi projects which are categorized as either "infrastructure" or "kubernetes".
+The categorization of "infrastructure" means that a project is a requirement for having
+a working Kubernetes cluster and container registry.
+
+### Execution
+
+Execution refers to the running of a Pulumi project by doing `pulumi up`.
+
+### Environment Configuration
+
+The environment configuration file is located by default at `<project root>/config/pulumi/environment`.
+It is used to define the environment variables needed when executing a Pulumi project.
+When executing Pulumi projects, the system environment is used AND the values from the
+environment configuration are appended/overwritten over the system environment.
+The file format is a simple key-value mapping where each line contains a single `<key>=<value>` entry.
+
+### Stack Configuration
+
+The stack configuration is a Pulumi native configuration file that is specific to a single
+Pulumi [Stack](https://www.pulumi.com/docs/intro/concepts/stack/). The stack configuration
+is located by default at `<project root>/config/pulumi/Pulumi.<stack name>.yaml`.
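+
+For example, a minimal stack configuration file might contain entries like the following.
+The keys shown are ones the Runner reads; the values are illustrative:
+
+```
+config:
+  kubernetes:infra_type: AWS
+  aws:region: us-west-2
+  aws:profile: default
+```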
+
+## Design
+
+Below is a rough outline of the major components of the Runner and their order of execution.
+
+```
+Validate         Prompt User for            Prompt User for
+Configuration───►Provider Configuration────►Secrets │
+                                                    │
+┌─────────────────────────────────────────────────────────┘
+▼
+Provider         Provider     Infrastructure
+Selection ──────►Execution───►Project
+                              Execution───────────────────────┐
+                               │                               │
+                               └─►Infrastructure Project(s)... │
+                                                               │
+┌─────────────────────────────────────────────────────────────┘
+▼
+Write Secrets    Kubernetes
+to Kubernetes───►Project
+                 Execution
+                  │
+                  └─►Kubernetes Project(s)...
+```
+
+### Assumptions
+
+There are some assumptions for how Pulumi is used by the Runner that differ from what is
+possible using Pulumi directly:
+
+ * All Pulumi projects use the same name for their stack
+ * All Pulumi projects use the same stack configuration file (except the [secrets](../kubernetes/secrets) project)
+ * All secrets are stored encrypted in the [secrets](../kubernetes/secrets) project and loaded into Kubernetes as
+   secrets
+ * Infrastructure providers cannot be changed on a stack after the first run; as such, a new
+   stack will need to be created when using multiple infrastructure providers
+ * Stack references are used to pass state between Pulumi projects
+ * The configuration key `kubernetes:infra_type` contains the name of the infrastructure provider
+   as used in the Runner
+ * If there is any error running a Pulumi project, the Runner will exit, and it is up to the user
+   to try again or fix the issue
+ * The order of execution may change between different infrastructure providers
+ * All required external programs are installed
+ * The Runner is invoked from a virtual environment as set up by the
+   [setup_venv.sh](../../../bin/setup_venv.sh) script
+ * After a Kubernetes cluster is stood up, the relevant configuration files are added to the
+   system such that it can be managed with the `kubectl` tool
+
+### Configuration
+
+The initial phase of the Runner's execution reads, parses, and validates the environment
+and stack configuration files. If the stack configuration is missing or empty, it is assumed
+that the environment is being started for the first time, and the user is prompted for the
+required configuration parameters.
+
+After configuration validation, the user is prompted to input any required secrets that are
+not currently persisted. These secrets are encrypted using Pulumi's local secret handling
+and stored in ciphertext in the [secrets](../kubernetes/secrets) project.
+
+### Provider
+
+After configuration has completed, a provider is selected based on the options specified
+by the user when invoking the Runner. This provider is used as the source of data for which
+Pulumi projects are executed and in what order. When standing up an environment, the provider
+first executes the Pulumi projects that are categorized as "infrastructure". Infrastructure
+in this context means that these projects must have executed successfully in order to have a
+working Kubernetes cluster and container registry.
+
+A Pulumi project reference within a provider may optionally have an `on_success` event
+registered, which is run when the project executes successfully. Typically, these events
+do things like add configuration for a cluster to the kubectl configuration directory, as
+the sketch below illustrates.
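+
+The sketch is a simplified illustration rather than the actual MARA source; in particular,
+the `PulumiProject` constructor arguments, the project path, and the `cluster_name` output
+key are assumptions made for the example:
+
+```
+from providers.pulumi_project import PulumiProject, PulumiProjectEventParams
+
+def _update_kubeconfig(params: PulumiProjectEventParams):
+    # params.stack_outputs holds the outputs of the project that just ran
+    cluster_name = params.stack_outputs['cluster_name'].value
+    # ...merge the new cluster's credentials into the kubectl config here...
+
+# A project reference that runs _update_kubeconfig after a successful `pulumi up`
+eks_project = PulumiProject(path='infrastructure/aws/eks',
+                            description='EKS',
+                            on_success=_update_kubeconfig)
+```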
+
+After the infrastructure projects have completed executing, the Runner then executes
+the [secrets](../kubernetes/secrets) project, which stores the locally encrypted secrets
+as [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) on the
+newly created Kubernetes cluster.
+
+Once the required secrets are in place, the Runner then executes all the projects
+categorized as "kubernetes", including the final application to be deployed.
+
+At this point, the application should be deployed.
\ No newline at end of file

From 023b4d60c8323e352c6078efe79626990e0a2cc6 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 21 Jul 2022 08:19:01 -0700
Subject: [PATCH 41/62] fix: change sed flag from -r to posix compat -E

Fixes #168
---
 bin/setup_venv.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh
index d629d1f..c222e63 100755
--- a/bin/setup_venv.sh
+++ b/bin/setup_venv.sh
@@ -272,7 +272,7 @@ fi
 # Download Pulumi CLI tooling
 # Regular expression and sed command from https://superuser.com/a/363878
 echo "Downloading Pulumi CLI into virtual environment"
-PULUMI_VERSION="$(pip3 list | grep 'pulumi ' | sed -nre 's/^[^0-9]*(([0-9]+\.)*[0-9]+).*/\1/p')"
+PULUMI_VERSION="$(pip3 list | grep 'pulumi ' | sed -nEe 's/^[^0-9]*(([0-9]+\.)*[0-9]+).*/\1/p')"
 if [ -z $PULUMI_VERSION ] ; then
   echo "Failed to find Pulumi version - EXITING"
   exit 5

From 1d916ae028ac77916cd9b6442553fb0ca64aa757 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 21 Jul 2022 09:28:54 -0700
Subject: [PATCH 42/62] feat: add easy runner script to MARA automation

This change adds a bash script that sets up the virtual environment and
invokes the automation/main.py script. Ideally, this would make running
MARA much easier than it currently is.
---
 .gitignore                       |  3 +++
 bin/setup_venv.sh                | 27 ++++++++++++++++++++-------
 pulumi/python/automation/main.py |  4 +++-
 pulumi/python/runner             | 22 ++++++++++++++++++++++
 4 files changed, 48 insertions(+), 8 deletions(-)
 create mode 100755 pulumi/python/runner

diff --git a/.gitignore b/.gitignore
index bb596ba..2b62406 100644
--- a/.gitignore
+++ b/.gitignore
@@ -260,6 +260,9 @@ override.tf.json
 
 # End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all,terraform
 
+# Ignore locally installed pyenv environment
+.pyenv
+
 *.pyc
 !/extras/jwt.token
 /pulumi/python/tools/common/config/*.yaml
diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh
index c222e63..be6e5dc 100755
--- a/bin/setup_venv.sh
+++ b/bin/setup_venv.sh
@@ -60,6 +60,7 @@ if ! command -v git >/dev/null; then
   exit 1
 fi
 
+# When Python is not installed, set it up with pyenv
 if ! command -v python3 >/dev/null; then
   if ! command -v make >/dev/null; then
     echo >&2 "make must be installed in order to install python with pyenv"
@@ -86,7 +87,7 @@ if ! command -v python3 >/dev/null; then
     echo "required libraries: libbz2 libffi libreadline libsqlite3 libssl zlib1g"
  fi
 
-  export PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv"
+  PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv"
   mkdir -p "${PYENV_ROOT}"
 
   git_clone_log="$(mktemp -t pyenv_git_clone-XXXXXXX.log)"
@@ -97,13 +98,21 @@ if ! command -v python3 >/dev/null; then
     cat >&2 "${git_clone_log}"
   fi
 
-  export PATH="$PYENV_ROOT/bin:$PATH"
+  PATH="$PYENV_ROOT/bin:$PATH"
 fi
 
-# If pyenv is available we use a hardcoded python version
+# If pyenv is available we use the python version as set in the
+# .python-version file. This gives us a known and well-tested version
+# of python.
 if command -v pyenv >/dev/null; then
   eval "$(pyenv init --path)"
   eval "$(pyenv init -)"
+
+  if [ -z "${PYENV_ROOT}" ]; then
+    PYENV_ROOT=~/.pyenv
+  fi
+
+  echo "pyenv detected in: ${PYENV_ROOT}"
   pyenv install --skip-existing <"${script_dir}/../.python-version"
 
   # If the pyenv-virtualenv tools are installed, prompt the user if they want to
@@ -126,12 +135,12 @@ fi
 
 if [ ${has_pyenv_venv_plugin} -eq 1 ]; then
   eval "$(pyenv virtualenv-init -)"
-  if ! pyenv virtualenvs --bare | grep --quiet '^ref-arch-pulumi-aws'; then
-    pyenv virtualenv ref-arch-pulumi-aws
+  if ! pyenv virtualenvs --bare | grep --quiet '^mara'; then
+    pyenv virtualenv mara
   fi
 
   if [ -z "${VIRTUAL_ENV}" ]; then
-    pyenv activate ref-arch-pulumi-aws
+    pyenv activate mara
   fi
 
   if [ -h "${script_dir}/../pulumi/python/venv" ]; then
@@ -150,6 +159,10 @@ if [ ${has_pyenv_venv_plugin} -eq 1 ]; then
     fi
   fi
 
+  # We create a symbolic link to the pyenv managed venv because using the
+  # pyenv virtual environment tooling introduces too many conditional logic paths
+  # in subsequent scripts/programs that need to load the virtual environment.
+  # Assuming that the venv directory is at a fixed known path makes things easier.
   echo "Linking virtual environment [${VIRTUAL_ENV}] to local directory [venv]"
   ln -s "${VIRTUAL_ENV}" "${script_dir}/../pulumi/python/venv"
 fi
@@ -241,7 +254,7 @@ fi
 #
 # This section originally pulled the most recent version of Kubectl down; however it turned out that
-# was causing isues with our AWS deploy (see the issues in the repo). Addtionally, this was only
+# was causing issues with our AWS deploy (see the issues in the repo). Additionally, this was only
 # downloading the kubectl if it did not exist; this could result in versions not being updated if the
 # MARA project was run in the same environment w/o a refresh.
 #
diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
index c0376f1..f55d72e 100755
--- a/pulumi/python/automation/main.py
+++ b/pulumi/python/automation/main.py
@@ -59,12 +59,14 @@
 # Debug flag that will trigger additional output
 debug_on = False
 
+# Use the script name as invoked rather than hard coding it
+script_name = os.path.basename(sys.argv[0])
 
 def usage():
     usage_text = f"""Modern Application Reference Architecture (MARA) Runner
 
 USAGE:
-    main.py [FLAGS] [OPERATION]
+    {script_name} [FLAGS] [OPERATION]
 
 FLAGS:
     -d, --debug    Enable debug output on all of the commands executed
diff --git a/pulumi/python/runner b/pulumi/python/runner
new file mode 100755
index 0000000..dedcf65
--- /dev/null
+++ b/pulumi/python/runner
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o errexit  # abort on nonzero exit status
+set -o pipefail # don't hide errors within pipes
+
+script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+PYENV_ROOT="${script_dir}/.pyenv"
+
+if [ -d "${PYENV_ROOT}" ]; then
+  PATH="${PATH}:${PYENV_ROOT}/bin"
+  eval "$(pyenv init --path)"
+  eval "$(pyenv init -)"
+fi
+
+if [ -d "${script_dir}/venv" ]; then
+  source "${script_dir}/venv/bin/activate"
+else
+  >&2 echo "Python virtual environment not found at path: ${script_dir}/venv"
+  >&2 echo "Have you run setup_venv.sh to initialize the environment?"
+fi
+
+exec "$script_dir/automation/main.py"
\ No newline at end of file

From 3d357d6454cebf83003c679aa755c2e4417b46db Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 21 Jul 2022 10:13:11 -0700
Subject: [PATCH 43/62] fix: properly pass parameters to main.py from runner

---
 pulumi/python/runner | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pulumi/python/runner b/pulumi/python/runner
index dedcf65..0c96c7e 100755
--- a/pulumi/python/runner
+++ b/pulumi/python/runner
@@ -19,4 +19,4 @@ else
   >&2 echo "Have you run setup_venv.sh to initialize the environment?"
 fi
 
-exec "$script_dir/automation/main.py"
\ No newline at end of file
+exec "$script_dir/automation/main.py" "$@"
\ No newline at end of file

From e43662975ae0df5a013710c1f297363ef9c7db65 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Thu, 21 Jul 2022 17:05:21 -0700
Subject: [PATCH 44/62] refactor: improve error messaging and operations parsing

---
 pulumi/python/automation/main.py              | 61 +++++++++++++------
 pulumi/python/automation/providers/aws.py     | 15 ++++-
 .../automation/providers/base_provider.py     | 10 ++-
 3 files changed, 62 insertions(+), 24 deletions(-)

diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
index f55d72e..8e184b6 100755
--- a/pulumi/python/automation/main.py
+++ b/pulumi/python/automation/main.py
@@ -30,7 +30,7 @@
 from typing import List, Optional
 from getpass import getpass
 
-from providers.base_provider import Provider
+from providers.base_provider import Provider, InvalidConfigurationException
 from providers.pulumi_project import PulumiProject, PulumiProjectEventParams
 from pulumi import automation as auto
 from typing import Any, Hashable, Dict, Union
@@ -127,8 +127,12 @@
     # Next, we validate to make sure the input to the runner was correct
 
     # Make sure we got an operation - it is the last string passed as an argument
-    if len(sys.argv) > 1:
-        operation = sys.argv[-1]
+    if len(args) == 1:
+        operation = args[0]
+    elif len(args) > 1:
+        RUNNER_LOG.error('Only one operation per invocation allowed')
+        usage()
+        sys.exit(2)
     else:
         RUNNER_LOG.error('No operation specified')
         usage()
         sys.exit(2)
@@ -168,8 +172,18 @@
 
     # For the other operations, we need the configuration files parsed, so we do the parsing upfront.
-    env_config = env_config_parser.read()
-    stack_config = read_stack_config(provider=provider, env_config=env_config)
+    try:
+        env_config = env_config_parser.read()
+    except FileNotFoundError as e:
+        msg = 'Environment configuration file not found. This file must exist at the path: %s'
+        RUNNER_LOG.error(msg, e.filename)
+        sys.exit(2)
+
+    if env_config.stack_name():
+        stack_config = read_stack_config(provider=provider, env_config=env_config)
+    else:
+        stack_config = None
+
     validate_with_verbosity = operation == 'validate' or debug_on
     try:
         validate(provider=provider, env_config=env_config, stack_config=stack_config,
@@ -280,7 +294,7 @@
 
 def validate(provider: Provider,
              env_config: env_config_parser.EnvConfig,
-             stack_config: stack_config_parser.PulumiStackConfig,
+             stack_config: Optional[stack_config_parser.PulumiStackConfig],
              verbose: Optional[bool] = False):
     """Validates that the runtime environment for MARA is correct. Will validate that external
     tools are present and configurations are correct. If validation fails, an exception will be raised.
@@ -312,6 +326,26 @@ def check_path(cmd: str, fail_message: str) -> bool:
     if not success:
         sys.exit(3)
 
+    # Next, we validate that the environment file has the required values
+    try:
+        provider.validate_env_config(env_config)
+    except InvalidConfigurationException as e:
+        if e.key == 'PULUMI_STACK':
+            msg = 'Environment file [%s] does not contain the required key PULUMI_STACK. This key specifies the ' \
+                  'name of the Pulumi Stack (https://www.pulumi.com/docs/intro/concepts/stack/) that is used ' \
+                  'globally across Pulumi projects in MARA.'
+        else:
+            msg = 'Environment file [%s] failed validation'
+
+        RUNNER_LOG.error(msg, env_config.config_path)
+        raise e
+    if verbose:
+        RUNNER_LOG.debug('environment file [%s] passed validation', env_config.config_path)
+
+    if not stack_config:
+        RUNNER_LOG.debug('stack configuration is not available')
+        return False
+
     if 'kubernetes:infra_type' in stack_config['config']:
         previous_provider = stack_config['config']['kubernetes:infra_type']
         if previous_provider.lower() != provider.infra_type().lower():
@@ -321,24 +355,15 @@ def check_path(cmd: str, fail_message: str) -> bool:
                              previous_provider, provider.infra_type())
             sys.exit(3)
 
-    # Next, we validate that the environment file has the required values
-    try:
-        provider.validate_env_config(env_config)
-    except Exception as e:
-        RUNNER_LOG.error('environment file [%s] failed validation', env_config.config_path)
-        raise e
-    if verbose:
-        RUNNER_LOG.debug('environment file [%s] passed validation', env_config.config_path)
-
     try:
         provider.validate_stack_config(stack_config, env_config)
     except Exception as e:
-        RUNNER_LOG.error('stack configuration file [%s] at path failed validation', stack_config.config_path)
+        RUNNER_LOG.error('Stack configuration file [%s] failed validation', stack_config.config_path)
         raise e
     if verbose:
-        RUNNER_LOG.debug('stack configuration file [%s] passed validation', stack_config.config_path)
+        RUNNER_LOG.debug('Stack configuration file [%s] passed validation', stack_config.config_path)
 
-    RUNNER_LOG.debug('all configuration is OK')
+    RUNNER_LOG.debug('All configuration is OK')
 
 
 def init_secrets(env_config: env_config_parser.EnvConfig,
diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py
index 2931b2b..24087bf 100644
--- a/pulumi/python/automation/providers/aws.py
+++ b/pulumi/python/automation/providers/aws.py
@@ -3,6 +3,7 @@
 """
 
 import json
+import logging
 import os
 import sys
 
@@ -13,7 +14,10 @@
 from .pulumi_project import PulumiProjectEventParams
 
 SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
-
+RUNNER_LOG = logging.getLogger('runner')
+AUTH_ERR_MSG = '''Unable to authenticate to AWS with provided credentials. Are the settings in your ~/.aws/credentials
+correct? Error: %s
+'''

 class AwsProviderException(Exception):
     pass
@@ -106,6 +110,11 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list
 
         aws_cli = AwsCli(region=aws_region, profile=aws_profile)
 
+        _, err = external_process.run(cmd=aws_cli.validate_credentials_cmd(), suppress_error=True)
+        if err:
+            RUNNER_LOG.error(AUTH_ERR_MSG, err.lstrip())
+            sys.exit(3)
+
         # AWS availability zones
         az_data, _ = external_process.run(aws_cli.list_azs_cmd())
         zones = []
@@ -124,7 +133,7 @@ def validate_selected_azs(selected: List[str]) -> bool:
         while len(selected_azs) == 0 or not validate_selected_azs(selected_azs):
             default_azs = ', '.join(zones)
             azs = input(
-                f'AWS availability zones to use with VPC [{default_azs} (separate with commas)]: ') or default_azs
+                f'AWS availability zones to use with VPC [{default_azs}] (separate with commas): ') or default_azs
             selected_azs = [x.strip() for x in azs.split(',')]
 
         config['vpc:azs'] = list(selected_azs)
@@ -182,7 +191,7 @@ def validate_stack_config(self,
         aws_cli = AwsCli(region=config['aws:region'], profile=config['aws:profile'])
         _, err = external_process.run(cmd=aws_cli.validate_credentials_cmd(), suppress_error=True)
         if err:
-            print(f'AWS authentication error: {err}', file=sys.stderr)
+            RUNNER_LOG.error(AUTH_ERR_MSG, err.lstrip())
             sys.exit(3)
 
     @staticmethod
diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py
index ab29cb7..4f502aa 100644
--- a/pulumi/python/automation/providers/base_provider.py
+++ b/pulumi/python/automation/providers/base_provider.py
@@ -6,7 +6,7 @@
 import os
 import pathlib
 import sys
-from typing import List, Mapping, Iterable, TextIO, Union, Dict, Any, Hashable
+from typing import List, Mapping, Iterable, TextIO, Union, Dict, Any, Hashable, Optional
 
 from .pulumi_project import PulumiProject, SecretConfigKey
 
@@ -15,7 +15,11 @@
 class InvalidConfigurationException(Exception):
-    pass
+    key: Optional[str]
+
+    def __init__(self, msg: str, key: Optional[str] = None) -> None:
+        super().__init__(msg)
+        self.key = key
 
 
 class Provider:
@@ -40,7 +44,7 @@ def validate_env_config_required_keys(required_keys: List[str], config: Mapping[
 
         for key in required_keys:
             if key not in config.keys():
-                raise InvalidConfigurationException(f'Required configuration key [{key}] not found')
+                raise InvalidConfigurationException(msg=f'Required configuration key [{key}] not found', key=key)

From 5ef8665e174358ddef8fe8442a25243a69da96b3 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Fri, 22 Jul 2022 11:50:25 -0700
Subject: [PATCH 45/62] fix: fixes object has no attribute '__debug_logger_func' error

Fixes #166

There is an issue where methods named with two leading underscores are not
passed correctly to Pulumi's async handlers. In this change, we rename the
method and add a check for the method's existence.
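
For illustration, the underlying Python behavior here is name mangling: inside
a class body, attribute names with two leading underscores are rewritten to
include the class name, so lookups by the literal name fail. The snippet below
is an illustrative sketch, not code from this change:

    class Provider:
        def __handler(self):  # stored on the class as _Provider__handler
            pass

    p = Provider()
    assert hasattr(p, '_Provider__handler')  # the mangled name exists
    assert not hasattr(p, '__handler')       # literal-name lookup fails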
--- .../ingress_controller_image_base_provider.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py b/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py index 983f6d9..8609e4a 100644 --- a/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py +++ b/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py @@ -1,5 +1,5 @@ import os -from typing import Optional, Dict, List, Any +from typing import Optional, Dict, List, Any, Callable import pulumi from pulumi import Resource @@ -21,8 +21,8 @@ def __init__(self, if debug_logger_func: self.debug_logger = debug_logger_func - else: - self.debug_logger = self.__debug_logger_func + elif self._debug_logger_func: + self.debug_logger = self._debug_logger_func super().__init__() @@ -32,7 +32,7 @@ def delete(self, _id: str, _props: Any) -> None: pulumi.log.info(f'deleting image {image_id}') self._docker_delete_image(image_id) - def __debug_logger_func(self, msg): + def _debug_logger_func(self, msg): pulumi.log.debug(msg, self.resource) def _run_docker(self, cmd: str, suppress_error: bool = False) -> (str, str): From 24ee0c0993be09361e96695c0eeb0dd7bccc5926 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Fri, 22 Jul 2022 13:13:55 -0700 Subject: [PATCH 46/62] test: fix test runner to work after directory refactor --- bin/test.py | 73 +++++++++++++++++++++++------------------------------ 1 file changed, 32 insertions(+), 41 deletions(-) diff --git a/bin/test.py b/bin/test.py index 1c785dc..c529609 100755 --- a/bin/test.py +++ b/bin/test.py @@ -8,8 +8,9 @@ import collections from typing import List -IGNORE_DIRS = ['venv', 'kic-pulumi-utils'] +IGNORE_DIRS = ['.pyenv', 'venv', 'config', 'kic-pulumi-utils'] SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +TEST_FILE_PATTERN = 'test_*.py' TestsInDir = collections.namedtuple(typename='TestsInDir', field_names=['directory', 'loader']) RunDirectories = collections.namedtuple(typename='RunDirectories', field_names=['start_dir', 'top_level_dir']) @@ -17,33 +18,26 @@ test_dirs: List[TestsInDir] = [] -def find_testable_dirs(dir_name: pathlib.Path) -> pathlib.Path: - sublisting = os.listdir(dir_name) - dir_count = 0 - last_path = None +def find_testable_dirs(dir_name: pathlib.Path) -> List[pathlib.Path]: + def is_main_file(filename: str) -> bool: + return filename == '__main__.py' or filename == 'main.py' - for item in sublisting: - path = pathlib.Path(dir_name, item) + test_dirs = [] + contains_main_file = False - # We assume we are in the starting directory for test invocation if there is a - # __main__.py file present. - if path.is_file() and (path.name == '__main__.py'): - return dir_name - - # Otherwise, we are probably in a module directory and the starting directory is - # one level deeper. 
-    if dir_count == 1:
-        return last_path
-
-    return dir_name
+    return test_dirs
 
 
 def find_kic_util_path():
@@ -65,27 +59,24 @@ def find_kic_util_path():
     return TestsInDir(venv_start_dir, kic_util_loader)
 
 
+# We explicitly test the kic util package separately because it needs to live
+# under venv when tested. By default, we do not traverse into the venv directory.
 test_dirs.append(find_kic_util_path())
+pulumi_python_dir = os.path.join(SCRIPT_DIR, '..', 'pulumi', 'python')
 
-for item in os.listdir(SCRIPT_DIR):
-    if item in IGNORE_DIRS:
-        continue
-
-    directory = pathlib.Path(SCRIPT_DIR, item)
-    if not directory.is_dir():
+for item in os.listdir(pulumi_python_dir):
+    directory = pathlib.Path(pulumi_python_dir, item)
+    if not directory.is_dir() or item in IGNORE_DIRS:
         continue
 
-    run_dir = find_testable_dirs(directory)
-    if run_dir is None:
-        continue
-
-    start_dir = str(run_dir)
-
-    loader = unittest.defaultTestLoader.discover(
-        start_dir=start_dir,
-        top_level_dir=start_dir,
-        pattern='test_*.py')
-    test_dirs.append(TestsInDir(start_dir, loader))
+    for test_dir in find_testable_dirs(directory):
+        start_dir = str(os.path.realpath(test_dir))
+        loader = unittest.defaultTestLoader.discover(
+            start_dir=start_dir,
+            top_level_dir=start_dir,
+            pattern=TEST_FILE_PATTERN)
+        test_dirs.append(TestsInDir(start_dir, loader))
 
 successful = True

From 3a7ca2b18dd5d71a95192686818be3246f83a921 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Tue, 26 Jul 2022 15:40:48 -0700
Subject: [PATCH 47/62] fix: enable force delete on ECR so that MARA can remove it

---
 pulumi/python/infrastructure/aws/ecr/__main__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pulumi/python/infrastructure/aws/ecr/__main__.py b/pulumi/python/infrastructure/aws/ecr/__main__.py
index 86febaa..1c29377 100644
--- a/pulumi/python/infrastructure/aws/ecr/__main__.py
+++ b/pulumi/python/infrastructure/aws/ecr/__main__.py
@@ -8,6 +8,7 @@
 ecr_repo = ecr.Repository(name=f'ingress-controller-{stack_name}',
                           resource_name=f'nginx-ingress-repository-{stack_name}',
                           image_tag_mutability="MUTABLE",
+                          force_delete=True,
                           tags={"Project": project_name, "Stack": stack_name})
 
 pulumi.export('repository_url', ecr_repo.repository_url)

From 4f5dacd5d0749ec86450428537c823be9d78d580 Mon Sep 17 00:00:00 2001
From: Elijah Zupancic
Date: Tue, 26 Jul 2022 15:42:11 -0700
Subject: [PATCH 48/62] fix: sensible message on refresh run without prev deployment

---
 pulumi/python/automation/main.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
index 8e184b6..5788096 100755
--- a/pulumi/python/automation/main.py
+++ b/pulumi/python/automation/main.py
@@ -214,8 +214,8 @@ def main():
     try:
         pulumi_cmd(provider=provider, env_config=env_config)
     except Exception as e:
-        logging.error('Error running Pulumi operation
with provider [%s] for stack [%s]', - provider_name, env_config.stack_name()) + logging.error('Error running Pulumi operation [%s] with provider [%s] for stack [%s]', + operation, provider_name, env_config.stack_name()) raise e @@ -432,8 +432,16 @@ def refresh(provider: Provider, stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack.refresh_config() - stack.refresh(color=env_config.pulumi_color_settings(), - on_output=write_pulumi_output) + try: + stack.refresh(color=env_config.pulumi_color_settings(), + on_output=write_pulumi_output) + except auto.CommandError as e: + msg = str(e).strip() + if msg.endswith('no previous deployment'): + logging.warning("Cannot refresh project that has no previous deployment for stack [%s]", + env_config.stack_name()) + else: + raise e def up(provider: Provider, From 03de8c8edcb099cdba43614784f12c59517bfe4d Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Tue, 26 Jul 2022 15:42:58 -0700 Subject: [PATCH 49/62] fix: change import for better compatibility with test runner --- .../test_ingress_controller_image_builder_provider.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py b/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py index 89b7f93..288b15e 100644 --- a/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py +++ b/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py @@ -1,6 +1,6 @@ import os import unittest -import ingress_controller_image_builder_provider as image_builder +from ingress_controller_image_builder_provider import IngressControllerImageBuilderProvider from kic_util.docker_image_name import DockerImageName @@ -8,7 +8,7 @@ class TestIngressControllerImageBuilderProvider(unittest.TestCase): def setUp(self) -> None: super().setUp() - self.provider = image_builder.IngressControllerImageBuilderProvider() + self.provider = IngressControllerImageBuilderProvider() def assertStrEqual(self, first, second, msg=None): self.assertEqual(first=str(first), second=str(second), msg=msg) From 78468dbd16a6d290100b9b7ef63bf5aea846ad18 Mon Sep 17 00:00:00 2001 From: Elijah Zupancic Date: Tue, 26 Jul 2022 15:43:41 -0700 Subject: [PATCH 50/62] chore: upgrade pulumi deps and kubectl versions --- bin/setup_venv.sh | 2 +- pulumi/python/Pipfile | 11 +-- pulumi/python/Pipfile.lock | 129 ++++++++++++++++----------------- pulumi/python/requirements.txt | 10 +-- 4 files changed, 75 insertions(+), 77 deletions(-) diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index be6e5dc..845f70d 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -267,7 +267,7 @@ fi # if [ ! 
-x "${VIRTUAL_ENV}/bin/kubectl" ]; then echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.23.6" + KUBECTL_VERSION="v1.24.3" ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} diff --git a/pulumi/python/Pipfile b/pulumi/python/Pipfile index de3b95c..482308e 100644 --- a/pulumi/python/Pipfile +++ b/pulumi/python/Pipfile @@ -4,15 +4,15 @@ verify_ssl = true name = "pypi" [packages] -awscli = "~=1.22.101" +awscli = "~=1.25.35" grpcio = "==1.43.0" fart = "~=0.1.5" lolcat = "~=1.4" passlib = "~=1.7.4" -pulumi-aws = ">=4.37.5" +pulumi-aws = ">=4.39.0" pulumi-docker = "==3.1.0" -pulumi-eks = "==0.39.0" -pulumi-kubernetes = "==3.19.1" +pulumi-eks = ">=0.41.2" +pulumi-kubernetes = "==3.20.1" pycryptodome = "~=3.14.0" requests = "~=2.27.1" setuptools-git-versioning = "==1.9.2" @@ -20,8 +20,9 @@ yamlreader = "==3.0.4" pulumi-digitalocean = "==4.12.0" pulumi-linode = "==3.7.1" linode-cli = "~=5.17.2" -pulumi = "~=3.32.0" +pulumi = "~=3.36.0" PyYAML = "~=5.4.1" +nodeenv = "~=1.6.0" [dev-packages] wheel = "~=0.37.1" diff --git a/pulumi/python/Pipfile.lock b/pulumi/python/Pipfile.lock index ab6af08..f5f9915 100644 --- a/pulumi/python/Pipfile.lock +++ b/pulumi/python/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "26ad2e064332a5855c06569e11375440a111043957d9186d9098a3e1a0122ae4" + "sha256": "177455c15d31187879995d736c67fd353a973ace31fd79906ad499f668a09900" }, "pipfile-spec": 6, "requires": { @@ -33,27 +33,27 @@ }, "awscli": { "hashes": [ - "sha256:3a7d9260ecb44e677f04640fd9959fb4310189e39ef0a42fbb652888843890a3", - "sha256:54772140fa9fe72c36f1214cd8f2a210af420940983d8f663f5cdf4b103b7e58" + "sha256:1b3adbc9cfb9aad7d0f6abc4cb0a5b95eb640afb77486885d3c4ff0cbc28f494", + "sha256:8883c357165a1e1866636c19a264876e9a3938af4f25425d587255698162535f" ], "index": "pypi", - "version": "==1.22.101" + "version": "==1.25.35" }, "botocore": { "hashes": [ - "sha256:663d8f02b98641846eb959c54c840cc33264d5f2dee5b8fc09ee8adbef0f8dcf", - "sha256:89a203bba3c8f2299287e48a9e112e2dbe478cf67eaac26716f0e7f176446146" + "sha256:9949d61959476b5a34408881bdb98f54b0642238ffb217c5260124ec58fb0c72", + "sha256:d2e708dd766b21c8e20a57ce1a90e98d324f871f81215efbc2dddaa42d13c551" ], - "markers": "python_version >= '3.6'", - "version": "==1.24.46" + "markers": "python_version >= '3.7'", + "version": "==1.27.35" }, "certifi": { "hashes": [ - "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7", - "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a" + "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d", + "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412" ], "markers": "python_version >= '3.6'", - "version": "==2022.5.18.1" + "version": "==2022.6.15" }, "charset-normalizer": { "hashes": [ @@ -65,11 +65,11 @@ }, "colorama": { "hashes": [ - "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff", - "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1" + "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", + "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.4.3" + "version": "==0.4.4" }, 
"dill": { "hashes": [ @@ -81,12 +81,11 @@ }, "docutils": { "hashes": [ - "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0", - "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827", - "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99" + "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", + "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.15.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==0.16" }, "fart": { "hashes": [ @@ -156,11 +155,11 @@ }, "jmespath": { "hashes": [ - "sha256:a490e280edd1f57d6de88636992d05b71e97d69a26a19f058ecf7d304474bf5e", - "sha256:e8dcd576ed616f14ec02eed0005c85973b5890083313860136657e24784e4c04" + "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", + "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe" ], "markers": "python_version >= '3.7'", - "version": "==1.0.0" + "version": "==1.0.1" }, "linode-cli": { "hashes": [ @@ -177,6 +176,14 @@ "index": "pypi", "version": "==1.4" }, + "nodeenv": { + "hashes": [ + "sha256:3ef13ff90291ba2a4a7a4ff9a979b63ffdd00a464dbe04acf0ea6471517a4c2b", + "sha256:621e6b7076565ddcacd2db0294c0381e01fd28945ab36bcf00f41c5daf63bef7" + ], + "index": "pypi", + "version": "==1.6.0" + }, "packaging": { "hashes": [ "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb", @@ -203,47 +210,37 @@ }, "protobuf": { "hashes": [ - "sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf", - "sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f", - "sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f", - "sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7", - "sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996", - "sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067", - "sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c", - "sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7", - "sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9", - "sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c", - "sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739", - "sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91", - "sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c", - "sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153", - "sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9", - "sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388", - "sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e", - "sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab", - "sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde", - "sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531", - "sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8", - "sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7", - "sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20", - "sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3" + "sha256:174bc835cc639c82164bbce4e28e2af5aa7821285d7fde3162afbe5e226a5a73", + 
"sha256:382c01e2ce14dcc3b4d25b8839f2139cc09c8a4006ad678579dc4080f6be1b29", + "sha256:5330df7650785c7ffdd1199c04933668d5e2dfefb62250e2b03ec1c1d20e7c2e", + "sha256:64fd63629f8952d58a41150b242f1c1c30c5062c9b0de8e420c6d3b360ec5d89", + "sha256:75aaa6d76a76a6f41f02645f6ebd255d738e9bb14c4d9d8269c676e65d0e0c7c", + "sha256:7dfc160de830b96b2c92c10d8f60e582e92252701bf6640ae75dfdecf6fdeb7a", + "sha256:8a2b4976872b71ea56cd3d55d320751d36a53f10220cc6075517806076cf4829", + "sha256:9130759e719bee1e6d05ca6a3037f7eff66d7a7ff6ba25871917dc40e8f3fbb6", + "sha256:9f510e743462899b1e296ac19bbaf4212d3106cdc51260ecde59ee6063f743f9", + "sha256:cebfd1fb899180c0523955d5cae0e764210961b12dfc39fd96af8fc81fe71ac7", + "sha256:d367e7385cd808ad33b580155bf9694881dd711c4271fe7b6f4e5270a01980b7", + "sha256:dec4cb439e25058518e2cd469c5eb0f4e634b113eb0b1343b55ba9303ab1ad38", + "sha256:df5a126706bd1d5072a6a0f6895c633ede67ea6cd679b4268eecce6b438bbe69", + "sha256:fa22e2413f6fd98ec1b388686aadef5420ea8205e37b35cad825adea7c019625" ], "markers": "python_version >= '3.7'", - "version": "==3.20.1" + "version": "==4.21.3" }, "pulumi": { "hashes": [ - "sha256:570654c82f8dbf8584447218db4de537bc417aa181ab8f888fe523b2b5f6bc7a" + "sha256:86acb1e0921619d49123d1a4ce43bfa7dc2dae9723266e21c24a11632f3231d9" ], "index": "pypi", - "version": "==3.32.1" + "version": "==3.36.0" }, "pulumi-aws": { "hashes": [ - "sha256:06f63aaa3bc36f9ef6a563fe397d8a13883757aca7f2d4cd433fbc0835bd08aa" + "sha256:e82655bd961447167e1bb2839032e93ba73c37cf2f048ed2447de67dc73e9fd5" ], "index": "pypi", - "version": "==5.4.0" + "version": "==5.10.0" }, "pulumi-digitalocean": { "hashes": [ @@ -261,17 +258,17 @@ }, "pulumi-eks": { "hashes": [ - "sha256:9ec4a19976b76a4f141e9b469be7ea65940ac546cec192e3c96435d3038532a0" + "sha256:d8f7dafa71eaaab4d8f115691c80fe63df5ac5df07df643c3977f2dc1e9b0cf4" ], "index": "pypi", - "version": "==0.39.0" + "version": "==0.41.2" }, "pulumi-kubernetes": { "hashes": [ - "sha256:c1c6b0c75716fa421282b85cf1a4fbc93a1b895558c580ad91479bcc353445b9" + "sha256:4fe4fcc19be7f3834e06e2baecafaa2bc3fcd7d3af192d7d7d67986c6699096a" ], "index": "pypi", - "version": "==3.19.1" + "version": "==3.20.1" }, "pulumi-linode": { "hashes": [ @@ -409,11 +406,11 @@ }, "s3transfer": { "hashes": [ - "sha256:7a6f4c4d1fdb9a2b640244008e142cbc2cd3ae34b386584ef044dd0f27101971", - "sha256:95c58c194ce657a5f4fb0b9e60a84968c808888aed628cd98ab8771fe1db98ed" + "sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd", + "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947" ], - "markers": "python_version >= '3.6'", - "version": "==0.5.2" + "markers": "python_version >= '3.7'", + "version": "==0.6.0" }, "semver": { "hashes": [ @@ -425,11 +422,11 @@ }, "setuptools": { "hashes": [ - "sha256:68e45d17c9281ba25dc0104eadd2647172b3472d9e01f911efa57965e8d51a36", - "sha256:a43bdedf853c670e5fed28e5623403bad2f73cf02f9a2774e91def6bda8265a7" + "sha256:0d33c374d41c7863419fc8f6c10bfe25b7b498aa34164d135c622e52580c6b16", + "sha256:c04b44a57a6265fe34a4a444e965884716d34bae963119a76353434d6f18e450" ], "markers": "python_version >= '3.7'", - "version": "==62.3.2" + "version": "==63.2.0" }, "setuptools-git-versioning": { "hashes": [ @@ -465,11 +462,11 @@ }, "urllib3": { "hashes": [ - "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14", - "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e" + "sha256:8298d6d56d39be0e3bc13c1c97d133f9b45d797169a0e11cdd0e0489d786f7ec", + "sha256:879ba4d1e89654d9769ce13121e0f94310ea32e8d2f8cf587b77c08bbcdb30d6" 
], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==1.26.9" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_version < '4'", + "version": "==1.26.10" }, "yamlreader": { "hashes": [ diff --git a/pulumi/python/requirements.txt b/pulumi/python/requirements.txt index d9202f1..0ad7699 100644 --- a/pulumi/python/requirements.txt +++ b/pulumi/python/requirements.txt @@ -1,13 +1,13 @@ -awscli~=1.22.100 +awscli~=1.25.35 grpcio==1.43.0 fart~=0.1.5 lolcat~=1.4 nodeenv~=1.6.0 passlib~=1.7.4 -pulumi-aws>=4.37.5 +pulumi-aws>=4.39.0 pulumi-docker==3.1.0 -pulumi-eks>=0.37.1 -pulumi-kubernetes==3.19.1 +pulumi-eks>=0.41.2 +pulumi-kubernetes==3.20.1 pycryptodome~=3.14.0 PyYAML~=5.4.1 requests~=2.27.1 @@ -18,4 +18,4 @@ yamlreader==3.0.4 pulumi-digitalocean==4.12.0 pulumi-linode==3.7.1 linode-cli~=5.17.2 -pulumi~=3.32.0 \ No newline at end of file +pulumi~=3.36.0 \ No newline at end of file From beeb2f4f4d57966bdaf4f69c1a4bae637b7210d8 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Tue, 2 Aug 2022 13:16:59 -0400 Subject: [PATCH 51/62] fix: add in updates to automation-api branch for version bumps (#172) * chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart versoin * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs --- config/pulumi/Pulumi.stackname.yaml.example | 26 +- pulumi/python/kubernetes/certmgr/__main__.py | 26 +- .../certmgr/manifests/cert-manager.crds.yaml | 285 ++- pulumi/python/kubernetes/logagent/__main__.py | 2 +- pulumi/python/kubernetes/logstore/__main__.py | 4 +- .../nginx/ingress-controller/__main__.py | 2 +- .../otel-objects/otel-collector.yaml | 17 +- .../otel-operator/opentelemetry-operator.yaml | 1614 ++++++++++++----- .../python/kubernetes/prometheus/__main__.py | 4 +- 9 files changed, 1379 insertions(+), 601 deletions(-) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index b4718d3..1276d43 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -127,7 +127,7 @@ config: # Chart name for the helm chart for kic kic-helm:chart_name: nginx-ingress # Chart version for the helm chart for kic - kic-helm:chart_version: 0.13.2 + kic-helm:chart_version: 0.14.0 # Name of the repo to pull the kic chart from kic-helm:helm_repo_name: nginx-stable # URL of the chart repo to pull kic from @@ -151,12 +151,12 @@ config: # https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/ # # The following are all valid image names: - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2 - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2-ot - # kic:image_name: docker.io/nginx/nginx-ingress:2.2.2 - # kic:image_name: nginx/nginx-ingress:2.2.2 - # kic:image_name: nginx/nginx-ingress:2.2.2-alpine - kic:image_name: nginx/nginx-ingress:2.2.2 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0-ot + # kic:image_name: docker.io/nginx/nginx-ingress:2.3.0 + # kic:image_name: 
nginx/nginx-ingress:2.3.0 + # kic:image_name: nginx/nginx-ingress:2.3.0-alpine + kic:image_name: nginx/nginx-ingress:2.3.0 ############################################################################ @@ -230,7 +230,7 @@ config: # Logagent Configuration logagent:chart_name: filebeat # Chart name for the helm chart for the logagent - logagent:chart_version: 7.16.3 + logagent:chart_version: 7.17.3 # Chart version for the helm chart for the logagent logagent:helm_repo_name: elastic # Name of the repo to pull the logagent from @@ -246,7 +246,7 @@ config: # Logstore Configuration logstore:chart_name: elasticsearch # Chart name for the helm chart for the logstore - logstore:chart_version: 17.6.2 + logstore:chart_version: 19.1.4 # Chart version for the helm chart for the logstore logstore:helm_repo_name: bitnami # Name of the repo to pull the logstore from @@ -277,7 +277,7 @@ config: # Cert Manager Configuration certmgr:chart_name: cert-manager # Chart hame for the helm chart for certmanager - certmgr:chart_version: v1.6.1 + certmgr:chart_version: v1.9.1 # Chart version for the helm chart for certmanager certmgr:certmgr_helm_repo_name: jetstack # Name of the repo to pull the certmanager chart from @@ -293,7 +293,7 @@ config: # Prometheus Configuration prometheus:chart_name: kube-prometheus-stack # Chart name for the helm chart for prometheus - prometheus:chart_version: 30.0.1 + prometheus:chart_version: 39.2.1 # Chart version for the helm chart for prometheus prometheus:helm_repo_name: prometheus-community # Name of the repo to pull the prometheus chart from @@ -301,7 +301,7 @@ config: # URL of the chart repo prometheus:statsd_chart_name: prometheus-statsd-exporter # Name of the statsd chart (uses the same repo as the prom chart) - prometheus.statsd_chart_version: 0.4.2 + prometheus.statsd_chart_version: 0.5.0 # Version of the statsd chart (uses the same repo as the prom chart) prometheus:helm_timeout: 300 # Timeout value for helm operations in seconds @@ -338,7 +338,7 @@ config: # Linode Kubernetes Engine ############################################################################ # This is the Kubernetes version to install using Linode K8s. - linode:k8s_version: 1.22 + linode:k8s_version: 1.23 # This is the default instance type used Linode Kubernetes linode:instance_type: g6-standard-8 # The desired node count of the Linode K8s cluster. 
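
The certmgr change that follows drops the standalone ConfigFile-based CRD install and lets the Helm chart manage its own CRDs through the chart's `installCRDs` value, so the applied CRD schema can no longer drift from the deployed controller version. A minimal sketch of the resulting shape, assuming pulumi_kubernetes's helm.v3 Release API and the jetstack chart repository URL (the provider and namespace wiring is illustrative, not taken from this hunk):

    import pulumi
    import pulumi_kubernetes as k8s
    from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs

    # Illustrative provider/namespace setup; the real project builds its
    # provider from the kubeconfig exported by the infrastructure stack.
    k8s_provider = k8s.Provider('k8s')
    ns = k8s.core.v1.Namespace('cert-manager',
                               metadata={'name': 'cert-manager'},
                               opts=pulumi.ResourceOptions(provider=k8s_provider))

    certmgr_release = Release(
        'certmgr',
        args=ReleaseArgs(
            chart='cert-manager',
            # Assumed URL for the 'jetstack' Helm repo named in the config above.
            repository_opts=RepositoryOptsArgs(repo='https://charts.jetstack.io'),
            version='v1.9.1',
            namespace=ns.metadata.name,
            # The chart installs its own CRDs, replacing the ConfigFile resource
            # that previously applied manifests/cert-manager.crds.yaml.
            values={'installCRDs': 'True'},
        ),
        opts=pulumi.ResourceOptions(provider=k8s_provider, depends_on=ns))

Note that the bundled cert-manager.crds.yaml manifest is still refreshed to v1.9.1 in this patch, but the Release no longer applies it; the chart becomes the single source of truth for both the controller and its CRDs.
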
diff --git a/pulumi/python/kubernetes/certmgr/__main__.py b/pulumi/python/kubernetes/certmgr/__main__.py index 6ed1d41..49507c8 100644 --- a/pulumi/python/kubernetes/certmgr/__main__.py +++ b/pulumi/python/kubernetes/certmgr/__main__.py @@ -8,12 +8,6 @@ from kic_util import pulumi_config -def crd_deployment_manifest(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - crd_deployment_path = os.path.join(script_dir, 'manifests', 'cert-manager.crds.yaml') - return crd_deployment_path - - def project_name_from_project_dir(dirname: str): script_dir = os.path.dirname(os.path.abspath(__file__)) project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) @@ -40,24 +34,13 @@ def add_namespace(obj): metadata={'name': 'cert-manager'}, opts=pulumi.ResourceOptions(provider=k8s_provider)) -# Config Manifests -crd_deployment = crd_deployment_manifest() - -crd_dep = ConfigFile( - 'crd-dep', - file=crd_deployment, - transformations=[add_namespace], # Need to review w/ operator - opts=pulumi.ResourceOptions(depends_on=[ns]) -) - - config = pulumi.Config('certmgr') chart_name = config.get('chart_name') if not chart_name: chart_name = 'cert-manager' chart_version = config.get('chart_version') if not chart_version: - chart_version = 'v1.7.0' + chart_version = 'v1.9.1' helm_repo_name = config.get('certmgr_helm_repo_name') if not helm_repo_name: helm_repo_name = 'jetstack' @@ -81,6 +64,9 @@ def add_namespace(obj): ), version=chart_version, namespace=ns.metadata.name, + values={ + "installCRDs": "True" + }, # Configure the timeout value. timeout=helm_timeout, # By default Release resource will wait till all created resources @@ -96,7 +82,7 @@ def add_namespace(obj): # Force update if required force_update=True) -certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=crd_dep)) +certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=ns)) status = certmgr_release.status -pulumi.export("certmgr_status", status) +pulumi.export("certmgr_status", status) \ No newline at end of file diff --git a/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml b/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml index 1df1e06..5b3f062 100644 --- a/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml +++ b/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 The cert-manager Authors. +# Copyright 2021 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,20 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. ---- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificaterequests.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -134,7 +131,7 @@ spec: description: Usages is the set of x509 usages that are requested for the certificate. 
If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' type: string enum: - signing @@ -205,6 +202,9 @@ spec: type: description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. type: string @@ -212,19 +212,17 @@ spec: served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificates.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -284,7 +282,7 @@ spec: - secretName properties: additionalOutputFormats: - description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option. + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. type: array items: description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. @@ -388,6 +386,9 @@ spec: name: description: 'Name of the resource being referred to. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string privateKey: description: Options to control private keys used for the Certificate. type: object @@ -408,6 +409,9 @@ spec: rotationPolicy: description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. type: string + enum: + - Never + - Always size: description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. type: integer @@ -486,7 +490,7 @@ spec: description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' type: string enum: - signing @@ -550,6 +554,12 @@ spec: type: description: Type of the condition, known values are (`Ready`, `Issuing`). 
type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. type: string @@ -575,19 +585,17 @@ spec: served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: challenges.acme.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: acme.cert-manager.io names: @@ -908,8 +916,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -920,7 +940,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. 
If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -956,10 +976,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. 
If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -1187,7 +1246,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1217,7 +1276,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1268,7 +1327,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1298,7 +1357,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1356,7 +1415,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1386,7 +1445,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1437,7 +1496,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1467,7 +1526,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
type: array items: type: string @@ -1573,19 +1632,17 @@ spec: subresources: status: {} --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clusterissuers.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -1941,8 +1998,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -1953,7 +2022,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -1989,10 +2058,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. 
+ description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -2220,7 +2328,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2250,7 +2358,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2301,7 +2409,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2331,7 +2439,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2389,7 +2497,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2419,7 +2527,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2470,7 +2578,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2500,7 +2608,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2780,22 +2888,23 @@ spec: type: description: Type of the condition, known values are (`Ready`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: issuers.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -3151,8 +3260,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. 
If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -3163,7 +3284,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -3199,10 +3320,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. 
This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -3430,7 +3590,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
type: object properties: matchExpressions: @@ -3460,7 +3620,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3511,7 +3671,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3541,7 +3701,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3599,7 +3759,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3629,7 +3789,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3680,7 +3840,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3710,7 +3870,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3990,22 +4150,23 @@ spec: type: description: Type of the condition, known values are (`Ready`). 
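The description edits in these hunks are cosmetic (a stale beta-feature-gate sentence is dropped), but the union semantics they describe are easy to misread: the term applies to the namespaces listed explicitly plus those matched by the selector. An illustrative sketch with hypothetical names:

apiVersion: v1
kind: Pod
metadata:
  name: affinity-demo              # hypothetical
spec:
  containers:
  - name: app
    image: nginx:stable
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            app: cert-manager
        namespaces:                # explicit list ...
        - team-a
        namespaceSelector:         # ... unioned with namespaces matched here
          matchLabels:
            env: prod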
type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: orders.acme.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: acme.cert-manager.io names: diff --git a/pulumi/python/kubernetes/logagent/__main__.py b/pulumi/python/kubernetes/logagent/__main__.py index fdd8576..4b02231 100644 --- a/pulumi/python/kubernetes/logagent/__main__.py +++ b/pulumi/python/kubernetes/logagent/__main__.py @@ -13,7 +13,7 @@ chart_name = 'filebeat' chart_version = config.get('chart_version') if not chart_version: - chart_version = '7.16.3' + chart_version = '7.17.3' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'elastic' diff --git a/pulumi/python/kubernetes/logstore/__main__.py b/pulumi/python/kubernetes/logstore/__main__.py index 874c0b2..a736500 100644 --- a/pulumi/python/kubernetes/logstore/__main__.py +++ b/pulumi/python/kubernetes/logstore/__main__.py @@ -13,7 +13,7 @@ chart_name = 'elasticsearch' chart_version = config.get('chart_version') if not chart_version: - chart_version = '17.6.2' + chart_version = '19.1.4' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'bitnami' @@ -131,7 +131,7 @@ def project_name_from_project_dir(dirname: str): elastic_rname = elastic_release.status.name -elastic_fqdn = Output.concat(elastic_rname, "-coordinating-only.logstore.svc.cluster.local") +elastic_fqdn = Output.concat(elastic_rname, "-elasticsearch.logstore.svc.cluster.local") kibana_fqdn = Output.concat(elastic_rname, "-kibana.logstore.svc.cluster.local") pulumi.export('elastic_hostname', pulumi.Output.unsecret(elastic_fqdn)) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 03d9608..0cb4ba9 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -17,7 +17,7 @@ chart_name = 'nginx-ingress' chart_version = config.get('chart_version') if not chart_version: - chart_version = '0.13.2' + chart_version = '0.14.0' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'nginx-stable' diff --git a/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml b/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml index a706929..fed12a4 100644 --- a/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml +++ b/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml @@ -12,20 +12,17 @@ spec: endpoint: 0.0.0.0:9978 http: endpoint: 0.0.0.0:9979 - # Collect Prometheus Metrics - exporters: - otlp: - endpoint: https://ingest.lightstep.com:443 - headers: {"lightstep-service-name":"my-service","lightstep-access-token":"XXXX"} + processors: batch: + + exporters: + logging: + logLevel: + service: pipelines: traces: receivers: [otlp] processors: [batch] - exporters: [otlp] - metrics: - receivers: [otlp] - processors: [batch] - exporters: [otlp] + exporters: [logging] diff --git 
a/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml b/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml index b48e156..7459e06 100644 --- a/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml +++ b/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Namespace metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-system --- @@ -9,8 +10,10 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.0-beta.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator name: instrumentations.opentelemetry.io spec: group: opentelemetry.io @@ -28,6 +31,15 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .spec.exporter.endpoint + name: Endpoint + type: string + - jsonPath: .spec.sampler.type + name: Sampler + type: string + - jsonPath: .spec.sampler.argument + name: Sampler Arg + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -49,6 +61,115 @@ spec: description: InstrumentationSpec defines the desired state of OpenTelemetry SDK and instrumentation. properties: + env: + description: 'Env defines common env vars. There are four layers for + env vars'' definitions and the precedence order is: `original container + env vars` > `language specific env vars` > `common env vars` > `instrument + spec configs'' vars`. If the former var had been defined, then the + other vars would be ignored.' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array exporter: description: Exporter defines exporter configuration. properties: @@ -59,6 +180,120 @@ spec: java: description: Java defines configuration for java auto-instrumentation. properties: + env: + description: 'Env defines java specific env vars. There are four + layers for env vars'' definitions and the precedence order is: + `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with javaagent auto-instrumentation JAR. @@ -67,6 +302,120 @@ spec: nodejs: description: NodeJS defines configuration for nodejs auto-instrumentation. properties: + env: + description: 'Env defines nodejs specific env vars. There are + four layers for env vars'' definitions and the precedence order + is: `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with NodeJS SDK and auto-instrumentation. type: string @@ -90,6 +439,120 @@ spec: python: description: Python defines configuration for python auto-instrumentation. properties: + env: + description: 'Env defines python specific env vars. There are + four layers for env vars'' definitions and the precedence order + is: `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. 
Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with Python SDK and auto-instrumentation. 
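The env blocks added above exist at two layers, and the descriptions spell out the precedence: original container env vars override language-specific ones, which in turn override the common ones. A minimal Instrumentation sketch using both layers; the endpoint and values are hypothetical:

apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: demo-instrumentation
spec:
  exporter:
    endpoint: http://otel-collector.observability:4317   # hypothetical collector service
  env:                                 # common layer
  - name: OTEL_RESOURCE_ATTRIBUTES
    value: deployment.environment=dev
  python:
    env:                               # language-specific layer; overrides the common layer
    - name: OTEL_LOG_LEVEL
      value: debug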
type: string @@ -152,7 +615,9 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.0-beta.0 + controller-gen.kubebuilder.io/version: v0.8.0 + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetrycollectors.opentelemetry.io spec: group: opentelemetry.io @@ -364,6 +829,16 @@ spec: description: ImagePullPolicy indicates the pull policy to be used for retrieving the container image (Always, Never, IfNotPresent) type: string + maxReplicas: + description: MaxReplicas sets an upper bound to the autoscaling feature. + If MaxReplicas is set autoscaling is enabled. + format: int32 + type: integer + minReplicas: + description: MinReplicas sets a lower bound to the autoscaling feature. Set + this if you are using autoscaling. It must be at least 1 + format: int32 + type: integer mode: description: Mode represents how the collector should be deployed (deployment, daemonset, statefulset or sidecar) @@ -373,6 +848,13 @@ spec: - sidecar - statefulset type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to schedule OpenTelemetry Collector pods. + This is only relevant to daemonset, statefulset, and deployment + mode + type: object podAnnotations: additionalProperties: type: string @@ -393,7 +875,8 @@ spec: set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of - any volume." + any volume. Note that this field cannot be set when spec.os.name + is windows." format: int64 type: integer fsGroupChangePolicy: @@ -403,13 +886,15 @@ spec: support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". - If not specified, "Always" is used.' + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' type: string runAsGroup: description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -426,7 +911,8 @@ spec: Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: @@ -435,6 +921,7 @@ spec: SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that applies to @@ -455,7 +942,8 @@ spec: type: object seccompProfile: description: The seccomp options to use by the containers in this - pod. + pod. Note that this field cannot be set when spec.os.name is + windows.
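The new maxReplicas and minReplicas fields above drive the operator's autoscaling: per the descriptions, setting maxReplicas enables it, and the plain replicas field is then meant to be left unset. A sketch of a collector in deployment mode using autoscaling and the new nodeSelector; all values are illustrative:

apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: otel
spec:
  mode: deployment
  minReplicas: 1          # lower bound once autoscaling is on
  maxReplicas: 5          # presence of this field enables autoscaling
  nodeSelector:
    kubernetes.io/os: linux
  config: |
    receivers:
      otlp:
        protocols:
          grpc:
    exporters:
      logging:
    service:
      pipelines:
        traces:
          receivers: [otlp]
          exporters: [logging]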
properties: localhostProfile: description: localhostProfile indicates a profile defined @@ -477,7 +965,8 @@ spec: supplementalGroups: description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. + unspecified, no groups will be added to any container. Note + that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -485,7 +974,8 @@ spec: sysctls: description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. items: description: Sysctl defines a kernel parameter to be set properties: @@ -504,7 +994,8 @@ spec: description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA admission @@ -549,7 +1040,7 @@ spec: description: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 - and http://www.iana.org/assignments/service-names). Non-standard + and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. type: string name: @@ -600,7 +1091,7 @@ spec: x-kubernetes-list-type: atomic replicas: description: Replicas is the number of pod instances for the underlying - OpenTelemetry Collector + OpenTelemetry Collector. Set this if you are not using autoscaling format: int32 type: integer resources: @@ -638,12 +1129,14 @@ spec: can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when - the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set when spec.os.name + is windows. properties: add: description: Added capabilities @@ -661,23 +1154,27 @@ spec: privileged: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults - to false. + to false. Note that this field cannot be set when spec.os.name + is windows. type: boolean procMount: description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only root filesystem. - Default is false.
Note that this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -694,7 +1191,8 @@ spec: Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence. + takes precedence. Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -702,7 +1200,8 @@ spec: If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that applies to @@ -724,7 +1223,8 @@ spec: seccompProfile: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, - the container options override the pod options. + the container options override the pod options. Note that this + field cannot be set when spec.os.name is windows. properties: localhostProfile: description: localhostProfile indicates a profile defined @@ -747,7 +1247,8 @@ spec: description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA admission @@ -795,10 +1296,27 @@ spec: description: Image indicates the container image to use for the OpenTelemetry TargetAllocator. type: string + prometheusCR: + description: PrometheusCR defines the configuration for the retrieval + of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 + and podmonitor.monitoring.coreos.com/v1 ). All CR + instances which the ServiceAccount has access to will be retrieved. + This includes other namespaces. + properties: + enabled: + description: Enabled indicates whether to use PrometheusOperator + custom resources as targets or not. + type: boolean + type: object + serviceAccount: + description: ServiceAccount indicates the name of an existing + service account to use with this instance. + type: string type: object tolerations: description: Toleration to schedule OpenTelemetry Collector pods.
- This is only relevant to daemonsets, statefulsets and deployments + This is only relevant to daemonset, statefulset, and deployment + mode items: description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching @@ -884,17 +1402,17 @@ spec: type: string type: object spec: - description: 'Spec defines the desired characteristics of a + description: 'spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: accessModes: - description: 'AccessModes contains the desired access modes + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array dataSource: - description: 'This field can be used to specify either: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data @@ -920,26 +1438,27 @@ spec: - name type: object dataSourceRef: - description: 'Specifies the object from which to populate - the volume with data, if a non-empty volume is desired. - This may be any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim object. When - this field is specified, volume binding will only succeed - if the type of the specified object matches some installed - volume populator or dynamic provisioner. This field will - replace the functionality of the DataSource field and - as such if both fields are non-empty, they must have the - same value. For backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value automatically - if one of them is empty and the other is non-empty. There - are two important differences between DataSource and DataSourceRef: - * While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. * While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Alpha) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which + to populate the volume with data, if a non-empty volume + is desired. This may be any local object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding will + only succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they must + have the same value. For backwards compatibility, both + fields (DataSource and DataSourceRef) will be set to the + same value automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. 
* While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -958,8 +1477,12 @@ spec: - name type: object resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but must + still be higher than capacity recorded in the status field + of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -986,8 +1509,8 @@ spec: type: object type: object selector: - description: A label query over volumes to consider for - binding. + description: selector is a label query over volumes to consider + for binding. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1032,8 +1555,8 @@ spec: type: object type: object storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: description: volumeMode defines what type of volume is required @@ -1041,20 +1564,40 @@ spec: included in claim spec. type: string volumeName: - description: VolumeName is the binding reference to the + description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object status: - description: 'Status represents the current information/status + description: 'status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: accessModes: - description: 'AccessModes contains the actual access modes + description: 'accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: allocatedResources is the storage resource + within AllocatedResources tracks the capacity allocated + to a PVC. It may be larger than the actual capacity when + a volume expansion operation is requested. For storage + quota, the larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. 
This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object capacity: additionalProperties: anyOf: - type: integer - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying - volume. + description: capacity represents the actual resources of + the underlying volume. type: object conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. items: description: PersistentVolumeClaimCondition contains details about state of pvc properties: lastProbeTime: - description: Last time we probed the condition. + description: lastProbeTime is the time we probed the + condition. format: date-time type: string lastTransitionTime: - description: Last time the condition transitioned - from one status to another. + description: lastTransitionTime is the time the condition + transitioned from one status to another. format: date-time type: string message: - description: Human-readable message indicating details - about last transition. + description: message is the human-readable message + indicating details about last transition. type: string reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. + description: reason is unique; this should be a + short, machine-understandable string that gives + the reason for condition's last transition. If it + reports "ResizeStarted" that means the underlying + persistent volume is being resized. type: string status: type: string @@ -1105,7 +1649,14 @@ spec: type: object type: array phase: - description: Phase represents the current phase of PersistentVolumeClaim. + description: phase represents the current phase of PersistentVolumeClaim. + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. type: string type: object type: object @@ -1160,117 +1711,121 @@ spec: be accessed by any container in the pod. properties: awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + description: 'fsType is the filesystem type of the volume + that you want to mount.
Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' type: string partition: - description: 'The partition in the volume that you want - to mount. If omitted, the default is to mount by volume - name. Examples: For volume /dev/sda1, you specify the - partition as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property empty).' + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' format: int32 type: integer readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: boolean volumeID: - description: 'Unique ID of the persistent disk resource - in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string required: - volumeID type: object azureDisk: - description: AzureDisk represents an Azure Data Disk mount on + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. properties: cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' type: string diskName: - description: The Name of the data disk in the blob storage + description: diskName is the Name of the data disk in the + blob storage type: string diskURI: - description: The URI the data disk in the blob storage + description: diskURI is the URI of data disk in the blob + storage type: string fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean required: - diskName - diskURI type: object azureFile: - description: AzureFile represents an Azure File Service mount + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. properties: readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretName: - description: the name of secret that contains Azure Storage - Account Name and Key + description: secretName is the name of secret that contains + Azure Storage Account Name and Key type: string shareName: - description: Share Name + description: shareName is the azure share Name type: string required: - secretName - shareName type: object cephfs: - description: CephFS represents a Ceph FS mount on the host that + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime properties: monitors: - description: 'Required: Monitors is a collection of Ceph - monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array path: - description: 'Optional: Used as the mounted root, rather - than the full Ceph tree, default is /' + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' type: string readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: boolean secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: - https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1278,30 +1833,30 @@ spec: type: string type: object user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string required: - monitors type: object cinder: - description: 'Cinder represents a cinder volume attached and + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' properties: fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string readOnly: - description: 'Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts. + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: boolean secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1309,31 +1864,31 @@ spec: type: string type: object volumeID: - description: 'volume id used to identify the volume in cinder. + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string required: - volumeID type: object configMap: - description: ConfigMap represents a configMap that should populate + description: configMap represents a configMap that should populate this volume properties: defaultMode: - description: 'Optional: mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Defaults to 0644. Directories within - the path are not affected by this setting. This might - be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits - set.' + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer items: - description: If unspecified, each key-value pair in the - Data field of the referenced ConfigMap will be projected + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be @@ -1345,25 +1900,25 @@ spec: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to set permissions - on this file. 
Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. type: string required: - key @@ -1375,28 +1930,28 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its keys must - be defined + description: optional specify whether the ConfigMap or its + keys must be defined type: boolean type: object csi: - description: CSI (Container Storage Interface) represents ephemeral + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). properties: driver: - description: Driver is the name of the CSI driver that handles + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. type: string fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to - the associated CSI driver which will determine the default - filesystem to apply. + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. type: string nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, @@ -1410,13 +1965,13 @@ spec: type: string type: object readOnly: - description: Specifies a read-only configuration for the - volume. Defaults to false (read/write). + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). type: boolean volumeAttributes: additionalProperties: type: string - description: VolumeAttributes stores driver-specific properties + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. 
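As the csi description above notes, anything driver-specific travels in volumeAttributes. A usage sketch with the Secrets Store CSI driver, chosen only as an illustration; the SecretProviderClass name is hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: csi-demo                # hypothetical
spec:
  containers:
  - name: app
    image: nginx:stable
    volumeMounts:
    - name: app-secrets
      mountPath: /mnt/secrets
      readOnly: true
  volumes:
  - name: app-secrets
    csi:
      driver: secrets-store.csi.k8s.io
      readOnly: true
      volumeAttributes:
        secretProviderClass: my-provider   # hypothetical SecretProviderClass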
type: object @@ -1424,7 +1979,7 @@ spec: - driver type: object downwardAPI: - description: DownwardAPI represents downward API about the pod + description: downwardAPI represents downward API about the pod that should populate this volume properties: defaultMode: @@ -1511,50 +2066,47 @@ spec: type: array type: object emptyDir: - description: 'EmptyDir represents a temporary directory that + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' properties: medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string sizeLimit: anyOf: - type: integer - type: string - description: 'Total amount of local storage required for - this EmptyDir volume. The size limit is also applicable - for memory medium. The maximum usage on memory medium - EmptyDir would be the minimum value between the SizeLimit - specified here and the sum of memory limits of all containers - in a pod. The default is nil which means that the limit - is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object ephemeral: - description: "Ephemeral represents a volume that is handled + description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity - \ tracking are needed, c) the storage driver is specified - through a storage class, and d) the storage driver supports - dynamic volume provisioning through a PersistentVolumeClaim - (see EphemeralVolumeSource for more information on the - connection between this volume type and PersistentVolumeClaim). - \n Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. \n Use CSI for light-weight local ephemeral - volumes if the CSI driver is meant to be used that way - see - the documentation of the driver for more information. \n A - pod can use both types of ephemeral volumes and persistent - volumes at the same time. \n This is a beta feature and only - available when the GenericEphemeralVolume feature gate is - enabled." 
+ tracking are needed, c) the storage driver is specified through + a storage class, and d) the storage driver supports dynamic + volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource + for more information on the connection between this volume + type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that persist + for longer than the lifecycle of an individual pod. \n Use + CSI for light-weight local ephemeral volumes if the CSI driver + is meant to be used that way - see the documentation of the + driver for more information. \n A pod can use both types of + ephemeral volumes and persistent volumes at the same time." properties: volumeClaimTemplate: description: "Will be used to create a stand-alone PVC to @@ -1606,13 +2158,13 @@ spec: as in a PersistentVolumeClaim are also valid here. properties: accessModes: - description: 'AccessModes contains the desired access + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array dataSource: - description: 'This field can be used to specify + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support @@ -1642,14 +2194,14 @@ spec: - name type: object dataSourceRef: - description: 'Specifies the object from which to - populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + local object from a non-empty API group (non core + object) or a PersistentVolumeClaim object. When + this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For @@ -1659,13 +2211,13 @@ spec: other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as - well as PersistentVolumeClaim objects. * While - DataSource ignores disallowed values (dropping - them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Alpha) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef + preserves all values, and generates an error if + a disallowed value is specified. (Beta) Using + this field requires the AnyVolumeDataSource feature + gate to be enabled.' 
properties: apiGroup: description: APIGroup is the group for the resource @@ -1687,8 +2239,12 @@ spec: - name type: object resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -1716,8 +2272,8 @@ spec: type: object type: object selector: - description: A label query over volumes to consider - for binding. + description: selector is a label query over volumes + to consider for binding. properties: matchExpressions: description: matchExpressions is a list of label @@ -1767,8 +2323,9 @@ spec: type: object type: object storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: description: volumeMode defines what type of volume @@ -1776,7 +2333,7 @@ spec: is implied when not included in claim spec. type: string volumeName: - description: VolumeName is the binding reference + description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object @@ -1785,32 +2342,33 @@ spec: type: object type: object fc: - description: FC represents a Fibre Channel resource that is + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. properties: fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from - compromising the machine' + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' type: string lun: - description: 'Optional: FC target lun number' + description: 'lun is Optional: FC target lun number' format: int32 type: integer readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' items: type: string type: array wwids: - description: 'Optional: FC volume world wide identifiers + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' 
items: @@ -1818,34 +2376,36 @@ spec: type: array type: object flexVolume: - description: FlexVolume represents a generic volume resource + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. properties: driver: - description: Driver is the name of the driver to use for + description: driver is the name of the driver to use for this volume. type: string fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. type: string options: additionalProperties: type: string - description: 'Optional: Extra command options if any.' + description: 'options is Optional: this field holds extra + command options if any.' type: object readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the - plugin scripts. This may be empty if no secret object - is specified. If the secret object contains more than - one secret, all secrets are passed to the plugin scripts.' + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1856,90 +2416,92 @@ spec: - driver type: object flocker: - description: Flocker represents a Flocker volume attached to + description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running properties: datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated type: string datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset type: string type: object gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. 
Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' type: string partition: - description: 'The partition in the volume that you want - to mount. If omitted, the default is to mount by volume - name. Examples: For volume /dev/sda1, you specify the - partition as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property empty). + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' format: int32 type: integer pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: string readOnly: - description: 'ReadOnly here will force the ReadOnly setting + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: boolean required: - pdName type: object gitRepo: - description: 'GitRepo represents a git repository at a particular + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' properties: directory: - description: Target directory name. Must not contain or - start with '..'. If '.' is supplied, the volume directory - will be the git repository. Otherwise, if specified, - the volume will contain the git repository in the subdirectory - with the given name. + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. type: string repository: - description: Repository URL + description: repository is the URL type: string revision: - description: Commit hash for the specified revision. + description: revision is the commit hash for the specified + revision. type: string required: - repository type: object glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' properties: endpoints: - description: 'EndpointsName is the endpoint name that details + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string path: - description: 'Path is the Glusterfs volume path. More info: + description: 'path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string readOnly: - description: 'ReadOnly here will force the Glusterfs volume + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: boolean @@ -1948,7 +2510,7 @@ spec: - path type: object hostPath: - description: 'HostPath represents a pre-existing file or directory + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers @@ -1957,68 +2519,70 @@ spec: mounts and who can/can not mount host directories as read/write.' properties: path: - description: 'Path of the directory on the host. If the + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string type: - description: 'Type for HostPath Volume Defaults to "" More + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string required: - path type: object iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' properties: chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication type: boolean chapAuthSession: - description: whether support iSCSI Session CHAP authentication + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication type: boolean fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' type: string initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. type: string iqn: - description: Target iSCSI Qualified Name. + description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). 
type: string lun: - description: iSCSI Target Lun number. + description: lun represents iSCSI Target Lun number. format: int32 type: integer portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). items: type: string type: array readOnly: - description: ReadOnly here will force the ReadOnly setting + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. type: boolean secretRef: - description: CHAP Secret for iSCSI target and initiator - authentication + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -2026,9 +2590,9 @@ spec: type: string type: object targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -2036,24 +2600,24 @@ spec: - targetPortal type: object name: - description: 'Volume''s name. Must be a DNS_LABEL and unique + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string nfs: - description: 'NFS represents an NFS mount on the host that shares + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: path: - description: 'Path that is exported by the NFS server. More + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string readOnly: - description: 'ReadOnly here will force the NFS export to + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: boolean server: - description: 'Server is the hostname or IP address of the + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string required: @@ -2061,86 +2625,87 @@ spec: - server type: object persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' type: string readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. 
type: boolean required: - claimName type: object photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string pdID: - description: ID that identifies Photon Controller persistent - disk + description: pdID is the ID that identifies Photon Controller + persistent disk type: string required: - pdID type: object portworxVolume: - description: PortworxVolume represents a portworx volume attached + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine properties: fsType: - description: FSType represents the filesystem type to mount + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean volumeID: - description: VolumeID uniquely identifies a Portworx volume + description: volumeID uniquely identifies a Portworx volume type: string required: - volumeID type: object projected: - description: Items for all in one resources secrets, configmaps, - and downward API + description: projected items for all in one resources secrets, + configmaps, and downward API properties: defaultMode: - description: Mode bits used to set permissions on created - files by default. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. Directories within the path are not affected - by this setting. This might be in conflict with other - options that affect the file mode, like fsGroup, and the - result can be other mode bits set. + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. 
format: int32 type: integer sources: - description: list of volume projections + description: sources is the list of volume projections items: description: Projection that may be projected along with other supported volume types properties: configMap: - description: information about the configMap data - to project + description: configMap information about the configMap + data to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected @@ -2155,27 +2720,28 @@ spec: a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element - '..'. May not start with the string '..'. + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. type: string required: - key @@ -2189,13 +2755,13 @@ spec: uid?' type: string optional: - description: Specify whether the ConfigMap or - its keys must be defined + description: optional specify whether the ConfigMap + or its keys must be defined type: boolean type: object downwardAPI: - description: information about the downwardAPI data - to project + description: downwardAPI information about the downwardAPI + data to project properties: items: description: Items is a list of DownwardAPIVolume @@ -2276,15 +2842,15 @@ spec: type: array type: object secret: - description: information about the secret data to - project + description: secret information about the secret data + to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose - name is the key and content is the value. If - specified, the listed keys will be projected + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup @@ -2296,27 +2862,28 @@ spec: a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element - '..'. May not start with the string '..'. + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. type: string required: - key @@ -2330,16 +2897,16 @@ spec: uid?' type: string optional: - description: Specify whether the Secret or its - key must be defined + description: optional field specify whether the + Secret or its key must be defined type: boolean type: object serviceAccountToken: - description: information about the serviceAccountToken - data to project + description: serviceAccountToken is information about + the serviceAccountToken data to project properties: audience: - description: Audience is the intended audience + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the @@ -2347,7 +2914,7 @@ spec: of the apiserver. type: string expirationSeconds: - description: ExpirationSeconds is the requested + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate @@ -2359,7 +2926,7 @@ spec: format: int64 type: integer path: - description: Path is the path relative to the + description: path is the path relative to the mount point of the file to project the token into. type: string @@ -2370,35 +2937,35 @@ spec: type: array type: object quobyte: - description: Quobyte represents a Quobyte mount on the host + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime properties: group: - description: Group to map volume access to Default is no + description: group to map volume access to Default is no group type: string readOnly: - description: ReadOnly here will force the Quobyte volume + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
type: boolean registry: - description: Registry represents a single or multiple Quobyte + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes type: string tenant: - description: Tenant owning the given Quobyte volume in the + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin type: string user: - description: User to map volume access to Defaults to serivceaccount + description: user to map volume access to Defaults to serivceaccount user type: string volume: - description: Volume is a string that references an already + description: volume is a string that references an already created Quobyte volume by name. type: string required: @@ -2406,41 +2973,42 @@ spec: - volume type: object rbd: - description: 'RBD represents a Rados Block Device mount on the + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' type: string image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string keyring: - description: 'Keyring is the path to key ring for RBDUser. + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string monitors: - description: 'A collection of Ceph monitors. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' items: type: string type: array pool: - description: 'The rados pool name. Default is rbd. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string readOnly: - description: 'ReadOnly here will force the ReadOnly setting + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: boolean secretRef: - description: 'SecretRef is name of the authentication secret + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: @@ -2450,35 +3018,36 @@ spec: type: string type: object user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string required: - image - monitors type: object scaleIO: - description: ScaleIO represents a ScaleIO persistent volume + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". type: string gateway: - description: The host address of the ScaleIO API Gateway. + description: gateway is the host address of the ScaleIO + API Gateway. type: string protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: SecretRef references to the secret for ScaleIO + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. properties: @@ -2488,25 +3057,26 @@ spec: type: string type: object sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false type: boolean storageMode: - description: Indicates whether the storage for a volume - should be ThickProvisioned or ThinProvisioned. Default - is ThinProvisioned. + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. type: string storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. type: string system: - description: The name of the storage system as configured - in ScaleIO. + description: system is the name of the storage system as + configured in ScaleIO. type: string volumeName: - description: The name of a volume already created in the - ScaleIO system that is associated with this volume source. + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. type: string required: - gateway @@ -2514,24 +3084,24 @@ spec: - system type: object secret: - description: 'Secret represents a secret that should populate + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' properties: defaultMode: - description: 'Optional: mode bits used to set permissions - on created files by default. 
Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Defaults to 0644. Directories within - the path are not affected by this setting. This might - be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits - set.' + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer items: - description: If unspecified, each key-value pair in the - Data field of the referenced Secret will be projected + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be @@ -2543,25 +3113,25 @@ spec: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. type: string required: - key @@ -2569,29 +3139,30 @@ spec: type: object type: array optional: - description: Specify whether the Secret or its keys must - be defined + description: optional field specify whether the Secret or + its keys must be defined type: boolean secretName: - description: 'Name of the secret in the pod''s namespace - to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string type: object storageos: - description: StorageOS represents a StorageOS volume attached + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: SecretRef specifies the secret to use for obtaining + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. properties: @@ -2601,12 +3172,12 @@ spec: type: string type: object volumeName: - description: VolumeName is the human-readable name of the + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. type: string volumeNamespace: - description: VolumeNamespace specifies the scope of the + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS @@ -2617,24 +3188,26 @@ spec: type: string type: object vsphereVolume: - description: VsphereVolume represents a vSphere volume attached + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. type: string storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. type: string volumePath: - description: Path that identifies vSphere volume vmdk + description: volumePath is the path that identifies vSphere + volume vmdk type: string required: - volumePath @@ -2650,17 +3223,32 @@ spec: OpenTelemetryCollector. properties: messages: - description: Messages about actions performed by the operator on this - resource. + description: 'Messages about actions performed by the operator on + this resource. Deprecated: use Kubernetes events instead.' items: type: string type: array x-kubernetes-list-type: atomic replicas: - description: Replicas is currently not being set and might be removed - in the next version. 
+ description: 'Replicas is currently not being set and might be removed + in the next version. Deprecated: use "OpenTelemetryCollector.Status.Scale.Replicas" + instead.' format: int32 type: integer + scale: + description: Scale is the OpenTelemetryCollector's scale subresource + status. + properties: + replicas: + description: The total number non-terminated pods targeted by + this OpenTelemetryCollector's deployment or statefulSet. + format: int32 + type: integer + selector: + description: The selector used to match the OpenTelemetryCollector's + deployment or statefulSet pods. + type: string + type: object version: description: Version of the managed OpenTelemetry Collector (operand) type: string @@ -2670,8 +3258,9 @@ spec: storage: true subresources: scale: + labelSelectorPath: .status.scale.selector specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas + statusReplicasPath: .status.scale.replicas status: {} status: acceptedNames: @@ -2683,12 +3272,16 @@ status: apiVersion: v1 kind: ServiceAccount metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-controller-manager namespace: opentelemetry-operator-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-leader-election-role namespace: opentelemetry-operator-system rules: @@ -2724,6 +3317,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-manager-role rules: - apiGroups: @@ -2820,6 +3415,18 @@ rules: - patch - update - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: @@ -2871,6 +3478,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-metrics-reader rules: - nonResourceURLs: @@ -2881,6 +3490,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-proxy-role rules: - apiGroups: @@ -2899,6 +3510,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-leader-election-rolebinding namespace: opentelemetry-operator-system roleRef: @@ -2913,6 +3526,8 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -2926,6 +3541,8 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -2940,6 +3557,7 @@ apiVersion: v1 kind: Service metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-controller-manager-metrics-service namespace: opentelemetry-operator-system @@ -2950,11 +3568,14 @@ spec: protocol: TCP targetPort: https selector: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager --- 
apiVersion: v1 kind: Service metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-webhook-service namespace: opentelemetry-operator-system spec: @@ -2963,12 +3584,14 @@ spec: protocol: TCP targetPort: 9443 selector: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager --- apiVersion: apps/v1 kind: Deployment metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-controller-manager namespace: opentelemetry-operator-system @@ -2976,17 +3599,19 @@ spec: replicas: 1 selector: matchLabels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager template: metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager spec: containers: - args: - --metrics-addr=127.0.0.1:8080 - --enable-leader-election - image: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.42.0 + image: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.56.0 livenessProbe: httpGet: path: /healthz @@ -3019,13 +3644,20 @@ spec: - --secure-listen-address=0.0.0.0:8443 - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + - --v=0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0 name: kube-rbac-proxy ports: - containerPort: 8443 name: https protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi serviceAccountName: opentelemetry-operator-controller-manager terminationGracePeriodSeconds: 10 volumes: @@ -3037,6 +3669,8 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-serving-cert namespace: opentelemetry-operator-system spec: @@ -3054,6 +3688,8 @@ spec: apiVersion: cert-manager.io/v1 kind: Issuer metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-selfsigned-issuer namespace: opentelemetry-operator-system spec: @@ -3064,11 +3700,12 @@ kind: MutatingWebhookConfiguration metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-mutating-webhook-configuration webhooks: - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3089,7 +3726,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3110,7 +3746,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3135,11 +3770,12 @@ kind: ValidatingWebhookConfiguration metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-validating-webhook-configuration webhooks: - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3160,7 +3796,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3180,7 +3815,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 
   clientConfig:
     service:
       name: opentelemetry-operator-webhook-service
       namespace: opentelemetry-operator-system
@@ -3201,7 +3835,6 @@ webhooks:
   sideEffects: None
 - admissionReviewVersions:
   - v1
-  - v1beta1
   clientConfig:
     service:
       name: opentelemetry-operator-webhook-service
@@ -3219,3 +3852,4 @@ webhooks:
     resources:
     - opentelemetrycollectors
   sideEffects: None
+
diff --git a/pulumi/python/kubernetes/prometheus/__main__.py b/pulumi/python/kubernetes/prometheus/__main__.py
index c29d27c..69969fb 100644
--- a/pulumi/python/kubernetes/prometheus/__main__.py
+++ b/pulumi/python/kubernetes/prometheus/__main__.py
@@ -66,7 +66,7 @@ def extract_adminpass_from_k8s_secrets(secrets: Mapping[str, str]) -> str:
 chart_name = 'kube-prometheus-stack'
 chart_version = config.get('chart_version')
 if not chart_version:
-    chart_version = '30.0.1'
+    chart_version = '39.2.1'
 helm_repo_name = config.get('prometheus_helm_repo_name')
 if not helm_repo_name:
     helm_repo_name = 'prometheus-community'
@@ -207,7 +207,7 @@ def extract_adminpass_from_k8s_secrets(secrets: Mapping[str, str]) -> str:
 statsd_chart_name = 'prometheus-statsd-exporter'
 statsd_chart_version = config.get('statsd_chart_version')
 if not statsd_chart_version:
-    statsd_chart_version = '0.4.2'
+    statsd_chart_version = '0.5.0'
 helm_repo_name = config.get('prometheus_helm_repo_name')
 if not helm_repo_name:
     helm_repo_name = 'prometheus-community'

From 01a563ecfdc84d08aa9062f66bba3ccfbfc3b5db Mon Sep 17 00:00:00 2001
From: Jason Schmidt
Date: Wed, 3 Aug 2022 14:10:59 -0400
Subject: [PATCH 52/62] chore: additional bug fixes and usability fixes to
 automation branch (#174)

* chore: update cert-manager chart and crds
* chore: update logagent (filebeat) chart version
* chore: update nginx IC to latest chart version
* chore: update prometheus chart to latest version
* chore: update logstore (Elasticsearch) to latest chart version
* chore: update observability to new yaml and new chart
* chore: update example config with new values
* fix: remediation of deployment bugs
* fix: removed JWT-only logic from BoS
* fix: remove logic for sirius_host from deprecated jwt deploys
* fix: remove deprecated ingress-repo-only project
* fix: adjust min kubectl version deployed
---
 bin/setup_venv.sh                             |   6 +-
 .../applications/sirius/__main__.py           |  75 ++-----
 .../ingress-controller-repo-only/Pulumi.yaml  |   7 -
 .../ingress-controller-repo-only/__main__.py  | 189 ------------------
 .../manifests/.gitkeep                        |   0
 5 files changed, 16 insertions(+), 261 deletions(-)
 delete mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml
 delete mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py
 delete mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep

diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh
index 845f70d..e1ab75e 100755
--- a/bin/setup_venv.sh
+++ b/bin/setup_venv.sh
@@ -258,7 +258,7 @@ fi
 # downloading the kubectl if it did not exist; this could result in versions not being updated if the
 # MARA project was run in the same environment w/o a refresh.
 #
-# The two fixes here are to hardcode (For now) to a known good version (1.23.6) and force the script to
+# The two fixes here are to hardcode (For now) to a known good version (1.24.3) and force the script to
 # always download this version.
 #
 # TODO: Figure out a way to not hardcode the kubectl version
@@ -275,7 +275,7 @@
 if [ !
-x "${VIRTUAL_ENV}/bin/kubectl" ]; then else echo "kubectl is already installed, but will overwrite to ensure correct version" echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.23.6" + KUBECTL_VERSION="v1.24.3" ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} @@ -316,4 +316,4 @@ if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}" [ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI" rm "${DOCTL_TARBALL_DEST}" -fi \ No newline at end of file +fi diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py index 3d5a393..ce98d65 100644 --- a/pulumi/python/kubernetes/applications/sirius/__main__.py +++ b/pulumi/python/kubernetes/applications/sirius/__main__.py @@ -37,12 +37,6 @@ def pulumi_ingress_project_name(): return pulumi_config.get_pulumi_project_name(ingress_project_path) -def pulumi_repo_ingress_project_name(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only') - return pulumi_config.get_pulumi_project_name(ingress_project_path) - - def sirius_manifests_location(): script_dir = os.path.dirname(os.path.abspath(__file__)) sirius_manifests_path = os.path.join(script_dir, 'src', 'kubernetes-manifests', '*.yaml') @@ -102,58 +96,15 @@ def add_namespace(obj): k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) -# TODO: Streamline the logic for FQDN/IP into something a bit more sane and scalable #82 # -# Currently, if we are doing an AWS deployment we use the AWS IC deployment, which uses the ELB hostname -# as part of the certificate (self-signed). +# We use the hostanme to set the value for our FQDN, which drives the cert +# process as well. # -# If we are using a kubeconfig file (ie, not type AWS) we expect we are going to get an IP address and not -# a hostname in return. So we use the hostname variable to create the certificate we need, and then we use -# the IP address in output to the user to tell them to setup DNS or a hostfile. 
-# - -# We use the kubernetes namespace for this -config = pulumi.Config('kubernetes') -infra_type = config.require('infra_type') - -if infra_type == 'AWS': - # Logic to extract the FQDN of the load balancer for Ingress - ingress_project_name = pulumi_ingress_project_name() - ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" - ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) - lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') - sirius_host = lb_ingress_hostname -elif infra_type == 'kubeconfig': - # Logic to extract the FQDN of the load balancer for Ingress - ingress_project_name = pulumi_repo_ingress_project_name() - ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" - ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) - lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') - # Set back to kubernetes - config = pulumi.Config('kubernetes') - lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip') - sirius_host = lb_ingress_hostname -elif infra_type == 'DO': - # Logic to extract the FQDN of the load balancer for Ingress - ingress_project_name = pulumi_repo_ingress_project_name() - ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" - ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) - lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') - # Set back to kubernetes - config = pulumi.Config('kubernetes') - lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip') - sirius_host = lb_ingress_hostname -elif infra_type == 'LKE': - # Logic to extract the FQDN of the load balancer for Ingress - ingress_project_name = pulumi_repo_ingress_project_name() - ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" - ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) - lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') - # Set back to kubernetes - config = pulumi.Config('kubernetes') - lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip') - sirius_host = lb_ingress_hostname - +ingress_project_name = pulumi_ingress_project_name() +ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" +ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) +lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') +sirius_host = lb_ingress_hostname # Create the namespace for Bank of Sirius ns = k8s.core.v1.Namespace(resource_name='bos', @@ -421,12 +372,12 @@ def add_namespace(obj): elif infra_type == 'kubeconfig': pulumi.export('hostname', lb_ingress_hostname) pulumi.export('ipaddress', lb_ingress_ip) - #pulumi.export('application_url', f'https://{lb_ingress_hostname}') + # pulumi.export('application_url', f'https://{lb_ingress_hostname}') application_url = sirius_host.apply(lambda host: f'https://{host}') elif infra_type == 'DO': pulumi.export('hostname', lb_ingress_hostname) pulumi.export('ipaddress', lb_ingress_ip) - #pulumi.export('application_url', f'https://{lb_ingress_hostname}') + # pulumi.export('application_url', f'https://{lb_ingress_hostname}') application_url = sirius_host.apply(lambda host: f'https://{host}') # @@ -457,11 +408,11 @@ def add_namespace(obj): namespace=ns, # Values from Chart's parameters specified hierarchically, - values = { + values={ "serviceMonitor": { "enabled": True, "namespace": "prometheus" - }, + }, "config": { "datasource": { "host": "accounts-db", @@ -504,11 +455,11 @@ def 
add_namespace(obj): namespace=ns, # Values from Chart's parameters specified hierarchically, - values = { + values={ "serviceMonitor": { "enabled": True, "namespace": "prometheus" - }, + }, "config": { "datasource": { "host": "ledger-db", diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml deleted file mode 100644 index 6895e0c..0000000 --- a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml +++ /dev/null @@ -1,7 +0,0 @@ -name: ingress-controller -runtime: - name: python - options: - virtualenv: ../../../venv -config: ../../../../../config/pulumi -description: Sets up NGINX Kubernetes Ingress Controller using Helm \ No newline at end of file diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py deleted file mode 100644 index af2e3db..0000000 --- a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py +++ /dev/null @@ -1,189 +0,0 @@ -import os - -import pulumi -from pulumi import Output -import pulumi_kubernetes as k8s -from pulumi_kubernetes.core.v1 import Service -from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs -from pulumi_kubernetes.yaml import ConfigFile - -from kic_util import pulumi_config - -# -# We default to the OSS IC; if the user wants Plus they need to enable it in the config file -# along with the Plus flag, and the addition of a JWT. -# -config = pulumi.Config('kic-helm') -chart_name = config.get('chart_name') -if not chart_name: - chart_name = 'nginx-ingress' -chart_version = config.get('chart_version') -if not chart_version: - chart_version = '0.13.0' -helm_repo_name = config.get('helm_repo_name') -if not helm_repo_name: - helm_repo_name = 'nginx-stable' -helm_repo_url = config.get('helm_repo_url') -if not helm_repo_url: - helm_repo_url = 'https://helm.nginx.com/stable' -nginx_repository = config.get('nginx_repository') -if not nginx_repository: - nginx_repository = "nginx/nginx-ingress" -nginx_tag = config.get('nginx_tag') -if not nginx_tag: - nginx_tag = "2.2.0" -nginx_plus_flag = config.get_bool('nginx_plus_flag') -if not nginx_plus_flag: - nginx_plus_flag = False - -# -# Allow the user to set timeout per helm chart; otherwise -# we default to 5 minutes. 
-# -helm_timeout = config.get_int('helm_timeout') -if not helm_timeout: - helm_timeout = 300 - -# Get the FQDN -fqdn = config.get('fqdn') - - -def project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) - project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) - return pulumi_config.get_pulumi_project_name(project_path) - - -def k8_manifest_location(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - k8_manifest_path = os.path.join(script_dir, 'manifests', 'regcred.yaml') - return k8_manifest_path - - -k8_manifest = k8_manifest_location() - -registrycred = ConfigFile( - "regcred", - file=k8_manifest) - -chart_values = { - 'controller': { - 'nginxplus': nginx_plus_flag, - 'healthStatus': True, - 'appprotect': { - 'enable': False - }, - "image": { - "repository": nginx_repository, - "tag": nginx_tag, - "pullPolicy": "Always" - }, - "serviceAccount": { - "imagePullSecretName": "regcred" - }, - 'config': { - 'name': 'nginx-config', - 'entries': { - 'log-format': '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent ' - '\"$http_referer\" \"$http_user_agent\" $upstream_response_time $upstream_status ' - '\"$uri\" $request_length $request_time [$proxy_host] [] $upstream_addr ' - '$upstream_bytes_sent $upstream_response_time $upstream_status $request_id ' - } - }, - 'service': { - 'annotations': { - 'co.elastic.logs/module': 'nginx' - }, - "extraLabels": { - "app": "kic-nginx-ingress" - }, - "customPorts": [ - { - "name": "dashboard", - "targetPort": 8080, - "protocol": "TCP", - "port": 8080 - }, - { - "name": "prometheus", - "targetPort": 9113, - "protocol": "TCP", - "port": 9113 - } - ] - }, - 'pod': { - 'annotations': { - 'co.elastic.logs/module': 'nginx' - } - } - }, - 'prometheus': { - 'create': True, - 'port': 9113 - } -} - -stack_name = pulumi.get_stack() -project_name = pulumi.get_project() -pulumi_user = pulumi_config.get_pulumi_user() - -kube_project_name = project_name_from_project_dir('kubeconfig') -kube_stack_ref_id = f"{pulumi_user}/{kube_project_name}/{stack_name}" -kube_stack_ref = pulumi.StackReference(kube_stack_ref_id) -kubeconfig = kube_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) - -k8s_provider = k8s.Provider(resource_name=f'ingress-controller-repo-only', - kubeconfig=kubeconfig) - -# This is required for the service monitor from the Prometheus namespace -ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', - metadata={'name': 'nginx-ingress', - 'labels': { - 'prometheus': 'scrape'} - }, - opts=pulumi.ResourceOptions(provider=k8s_provider)) - -kic_release_args = ReleaseArgs( - chart=chart_name, - repository_opts=RepositoryOptsArgs( - repo=helm_repo_url - ), - version=chart_version, - namespace=ns.metadata.name, - - # Values from Chart's parameters specified hierarchically, - values=chart_values, - # User configurable timeout - timeout=helm_timeout, - # By default Release resource will wait till all created resources - # are available. Set this to true to skip waiting on resources being - # available. 
- skip_await=False, - # If we fail, clean up - cleanup_on_fail=True, - # Provide a name for our release - name="kic", - # Lint the chart before installing - lint=True, - # Force update if required - force_update=True) - -kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns])) - -pstatus = kic_chart.status - -srv = Service.get("nginx-ingress", - Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress")) - -ingress_service = srv.status - -# -# Some LB's give us a hostname (which is cool) and some just an IP. We need to capture -# both, and then make a determination on what the user needs to do based on what they have -# been given. -# -pulumi.export('lb_ingress_hostname', fqdn) -pulumi.export('lb_ingress_ip', pulumi.Output.unsecret(ingress_service.load_balancer.ingress[0].ip)) -# Print out our status -pulumi.export("kic_status", pstatus) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep deleted file mode 100644 index e69de29..0000000 From 3ea8c4116da0ca1ad11eb7b58bb00d665ea5b369 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Thu, 4 Aug 2022 18:22:57 -0400 Subject: [PATCH 53/62] chore: updates to fix digital ocean deployment (#177) * chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart version * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs * fix: removed JWT-only logic from BoS * fix: remove logic for sirius_host from deprecated jwt deploys * fix: remove deprecated ingress-repo-only project * fix: adjust min kubectl version deployed * fix: refactor digitalocean to docean for variables --- config/pulumi/Pulumi.stackname.yaml.example | 8 ++-- pulumi/python/automation/providers/do.py | 38 +++++++++---------- .../container-registry/__main__.py | 2 +- .../digitalocean/dns-record/__main__.py | 9 ++++- .../digitalocean/domk8s/__main__.py | 8 ++-- .../applications/sirius/__main__.py | 21 +++------- 6 files changed, 40 insertions(+), 46 deletions(-) diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index 1276d43..d261d75 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -324,13 +324,13 @@ config: # Digital Ocean Managed Kubernetes and Container Registry ############################################################################ # This is the Kubernetes version to install using Digital Ocean K8s. - digitalocean:k8s_version: 1.22.8-do.1 + docean:k8s_version: 1.22.12-do.0 # This is the default instance type used by Digital Ocean K8s. - digitalocean:instance_size: s-2vcpu-4gb + docean:instance_size: s-4vcpu-8gb # The desired node count of the Digital Ocean K8s cluster.
- digitalocean:node_count: 3 + docean:node_count: 3 # The region to deploy the cluster - digitalocean:region: sfo3 + docean:region: sfo3 # Subscription tier for container registry digitalocean:container_registry_subscription_tier: starter diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py index f77441e..0de2274 100644 --- a/pulumi/python/automation/providers/do.py +++ b/pulumi/python/automation/providers/do.py @@ -113,7 +113,7 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list config = super().new_stack_config(env_config, defaults) if 'DIGITALOCEAN_TOKEN' not in env_config: - config['digitalocean:token'] = input("Digital Ocean API token (this is stored in plain-text - " + config['docean:token'] = input("Digital Ocean API token (this is stored in plain-text - " "alternatively this can be specified as the environment variable " "DIGITALOCEAN_TOKEN): ") @@ -131,42 +131,42 @@ def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list print('Supported Kubernetes versions:') for slug in k8s_version_slugs: print(f' {slug}') - default_version = defaults['digitalocean:k8s_version'] or k8s_version_slugs[0] - config['digitalocean:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version - print(f"Kubernetes version: {config['digitalocean:k8s_version']}") + default_version = defaults['docean:k8s_version'] or k8s_version_slugs[0] + config['docean:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version + print(f"Kubernetes version: {config['docean:k8s_version']}") # Kubernetes regions k8s_regions_json_str, _ = external_process.run(do_cli.get_kubernetes_regions_json()) k8s_regions_json = json.loads(k8s_regions_json_str) - default_region = defaults['digitalocean:region'] or k8s_regions_json[-1]['slug'] + default_region = defaults['docean:region'] or k8s_regions_json[-1]['slug'] print('Supported Regions:') for item in k8s_regions_json: print(f" {item['name']}: {item['slug']}") - config['digitalocean:region'] = input(f'Region [{default_region}]: ').strip() or default_region - print(f"Region: {config['digitalocean:region']}") + config['docean:region'] = input(f'Region [{default_region}]: ').strip() or default_region + print(f"Region: {config['docean:region']}") # Kubernetes instance size k8s_sizes_json_str, _ = external_process.run(do_cli.get_kubernetes_instance_sizes_json()) k8s_sizes_json = json.loads(k8s_sizes_json_str) k8s_sizes_slugs = [size['slug'] for size in k8s_sizes_json] - default_size = defaults['digitalocean:instance_size'] or 's-2vcpu-4gb' + default_size = defaults['docean:instance_size'] or 's-2vcpu-4gb' print('Supported Instance Sizes:') for slug in k8s_sizes_slugs: print(f' {slug}') - config['digitalocean:instance_size'] = input(f'Instance size [{default_size}]: ').strip() or default_size - print(f"Instance size: {config['digitalocean:instance_size']}") + config['docean:instance_size'] = input(f'Instance size [{default_size}]: ').strip() or default_size + print(f"Instance size: {config['docean:instance_size']}") # Kubernetes instance count - default_node_count = defaults['digitalocean:node_count'] or 3 - while 'digitalocean:node_count' not in config: + default_node_count = defaults['docean:node_count'] or 3 + while 'docean:node_count' not in config: node_count = input('Node count for Kubernetes cluster ' f'[{default_node_count}]: ').strip() or default_node_count if type(node_count) == int or node_count.isdigit(): - 
config['digitalocean:node_count'] = int(node_count) - print(f"Node count: {config['digitalocean:node_count']}") + config['docean:node_count'] = int(node_count) + print(f"Node count: {config['docean:node_count']}") return config @@ -217,16 +217,16 @@ def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._confi return env_config['DIGITALOCEAN_TOKEN'] # We were given a reference to a StackConfigParser object - if 'config' in stack_config and 'digitalocean:token' in stack_config['config']: - return stack_config['config']['digitalocean:token'] + if 'config' in stack_config and 'docean:token' in stack_config['config']: + return stack_config['config']['docean:token'] # We were given a reference to a Pulumi Stack configuration - if 'digitalocean:token' in stack_config: - return stack_config['digitalocean:token'].value + if 'docean:token' in stack_config: + return stack_config['docean:token'].value # Otherwise msg = 'When using the Digital Ocean provider, an API token must be specified - ' \ - 'this token can be specified with the Pulumi config parameter digitalocean:token ' \ + 'this token can be specified with the Pulumi config parameter docean:token ' \ 'or the environment variable DIGITALOCEAN_TOKEN' raise InvalidConfigurationException(msg) diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py index 20cb64b..f326c63 100644 --- a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py @@ -5,7 +5,7 @@ from kic_util import external_process -config = pulumi.Config('digitalocean') +config = pulumi.Config('docean') # valid values: starter, basic, professional subscription_tier = config.get('container_registry_subscription_tier') if not subscription_tier: diff --git a/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py index 3b428e1..9c82619 100644 --- a/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py @@ -28,9 +28,14 @@ def extract_ip_address(lb_ingress): config = pulumi.Config('kic-helm') fqdn = config.require('fqdn') -ingress_domain = docean.Domain.get(resource_name='ingress-domain', id=fqdn, name=fqdn) +# +# Split our hostname off the domain name to build the DNS records +# +hostname, domainname = fqdn.split('.',1) + +ingress_domain = docean.Domain.get(resource_name='ingress-domain', id=domainname, name=domainname) ingress_a_record = docean.DnsRecord(resource_name='ingress-a-record', - name='@', + name=hostname, domain=ingress_domain.id, type="A", ttl=1800, diff --git a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py index f94a37f..c82a734 100644 --- a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py +++ b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py @@ -5,14 +5,14 @@ from kic_util import pulumi_config # Configuration details for the K8 cluster -config = pulumi.Config('digitalocean') +config = pulumi.Config('docean') instance_size = config.get('instance_size') if not instance_size: - instance_size = 's-2vcpu-4gb' + instance_size = 's-4vcpu-8gb' region = config.get('region') if not region: region = 'sfo3' -node_count = config.get('node_count') +node_count = config.get_int('node_count') if not node_count: 
node_count = 3 k8s_version = config.get('k8s_version') @@ -41,7 +41,7 @@ def container_registry_project_name(): node_pool=KubernetesClusterNodePoolArgs( name=pool_name, size=instance_size, - node_count=node_count, + node_count=node_count )) kubeconfig = cluster.kube_configs[0].raw_config diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py index ce98d65..88aa12e 100644 --- a/pulumi/python/kubernetes/applications/sirius/__main__.py +++ b/pulumi/python/kubernetes/applications/sirius/__main__.py @@ -363,22 +363,11 @@ def add_namespace(obj): )], )) -# We use the kubernetes namespace for this -config = pulumi.Config('kubernetes') -infra_type = config.require('infra_type') -if infra_type == 'AWS': - application_url = sirius_host.apply(lambda host: f'https://{host}') - pulumi.export('application_url', application_url) -elif infra_type == 'kubeconfig': - pulumi.export('hostname', lb_ingress_hostname) - pulumi.export('ipaddress', lb_ingress_ip) - # pulumi.export('application_url', f'https://{lb_ingress_hostname}') - application_url = sirius_host.apply(lambda host: f'https://{host}') -elif infra_type == 'DO': - pulumi.export('hostname', lb_ingress_hostname) - pulumi.export('ipaddress', lb_ingress_ip) - # pulumi.export('application_url', f'https://{lb_ingress_hostname}') - application_url = sirius_host.apply(lambda host: f'https://{host}') +# +# Get the hostname for our connect URL +# +application_url = sirius_host.apply(lambda host: f'https://{host}') +pulumi.export('application_url', application_url) # # Get the chart values for both monitoring charts, switch back to the Sirius From b20fb64145151a0255747c7c9fb2ceb702d04364 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Mon, 8 Aug 2022 12:29:51 -0400 Subject: [PATCH 54/62] fix: provide ability to still run kubeconfig deploys until they are cut over to automation api (#179) * chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart version * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs * fix: removed JWT-only logic from BoS * fix: remove logic for sirius_host from deprecated jwt deploys * fix: remove deprecated ingress-repo-only project * fix: adjust min kubectl version deployed * fix: refactor digitalocean to docean for variables * fix: add repo-only IC deploy to support kubeconfig deploys * fix: modifications to handle kubeconfig deploys for now * fix: recommission bash scripts to support kubeconfig deploys for now --- bin/destroy.sh | 96 +++++ bin/destroy_kube.sh | 131 ++++++ bin/start.sh | 142 +++++++ bin/start_kube.sh | 373 ++++++++++++++++++ .../applications/sirius/__main__.py | 65 ++- .../ingress-controller-repo-only/Pulumi.yaml | 7 + .../ingress-controller-repo-only/__main__.py | 189 +++++++++ 7 files changed, 993 insertions(+), 10 deletions(-) create mode 100755 bin/destroy.sh create mode 100755 bin/destroy_kube.sh create mode 100755 bin/start.sh create mode 100755 bin/start_kube.sh create mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml create mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py diff --git a/bin/destroy.sh b/bin/destroy.sh new file mode 100755 index 0000000..06ab844 --- /dev/null +++
b/bin/destroy.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -o errexit # abort on nonzero exit status +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +# Don't pollute console output with upgrade notifications +export PULUMI_SKIP_UPDATE_CHECK=true +# Run Pulumi non-interactively +export PULUMI_SKIP_CONFIRMATIONS=true +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based +# projects. +# +if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then + echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." + echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." + echo " " + exit 1 +else + echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" +fi + +if ! command -v pulumi >/dev/null; then + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi +fi + +if ! command -v python3 >/dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 +fi + +# Check to see if the user is logged into Pulumi +if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + pulumi login + + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi +fi + +echo " " +echo "Notice! This shell script will only destroy kubeconfig based deployments; if you have deployed to AWS, " +echo "DigitalOcean, or Linode you will need to use the ./pulumi/python/runner script instead." +echo " " + +# Sleep so we are seen... +sleep 5 + +source "${script_dir}/../config/pulumi/environment" +echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" + +# +# Determine what destroy script we need to run +# +if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then + INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)" + if [ $INFRA == 'AWS' ]; then + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + elif [ $INFRA == 'kubeconfig' ]; then + echo "Destroying a kubeconfig based stack; if this is not right please type ctrl-c to abort this script." + sleep 5 + ${script_dir}/destroy_kube.sh + exit 0 + elif [ $INFRA == 'DO' ]; then + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + elif [ $INFRA == 'LKE' ]; then + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + else + echo >&2 "No infrastructure set in config file; aborting!" + exit 1 + fi +else + echo >&2 "No infrastructure set in config file; aborting!"
+ exit 2 +fi diff --git a/bin/destroy_kube.sh b/bin/destroy_kube.sh new file mode 100755 index 0000000..8bb4013 --- /dev/null +++ b/bin/destroy_kube.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +set -o errexit # abort on nonzero exit status +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +# Don't pollute console output with upgrade notifications +export PULUMI_SKIP_UPDATE_CHECK=true +# Run Pulumi non-interactively +export PULUMI_SKIP_CONFIRMATIONS=true + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" + +if ! command -v pulumi > /dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v pulumi > /dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi +fi + +if ! command -v python3 > /dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 +fi + +if ! command -v node > /dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v node > /dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi +fi + +# Check to see if the user is logged into Pulumi +if ! pulumi whoami --non-interactive > /dev/null 2>&1; then + pulumi login + + if ! pulumi whoami --non-interactive > /dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi +fi + +source "${script_dir}/../config/pulumi/environment" +echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" + + +APPLICATIONS=(sirius) +KUBERNETES=(secrets observability logagent logstore certmgr prometheus) +NGINX=(kubernetes/nginx/ingress-controller-repo-only) +INFRA=(kubeconfig digitalocean/domk8s) + +# +# This is a temporary process until we complete the directory reorg and move the start/stop +# process into more solid code.
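Each of the teardown loops that follow boils down to one pulumi destroy per project directory; run by hand against a single project, the equivalent command would look roughly like this (the path shown is illustrative):

    # Destroy one project's stack non-interactively from the repository root
    pulumi --cwd pulumi/python/kubernetes/applications/sirius \
           --stack "${PULUMI_STACK}" --emoji destroy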
+# + +# Destroy the application(s) +for project_dir in "${APPLICATIONS[@]}" ; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi ${pulumi_args} destroy + else + >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" + fi +done + +# Destroy other K8 resources +for project_dir in "${KUBERNETES[@]}" ; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi ${pulumi_args} destroy + else + >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" + fi +done + +# TODO: figure out a more elegant way to do the CRD removal for prometheus #83 +# This is a hack for now to remove the CRDs for kube-prometheus-stack +# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart +kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 || true +kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 || true + +# Destroy NGINX components +for project_dir in "${NGINX[@]}" ; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi ${pulumi_args} destroy + else + >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" + fi +done + +# Clean up the kubeconfig project +for project_dir in "${INFRA[@]}" ; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi ${pulumi_args} destroy + else + >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" + fi +done + + + + diff --git a/bin/start.sh b/bin/start.sh new file mode 100755 index 0000000..8e85bfc --- /dev/null +++ b/bin/start.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash + +set -o errexit # abort on nonzero exit status +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +# Don't pollute console output with upgrade notifications +export PULUMI_SKIP_UPDATE_CHECK=true +# Run Pulumi non-interactively +export PULUMI_SKIP_CONFIRMATIONS=true + +# Unset virtual environment if defined.... +unset VIRTUAL_ENV + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based +# projects. +# +if !
command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then + echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." + echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." + echo " " + exit 1 +else + echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" +fi + +if ! command -v pulumi >/dev/null; then + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi +fi + +if ! command -v python3 >/dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 +fi + +# Check to see if the user is logged into Pulumi +if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + pulumi login + + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi +fi + +echo " " +echo "NOTICE! This shell script is maintained for compatibility with the kubeconfig-only deployment and will be" +echo "deprecated once the kubeconfig deployments are fully integrated with the automation api." +echo " " +echo "If you are deploying AWS, DigitalOcean, or Linode based stacks you will need to use the runner script." +echo " " +echo "Please read the documentation for more details." +echo " " +# Sleep so we are seen... +sleep 5 + +if [ -s "${script_dir}/../config/pulumi/environment" ] && grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then + source "${script_dir}/../config/pulumi/environment" + echo "Environment data found for stack: ${PULUMI_STACK}" + while true; do + read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn + case $yn in + [Yy]*) # We have an environment file and they want to keep it.... + if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then + INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)" + if [ $INFRA == 'AWS' ]; then + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + elif [ $INFRA == 'kubeconfig' ]; then + exec ${script_dir}/start_kube.sh + exit 0 + elif [ $INFRA == 'DO' ]; then + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + elif [ $INFRA == 'LKE' ]; then + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + else + echo "Corrupt or non-existent configuration file; please delete it, then restart and reconfigure." + exit 1 + fi + else + echo "Corrupt or non-existent configuration file; please delete it, then restart and reconfigure." + exit 1 + fi + break + ;; + [Nn]*) # They want to remove and reconfigure + rm -f ${script_dir}/../config/pulumi/environment + break + ;; + *) echo "Please answer yes or no." ;; + esac + done +fi + +while true; do + read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode?
" infra + case $infra in + [Aa]*) + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + break + ;; + [Kk]*) + echo "Calling kubeconfig startup script" + exec ${script_dir}/start_kube.sh + exit 0 + break + ;; + [Dd]*) + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + break + ;; + [Ll]*) + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner + exit 0 + break + ;; + *) echo "Please answer a, d, k, or l." ;; + esac +done diff --git a/bin/start_kube.sh b/bin/start_kube.sh new file mode 100755 index 0000000..1a781e3 --- /dev/null +++ b/bin/start_kube.sh @@ -0,0 +1,373 @@ +#!/usr/bin/env bash + +set -o errexit # abort on nonzero exit status +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +# Don't pollute console output with upgrade notifications +export PULUMI_SKIP_UPDATE_CHECK=true +# Run Pulumi non-interactively +export PULUMI_SKIP_CONFIRMATIONS=true + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +if ! command -v pulumi >/dev/null; then + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi +fi + +if ! command -v python3 >/dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 +fi + +if ! command -v node >/dev/null; then + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v node >/dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi +fi + +if ! command -v git >/dev/null; then + echo >&2 "git must be installed to continue" + exit 1 +fi + +if ! command -v make >/dev/null; then + echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." +fi + +if ! command -v docker >/dev/null; then + echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." +fi + +# Check to see if the user is logged into Pulumi +if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + pulumi login + + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi +fi + +if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then + touch "${script_dir}/../config/pulumi/environment" +fi + +if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then + read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK + echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" +fi + +# Do we have the submodule source.... +# +# Note: We had been checking for .git, but this is not guaranteed to be +# there if we build the docker image or use a tarball. 
So now we look + for the src subdirectory which should always be there. +# +if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then + echo "Submodule source found" +else + # Error out with instructions. + echo "Bank of Sirius submodule not found" + echo " " + echo "Please run:" + echo " git submodule update --init --recursive --remote" + echo "Inside your git directory and re-run this script" + echo "" + echo >&2 "Unable to find submodule - exiting" + exit 3 +fi + +source "${script_dir}/../config/pulumi/environment" +echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" + +# Create the stack if it does not already exist +# Do not change the tools directory of add-ons. +find "${script_dir}/../pulumi" -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; + +# Show colorful fun headers if the right utils are installed and NO_COLOR is not set +# +function header() { + if [ -z ${NO_COLOR+x} ]; then + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat + else + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" + fi +} + +function retry() { + local -r -i max_attempts="$1" + shift + local -i attempt_num=1 + until "$@"; do + if ((attempt_num == max_attempts)); then + echo "Attempt ${attempt_num} failed and there are no more attempts left!" + return 1 + else + echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." + sleep $((attempt_num++)) + fi + done +} + +function createpw() { + PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) + echo $PWORD +} + +# +# This deploy only works with the NGINX registries. +# +echo " " +echo "NOTICE! Currently the deployment via kubeconfig only supports pulling images from the NGINX private registry! A JWT is " +echo "required in order to access the NGINX Plus repository. The JWT should be placed in the extras directory at" +echo "the project root, in a file named jwt.token" +echo " " +echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " +echo "details and examples." +echo " " + +# Make sure we see it +sleep 5 + +# +# TODO: Integrate this into the mainline along with logic to work with/without #80 +# +# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not +# deploying Plus (and does not have a JWT) we create a placeholder credential that is used to create a secret. That +# secret is not a valid secret, but it is created to make the logic easier to read/code.
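For the block below to find a token, the JWT has to be staged on disk first; something like the following, run from the repository root, where the source filename is purely illustrative:

    # Stage the NGINX Plus JWT where the script expects to find it
    mkdir -p extras
    cp ~/nginx-repo.jwt extras/jwt.token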
+# +if [[ -s "${script_dir}/../extras/jwt.token" ]]; then + JWT=$(cat ${script_dir}/../extras/jwt.token) + echo "Loading JWT into nginx-ingress/regcred" + ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml +else + # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81 + echo "No JWT found; writing placeholder manifest" + ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml +fi + +# Check for stack info.... +# TODO: Move these to use kubeconfig for the Pulumi main config (which redirects up) instead of aws/vpc #80 +# + +# We automatically set the infra type to kubeconfig +# TODO: combined file should query and manage this #80 +pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config kubeconfig +# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the +# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own +# configuration because of the encryption needed for the passwords. +pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius kubeconfig + +# Inform the user of what we are doing + +echo " " +echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to" +echo "connect to Kubernetes properly. If you have multiple kubernetes contexts (or custom contexts)" +echo "you may need to remove them and replace them with a simple ~/.kube/config file. This will be " +echo "addressed in a future release." +echo " " + +# Sleep so that this is seen... +sleep 5 + +if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then + echo "Kubeconfig file found" +else + echo "Provide an absolute path to your kubeconfig file" + pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config +fi + +# Cluster name +if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then + echo "Cluster name found" +else + echo "Provide your cluster name" + pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config +fi + +# Connect to the cluster +if command -v kubectl >/dev/null; then + echo "Attempting to connect to kubernetes cluster" + retry 30 kubectl version >/dev/null +fi + +# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82 +# +# This version of the code forces you to add a hostname which is used to generate the cert when the application is +# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed +# cert and to access the application.
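The FQDN check below can be satisfied ahead of time so the interactive prompt never fires; for example, from the repository root (the hostname value is illustrative):

    # Pre-seed the FQDN used for the self-signed certificate
    pulumi config set kic-helm:fqdn mara.example.com -C pulumi/python/config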
+# +if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then + echo "Hostname found for deployment" +else + echo "Create an FQDN for your deployment" + pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config +fi + +# The Bank of Sirius configuration file is stored in the ./sirius/config +# directory. This is because we cannot pull secrets from different project +# directories. +# +# This work-around is expected to be obsoleted by the work described in +# https://github.com/pulumi/pulumi/issues/4604, specifically around issue +# https://github.com/pulumi/pulumi/issues/2307 +# +# Check for secrets being set +# +echo "Checking for required secrets" +if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" +else + echo "Please enter a password for Grafana" + pulumi config set prometheus:adminpass --secret -C ${script_dir}/../pulumi/python/kubernetes/secrets +fi + +if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" +else + echo "Please enter a password for the sirius accountsdb" + pulumi config set sirius:accounts_pwd --secret -C ${script_dir}/../pulumi/python/kubernetes/secrets +fi + +if pulumi config get sirius:demo_login_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" +else + echo "Please enter a password for the sirius demo login user" + pulumi config set sirius:demo_login_pwd --secret -C ${script_dir}/../pulumi/python/kubernetes/secrets +fi + +if pulumi config get sirius:demo_login_user -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" +else + echo "Please enter a username for the BoS" + pulumi config set sirius:demo_login_user --secret -C ${script_dir}/../pulumi/python/kubernetes/secrets +fi + +if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" +else + echo "Please enter a password for the sirius ledgerdb" + pulumi config set sirius:ledger_pwd --secret -C ${script_dir}/../pulumi/python/kubernetes/secrets +fi + +# +# TODO: Allow startup scripts to prompt and accept additional config values #97 +# The helm timeout for all of the projects defaults to 300 seconds (5 minutes). +# However, since this code path is most commonly going to be used to deploy locally we need to bump +# that value up. A fix down the road will add this as a prompt, but for now we are going to double this +# value for all helm deploys. +# + +pulumi config set kic-helm:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set logagent:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set logstore:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set certmgr:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set prometheus:helm_timeout 600 -C ${script_dir}/../pulumi/python/config + +# +# Set the headers to respect the NO_COLOR variable +# +if [ -z ${NO_COLOR+x} ]; then + pulumi_args="--emoji --stack ${PULUMI_STACK}" +else + pulumi_args="--color never --stack ${PULUMI_STACK}" +fi + +# +# Note that this is somewhat different from the other startup scripts, because at the point we run this +# we know that we have a server, so we can get the version. The other builds do not have server info +# at this point in time.
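Both the banner helper and the pulumi argument handling above key off whether NO_COLOR is set at all, not its value, so a colorless run is simply:

    # Any value, even an empty one, disables the color and emoji output
    NO_COLOR=1 ./bin/start_kube.sh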
+# +header "Version Info" +echo "Version and Account Information" +echo "=====================================================================" +echo "Pulumi version is: $(pulumi version)" +echo "Pulumi user is: $(pulumi whoami)" +echo "Python version is: $(python --version)" +echo "Kubectl version information: " +echo "$(kubectl version -o json)" +echo "Python module information: " +echo "$(pip list)" +echo "=====================================================================" +echo " " + +header "Kubeconfig" +cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig" +pulumi $pulumi_args up + +header "Secrets" +cd "${script_dir}/../pulumi/python/kubernetes/secrets" +pulumi $pulumi_args up + +# TODO: This is using a different project than the AWS deploy; we need to collapse those #80 +header "Deploying IC" +cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only" +pulumi $pulumi_args up + +header "Logstore" +cd "${script_dir}/../pulumi/python/kubernetes/logstore" +pulumi $pulumi_args up + +header "Logagent" +cd "${script_dir}/../pulumi/python/kubernetes/logagent" +pulumi $pulumi_args up + +header "Cert Manager" +cd "${script_dir}/../pulumi/python/kubernetes/certmgr" +pulumi $pulumi_args up + +header "Prometheus" +cd "${script_dir}/../pulumi/python/kubernetes/prometheus" +pulumi $pulumi_args up + +header "Observability" +cd "${script_dir}/../pulumi/python/kubernetes/observability" +pulumi $pulumi_args up + +header "Bank of Sirius" +cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" +pulumi $pulumi_args up + +header "Finished!!" +THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve") +THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve") + +echo " " +echo "The startup process has finished successfully" +echo " " +echo " " +echo "Next Steps:" +echo " " +echo "1. Map the IP address ($THE_IP) of your Ingress Controller with your FQDN ($THE_FQDN)." +echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools." +echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment." +echo " " +echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" +echo "following commands:" +echo " " +echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" +echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" +echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" +echo " " +echo "Please see the documentation in the github repository for more information" diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py index 88aa12e..6aa6919 100644 --- a/pulumi/python/kubernetes/applications/sirius/__main__.py +++ b/pulumi/python/kubernetes/applications/sirius/__main__.py @@ -30,6 +30,15 @@ def project_name_from_kubernetes_dir(dirname: str): project_path = os.path.join(script_dir, '..', '..', dirname) return pulumi_config.get_pulumi_project_name(project_path) +# +# This is just used for the kubernetes config deploy.... 
+# +# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178 +# +def pulumi_repo_ingress_project_name(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only') + return pulumi_config.get_pulumi_project_name(ingress_project_path) def pulumi_ingress_project_name(): script_dir = os.path.dirname(os.path.abspath(__file__)) @@ -97,14 +106,37 @@ def add_namespace(obj): k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) # -# We use the hostname to set the value for our FQDN, which drives the cert -# process as well. +# This logic is used to manage the kubeconfig deployments, since that uses a slightly +# different logic path from the mainline. This will be removed once the kubeconfig deploys +# are moved to the Pulumi Automation API. # -ingress_project_name = pulumi_ingress_project_name() -ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" -ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) -lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') -sirius_host = lb_ingress_hostname +# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178 +# + +config = pulumi.Config('kubernetes') +infra_type = config.require('infra_type') + +if infra_type == 'kubeconfig': + # Logic to extract the FQDN of the load balancer for Ingress + ingress_project_name = pulumi_repo_ingress_project_name() + ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" + ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) + lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') + # Set back to kubernetes + config = pulumi.Config('kubernetes') + lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip') + sirius_host = lb_ingress_hostname +else: + # + # We use the hostname to set the value for our FQDN, which drives the cert + # process as well. + # + ingress_project_name = pulumi_ingress_project_name() + ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" + ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) + lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') + sirius_host = lb_ingress_hostname + # Create the namespace for Bank of Sirius ns = k8s.core.v1.Namespace(resource_name='bos', @@ -364,10 +396,23 @@ def add_namespace(obj): )) # -# Get the hostname for our connect URL +# Get the hostname for our connect URL; this logic will be collapsed once the kubeconfig +# deployments are moved over to the automation api. Until then, we have to use a different +# process.
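Both branches above rely on the same two Pulumi idioms: a StackReference to read another stack's outputs, and .apply() to format an Output once it resolves. A minimal sketch of the pair (the stack path is illustrative; note that an Output cannot be interpolated directly into an f-string, which is presumably why the f-string export below stays commented out):

    import pulumi

    # Read a sibling stack's outputs by "<user>/<project>/<stack>" (path illustrative)
    ingress_ref = pulumi.StackReference('user/ingress-controller/my-stack')
    host_output = ingress_ref.get_output('lb_ingress_hostname')

    # Broken: would format the Output wrapper itself, not its eventual value
    # url = f'https://{host_output}'

    # Correct: defer the formatting until the value resolves
    url = host_output.apply(lambda host: f'https://{host}')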
# -application_url = sirius_host.apply(lambda host: f'https://{host}') -pulumi.export('application_url', application_url) +# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178 +# + +config = pulumi.Config('kubernetes') +infra_type = config.require('infra_type') +if infra_type == 'kubeconfig': + pulumi.export('hostname', lb_ingress_hostname) + pulumi.export('ipaddress', lb_ingress_ip) + #pulumi.export('application_url', f'https://{lb_ingress_hostname}') + application_url = sirius_host.apply(lambda host: f'https://{host}') +else: + application_url = sirius_host.apply(lambda host: f'https://{host}') + pulumi.export('application_url', application_url) # # Get the chart values for both monitoring charts, switch back to the Sirius diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml new file mode 100644 index 0000000..6895e0c --- /dev/null +++ b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml @@ -0,0 +1,7 @@ +name: ingress-controller +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Sets up NGINX Kubernetes Ingress Controller using Helm \ No newline at end of file diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py new file mode 100644 index 0000000..af2e3db --- /dev/null +++ b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py @@ -0,0 +1,189 @@ +import os + +import pulumi +from pulumi import Output +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Service +from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs +from pulumi_kubernetes.yaml import ConfigFile + +from kic_util import pulumi_config + +# +# We default to the OSS IC; if the user wants Plus they need to enable it in the config file +# along with the Plus flag, and the addition of a JWT. +# +config = pulumi.Config('kic-helm') +chart_name = config.get('chart_name') +if not chart_name: + chart_name = 'nginx-ingress' +chart_version = config.get('chart_version') +if not chart_version: + chart_version = '0.13.0' +helm_repo_name = config.get('helm_repo_name') +if not helm_repo_name: + helm_repo_name = 'nginx-stable' +helm_repo_url = config.get('helm_repo_url') +if not helm_repo_url: + helm_repo_url = 'https://helm.nginx.com/stable' +nginx_repository = config.get('nginx_repository') +if not nginx_repository: + nginx_repository = "nginx/nginx-ingress" +nginx_tag = config.get('nginx_tag') +if not nginx_tag: + nginx_tag = "2.2.0" +nginx_plus_flag = config.get_bool('nginx_plus_flag') +if not nginx_plus_flag: + nginx_plus_flag = False + +# +# Allow the user to set timeout per helm chart; otherwise +# we default to 5 minutes. 
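As an aside, the get-then-test pattern this file repeats for every setting can be collapsed into a single fallback expression; a sketch, with the caveat that `or` would also override an explicit falsy value such as 0:

    import pulumi

    config = pulumi.Config('kic-helm')
    # Equivalent to the get/if-not pairs used here, except that 0 would be treated as unset
    helm_timeout = config.get_int('helm_timeout') or 300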
+# +helm_timeout = config.get_int('helm_timeout') +if not helm_timeout: + helm_timeout = 300 + +# Get the FQDN +fqdn = config.get('fqdn') + + +def project_name_from_project_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +def k8_manifest_location(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + k8_manifest_path = os.path.join(script_dir, 'manifests', 'regcred.yaml') + return k8_manifest_path + + +k8_manifest = k8_manifest_location() + +registrycred = ConfigFile( + "regcred", + file=k8_manifest) + +chart_values = { + 'controller': { + 'nginxplus': nginx_plus_flag, + 'healthStatus': True, + 'appprotect': { + 'enable': False + }, + "image": { + "repository": nginx_repository, + "tag": nginx_tag, + "pullPolicy": "Always" + }, + "serviceAccount": { + "imagePullSecretName": "regcred" + }, + 'config': { + 'name': 'nginx-config', + 'entries': { + 'log-format': '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent ' + '\"$http_referer\" \"$http_user_agent\" $upstream_response_time $upstream_status ' + '\"$uri\" $request_length $request_time [$proxy_host] [] $upstream_addr ' + '$upstream_bytes_sent $upstream_response_time $upstream_status $request_id ' + } + }, + 'service': { + 'annotations': { + 'co.elastic.logs/module': 'nginx' + }, + "extraLabels": { + "app": "kic-nginx-ingress" + }, + "customPorts": [ + { + "name": "dashboard", + "targetPort": 8080, + "protocol": "TCP", + "port": 8080 + }, + { + "name": "prometheus", + "targetPort": 9113, + "protocol": "TCP", + "port": 9113 + } + ] + }, + 'pod': { + 'annotations': { + 'co.elastic.logs/module': 'nginx' + } + } + }, + 'prometheus': { + 'create': True, + 'port': 9113 + } +} + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() + +kube_project_name = project_name_from_project_dir('kubeconfig') +kube_stack_ref_id = f"{pulumi_user}/{kube_project_name}/{stack_name}" +kube_stack_ref = pulumi.StackReference(kube_stack_ref_id) +kubeconfig = kube_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) + +k8s_provider = k8s.Provider(resource_name=f'ingress-controller-repo-only', + kubeconfig=kubeconfig) + +# This is required for the service monitor from the Prometheus namespace +ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', + metadata={'name': 'nginx-ingress', + 'labels': { + 'prometheus': 'scrape'} + }, + opts=pulumi.ResourceOptions(provider=k8s_provider)) + +kic_release_args = ReleaseArgs( + chart=chart_name, + repository_opts=RepositoryOptsArgs( + repo=helm_repo_url + ), + version=chart_version, + namespace=ns.metadata.name, + + # Values from Chart's parameters specified hierarchically, + values=chart_values, + # User configurable timeout + timeout=helm_timeout, + # By default Release resource will wait till all created resources + # are available. Set this to true to skip waiting on resources being + # available. 
+ skip_await=False, + # If we fail, clean up + cleanup_on_fail=True, + # Provide a name for our release + name="kic", + # Lint the chart before installing + lint=True, + # Force update if required + force_update=True) + +kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns])) + +pstatus = kic_chart.status + +srv = Service.get("nginx-ingress", + Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress")) + +ingress_service = srv.status + +# +# Some LB's give us a hostname (which is cool) and some just an IP. We need to capture +# both, and then make a determination on what the user needs to do based on what they have +# been given. +# +pulumi.export('lb_ingress_hostname', fqdn) +pulumi.export('lb_ingress_ip', pulumi.Output.unsecret(ingress_service.load_balancer.ingress[0].ip)) +# Print out our status +pulumi.export("kic_status", pstatus) From c90d017e7952951be9b4d00b12c9bec158326f03 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Mon, 8 Aug 2022 17:26:06 -0400 Subject: [PATCH 55/62] fix: added gitkeep for IC manifests dir which is required for repo-only deploy (#180) * chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart versoin * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs * fix: removed JWT-only logic from BoS * fix: remove logic for sirius_host from deprecated jwt deploys * fix: remove deprecated ingress-repo-only project * fix: adjust min kubectl version deployed * fix: refactor digitalocean to docean for variables * fix: add repo-only IC deploy to support kubeconfig deploys * fix: modifications to handle kubeconfig deploys for now * fix: recommission bash scripts to support kubeconfig deploys for now * fix: gitkeep needed for manifests dir under repo nginx --- .../nginx/ingress-controller-repo-only/manifests/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep b/pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep new file mode 100644 index 0000000..e69de29 From bf6627107f8342e80951f8ddc26db982d8de153c Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Mon, 8 Aug 2022 18:15:13 -0400 Subject: [PATCH 56/62] fix: updated jenkinsfiles for automation api work (#181) * chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart versoin * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs * fix: removed JWT-only logic from BoS * fix: remove logic for sirius_host from deprecated jwt deploys * fix: remove deprecated ingress-repo-only project * fix: adjust min kubectl version deployed * fix: refactor digitalocean to docean for variables * fix: add repo-only IC deploy to support kubeconfig deploys * fix: modifications to handle kubeconfig deploys for now * fix: recommission bash scripts to support kubeconfig deploys for now * fix: gitkeep needed for manifests dir under 
repo nginx * chore: update jenkinsfiles for automation api --- extras/jenkins/AWS/Jenkinsfile | 38 ++++++++------ extras/jenkins/DigitalOcean/Jenkinsfile | 40 +++++++-------- extras/jenkins/K3S/Jenkinsfile | 67 +++++++++++-------------- extras/jenkins/Linode/Jenkinsfile | 41 ++++++++------- extras/jenkins/MicroK8s/Jenkinsfile | 27 +++++----- extras/jenkins/Minikube/Jenkinsfile | 52 +++++++++---------- 6 files changed, 132 insertions(+), 133 deletions(-) diff --git a/extras/jenkins/AWS/Jenkinsfile b/extras/jenkins/AWS/Jenkinsfile index 7df39ac..c573287 100644 --- a/extras/jenkins/AWS/Jenkinsfile +++ b/extras/jenkins/AWS/Jenkinsfile @@ -29,9 +29,10 @@ pipeline { AWS_ACCESS_KEY_ID = credentials('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = credentials('AWS_SECRET_ACCESS_KEY') AWS_SESSION_TOKEN = credentials('AWS_SESSION_TOKEN') - NGINX_JWT = credentials('NGINX_JWT') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') + } @@ -86,15 +87,11 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. - * * Other cleanup related functions can be placed here as well. */ sh ''' - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; + true ''' } } @@ -145,17 +142,28 @@ pipeline { echo "AWS_PROFILE=${AWS_PROFILE}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/config $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "AWS" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "aws" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "marajenkaws${BUILD_NUMBER}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} 
$WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:profile "${AWS_PROFILE}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:region "${AWS_DEFAULT_REGION}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:k8s_version "1.22" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:instance_type "t2.large" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:min_size "3" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:max_size "12" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:desired_capacity "3" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} ''' } } @@ -164,13 +172,11 @@ pipeline { steps { /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the credentials file if we have environment variables set. Finally, it moves the JWT into the correct location. + * */ sh ''' - echo "${NGINX_JWT}" > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_aws.sh + $WORKSPACE/pulumi/python/runner -p aws up ''' } } @@ -182,12 +188,16 @@ pipeline { * Clean up the environment; this includes running the destroy script to remove our pulumi resources and * destroy the deployed infrastructure in AWS * + * AWS will not remove a registry that contains images, so we do a force removal here; this should ultimately + * be fixed in the code. + * * After that completes, we remove the pulumi stack from the project with the find command; this is because * we need to delete the stack in each project it's been instantiated in. */ sh ''' - $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/venv/bin/aws ecr delete-repository --repository-name ingress-controller-marajenkaws${BUILD_NUMBER} --force + $WORKSPACE/pulumi/python/runner -p aws destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } @@ -205,7 +215,7 @@ pipeline { sh ''' # Destroy our partial build... 
- $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p aws destroy || true find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/DigitalOcean/Jenkinsfile b/extras/jenkins/DigitalOcean/Jenkinsfile index 4fa5a8f..29d9093 100644 --- a/extras/jenkins/DigitalOcean/Jenkinsfile +++ b/extras/jenkins/DigitalOcean/Jenkinsfile @@ -21,8 +21,8 @@ pipeline { environment { DIGITALOCEAN_TOKEN = credentials('DIGITALOCEAN_TOKEN') - NGINX_JWT = credentials('NGINX_JWT') PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') NO_COLOR = "TRUE" DEBIAN_FRONTEND = "noninteractive" } @@ -84,15 +84,11 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. - * * Other cleanup related functions can be placed here as well. */ sh ''' - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; + true ''' } @@ -128,18 +124,26 @@ pipeline { echo "PULUMI_STACK=marajenkdo${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment echo "DO_TOKEN=${DO_TOKEN}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "DO" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "601" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set digitalocean:token "${DO_TOKEN}" --plaintext -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set 
domk8s:k8s_version "latest" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:instance_size "s-4vcpu-8gb" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:k8s_version "latest" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:node_count "3" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:region "sfo3" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "mara${BUILD_NUMBER}.docean.mantawang.com" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:token "${DO_TOKEN}" --plaintext -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + ''' } @@ -148,14 +152,8 @@ pipeline { stage('Deploying Pulumi') { steps { - /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Digital Ocean. - */ - sh ''' - echo $NGINX_JWT > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_do.sh + $WORKSPACE/pulumi/python/runner -p do up ''' } } @@ -172,7 +170,7 @@ pipeline { */ sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/runner -p do destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; ''' } @@ -189,7 +187,7 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p do destroy || true find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/K3S/Jenkinsfile b/extras/jenkins/K3S/Jenkinsfile index e92cdc1..3bd0804 100644 --- a/extras/jenkins/K3S/Jenkinsfile +++ b/extras/jenkins/K3S/Jenkinsfile @@ -15,18 +15,16 @@ pipeline { } /* - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. - * - * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around - * for the time being and will be addressed in the future. - */ + * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the + * open source IC. 
+ */ environment { NGINX_JWT = credentials('NGINX_JWT') - POSTRUN_CMD = credentials('POSTRUN_CMD') PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') NO_COLOR = "TRUE" + MARA_PASSWORD = credentials('MARA_PASSWORD') + } stages { @@ -79,24 +77,11 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. - * - * This function also tries to remove both K3S and Microk8s if they are found on the host; this is because we - * will be installing K3S and we want to both make sure we are removing any previous installations as well as - * ensuring this Jenkins Agent does not already have a microk8s installation on it. + * Any pre-run cleanup can be put here... */ sh ''' - # Reset our K3S Environment - /usr/local/bin/k3s-killall.sh || true - /usr/local/bin/k3s-uninstall.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - sudo snap remove microk8s || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + true ''' } } @@ -157,20 +142,24 @@ pipeline { */ sh ''' - echo "PULUMI_STACK=marajenk${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius - $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} + echo "PULUMI_STACK=marajenkk3s${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkk3s${BUILD_NUMBER} -C pulumi/python/config + 
$WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkk3s${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets + $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkk3s${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "default" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} ''' } } @@ -202,10 +191,10 @@ pipeline { steps { sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/bin/destroy.sh /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find . 
-mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkk3s${BUILD_NUMBER} --force --yes \\; ''' } } @@ -227,7 +216,7 @@ pipeline { # Reset our K3S Environment /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkk3s${BUILD_NUMBER} --force --yes \\; ''' } } diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile index d237908..638c4c6 100644 --- a/extras/jenkins/Linode/Jenkinsfile +++ b/extras/jenkins/Linode/Jenkinsfile @@ -14,16 +14,14 @@ pipeline { /* * The Linode token is passed into the process via a credential in Jenkins. If this is not found the * process will fail out. - * - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. */ environment { LINODE_TOKEN = credentials('LINODE_TOKEN') - NGINX_JWT = credentials('NGINX_JWT') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') + } stages { @@ -114,23 +112,37 @@ pipeline { * of the manual deployment if required. * * This will likely evolve further as the project does, and we may reach a point where these defaults are assumed - * for a given development type. + * for a given development type. kubernetes:cluster_name */ sh ''' echo "PULUMI_STACK=marajenklke${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment echo "LINODE_TOKEN=${LINODE_TOKEN}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "DO" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "lke" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "marajenklke${BUILD_NUMBER}" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C 
pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_db_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_sudo_user_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:instance_type "g6-standard-8" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:k8s_ha "true" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:k8s_version "1.23" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:node_count "3" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:region "us-central" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:soa_email "qdzlug@gmail.com" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:token "${LINODE_TOKEN}" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} ''' @@ -140,14 +152,8 @@ pipeline { stage('Deploying Pulumi') { steps { - /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Linode. - */ - sh ''' - echo $NGINX_JWT > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_lke.sh + $WORKSPACE/pulumi/python/runner -p linode up ''' } } @@ -164,7 +170,7 @@ pipeline { */ sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/runner -p linode destroy find $WORKSPACE -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } @@ -181,7 +187,8 @@ pipeline { sh ''' # Destroy our partial build... 
- $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p linode destroy|| true + # Clean up the Pulumi stack find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/MicroK8s/Jenkinsfile b/extras/jenkins/MicroK8s/Jenkinsfile index 1b04a6c..4bce3ee 100644 --- a/extras/jenkins/MicroK8s/Jenkinsfile +++ b/extras/jenkins/MicroK8s/Jenkinsfile @@ -15,18 +15,15 @@ pipeline { } /* - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. - * - * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around - * for the time being and will be addressed in the future. - */ + * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the + * open source IC. + */ environment { NGINX_JWT = credentials('NGINX_JWT') - POSTRUN_CMD = credentials('POSTRUN_CMD') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') } stages { @@ -152,18 +149,22 @@ pipeline { sh ''' echo "PULUMI_STACK=marajenkmk8s${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmk8ss${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmk8s${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name 
"microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} ''' } } diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile index a5a783c..78a8cc3 100644 --- a/extras/jenkins/Minikube/Jenkinsfile +++ b/extras/jenkins/Minikube/Jenkinsfile @@ -1,9 +1,9 @@ pipeline { agent { /* - * Nodes that are configured for Microk8s are tagged as "mk8s". Unlike the deployment to cloud providers, this logic - * will install Microk8s on the Jenkins Agent. This means that the agent should have sufficient resources available - * to run Microk8s. A minimum of 16GB RAM, 2 vCPU, and 20GB of disk is recommended. Testing is done with 20GB of RAM, + * Nodes that are configured for minkube are tagged as "minikube". Unlike the deployment to cloud providers, this logic + * will install minikube on the Jenkins Agent. This means that the agent should have sufficient resources available + * to run minikube. A minimum of 16GB RAM, 2 vCPU, and 20GB of disk is recommended. Testing is done with 20GB of RAM, * 4 vCPU, and 64GB of disk. * * This has been * tested on Ubuntu 20.04. Be sure to check that your Agent has the necessary components installed @@ -15,18 +15,16 @@ pipeline { } /* - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. - * - * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around - * for the time being and will be addressed in the future. - */ + * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the + * open source IC. + */ environment { NGINX_JWT = credentials('NGINX_JWT') - POSTRUN_CMD = credentials('POSTRUN_CMD') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') + } stages { @@ -95,7 +93,7 @@ pipeline { stage('Minikube Setup') { /* - * This step installs Microk8s. This assumes you have the snap store installed and configured properly. Note that + * This step installs minikube. This assumes you have the snap store installed and configured properly. Note that * the snap store will always pull the latest version of the software so you may end up with a deployment that * does not work as expected; if this happens please check back with the github repository and verify the known * working configurations. 
@@ -159,18 +157,23 @@ _EOF_
 sh '''
 echo "PULUMI_STACK=marajenkmkube${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment
 $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/config
- $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius
+ $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
- $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkubes${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
- $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
- $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
- $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
- $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
 $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkube${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER}
+ $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER}
+
 '''
 }
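The run of `pulumi config set` commands above has a compact analogue in the Pulumi Automation API that this patch series is introducing: values written with `--secret` become encrypted secrets in the stack configuration. A minimal sketch, not the project's actual runner code, assuming `WORKSPACE`, `PULUMI_STACK`, and `MARA_PASSWORD` are exported as in the Jenkins environment:

```python
# Sketch of the same configuration applied through the Automation API.
# Paths and keys mirror the shell block above; nothing here is the
# project's real runner code.
import os

from pulumi import automation as auto

stack_name = os.environ['PULUMI_STACK']
secrets_project = os.path.join(os.environ['WORKSPACE'],
                               'pulumi', 'python', 'kubernetes', 'secrets')

stack = auto.create_or_select_stack(stack_name=stack_name,
                                    work_dir=secrets_project)

# secret=True is the equivalent of `pulumi config set ... --secret`; the
# value is encrypted with the stack's configured secrets provider.
stack.set_config('prometheus:adminpass',
                 auto.ConfigValue(value=os.environ['MARA_PASSWORD'], secret=True))
stack.set_config('sirius:demo_login_user',
                 auto.ConfigValue(value='testuser', secret=True))
```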
} @@ -179,7 +182,7 @@ _EOF_ /* * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Microk8s + * the MARA deployment in minikube */ steps { @@ -194,7 +197,7 @@ _EOF_ /* * Clean up the environment; this includes running the destroy script to remove our pulumi resources and - * destroy the deployed Microk8s installation. + * destroy the deployed minikube installation. * * After that completes, we remove the pulumi stack from the project with the find command; this is because * we need to delete the stack in each project it's been instantiated in. @@ -203,17 +206,8 @@ _EOF_ steps { sh ''' $WORKSPACE/bin/destroy.sh - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true + minikube delete || true find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmkube${BUILD_NUMBER} --force --yes \\; - # This is a hack to allow additional commands to be issued following cleanup. This is needed because the VMs - # that currently run as agents for K3S and Microk8s deployments need to be rebooted following some number of - # runs due to zombie processes and other issues. Long term we want to deploy these VM's via IaaC so the only - # exist for the lifetime of the project. We do it this way in order to provide some flexibility for the - # jenkins configuration. - ${POSTRUN_CMD- true} ''' } From c70959a70826bf5d5abc019a64511109cb908c63 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Fri, 12 Aug 2022 16:11:34 -0600 Subject: [PATCH 57/62] chore: doc updates for automation-api changes (#183) --- docs/getting_started.md | 120 ++++++++------ docs/status-and-issues.md | 26 +-- pulumi/python/README.md | 336 ++++++++++++++++++++------------------ 3 files changed, 262 insertions(+), 220 deletions(-) diff --git a/docs/getting_started.md b/docs/getting_started.md index 9ecc17c..5da0c20 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -93,7 +93,7 @@ deploy the NGINX IC or the NGINX Plus IC (with a JWT from your F5 account) #### Kubernetes Although not required, installing the [CLI tool `kubectl`](https://kubernetes.io/docs/tasks/tools/) -will allow you to interact with the Kubernetes cluster that you have stood up using this project. This +will allow you to interact with the Kubernetes cluster that you have stood up using this project. This tool is also installed as part of the venv that is created and can be used from that directory. #### Setup @@ -116,6 +116,15 @@ other environment variables into the current shell. ## Post Install Configuration +### Stack Name + +For AWS, Linode, or Digital Ocean deployments you will need to add the variable `PULUMI_STACK_NAME` to the environment +file for the deployment at [`../config/pulumi/environment`](../config/pulumi/environment). This is the name that will +be used for the provisioned Pulumi stack. + +If you are running a `kubeconfig` deployment, the process will prompt you for the value of `PULUMI_STACK_NAME` and +update the environment file for you. 
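The environment file referenced here is a plain `KEY=VALUE` file. As an illustration of the format only (the project ships its own parser), a reader might look like this:

```python
# Illustrative reader for config/pulumi/environment; treat this as a
# sketch of the file format, not the project's actual parsing code.
from pathlib import Path


def read_environment(path: str) -> dict:
    pairs = {}
    for raw in Path(path).read_text().splitlines():
        line = raw.strip()
        if line and not line.startswith('#') and '=' in line:
            key, _, value = line.partition('=')
            pairs[key.strip()] = value.strip()
    return pairs


env = read_environment('config/pulumi/environment')
if 'PULUMI_STACK_NAME' not in env:
    raise SystemExit('PULUMI_STACK_NAME is not set; add it to config/pulumi/environment')
```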
+
 ### Kubeconfig
 
 If you are using an existing kubernetes installation for this project, you will need to provide three pieces of
@@ -128,56 +137,67 @@ information to the installer:
 
 The easiest way to test this is to run the command:
 `kubectl --kubeconfig="yourconfig" --cluster="yourcluster" --context="yourcontext"`
 
-Once you have verified you can connect to the cluster you will need to test to make sure your cluster supports the
-minimum required capabilities for MARA. You can test this by running the [`./bin/testcap.sh`](../bin/testcap.sh)
-script.
-
-This script does several things:
-
-1. Creates a namespace
-2. Creates a persistent volume claim
-3. Deploys a pod to test the persistent volume
-4. Writes to the persistent volume
-5. Reads from the persistent volume
-6. Destroys the pod
-7. Destroys the persistent volume
-8. Deploys a service and attempts to provision a `LoadBalancer` to obtain an egress IP address
-9. Destroys the service
-10. Destroys the namespace
-
-If any of these tests fails the script exits with notes on the failure. These failures need to be remediated before MARA
-can be installed.
-
-There are several utilities under the `./pulumi/python/tools` directory that are intended for use to add the necessary
-capabilities to a Kubernetes cluster. Note that these are not extensively tested with MARA, but are included for
-convenience. Please see the [README.md](../pulumi/python/tools/README.md) in that directory for additional information.
-Note that these tools can be installed via the [kubernetes-extras.sh](../bin/kubernetes-extras.sh)
-script.
-
 ### AWS
 
+*Note:* The AWS deployment has been updated from v1.1 and no longer uses the [`../bin/start.sh`](../bin/start.sh)
+script for deployment. If you attempt to use the script to deploy to AWS you will receive an error message. Please
+use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for these deployments.
+
 If you are using AWS as your infrastructure provider
 [configuring Pulumi for AWS](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/)
 is necessary. If you already have run the [`./bin/setup_venv.sh`](../bin/setup_venv.sh)
-script, you will have the `aws` CLI tool installed in the path `./pulumi/python/venv/bin/aws`
+script, you will have the `aws` CLI tool installed in the path `../pulumi/python/venv/bin/aws`
 and you do not need to install it to run the steps in the Pulumi Guide.
 
 If you want to avoid using environment variables, AWS profile and region definitions can be contained in
-the `config/Pulumi.<stackname>.yaml`
-files in each project. Refer to the Pulumi documentation for details on how to do this. When you run the
-script [`./bin/start.sh`](../bin/start.sh) and select an AWS installation, you will be prompted to add the AWS region
-and profile values that will then be added to the `./config/Pulumi/Pulumi.<stackname>.yaml`. This is the main configuration
-file for the project, although there are two other configuration files kept for the application standup and the
-kubernetes extras functionality. For more details on those, please see the README.md in those directories.
+the `config/Pulumi.<stackname>.yaml` files in each project. Refer to the Pulumi documentation for details on how to do this.
+When you run the [`../pulumi/python/runner`](../pulumi/python/runner) program and select your provider you will be
+prompted for all variables necessary to use that provider along with MARA-specific variables. This information will
+be added to the `../config/Pulumi/Pulumi.<stackname>.yaml` configuration file. This is the main configuration file for
+the project, although there is one other configuration file used to maintain secrets in the
+[`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets) project. For more details, please see the
+README.md in that directory.
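Before starting a deployment it can be worth confirming that the configured profile actually resolves to usable credentials. A quick sketch with `boto3`, assuming it is available in the venv; the profile and region values are placeholders, not project defaults:

```python
# Sketch: confirm the AWS profile the deployment will use can authenticate.
# 'default' and 'us-east-1' are placeholder values.
import boto3

session = boto3.Session(profile_name='default', region_name='us-east-1')
identity = session.client('sts').get_caller_identity()
print(f"Deploying to account {identity['Account']} as {identity['Arn']}")
```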
 
 ### Digital Ocean
 
+*Note:* The Digital Ocean deployment has been updated from v1.1 and no longer uses the
+[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to use the script to deploy to Digital
+Ocean you will receive an error message. Please use the new [`../pulumi/python/runner`](../pulumi/python/runner)
+program for these deployments.
+
 You will need to create a
 [Digital Ocean Personal API Token](https://docs.digitalocean.com/reference/api/create-personal-access-token/)
-for authentication to Digital Ocean. When you run the script [`./bin/start.sh`](../bin/start.sh) and select a Digital
-Ocean deployment, your token will be added to the `./config/Pulumi/Pulumi.<stackname>.yaml`. This is the main configuration
-file for the project, although there are two other configuration files kept for the application standup and the
-kubernetes extras functionality. For more details on those, please see the README.md in those directories.
+for authentication to Digital Ocean. When you run the [`../pulumi/python/runner`](../pulumi/python/runner) program and
+select your provider you will be prompted for all variables necessary to use that provider along with MARA-specific
+variables. This information will be added to the `../config/Pulumi/Pulumi.<stackname>.yaml` configuration file. This is
+the main configuration file for the project, although there is one other configuration file used to maintain secrets in
+the [`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets) project. For more details, please see
+the README.md in that directory.
+
+### Linode
+
+*Note:* The Linode deployment has been updated from v1.1 and no longer uses the [`../bin/start.sh`](../bin/start.sh)
+script for deployment. If you attempt to use the script to deploy to Linode you will receive an error message. Please
+use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for these deployments.
+
+You will need to create a
+[Linode API Token](https://www.linode.com/docs/products/tools/linode-api/guides/get-access-token/) for authentication
+to Linode. When you run the [`../pulumi/python/runner`](../pulumi/python/runner) program and select your provider you
+will be prompted for all variables necessary to use that provider along with MARA-specific variables. This information
+will be added to the `../config/Pulumi/Pulumi.<stackname>.yaml` configuration file. This is the main configuration file
+for the project, although there is one other configuration file used to maintain secrets in the
+[`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets) project. For more details, please see the
+README.md in that directory.
+
+### Kubeconfig Deployments: MicroK8s / Minikube / K3s / Other
+
+Deployments that use a `kubeconfig` file to access an existing Kubernetes installation will continue to use the
+[`../bin/start.sh`](../bin/start.sh) script. Additionally, these deployments are not able to build the Ingress
+Controller and instead need to download it from the NGINX repositories. The installation of NGINX Plus is supported
+via the use of a JWT, if desired.
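In the same spirit as the `kubectl` connectivity test described earlier, a kubeconfig-based deployment can verify that the cluster and context are reachable before Pulumi runs. A sketch using the `kubernetes` Python client; the kubeconfig path and context name are placeholders for your own setup:

```python
# Sketch: verify the kubeconfig/context pair is usable before deploying.
# config_file and context are placeholder values.
import os

from kubernetes import client, config

config.load_kube_config(config_file=os.path.expanduser('~/.kube/config'),
                        context='my-context')
version = client.VersionApi().get_code()
print(f'Cluster reachable, server version {version.git_version}')
```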
+ +These deployments will be moved over to use the [`../pulumi/python/runner`](../pulumi/python/runner) program in a +future release, which will bring them to parity for NGINX IC build/deployment with the other infrastructures. ### Pulumi @@ -190,18 +210,17 @@ Pulumi documentation for additional details regarding the command and alternativ ## Running the Project -The easiest way to run the project is to run [`start.sh`](../bin/start.sh) -after you have completed the installation steps. When doing so, be sure to choose the same -[Pulumi stack name](https://www.pulumi.com/docs/intro/concepts/stack/) -for all of your projects. Additionally, this script will prompt you for infrastructure specific configuration values. -This information will be used to populate the `./config/pulumi/Pulumi..yaml` file. +Provided you have completed the installation steps, the easiest way to run the project is to run +[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or Digital Ocean and +[`../bin/start.sh`](../bin/start.sh) for `kubeconfig` deployments. This process will prompt you for all required +variables for your deployment type. This information will be used to populate the configuration files. Alternatively, you can enter into each Pulumi project directory and execute each project independently by doing `pulumi up`. Take a look at `start.sh` and dependent scripts to get a feel for the flow. -If you want to destroy the entire environment you can run [`destroy.sh`](../bin/destroy.sh). This script calls the -correct destroy script based on the information stored in the `./config/Pulumi/Pulumi..yaml` configuration file. -Detailed information and warnings are emitted by the script as it runs. +If you want to destroy the entire environment you can run [`../pulumi/python/runner`](../pulumi/python/runner) for AWS, +Linode, or Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments. +Detailed information and warnings are emitted by the process as it runs. ### Running the Project in a Docker Container @@ -257,6 +276,13 @@ these tools. ### Cleaning Up If you want to completely remove all the resources you have provisioned, run the -script: [`./bin/destroy.sh`](../bin/destroy.sh). +[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or Digital Ocean or +[`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments. Detailed information and warnings are emitted by the +process as it runs. Be careful because this will **DELETE ALL** the resources you have provisioned. + +## Other Resources +Starting with release `v1.1`, the MARA project has begun the process of transitioning the deployment logic away from +BASH scripts and instead using the [Pulumi Automation API](https://www.pulumi.com/docs/guides/automation-api/) with +Python. For more information on this, please see this [Design Document](../pulumi/python/automation/DESIGN.md). \ No newline at end of file diff --git a/docs/status-and-issues.md b/docs/status-and-issues.md index 6f915e7..49ac95a 100644 --- a/docs/status-and-issues.md +++ b/docs/status-and-issues.md @@ -20,24 +20,24 @@ includes the following: All of these configurations use Pulumi code within Python as the Infrastructure as Code (IaaC) provider. 
-| K8 Provider | Tested / Deploy Status | Infrastructure Support | IC Options | FQDN/IP | Notes |
-|-----------------|--------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------|-----------------|--------------------------------------------------|
-| AWS EKS | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses ECR) | Provided | |
-| Azure AKS | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| Digtal Ocean | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy) | Full Infrastructure Standup | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| Google GKE | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| Harvester/RKE2 | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer |
-| K3S | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| Linode | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Deploy) | Full Infrastructure Standup | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| MicroK8s | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) |
-| Minikube | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
-| Rancher Desktop | No | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer |
+| K8 Provider | Tested / Deploy Status | Infrastructure Support | IC Options | FQDN/IP | Notes |
+|-----------------|--------------------------------------------------------------------------------------------------------|-----------------------------|-----------------------------------|-----------------|--------------------------------------------------|
+| AWS EKS | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses ECR) | Provided | |
+| Azure AKS | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
+| Digital Ocean | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses DO Registry) | Provided | Requires DNS delegation to DO |
+| Google GKE | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
+| Harvester/RKE2 | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer |
+| K3S | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | |
+| Linode | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses Harbor install) | Provided | |
+| MicroK8s | ![Deploy 
Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) | +| Minikube | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | +| Rancher Desktop | No | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer | ### Notes: 1. The NGINX IC build/deploy process is currently under active development and support for IC will be standardized across all providers. Follow [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and - [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details. Currently, for all non-AWS + [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details. Currently, for all `kubeconfig` environments you have the option to specify either NGINX or NGINX Plus as your IC. The latter does require an active subscription and a JWT to be included at build time. Please see the documentation for more details. 2. The process via which the IP and FQDN are created and used is currently under active development, and will be diff --git a/pulumi/python/README.md b/pulumi/python/README.md index 25dcc54..08a3066 100644 --- a/pulumi/python/README.md +++ b/pulumi/python/README.md @@ -1,13 +1,12 @@ # MARA: Pulumi / Python -This project illustrates the end-to-end stand up of an AWS VPC cluster, Elastic -Kubernetes Service (EKS), NGINX Kubernetes Ingress Controller (KIC), and a sample -application using [Pulumi](https://www.pulumi.com/). It is intended to be used -as a reference when building your own Infrastructure as Code (IaC) deployments. -As such, each discrete stage of deployment is defined as a separate Pulumi project -that can be deployed independently of each stage. Although Pulumi supports many -programming languages, Python was chosen as the language for this project. -The reimplementation of the deployment here should be easily reproducible +This project illustrates the end-to-end stand up of the MARA project using +[Pulumi](https://www.pulumi.com/). It is intended to be used +as a reference when building your own Infrastructure as Code (IaC) deployments. +As such, each discrete stage of deployment is defined as a separate Pulumi project +that can be deployed independently of each stage. Although Pulumi supports many +programming languages, Python was chosen as the language for this project. +The reimplementation of the deployment here should be easily reproducible in other languages. ## Getting Started @@ -19,8 +18,8 @@ For instructions on running the project, refer to the ### Top Level -Several directories, located at the root of the project, are used. These are -at the project root because they are intended to be outside the specific +Several directories, located at the root of the project, are used. These are +at the project root because they are intended to be outside the specific IaC providers (e.g., to be used for a port to Terraform). ``` @@ -32,86 +31,80 @@ IaC providers (e.g., to be used for a port to Terraform). └── extras ``` -- The [`bin`](../../bin) directory contains all the binaries and scripts that - are used to start/stop the project, as well as perform capabilities testing - and deployment of extra functionality. 
-- The [`config`](../../config) directory holds the `requirements.txt` for the - venv needed for this project. -- The [`config/pulumi`](../../config/pulumi) directory holds the configuration - files for deployments, as well as a reference configuration that illustrates +- The [`bin`](../../bin) directory contains all the binaries and scripts that + are used to start/stop the project and provide additional capabilities. +- The [`config/pulumi`](../../config/pulumi) directory holds the configuration + files for deployments, as well as a reference configuration that illustrates the available configuration options and their defaults. -- The [`docker`](../../docker) directory contains Dockerfiles and a script to - build a Docker-based deployment image that contains all the tooling necessary +- The [`docker`](../../docker) directory contains Dockerfiles and a script to + build a Docker-based deployment image that contains all the tooling necessary to deploy MARA. -- The [`docs`](../../docs) directory contains all documentation relevant to the +- The [`docs`](../../docs) directory contains all documentation relevant to the overall project. -- The [`extras`](../../extras) directory contains additional scripts, notes, +- The [`extras`](../../extras) directory contains additional scripts, notes, and configurations. ### Pulumi/Python Level -This directory contains all Pulumi/Python-based logic, which currently +This directory contains all Pulumi/Python-based logic, which currently consists of the following: ``` +├── automation +│   └── providers ├── config ├── infrastructure -│ ├── aws -│ ├── digitalocean -│ └── kubeconfig +│   ├── aws +│   ├── digitalocean +│   ├── kubeconfig +│   └── linode ├── kubernetes -│ ├── applications -│ ├── certmgr -│ ├── logagent -│ ├── logstore -│ ├── nginx -│ ├── observability -│ ├── prometheus -│ └── venv +│   ├── applications +│   ├── certmgr +│   ├── logagent +│   ├── logstore +│   ├── nginx +│   ├── observability +│   ├── prometheus +│   └── secrets ├── tools -│ ├── common -│ ├── kubevip -│ ├── metallb -│ └── nfsvolumes -├── utility -│ ├── kic-image-build -│ ├── kic-image-push -│ └── kic-pulumi-utils -└── venv - ├── bin - ├── include - ├── lib - ├── lib64 -> lib - ├── share - └── src +│   ├── common +│   ├── metallb +│   └── nfsvolumes +└── utility + ├── kic-image-build + ├── kic-image-push + └── kic-pulumi-utils ``` -- The [`config`](./config) directory contains files used by Pulumi to manage - the configuration for this project. Note that this directory is essentially +- The [`automation`](./automation) directory contains the files used to interface with the pulumi + automation api, including provider-specific files. +- The [`config`](./config) directory contains files used by Pulumi to manage + the configuration for this project. Note that this directory is essentially a redirect to the project-wide [`config`](../../config/pulumi) directory. -- The [`infrastructure`](./infrastructure) directory contains files used to stand - up Kubernetes as well as to provide a common project for all of the infrastructure +- The [`infrastructure`](./infrastructure) directory contains files used to stand + up Kubernetes as well as to provide a common project for all of the infrastructure and kubeconfig-based clusters. -- The [`kubernetes`](./kubernetes) directory contains all of the Kubernetes-based +- The [`kubernetes`](./kubernetes) directory contains all of the Kubernetes-based deployments. 
There are two key subdirectories in this directory:
   - The [`nginx`](./kubernetes/nginx) directory contains all NGINX products.
-  - The [`applications`](./kubernetes/applications) directory contains all applications
+  - The [`secrets`](./kubernetes/secrets) directory contains all encrypted secrets.
+  - The [`applications`](./kubernetes/applications) directory contains all applications
     that have been tested for deployment with MARA.
-- The [`tools`](./tools) directory contains projects that are used with the
-  `kubernetes-extras.sh` script found in the bin directory.
-- The [`utility`](./utility) directory contains the code used to build/pull/push KIC,
+- The [`tools`](./tools) directory contains extra tooling for specific use cases.
+- The [`utility`](./utility) directory contains the code used to build/pull/push KIC,
   and other projects used to support the environment.
-- The [`venv/bin`](./venv/bin) directory contains the virtual environment for Python
+- The [`venv/bin`](./venv/bin) directory contains the virtual environment for Python
   along with some key utilities, such as `pulumi`, `kubectl`, and `node`.

 ## Configuration

-The Pulumi configuration files are in the [`config`](../../config/pulumi)
-directory. Pulumi's configuration files use the following naming convention:
-`Pulumi.<stackname>.yaml`. To create a new configuration file for your Pulumi
-stack, create a new file with a name that includes the stack name. Then, refer
-to the sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
-for configuration entries that you want to customize and copy over the entries
+The Pulumi configuration files are in the [`config`](../../config/pulumi)
+directory. Pulumi's configuration files use the following naming convention:
+`Pulumi.<stackname>.yaml`. To create a new configuration file for your Pulumi
+stack, create a new file with a name that includes the stack name. Then, refer
+to the sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
+for configuration entries that you want to customize and copy over the entries
 that you want to modify from their defaults.

 ### AWS

@@ -120,23 +113,23 @@ The following directories are specific to AWS.

 #### VPC

-Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first
-Pulumi project which is responsible for setting up the VPC and subnets used by EKS.
-The project is built so that it will attempt to create a subnet for each availability
-zone within the running region. You may want to customize this behavior, or the IP
+Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first
+Pulumi project, which is responsible for setting up the VPC and subnets used by EKS.
+The project is built so that it will attempt to create a subnet for each availability
+zone within the running region. You may want to customize this behavior, or the IP
 addressing scheme used.

 #### Elastic Kubernetes Service (EKS)

-Located within the [`eks`](./infrastructure/aws/eks) directory is a project used
-to stand up a new EKS cluster on AWS. This project reads data from the previously
-executed VPC project using its VPC id and subnets. In this project you may want to
-customize the `instance_type`, `min_size`, or `max_size` parameters provided
+Located within the [`eks`](./infrastructure/aws/eks) directory is a project used
+to stand up a new EKS cluster on AWS. This project reads data from the previously
+executed VPC project using its VPC ID and subnets. In this project you may want to
+customize the `instance_type`, `min_size`, or `max_size` parameters provided
 to the cluster.
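
A hypothetical example of overriding those values with the `pulumi config` CLI is sketched below; the `eks:` key
names are assumptions, so confirm them against the reference file `config/pulumi/Pulumi.stackname.yaml.example`
before relying on them.

```
# Hypothetical overrides for the EKS project; verify the key names against
# config/pulumi/Pulumi.stackname.yaml.example for your release.
pulumi config set eks:instance_type t3.large -C pulumi/python/config --stack "${PULUMI_STACK}"
pulumi config set eks:min_size 3 -C pulumi/python/config --stack "${PULUMI_STACK}"
pulumi config set eks:max_size 12 -C pulumi/python/config --stack "${PULUMI_STACK}"
```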

 #### Elastic Container Registry (ECR)

-The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and
+The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and
 configuring ECR for use with the previously created EKS cluster.

 ### Digital Ocean

@@ -145,71 +138,96 @@ The following directories are specific to Digital Ocean.

 #### DOMK8S

-Contained within the [`domk8s`](./infrastructure/digitalocean/domk8s) directory contains the
-logic needed to stand up a Digital Ocean Managed Kubernetes cluster. There are a number of
-configuration options available to customize the build, however the defaults can be used
+Contained within the [`domk8s`](./infrastructure/digitalocean/domk8s) directory is the
+logic needed to stand up a Digital Ocean Managed Kubernetes cluster. There are a number of
+configuration options available to customize the build; however, the defaults can be used
 to create a standard sized cluster in the SFO3 region.

+#### container-registry / container-registry-credentials
+
+These directories contain the projects required to create and use the Digital Ocean container
+registry.
+
+#### dns-record
+
+This directory contains the project required to provision a DNS record for the Digital Ocean
+egress.
+
+### Linode
+
+The following directories are specific to Linode.
+
+#### LKE
+
+Contained within the [`lke`](./infrastructure/linode/lke) directory is the
+logic needed to stand up a Linode Kubernetes Engine cluster. There are a number of
+configuration options available to customize the build.
+
+#### harbor / harbor-configuration / container-registry-credentials
+
+These directories contain the projects required to create and use the Harbor container registry
+with the Linode deployment.
+
 ### NGINX Ingress Controller Docker Image Build

-Within the [`kic-image-build`](./utility/kic-image-build) directory, there is
-a Pulumi project that will allow you to build a new KIC from source. Download
-of source, compilation, and image creation are fully automated. This project
+Within the [`kic-image-build`](./utility/kic-image-build) directory, there is
+a Pulumi project that will allow you to build a new KIC from source. Download
+of source, compilation, and image creation are fully automated. This project
 can be customized to build different flavors of KIC.

 ### NGINX Ingress Controller Docker Image Push

-Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a
-Pulumi project that will allow you to push the previously created KIC Docker
+Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a
+Pulumi project that will allow you to push the previously created KIC Docker
 image to ECR in a fully automated manner.

 ### NGINX Ingress Controller Helm Chart

-In the [`ingress-contoller`](./kubernetes/nginx/ingress-controller) directory, you
-will find the Pulumi project responsible for installing NGINX KIC. You may want to
-customize this project to allow for deploying different versions of KIC. This chart
+In the [`ingress-controller`](./kubernetes/nginx/ingress-controller) directory, you
+will find the Pulumi project responsible for installing NGINX KIC. You may want to
+customize this project to allow for deploying different versions of KIC. This chart
 is only used for AWS deployments.
All other deployments use the [`ingress-controller-
-repo-only`](./kubernetes/nginx/ingress-controller-repo-only) directory, which at this
-time **only allows the use of deployments from the NGINX repo - either NGINX IC or
+repo-only`](./kubernetes/nginx/ingress-controller-repo-only) directory, which at this
+time **only allows the use of deployments from the NGINX repo - either NGINX IC or
 NGINX Plus IC (with a JWT)**.

-A sample config-map is provided in the Pulumi deployment code. This code will adjust
-the logging format to approximate the upstream NGINX KIC project which will allow for
+A sample config-map is provided in the Pulumi deployment code. This code will adjust
+the logging format to approximate the upstream NGINX KIC project, which allows for
 easier ingestion into log storage and processing systems.

-**Note**: This deployment uses the GA Ingress APIs. This has been tested with helm
-chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and helm charts
-can be used, but care should be taken to ensure that the helm chart version used is
-compatible with the KIC version. This information can be found in the [NGINX KIC Release
+**Note**: This deployment uses the GA Ingress APIs. This has been tested with helm
+chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and helm charts
+can be used, but care should be taken to ensure that the helm chart version used is
+compatible with the KIC version. This information can be found in the [NGINX KIC Release
 Notes](https://docs.nginx.com/nginx-ingress-controller/releases/) for each release.

 #### Ingress API Versions and NGINX KIC

-Starting with Kubernetes version 1.22, support for the Ingress Beta API
-`networking.k8s.io/v1beta` will be dropped, requiring use of the GA Ingress API
-`networking.k8s.io/v1`. However, Kubernetes versions 1.19 through 1.21 allows
-these two API versions to coexist and maintains compatibility for consumers of
-the API – meaning, the API will respond correctly to calls to either the `v1beta`
+Starting with Kubernetes version 1.22, support for the Ingress Beta API
+`networking.k8s.io/v1beta1` will be dropped, requiring use of the GA Ingress API
+`networking.k8s.io/v1`. However, Kubernetes versions 1.19 through 1.21 allow
+these two API versions to coexist and maintain compatibility for consumers of
+the API – meaning, the API will respond correctly to calls to either the `v1beta1`
 and/or `v1` routes.

-This project uses the NGINX KIC v2.x releases which includes full support
+This project uses the NGINX KIC v2.x releases, which include full support
 for the GA APIs.

 ### Log Store

-In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi
+In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi
 project responsible for installing your log store. The current solution deploys
 [Elasticsearch and Kibana](https://www.elastic.co/elastic-stack) using the
 [Bitnami Elasticsearch](https://bitnami.com/stack/elasticsearch/helm)
-chart. This solution can be swapped for other options as desired. This application
-is deployed to the `logstore` namespace. 
There are several configuration options +available in the configuration file for the project in order to better tailor this deployment to the size of the cluster being used. #### Notes -To access the Kibana dashboard via your web browser, you will need to set up port +To access the Kibana dashboard via your web browser, you will need to set up port forwarding for the kibana pod. This can be accomplished using the `kubectl` command: ``` @@ -234,22 +252,22 @@ Handling connection for 5601 ### Log Agent -In the [`logagent`](./logagent) directory, you will find the Pulumi project -responsible for installing your log agent. The current solution deploys -[`Filebeat`](https://www.elastic.co/beats/), which connects to the logstore -deployed in the previous step. This solution can be swapped for other options +In the [`logagent`](./logagent) directory, you will find the Pulumi project +responsible for installing your log agent. The current solution deploys +[`Filebeat`](https://www.elastic.co/beats/), which connects to the logstore +deployed in the previous step. This solution can be swapped for other options as desired. This application is deployed to the `logagent` namespace. ### Certificate Management -TLS is enabled via [cert-manager](https://cert-manager.io/), which is installed -in the cert-manager namespace. Creation of ClusterIssuer or Issuer resources is +TLS is enabled via [cert-manager](https://cert-manager.io/), which is installed +in the cert-manager namespace. Creation of ClusterIssuer or Issuer resources is delegated to the individual applications and is not done as part of this deployment. ### Prometheus -Prometheus is deployed and configured to enable the collection of metrics for -all components that have a defined service monitor. At installation time, the +Prometheus is deployed and configured to enable the collection of metrics for +all components that have a defined service monitor. At installation time, the deployment will instantiate: - Node Exporters @@ -258,90 +276,88 @@ deployment will instantiate: - The NGINX Ingress Controller - Statsd receiver -The former behavior of using the `prometheus.io:scrape: true` property set in -annotations indicating pods (where metrics should be scraped) has been deprecated, +The former behavior of using the `prometheus.io:scrape: true` property set in +annotations indicating pods (where metrics should be scraped) has been deprecated, and these annotations will be removed in the near future. -Also, the standalone Grafana deployment has been removed from the standard deployment +Also, the standalone Grafana deployment has been removed from the standard deployment scripts, as it is installed as part of this project. -Finally, this namespace will hold service monitors created by other projects. For -example, the Bank of Sirius deployment currently deploys a service monitor for each +Finally, this namespace will hold service monitors created by other projects. For +example, the Bank of Sirius deployment currently deploys a service monitor for each of the postgres monitors that are deployed. **Notes**: -1. The KIC needs to be configured to expose Prometheus metrics. This is currently +1. The KIC needs to be configured to expose Prometheus metrics. This is currently done by default. -2. The default address binding of the `kube-proxy` component is set to `127.0.0.1` - and therefore will cause errors when the canned Prometheus scrape configurations - are run. The fix is to set this address to `0.0.0.0`. 
An example manifest has been
-   provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied
-   against your installation with `kubectl apply -f ./filename`. Please only apply this
+2. The default address binding of the `kube-proxy` component is set to `127.0.0.1`
+   and therefore will cause errors when the canned Prometheus scrape configurations
+   are run. The fix is to set this address to `0.0.0.0`. An example manifest has been
+   provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied
+   against your installation with `kubectl apply -f ./filename`. Please only apply this
    change once you have verified that it will work with your version of Kubernetes.
-3. The _grafana_ namespace has been maintained in the configuration file to be used by
-   the Prometheus operator-deployed version of Grafana. This version only accepts a
-   password – you can still specify a username for the admin account but it will
+3. The _grafana_ namespace has been maintained in the configuration file to be used by
+   the Prometheus operator-deployed version of Grafana. This version only accepts a
+   password – you can still specify a username for the admin account but it will
    be silently ignored. This will be changed in the future.

 ### Observability

-We deploy the [OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector)
-along with a simple collector. There are several other configurations in the
-[observability/otel-objects](./kubernetes/observability/otel-objects) directory.
-See the [README.md](./kubernetes/observability/otel-objects/README.md) file
-in the [observability/otel-objects](./kubernetes/observability/otel-objects) for more information,
+We deploy the [OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector)
+along with a simple collector. There are several other configurations in the
+[observability/otel-objects](./kubernetes/observability/otel-objects) directory.
+See the [README.md](./kubernetes/observability/otel-objects/README.md) file
+in the [observability/otel-objects](./kubernetes/observability/otel-objects) for more information,
 including an explanation of the default configuration.

 ### Demo Application

 A forked version of the Google
 [_Bank of Anthos_](https://github.com/GoogleCloudPlatform/bank-of-anthos)
-application is contained in the [`sirius`](./kubernetes/applications/sirius) directory.
-The github repository for this for is at
+application is contained in the [`sirius`](./kubernetes/applications/sirius) directory.
+The GitHub repository for this fork is at
 [_Bank of Sirius_](https://github.com/nginxinc/bank-of-sirius).

-Normally, the `frontend` microservice is exposed via a load balancer
-for traffic management. This deployment has been modified to use the NGINX
-or NGINX Plus KIC to manage traffic to the `frontend` microservice. The NGINX
-or NGINX Plus KIC is integrated into the cluster logging system, and the user
+Normally, the `frontend` microservice is exposed via a load balancer
+for traffic management. This deployment has been modified to use the NGINX
+or NGINX Plus KIC to manage traffic to the `frontend` microservice. The NGINX
+or NGINX Plus KIC is integrated into the cluster logging system, and the user
 can configure the KIC as desired.

-An additional change to the application is the conversion of several of the
-standard Kubernetes deployment manifests into Pulumi code. 
This has been done
 for the configuration maps, the ingress controller, and the JWT RSA signing key
-pair. This allows the user to take advantage Pulumi's feature set, by demonstrating
-the process of creating and deploying an RSA key pair at deployment time and using
+pair. This allows the user to take advantage of Pulumi's feature set by demonstrating
+the process of creating and deploying an RSA key pair at deployment time and using
 the project configuration file to set config variables, including secrets.

 As part of the Bank of Sirius deployment, we deploy a cluster-wide
-[self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer
-using the cert-manager deployed above. This is then used by the ingress object
-created to enable TLS access to the application. Note that this issuer can be
-changed out by the user, for example to use the
-[ACME](https://cert-manager.io/docs/configuration/acme/) issuer.
-The use of the ACME issuer has been tested and works without issues, provided
-the FQDN meets the length requirements. As of this writing, the AWS ELB hostname
-is too long to work with the ACME server. Additional work in this area will be
-undertaken to provide dynamic DNS record creation as part of this process so
+[self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer
+using the cert-manager deployed above. This is then used by the ingress object
+created to enable TLS access to the application. Note that this issuer can be
+changed out by the user, for example to use the
+[ACME](https://cert-manager.io/docs/configuration/acme/) issuer.
+The use of the ACME issuer has been tested and works without issues, provided
+the FQDN meets the length requirements. As of this writing, the AWS ELB hostname
+is too long to work with the ACME server. Additional work in this area will be
+undertaken to provide dynamic DNS record creation as part of this process so
 legitimate certificates can be issued.

-To provide visibility into the Postgres databases that are running as part
-of the application, the Prometheus Postgres data exporter will be deployed
+To provide visibility into the Postgres databases that are running as part
+of the application, the Prometheus Postgres data exporter will be deployed
 into the same namespace as the application and will be configured to be scraped
 by the Prometheus server installed earlier.

-**Note**: Due to the way that Pulumi currently handles secrets,
-the [sirius](./kubernetes/applications/sirius) directory contains its own
-configuration directory [sirius/config](./kubernetes/applications/sirius/config).
-This directory contains an example configuration file that can be copied over
-and used. The user will be prompted to add passwords to the configuration file
-at the first run of the [start.sh](../../bin/start_all.sh) script. This is a
-workaround that will be retired as Pulumi provides better tools
-for hierarchical configuration files.
+**Note**: Due to the way that Pulumi currently handles secrets,
+the [secrets](./kubernetes/secrets) directory contains its own
+configuration directory [secrets/config](./kubernetes/secrets/config).
+This directory contains an example configuration file that can be copied over
+and used. The user will be prompted to add passwords to the configuration file
+at the first run of the startup process.
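
A minimal sketch of that workflow follows; the example file name and the `sirius:accounts_pwd` key are hypothetical
placeholders, so check the README in the secrets directory for the names your release actually uses.

```
# Hypothetical file and key names; consult the secrets README for the
# actual values used by your release.
cp pulumi/python/kubernetes/secrets/config/Pulumi.stackname.yaml.example \
   "pulumi/python/kubernetes/secrets/config/Pulumi.${PULUMI_STACK}.yaml"
# Values stored with --secret are encrypted in the stack configuration file.
pulumi config set --secret sirius:accounts_pwd 'choose-a-password' \
   -C pulumi/python/kubernetes/secrets --stack "${PULUMI_STACK}"
```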
## Simple Load Testing

To help enable simple load testing, a script has been provided that uses the
-`kubectl` command to port-forward monitoring and management connections
+`kubectl` command to port-forward monitoring and management connections
to the local workstation. This command is
[`test-forward.sh`](../../bin/test-forward.sh).

From 097589a4ff636f7267155c969db820e4e68ca80e Mon Sep 17 00:00:00 2001
From: Jason Schmidt
Date: Mon, 15 Aug 2022 13:19:25 -0600
Subject: [PATCH 58/62] fix: adding updates to jenkinsfiles (#185)

* chore: update cert-manager chart and crds
* chore: update logagent (filebeat) chart version
* chore: update nginx IC to latest chart version
* chore: update prometheus chart to latest version
* chore: update logstore (Elasticsearch) to latest chart version
* chore: update observability to new yaml and new chart
* chore: update example config with new values
* fix: remediation of deployment bugs
* fix: removed JWT-only logic from BoS
* fix: remove logic for sirius_host from deprecated jwt deploys
* fix: remove deprecated ingress-repo-only project
* fix: adjust min kubectl version deployed
* fix: refactor digitalocean to docean for variables
* fix: add repo-only IC deploy to support kubeconfig deploys
* fix: modifications to handle kubeconfig deploys for now
* fix: recommission bash scripts to support kubeconfig deploys for now
* fix: gitkeep needed for manifests dir under repo nginx
* chore: update jenkinsfiles for automation api
* fix: updates to the jenkinsfiles
* chore: doc updates for automation-api changes
* fix: update to docker instance for minikube jenkins
---
 extras/jenkins/K3S/Jenkinsfile      | 2 +-
 extras/jenkins/Minikube/Jenkinsfile | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/extras/jenkins/K3S/Jenkinsfile b/extras/jenkins/K3S/Jenkinsfile
index 3bd0804..42c2052 100644
--- a/extras/jenkins/K3S/Jenkinsfile
+++ b/extras/jenkins/K3S/Jenkinsfile
@@ -97,7 +97,7 @@ pipeline {
       steps {
         sh '''
           # Is this super safe? No, but we’re going to roll with it for now.
-          curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik" sh -
+          curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik" INSTALL_K3S_VERSION="v1.23.9+k3s1" sh -
          '''
       }
     }
diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile
index 78a8cc3..9d90490 100644
--- a/extras/jenkins/Minikube/Jenkinsfile
+++ b/extras/jenkins/Minikube/Jenkinsfile
@@ -105,7 +105,7 @@ pipeline {
           curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube
           mkdir -p /usr/local/bin/
           install minikube /usr/local/bin/
-          minikube start --vm-driver=none
+          minikube start --vm-driver=docker --force --cpus 4 --memory 30000
          '''
       }
     }

From 2114849d7b51cd6d45e97cb961c44e8b65bf9cee Mon Sep 17 00:00:00 2001
From: Jason Schmidt
Date: Tue, 16 Aug 2022 12:40:38 -0600
Subject: [PATCH 59/62] fix: add wheel back into setup_venv.sh (#187)

---
 bin/setup_venv.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh
index e1ab75e..811fffb 100755
--- a/bin/setup_venv.sh
+++ b/bin/setup_venv.sh
@@ -196,6 +196,8 @@ pip3 install pipenv
 # Install certain utility packages like `nodeenv` and `wheel` that aid
 # in the installation of other build tools and dependencies
 # required by the other python packages.
+pip3 install wheel
+
 # `pipenv sync` uses only the information in the `Pipfile.lock` ensuring repeatable builds
 PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv sync --dev

From 4934c5be1980d91cf36d2a8a79350e9b7845e33d Mon Sep 17 00:00:00 2001
From: Jason Schmidt
Date: Tue, 30 Aug 2022 10:20:24 -0600
Subject: [PATCH 60/62] fix: add variable for password fields in jenkins runs / accept stack argument / formatting (#188)

* chore: update cert-manager chart and crds
* chore: update logagent (filebeat) chart version
* chore: update nginx IC to latest chart version
* chore: update prometheus chart to latest version
* chore: update logstore (Elasticsearch) to latest chart version
* chore: update observability to new yaml and new chart
* chore: update example config with new values
* fix: remediation of deployment bugs
* fix: removed JWT-only logic from BoS
* fix: remove logic for sirius_host from deprecated jwt deploys
* fix: remove deprecated ingress-repo-only project
* fix: adjust min kubectl version deployed
* fix: refactor digitalocean to docean for variables
* fix: add repo-only IC deploy to support kubeconfig deploys
* fix: modifications to handle kubeconfig deploys for now
* fix: recommission bash scripts to support kubeconfig deploys for now
* fix: gitkeep needed for manifests dir under repo nginx
* chore: update jenkinsfiles for automation api
* fix: updates to the jenkinsfiles
* chore: doc updates for automation-api changes
* fix: update to docker instance for minikube jenkins
* fix: add wheel back into setup_venv.sh
* fix: jenkinsfile updates
* feat: accept stack value on CLI, handle mis-match
* chore: reformat markdown to fit standards
* fix: changes requested in #188
* refactor: break up stack environment logic into functions
* refactor: formatting changes to main.py
* fix: address PR comments and formatting
* refactor: formatting fixes
* fix: formatting and PR requested changes
* refactor: bash script cleanup
* chore: remove deprecated testcap script
* fix: shell isn't interpreting the args to pulumi right
* fix: still having weird globbing issues. 
* fix: adjust jenkinsfiles for new runner syntax --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- .github/PULL_REQUEST_TEMPLATE.md | 6 +- .pre-commit-config.yaml | 49 +++ CODE_OF_CONDUCT.md | 12 +- CONTRIBUTING.md | 53 ++- README.md | 88 ++-- bin/aws_write_creds.sh | 52 ++- bin/destroy.sh | 27 +- bin/destroy_kube.sh | 159 ++++--- bin/kubernetes-extras.sh | 216 +++++----- bin/setup_venv.sh | 397 +++++++++--------- bin/start.sh | 184 ++++---- bin/start_kube.sh | 298 ++++++------- bin/test-forward.sh | 63 ++- bin/test.py | 6 +- bin/test_runner.sh | 10 +- bin/testcap.sh | 359 ---------------- config/pulumi/README.md | 31 +- docker/README.md | 10 +- docs/accessing_mgmt_tools.md | 46 +- docs/dir_template.md | 11 - docs/getting_started.md | 330 +++++++++------ docs/status-and-issues.md | 144 ++++--- extras/README.md | 19 +- extras/jenkins/AWS/Jenkinsfile | 6 +- extras/jenkins/DigitalOcean/Jenkinsfile | 8 +- extras/jenkins/Linode/Jenkinsfile | 6 +- extras/jenkins/MicroK8s/Jenkinsfile | 6 +- extras/jenkins/Minikube/Jenkinsfile | 4 +- extras/jenkins/README.md | 39 +- pulumi/python/README.md | 245 ++++++----- pulumi/python/automation/DESIGN.md | 245 ++++++----- pulumi/python/automation/main.py | 154 +++++-- pulumi/python/config/README.md | 28 +- pulumi/python/infrastructure/README.md | 19 +- pulumi/python/kubernetes/README.md | 13 +- .../applications/sirius/__main__.py | 127 +++--- .../kubernetes/applications/sirius/verify.py | 11 +- .../observability/otel-objects/README.md | 60 +-- .../observability/otel-operator/README.md | 10 +- .../kubernetes/prometheus/extras/README.md | 19 +- pulumi/python/tools/README.md | 41 +- 42 files changed, 1804 insertions(+), 1809 deletions(-) create mode 100644 .pre-commit-config.yaml delete mode 100755 bin/testcap.sh delete mode 100644 docs/dir_template.md diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 6bcce42..066b2d9 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -14,4 +14,4 @@ A clear and concise description of what you want to happen. A clear and concise description of any alternative solutions or features you've considered. **Additional context** -Add any other context or screenshots about the feature request here. \ No newline at end of file +Add any other context or screenshots about the feature request here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 98a7373..023b0f7 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,12 +1,12 @@ ### Proposed changes -Describe the use case and detail of the change. If this PR addresses an issue -on GitHub, make sure to include a link to that issue here in this description +Describe the use case and detail of the change. If this PR addresses an issue +on GitHub, make sure to include a link to that issue here in this description (not in the title of the PR). ### Checklist Before creating a PR, run through this checklist and mark each as complete. -- [ ] I have written my commit messages in the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format. +- [ ] I have written my commit messages in the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format. 
- [ ] I have read the [CONTRIBUTING](/CONTRIBUTING.md) doc - [ ] I have added tests (when possible) that prove my fix is effective or that my feature works - [ ] I have checked that all unit tests pass after adding my changes diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..ef0b165 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,49 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: check-yaml + args: [--allow-multiple-documents] + - id: check-added-large-files + - id: check-merge-conflict + - id: detect-private-key + - id: trailing-whitespace + - id: mixed-line-ending + - id: end-of-file-fixer + - id: debug-statements + - id: check-merge-conflict + - id: check-ast + +- repo: https://github.com/pre-commit/mirrors-autopep8 + rev: v1.7.0 + hooks: + - id: autopep8 + +- repo: https://github.com/asottile/dead + rev: v1.5.0 + hooks: + - id: dead + +- repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: shellcheck + - id: shfmt + - id: markdownlint + +- repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + +- repo: https://github.com/zricethezav/gitleaks + rev: v8.11.0 + hooks: + - id: gitleaks + +- repo: https://github.com/Yelp/detect-secrets + rev: v1.3.0 + hooks: + - id: detect-secrets diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 4547fd8..1396e34 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -117,13 +117,15 @@ the community. This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. +[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html) +. -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. \ No newline at end of file +[https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq) +. Translations are available at +[https://www.contributor-covenant.org/translations](https://www.contributor-covenant.org/translations). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8913447..54f05b5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,16 +1,18 @@ # Contributing Guidelines -The following is a set of guidelines for contributing. We really appreciate that you are considering contributing! +The following is a set of guidelines for contributing. We really appreciate +that you are considering contributing! 
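
Since this patch adds a `.pre-commit-config.yaml`, contributors can run the same checks locally before opening a PR;
the only assumption in the sketch below is that the `pre-commit` tool is installed via pip.

```
# Enable the hooks defined in .pre-commit-config.yaml for this clone.
pip install pre-commit
pre-commit install           # run the hooks automatically on every commit
pre-commit run --all-files   # or lint the whole tree once on demand
```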
-#### Table Of Contents +## Table Of Contents [Ask a Question](#ask-a-question) [Contributing](#contributing) [Style Guides](#style-guides) - * [Git Style Guide](#git-style-guide) - * [Go Style Guide](#go-style-guide) + +* [Git Style Guide](#git-style-guide) +* [Go Style Guide](#go-style-guide) [Code of Conduct](https://github.com/nginxinc/nginx-wrapper/blob/master/CODE_OF_CONDUCT.md) @@ -22,33 +24,50 @@ Please open an Issue on GitHub with the label `question`. ### Report a Bug -To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the issue has not already been reported. +To report a bug, open an issue on GitHub with the label `bug` using the +available bug report issue template. Please ensure the issue has not already +been reported. ### Suggest an Enhancement -To suggest an enhancement, please create an issue on GitHub with the label `enhancement` using the available feature issue template. +To suggest an enhancement, please create an issue on GitHub with the label +`enhancement` using the available feature issue template. ### Open a Pull Request -* Fork the repo, create a branch, submit a PR when your changes are tested and ready for review. +* Fork the repo, create a branch, submit a PR when your changes are tested and + ready for review. * Fill in [our pull request template](/.github/PULL_REQUEST_TEMPLATE.md) -Note: if you’d like to implement a new feature, please consider creating a feature request issue first to start a discussion about the feature. +Note: if you’d like to implement a new feature, please consider creating a +feature request issue first to start a discussion about the feature. ## Style Guides ### Git Style Guide -* Keep a clean, concise and meaningful git commit history on your branch, rebasing locally and squashing before submitting a PR -* Use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated -* Follow the guidelines of writing a good commit message as described [here](https://chris.beams.io/posts/git-commit/) and summarised in the next few points - * In the subject line, use the present tense ("Add feature" not "Added feature") - * In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...") - * Limit the subject line to 72 characters or less - * Reference issues and pull requests liberally after the subject line - * Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`) +* Keep a clean, concise and meaningful git commit history on your branch, + rebasing locally and squashing before submitting a PR +* Use the + [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format + when writing a commit message, so that changelogs can be automatically + generated +* Follow the guidelines of writing a good commit message as described + [here](https://chris.beams.io/posts/git-commit/) and summarised in the next + few points + * In the subject line, use the present tense + ("Add feature" not "Added feature") + * In the subject line, use the imperative mood ("Move cursor to..." 
not + "Moves cursor to...") + * Limit the subject line to 72 characters or less + * Reference issues and pull requests liberally after the subject line + * Add more detailed description in the body of the git message ( + `git commit -a` to give you more space and time in your text editor to + write a good message instead of `git commit -am`) ### Code Style Guide -* Python code should conform to the [PEP-8 style guidelines](https://www.python.org/dev/peps/pep-0008/) whenever possible. +* Python code should conform to the + [PEP-8 style guidelines](https://www.python.org/dev/peps/pep-0008/) + whenever possible. * Where feasible, include unit tests. diff --git a/README.md b/README.md index 899ee8b..4eea6f6 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,45 @@ +# NGINX Modern Reference Architectures + +## Current Test Status + [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=shield)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_shield) -![AWS Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=AWS) -![DO Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=DigitalOcean) -![LKE Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Linode) +![AWS Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=AWS) +![DO Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=DigitalOcean) +![LKE Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Linode) ![K3s Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=K3s) ![MicroK8s Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=MicroK8s) -![Minikube Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Minikube) - -# NGINX Modern Reference Architectures +![Minikube Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Minikube) -This repository has the basics for a common way to deploy and manage modern apps. Over time, we'll build more example -architectures using different deployment models and options – including other clouds – and you’ll be able to find those -here. +This repository has the basics for a common way to deploy and manage modern +apps. Over time, we'll build more example architectures using different +deployment models and options – including other clouds – and you’ll be able +to find those here. ## Nomenclature -Internally, we refer to this project as MARA for Modern Application Reference Architecture. The current repository name -reflects the humble origins of this project, as it was started with the purpose of allowing users to build custom -versions of the NGINX Ingress Controller in Kubernetes. This went so well that we expanded it to the project you're -currently viewing. +Internally, we refer to this project as MARA for Modern Application Reference +Architecture. The current repository name reflects the humble origins of this +project, as it was started with the purpose of allowing users to build custom +versions of the NGINX Ingress Controller in Kubernetes. This went so well that +we expanded it to the project you're currently viewing. 
## Modern App Architectures We define modern app architectures as those driven by four characteristics: -*scalability*, *portability*, *resiliency*, and *agility*. While many different aspects of a modern architecture exist, -these are fundamental. +*scalability*, *portability*, *resiliency*, and *agility*. While many different +aspects of a modern architecture exist, these are fundamental. -* **Scalability** – Quickly and seamlessly scale up or down to accommodate spikes or reductions in demand, anywhere in - the world. +* **Scalability** – Quickly and seamlessly scale up or down to accommodate + spikes or reductions in demand, anywhere in the world. -* **Portability** – Easy to deploy on multiple types of devices and infrastructures, on public clouds, and on premises. +* **Portability** – Easy to deploy on multiple types of devices and + infrastructures, on public clouds, and on premises. -* **Resiliency** – Can fail over to newly spun‑up clusters or virtual environments in different availability regions, - clouds, or data centers. +* **Resiliency** – Can fail over to newly spun‑up clusters or virtual + environments in different availability regions, clouds, or data centers. -* **Agility** – Ability to update through automated CI/CD pipelines with higher code velocity and more frequent code - pushes. +* **Agility** – Ability to update through automated CI/CD pipelines with higher + code velocity and more frequent code pushes. This diagram is an example of what we mean by a **modern app architecture**: ![Modern Apps Architecture Example Diagram](docs/DIAG-NGINX-ModernAppsRefArch-NGINX-MARA-1-0-blog-1024x800.png) @@ -53,26 +58,32 @@ To satisfy the four key characteristics, many modern app architectures employ: For details on the current state of this project, please see the [readme](pulumi/python/README.md) in the [`pulumi/python`](pulumi/python) -subdirectory. This project is under active development, and the current work is using [Pulumi](https://www.pulumi.com/) -with Python. Additionally, please see -[Status and Issues](docs/status-and-issues.md) for the project's up-to-date build status and known issues. - -Subdirectories contained within the root directory separate reference architectures by infrastructure deployment tooling -with additional subdirectories as needed. For example, Pulumi allows the use of multiple languages for deployment. As we -decided to use Python in our first build, there is a `python` subdirectory under the `pulumi` directory. - -This project was started to provide a complete, stealable, easy to deploy, and standalone example of how a modern app -architecture can be built. It was driven by the necessity to be flexible and not require a long list of dependencies to -get started. It needs to provide examples of tooling used to build this sort of architecture in the real world. Most -importantly, it needs to work. Hopefully this provides a ‘jumping off’ point for someone to build their own +subdirectory. This project is under active development, and the current work is +using [Pulumi](https://www.pulumi.com/) with Python. Additionally, please see +[Status and Issues](docs/status-and-issues.md) for the project's up-to-date +build status and known issues. + +Subdirectories contained within the root directory separate reference +architectures by infrastructure deployment tooling with additional +subdirectories as needed. For example, Pulumi allows the use of multiple +languages for deployment. 
As we decided to use Python in our first build, there +is a `python` subdirectory under the `pulumi` directory. + +This project was started to provide a complete, stealable, easy to deploy, and +standalone example of how a modern app architecture can be built. It was driven +by the necessity to be flexible and not require a long list of dependencies to +get started. It needs to provide examples of tooling used to build this sort of +architecture in the real world. Most importantly, it needs to work. Hopefully +this provides a ‘jumping off’ point for someone to build their own infrastructure. ## Deployment Tools ### Pulumi -[Pulumi](https://www.pulumi.com/) is a modern Infrastructure as Code (IaC) tool that allows you to write code (node, -Python, Go, etc.) that defines cloud infrastructure. Within the [`pulumi`](pulumi) folder are examples of the pulumi +[Pulumi](https://www.pulumi.com/) is a modern Infrastructure as Code (IaC) tool +that allows you to write code (node, Python, Go, etc.) that defines cloud +infrastructure. Within the [`pulumi`](pulumi) folder are examples of the pulumi being used to stand up MARA. ## Contribution @@ -87,6 +98,7 @@ All code in this repository is licensed under the [Apache License v2 license](LICENSE). Open source license notices for all projects in this repository can be -found [here](https://app.fossa.com/reports/92595e16-c0b8-4c68-8c76-59696b6ac219). +found +[here](https://app.fossa.com/reports/92595e16-c0b8-4c68-8c76-59696b6ac219). -[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=large)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_large) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=large)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_large) \ No newline at end of file diff --git a/bin/aws_write_creds.sh b/bin/aws_write_creds.sh index 485539b..000c5b4 100755 --- a/bin/aws_write_creds.sh +++ b/bin/aws_write_creds.sh @@ -2,40 +2,38 @@ set -o errexit # abort on nonzero exit status set -o pipefail # don't hide errors within pipes -# -# This script is temporary until we rewrite the AWS deployment following #81 and #82. -# We look into the environment and if we see environment variables for the AWS -# authentication process we move them into a credentials file. This is primarily being -# done at this time to support Jenkins using env vars for creds +# +# This script is temporary until we rewrite the AWS deployment following +# 81 and #82. # We look into the environment and if we see environment +# variables for the AWS # authentication process we move them into a +# credentials file. 
This is primarily being
+# done at this time to support
+# Jenkins using env vars for creds
 #

 aws_auth_vars=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN)

 missing_auth_vars=()

-for i in "${aws_auth_vars[@]}"
-do
-  test -n "${!i:+y}" || missing_vars+=("$i")
+for i in "${aws_auth_vars[@]}"; do
+  test -n "${!i:+y}" || missing_auth_vars+=("$i")
 done

-if [ ${#missing_auth_vars[@]} -ne 0 ]
-then
-  echo "Did not find values for:"
-  printf ' %q\n' "${missing_vars[@]}"
-  echo "Will assume they are in credentials file or not needed"
+if [ ${#missing_auth_vars[@]} -ne 0 ]; then
+  echo "Did not find values for:"
+  printf ' %q\n' "${missing_auth_vars[@]}"
+  echo "Will assume they are in credentials file or not needed"
 else
-  echo "Creating credentials file"
-  # Create the directory....
-  mkdir -p ~/.aws
-  CREDS=~/.aws/credentials
-  echo "[default]" > $CREDS
-  echo "aws_access_key_id=$AWS_ACCESS_KEY_ID" >> $CREDS
-  echo "aws_secret_access_key=$AWS_SECRET_ACCESS_KEY" >> $CREDS
-  # This is if we have non-temp credentials...
-  if [[ -z "${AWS_SESSION_TOKEN+x}" ]]; then
-    echo "Variable AWS_SESSION_TOKEN was unset; not adding to credentials"
-  else
-    echo "aws_session_token=$AWS_SESSION_TOKEN" >> $CREDS
-  fi
+  echo "Creating credentials file"
+  # Create the directory....
+  mkdir -p ~/.aws
+  CREDS=~/.aws/credentials
+  echo "[default]" >$CREDS
+  echo "aws_access_key_id=$AWS_ACCESS_KEY_ID" >>$CREDS
+  echo "aws_secret_access_key=$AWS_SECRET_ACCESS_KEY" >>$CREDS
+  # This is if we have non-temp credentials...
+  if [[ -z "${AWS_SESSION_TOKEN+x}" ]]; then
+    echo "Variable AWS_SESSION_TOKEN was unset; not adding to credentials"
+  else
+    echo "aws_session_token=$AWS_SESSION_TOKEN" >>$CREDS
+  fi
 fi
-
diff --git a/bin/destroy.sh b/bin/destroy.sh
index 06ab844..58f6f5a 100755
--- a/bin/destroy.sh
+++ b/bin/destroy.sh
@@ -10,10 +10,11 @@ export PULUMI_SKIP_UPDATE_CHECK=true
 export PULUMI_SKIP_CONFIRMATIONS=true

 script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
-# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based
-# projects.
 #
-if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then
+# Check to see if the venv has been installed, since this is only going to be
+# used to start pulumi/python based projects.
+#
+if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then
   echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process."
   echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
   echo " "
@@ -67,28 +68,28 @@ echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}"
 #
 # Determine what destroy script we need to run
 #
-if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config>/dev/null 2>&1; then
+if pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then
   INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)"
-  if [ $INFRA == 'AWS' ]; then
+  if [ "$INFRA" == 'AWS' ]; then
     echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead"
     exec ${script_dir}/../pulumi/python/runner
     exit 0
-  elif [ $INFRA == 'kubeconfig' ]; then
+  elif [ "$INFRA" == 'kubeconfig' ]; then
     echo "Destroying a kubeconfig based stack; if this is not right please type ctrl-c to abort this script."
sleep 5 - ${script_dir}/destroy_kube.sh + "${script_dir}"/destroy_kube.sh exit 0 - elif [ $INFRA == 'DO' ]; then + elif [ "$INFRA" == 'DO' ]; then echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner + exec "${script_dir}"/../pulumi/python/runner sleep 5 - ${script_dir}/destroy_do.sh + "${script_dir}"/destroy_do.sh exit 0 - elif [ $INFRA == 'LKE' ]; then + elif [ "$INFRA" == 'LKE' ]; then echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner + exec "${script_dir}"/../pulumi/python/runner sleep 5 - ${script_dir}/destroy_lke.sh + "${script_dir}"/destroy_lke.sh exit 0 else print "No infrastructure set in config file; aborting!" diff --git a/bin/destroy_kube.sh b/bin/destroy_kube.sh index 8bb4013..0f9592e 100755 --- a/bin/destroy_kube.sh +++ b/bin/destroy_kube.sh @@ -9,57 +9,56 @@ export PULUMI_SKIP_UPDATE_CHECK=true # Run Pulumi non-interactively export PULUMI_SKIP_CONFIRMATIONS=true -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +if ! command -v pulumi >/dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 +if ! command -v python3 >/dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 fi -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi +if ! command -v node >/dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v node >/dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi fi # Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login +if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + pulumi login - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi + if ! 
pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi fi source "${script_dir}/../config/pulumi/environment" echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - APPLICATIONS=(sirius) KUBERNETES=(secrets observability logagent logstore certmgr prometheus) NGINX=(kubernetes/nginx/ingress-controller-repo-only) @@ -71,61 +70,57 @@ INFRA=(kubeconfig digitalocean/domk8s) # # Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi +for project_dir in "${APPLICATIONS[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" + fi done # Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi +for project_dir in "${KUBERNETES[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" + fi done # TODO: figure out a more elegant way to do the CRD removal for prometheus #83 # This is a hack for now to remove the CRD's for prometheus-kube-stack # See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 +kubectl delete crd alertmanagerconfigs.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd alertmanagers.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd podmonitors.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd probes.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd prometheuses.monitoring.coreos.com >/dev/null 2>&1 +kubectl 
delete crd prometheusrules.monitoring.coreos.com >/dev/null 2>&1
+kubectl delete crd servicemonitors.monitoring.coreos.com >/dev/null 2>&1
+kubectl delete crd thanosrulers.monitoring.coreos.com >/dev/null 2>&1

# Destroy NGINX components
-for project_dir in "${NGINX[@]}" ; do
-	echo "$project_dir"
-	if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then
-		pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}"
-		pulumi ${pulumi_args} destroy
-	else
-		>&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}"
-	fi
+for project_dir in "${NGINX[@]}"; do
+	echo "$project_dir"
+	if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then
+		pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}"
+		pulumi $pulumi_args destroy
+	else
+		echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}"
+	fi
done

# Clean up the kubeconfig project
-for project_dir in "${INFRA[@]}" ; do
-	echo "$project_dir"
-	if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
-		pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
-		pulumi ${pulumi_args} destroy
-	else
-		>&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
-	fi
+for project_dir in "${INFRA[@]}"; do
+	echo "$project_dir"
+	if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
+		pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
+		pulumi $pulumi_args destroy
+	else
+		echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
+	fi
done
-
-
-
-
diff --git a/bin/kubernetes-extras.sh b/bin/kubernetes-extras.sh
index 94b8ba2..b4090b6 100755
--- a/bin/kubernetes-extras.sh
+++ b/bin/kubernetes-extras.sh
@@ -24,55 +24,50 @@ echo " For more information, please see Discussion #155 in the repository (nginx
echo "===================================================================================================="
sleep 5

-
# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based
# projects.
#
-if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then
-	echo "NOTICE! Unable to find the vnev directory. This is required for the pulumi/python deployment process."
-	echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
-	echo " "
-	exit 1
+if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then
+	echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process."
+	echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
+	echo " "
+	exit 1
else
-	echo "Adding to [${script_dir}/venv/bin] to PATH"
-	export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
+	echo "Adding to [${script_dir}/venv/bin] to PATH"
+	export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
fi

if ! command -v pulumi >/dev/null; then
-	if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
-		echo "Adding to [${script_dir}/venv/bin] to PATH"
-		export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
-
-		if ! 
command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi - function retry() { - local -r -i max_attempts="$1"; shift - local -i attempt_num=1 - until "$@" - do - if ((attempt_num==max_attempts)) - then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done + local -r -i max_attempts="$1" + shift + local -i attempt_num=1 + until "$@"; do + if ((attempt_num == max_attempts)); then + echo "Attempt ${attempt_num} failed and there are no more attempts left!" + return 1 + else + echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." + sleep $((attempt_num++)) + fi + done } - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" echo " " echo "NOTICE! The stack name provided here should be different from the stack name you use for your main" @@ -89,12 +84,12 @@ echo " " sleep 5 if [ ! -f "${script_dir}/../pulumi/python/tools/common/config/environment" ]; then - touch "${script_dir}/../pulumi/python/tools/common/config/environment" + touch "${script_dir}/../pulumi/python/tools/common/config/environment" fi if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../pulumi/python/tools/common/config/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in tool installation: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../pulumi/python/tools/common/config/environment" + read -r -e -p "Enter the name of the Pulumi stack to use in tool installation: " PULUMI_STACK + echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../pulumi/python/tools/common/config/environment" fi source "${script_dir}/../pulumi/python/tools/common/config/environment" @@ -103,7 +98,6 @@ echo "Configuring all tool installations to use the stack: ${PULUMI_STACK}" # Create the stack if it does not already exist find "${script_dir}/../pulumi/python/tools" -mindepth 2 -maxdepth 2 -type f -name Pulumi.yaml -execdir pulumi stack select --create "${PULUMI_STACK}" \; - echo " " echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to" echo "connect to Kubernetes properly. 
If you have multiple kubernetes contexts (or custom contexts)" @@ -118,27 +112,27 @@ echo " " sleep 5 if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Kubeconfig file found" + echo "Kubeconfig file found" else - echo "Provide an absolute path to your kubeconfig file" - pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common + echo "Provide an absolute path to your kubeconfig file" + pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common fi # Clustername if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Clustername found" + echo "Clustername found" else - echo "Provide your clustername" - pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common + echo "Provide your clustername" + pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common fi # Contextname # TODO: Update process to use context name as well as kubeconfig and clustername #84 if pulumi config get kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Context name found" + echo "Context name found" else - echo "Provide your context name" - pulumi config set kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common + echo "Provide your context name" + pulumi config set kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common fi # Set our variables @@ -147,15 +141,15 @@ cluster_name="$(pulumi config get kubernetes:cluster_name -C ${script_dir}/../pu context_name="$(pulumi config get kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common)" # Show our config...based on the kubeconfig file -if command -v kubectl > /dev/null; then - echo "Attempting to connect to kubernetes cluster" - retry 30 kubectl --kubeconfig="${kubeconfig}" config view +if command -v kubectl >/dev/null; then + echo "Attempting to connect to kubernetes cluster" + retry 30 kubectl --kubeconfig="${kubeconfig}" config view fi # Connect to the cluster -if command -v kubectl > /dev/null; then - echo "Attempting to connect to kubernetes cluster" - retry 30 kubectl --kubeconfig="${kubeconfig}" --cluster="${cluster_name}" --context="${context_name}" version > /dev/null +if command -v kubectl >/dev/null; then + echo "Attempting to connect to kubernetes cluster" + retry 30 kubectl --kubeconfig="${kubeconfig}" --cluster="${cluster_name}" --context="${context_name}" version >/dev/null fi echo " " @@ -168,67 +162,73 @@ echo " " sleep 5 while true; do - read -r -e -p "Do you wish to install metallb? 
" yn - case $yn in - [Yy]* ) echo "Checking for necessary values in the configuration:" - pulumi config set metallb:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 - if pulumi config get metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "CIDR found" - else - echo "Provide your CIDR (Note: no validation is done on this data)" - pulumi config set metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common - fi - break;; - [Nn]* ) # If they don't want metallb, but have a value in there we delete it - pulumi config rm metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm metallb:enabled -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - break;; - * ) echo "Please answer yes or no.";; - esac + read -r -e -p "Do you wish to install metallb? " yn + case $yn in + [Yy]*) + echo "Checking for necessary values in the configuration:" + pulumi config set metallb:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 + if pulumi config get metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "CIDR found" + else + echo "Provide your CIDR (Note: no validation is done on this data)" + pulumi config set metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common + fi + break + ;; + [Nn]*) # If they don't want metallb, but have a value in there we delete it + pulumi config rm metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm metallb:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + break + ;; + *) echo "Please answer yes or no." ;; + esac done while true; do - read -r -e -p "Do you wish to install nfs client support for persistent volumes? " yn - case $yn in - [Yy]* ) echo "Checking for necessary values in the configuration:" - pulumi config set nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 - if pulumi config get nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "NFS Server IP found" - else - echo "Provide your NFS Server IP (Note: no validation is done on this data)" - pulumi config set nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common - fi - if pulumi config get nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "NFS Share Path found" - else - echo "Provide your NFS Share Path (Note: no validation is done on this data)" - pulumi config set nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common - fi - break;; - [Nn]* ) # If they don't want nfsvols, but have a value in there we delete it - pulumi config rm nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - break;; - * ) echo "Please answer yes or no.";; - esac + read -r -e -p "Do you wish to install nfs client support for persistent volumes? 
" yn + case $yn in + [Yy]*) + echo "Checking for necessary values in the configuration:" + pulumi config set nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 + if pulumi config get nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "NFS Server IP found" + else + echo "Provide your NFS Server IP (Note: no validation is done on this data)" + pulumi config set nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common + fi + if pulumi config get nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "NFS Share Path found" + else + echo "Provide your NFS Share Path (Note: no validation is done on this data)" + pulumi config set nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common + fi + break + ;; + [Nn]*) # If they don't want nfsvols, but have a value in there we delete it + pulumi config rm nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + break + ;; + *) echo "Please answer yes or no." ;; + esac done pulumi_args="--emoji " if pulumi config get metallb:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "=====================" - echo "| MetalLB |" - echo "=====================" - cd "${script_dir}/../pulumi/python/tools/metallb" - pulumi $pulumi_args up + echo "=====================" + echo "| MetalLB |" + echo "=====================" + cd "${script_dir}/../pulumi/python/tools/metallb" + pulumi $pulumi_args up fi if pulumi config get nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "=====================" - echo "| NFSVols |" - echo "=====================" + echo "=====================" + echo "| NFSVols |" + echo "=====================" - cd "${script_dir}/../pulumi/python/tools/nfsvolumes" - pulumi $pulumi_args up -fi \ No newline at end of file + cd "${script_dir}/../pulumi/python/tools/nfsvolumes" + pulumi $pulumi_args up +fi diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index 811fffb..fbce414 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -7,47 +7,47 @@ set -o pipefail # don't hide errors within pipes # https://stackoverflow.com/a/31939275/33611 # CC BY-SA 3.0 License: https://creativecommons.org/licenses/by-sa/3.0/ function askYesNo() { - QUESTION=$1 - DEFAULT=$2 - if [ "$DEFAULT" = true ]; then - OPTIONS="[Y/n]" - DEFAULT="y" - else - OPTIONS="[y/N]" - DEFAULT="n" - fi - if [ "${DEBIAN_FRONTEND}" != "noninteractive" ]; then - read -p "$QUESTION $OPTIONS " -n 1 -s -r INPUT - INPUT=${INPUT:-${DEFAULT}} - echo "${INPUT}" - fi - - if [ "${DEBIAN_FRONTEND}" == "noninteractive" ]; then - ANSWER=$DEFAULT - elif [[ "$INPUT" =~ ^[yY]$ ]]; then - ANSWER=true - else - ANSWER=false - fi + QUESTION=$1 + DEFAULT=$2 + if [ "$DEFAULT" = true ]; then + OPTIONS="[Y/n]" + DEFAULT="y" + else + OPTIONS="[y/N]" + DEFAULT="n" + fi + if [ "${DEBIAN_FRONTEND}" != "noninteractive" ]; then + read -p "$QUESTION $OPTIONS " -n 1 -s -r INPUT + INPUT=${INPUT:-${DEFAULT}} + echo "${INPUT}" + fi + + if [ "${DEBIAN_FRONTEND}" == "noninteractive" ]; then + ANSWER=$DEFAULT + elif [[ "$INPUT" =~ ^[yY]$ ]]; then + ANSWER=true + else + ANSWER=false + fi } # Does basic OS distribution detection for "class" of distribution, such # as debian, rhel, etc function distro_like() { - 
local like - if [ "$(uname -s)" == "Darwin" ]; then - like="darwin" - elif [ -f /etc/os-release ]; then - if grep --quiet '^ID_LIKE=' /etc/os-release; then - like="$(grep '^ID_LIKE=' /etc/os-release | cut -d'=' -f2 | tr -d \")" - else - like="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d \")" - fi - else - like="unknown" - fi - - echo "${like}" + local like + if [ "$(uname -s)" == "Darwin" ]; then + like="darwin" + elif [ -f /etc/os-release ]; then + if grep --quiet '^ID_LIKE=' /etc/os-release; then + like="$(grep '^ID_LIKE=' /etc/os-release | cut -d'=' -f2 | tr -d \")" + else + like="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d \")" + fi + else + like="unknown" + fi + + echo "${like}" } script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" @@ -56,133 +56,135 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" unset VIRTUAL_ENV if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 + echo >&2 "git must be installed to continue" + exit 1 fi # When Python does not exist if ! command -v python3 >/dev/null; then - if ! command -v make >/dev/null; then - echo >&2 "make must be installed in order to install python with pyenv" - echo >&2 "Either install make or install Python 3 with the venv module" - exit 1 - fi - if ! command -v gcc >/dev/null; then - echo >&2 "gcc must be installed in order to install python with pyenv" - echo >&2 "Either install gcc or install Python 3 with the venv module" - exit 1 - fi - - echo "Python 3 is not installed. Adding pyenv to allow for Python installation" - echo "If development library dependencies are not installed, Python build may fail." - - # Give relevant hint for the distro - if distro_like | grep --quiet 'debian'; then - echo "You may need to install additional packages using a command like the following:" - echo " apt-get install libbz2-dev libffi-dev libreadline-dev libsqlite3-dev libssl-dev" - elif distro_like | grep --quiet 'rhel'; then - echo "You may need to install additional packages using a command like the following:" - echo " yum install bzip2-devel libffi-devel readline-devel sqlite-devel openssl-devel zlib-devel" - else - echo "required libraries: libbz2 libffi libreadline libsqlite3 libssl zlib1g" - fi - - PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv" - - mkdir -p "${PYENV_ROOT}" - git_clone_log="$(mktemp -t pyenv_git_clone-XXXXXXX.log)" - if git clone --depth 1 --branch v2.0.3 https://github.com/pyenv/pyenv.git "${PYENV_ROOT}" 2>"${git_clone_log}"; then - rm "${git_clone_log}" - else - echo >&2 "Error cloning pyenv repository:" - cat >&2 "${git_clone_log}" - fi - - PATH="$PYENV_ROOT/bin:$PATH" + if ! command -v make >/dev/null; then + echo >&2 "make must be installed in order to install python with pyenv" + echo >&2 "Either install make or install Python 3 with the venv module" + exit 1 + fi + if ! command -v gcc >/dev/null; then + echo >&2 "gcc must be installed in order to install python with pyenv" + echo >&2 "Either install gcc or install Python 3 with the venv module" + exit 1 + fi + + echo "Python 3 is not installed. Adding pyenv to allow for Python installation" + echo "If development library dependencies are not installed, Python build may fail." 
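+	# (Illustrative, Debian-like systems only) the build prerequisites can be
+	# installed up front in one shot with something like:
+	#   sudo apt-get install -y make gcc libbz2-dev libffi-dev libreadline-dev libsqlite3-dev libssl-dev zlib1g-dev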
+ + # Give relevant hint for the distro + if distro_like | grep --quiet 'debian'; then + echo "You may need to install additional packages using a command like the following:" + echo " apt-get install libbz2-dev libffi-dev libreadline-dev libsqlite3-dev libssl-dev" + elif distro_like | grep --quiet 'rhel'; then + echo "You may need to install additional packages using a command like the following:" + echo " yum install bzip2-devel libffi-devel readline-devel sqlite-devel openssl-devel zlib-devel" + else + echo "required libraries: libbz2 libffi libreadline libsqlite3 libssl zlib1g" + fi + + PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv" + + mkdir -p "${PYENV_ROOT}" + git_clone_log="$(mktemp -t pyenv_git_clone-XXXXXXX.log)" + if git clone --depth 1 --branch v2.0.3 https://github.com/pyenv/pyenv.git "${PYENV_ROOT}" 2>"${git_clone_log}"; then + rm "${git_clone_log}" + else + echo >&2 "Error cloning pyenv repository:" + cat >&2 "${git_clone_log}" + fi + + PATH="$PYENV_ROOT/bin:$PATH" fi -# If pyenv is available we use a the python version as set in the +# +# If pyenv is available we use the python version as set in the # .python-version file. This gives us a known and well tested version # of python. +# if command -v pyenv >/dev/null; then - eval "$(pyenv init --path)" - eval "$(pyenv init -)" - - if [ -z "${PYENV_ROOT}" ]; then - PYENV_ROOT=~/.pyenv - fi - - echo "pyenv detected in: ${PYENV_ROOT}" - pyenv install --skip-existing <"${script_dir}/../.python-version" - - # If the pyenv-virtualenv tools are installed, prompt the user if they want to - # use them. - if [ -d "${PYENV_ROOT}/plugins/pyenv-virtualenv" ]; then - askYesNo "Use pyenv-virtualenv to manage virtual environment?" true - if [ $ANSWER = true ]; then - has_pyenv_venv_plugin=1 - else - has_pyenv_venv_plugin=0 - fi - else - has_pyenv_venv_plugin=0 - fi + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + + if [ -z "${PYENV_ROOT}" ]; then + PYENV_ROOT=~/.pyenv + fi + + echo "pyenv detected in: ${PYENV_ROOT}" + pyenv install --skip-existing <"${script_dir}/../.python-version" + + # If the pyenv-virtualenv tools are installed, prompt the user if they want to + # use them. + if [ -d "${PYENV_ROOT}/plugins/pyenv-virtualenv" ]; then + askYesNo "Use pyenv-virtualenv to manage virtual environment?" true + if [ $ANSWER = true ]; then + has_pyenv_venv_plugin=1 + else + has_pyenv_venv_plugin=0 + fi + else + has_pyenv_venv_plugin=0 + fi else - has_pyenv_venv_plugin=0 + has_pyenv_venv_plugin=0 fi # if pyenv with virtual-env plugin is installed, use that if [ ${has_pyenv_venv_plugin} -eq 1 ]; then - eval "$(pyenv virtualenv-init -)" - - if ! pyenv virtualenvs --bare | grep --quiet '^mara'; then - pyenv virtualenv mara - fi - - if [ -z "${VIRTUAL_ENV}" ]; then - pyenv activate mara - fi - - if [ -h "${script_dir}/../pulumi/python/venv" ]; then - echo "Link already exists [${script_dir}/../pulumi/python/venv] - removing and relinking" - rm "${script_dir}/../pulumi/python/venv" - elif [ -d "${script_dir}/../pulumi/python/venv" ]; then - echo "Virtual environment directory already exists" - askYesNo "Delete and replace with pyenv-virtualenv managed link?" false - if [ $ANSWER = true ]; then - echo "Deleting ${script_dir}/../pulumi/python/venv" - rm -rf "${script_dir}/../pulumi/python/venv" - else - echo >&2 "The path ${script_dir}/../pulumi/python/venv must not be a virtual environment directory when using pyenv-virtualenv" - echo >&2 "Exiting. 
Please manually remove the directory" - exit 1 - fi - fi - - # We create a symbolic link to the pyenv managed venv because using the - # pyenv virtual environment tooling introduces too many conditional logic paths - # in subsequent scripts/programs that need to load the virtual environment. - # Assuming that the venv directory is at a fixed known path makes things easier. - echo "Linking virtual environment [${VIRTUAL_ENV}] to local directory [venv]" - ln -s "${VIRTUAL_ENV}" "${script_dir}/../pulumi/python/venv" + eval "$(pyenv virtualenv-init -)" + + if ! pyenv virtualenvs --bare | grep --quiet '^mara'; then + pyenv virtualenv mara + fi + + if [ -z "${VIRTUAL_ENV}" ]; then + pyenv activate mara + fi + + if [ -h "${script_dir}/../pulumi/python/venv" ]; then + echo "Link already exists [${script_dir}/../pulumi/python/venv] - removing and relinking" + rm "${script_dir}/../pulumi/python/venv" + elif [ -d "${script_dir}/../pulumi/python/venv" ]; then + echo "Virtual environment directory already exists" + askYesNo "Delete and replace with pyenv-virtualenv managed link?" false + if [ $ANSWER = true ]; then + echo "Deleting ${script_dir}/../pulumi/python/venv" + rm -rf "${script_dir}/../pulumi/python/venv" + else + echo >&2 "The path ${script_dir}/../pulumi/python/venv must not be a virtual environment directory when using pyenv-virtualenv" + echo >&2 "Exiting. Please manually remove the directory" + exit 1 + fi + fi + + # We create a symbolic link to the pyenv managed venv because using the + # pyenv virtual environment tooling introduces too many conditional logic paths + # in subsequent scripts/programs that need to load the virtual environment. + # Assuming that the venv directory is at a fixed known path makes things easier. + echo "Linking virtual environment [${VIRTUAL_ENV}] to local directory [venv]" + ln -s "${VIRTUAL_ENV}" "${script_dir}/../pulumi/python/venv" fi # If pyenv isn't present do everything with default python tooling -if [ ${has_pyenv_venv_plugin} -eq 0 ]; then - if [ -z "${VIRTUAL_ENV}" ]; then - VIRTUAL_ENV="${script_dir}/../pulumi/python/venv" - echo "No virtual environment already specified, defaulting to: ${VIRTUAL_ENV}" - fi - - if [ ! -d "${VIRTUAL_ENV}" ]; then - echo "Creating new virtual environment: ${VIRTUAL_ENV}" - if ! python3 -m venv "${VIRTUAL_ENV}"; then - echo "Deleting partially created virtual environment: ${VIRTUAL_ENV}" - rm -rf "${VIRTUAL_ENV}" || true - fi - fi - - source "${VIRTUAL_ENV}/bin/activate" +if [ "${has_pyenv_venv_plugin}" -eq 0 ]; then + if [ -z "${VIRTUAL_ENV}" ]; then + VIRTUAL_ENV="${script_dir}/../pulumi/python/venv" + echo "No virtual environment already specified, defaulting to: ${VIRTUAL_ENV}" + fi + + if [ ! -d "${VIRTUAL_ENV}" ]; then + echo "Creating new virtual environment: ${VIRTUAL_ENV}" + if ! python3 -m venv "${VIRTUAL_ENV}"; then + echo "Deleting partially created virtual environment: ${VIRTUAL_ENV}" + rm -rf "${VIRTUAL_ENV}" || true + fi + fi + + source "${VIRTUAL_ENV}/bin/activate" fi source "${VIRTUAL_ENV}/bin/activate" @@ -204,9 +206,9 @@ PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipe # Install node.js into virtual environment so that it can be used by Python # modules that make call outs to it. if [ ! 
-x "${VIRTUAL_ENV}/bin/node" ]; then - nodeenv -p --node=lts + nodeenv -p --node=lts else - echo "Node.js version $("${VIRTUAL_ENV}/bin/node" --version) is already installed" + echo "Node.js version $("${VIRTUAL_ENV}/bin/node" --version) is already installed" fi # Install general package requirements @@ -217,9 +219,8 @@ PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipe pip3 install "${script_dir}/../pulumi/python/utility/kic-pulumi-utils" rm -rf "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/.eggs" \ - "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/build" \ - "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/kic_pulumi_utils.egg-info" - + "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/build" \ + "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/kic_pulumi_utils.egg-info" ARCH="" case $(uname -m) in @@ -230,28 +231,28 @@ aarch64) ARCH="arm64" ;; arm64) ARCH="arm64" ;; arm) dpkg --print-architecture | grep -q "arm64" && ARCH="arm64" || ARCH="arm" ;; *) - echo >&2 "Unable to determine system architecture." - exit 1 - ;; + echo >&2 "Unable to determine system architecture." + exit 1 + ;; esac OS="$(uname -s | tr '[:upper:]' '[:lower:]')" if command -v wget >/dev/null; then - download_cmd="wget --quiet --max-redirect=12 --output-document -" + download_cmd="wget --quiet --max-redirect=12 --output-document -" elif command -v curl >/dev/null; then - download_cmd="curl --fail --silent --location" + download_cmd="curl --fail --silent --location" else - echo >&2 "either wget or curl must be installed" - exit 1 + echo >&2 "either wget or curl must be installed" + exit 1 fi if command -v sha256sum >/dev/null; then - sha256sum_cmd="sha256sum --check" + sha256sum_cmd="sha256sum --check" elif command -v shasum >/dev/null; then - sha256sum_cmd="shasum --algorithm 256 --check" + sha256sum_cmd="shasum --algorithm 256 --check" else - echo >&2 "either sha256sum or shasum must be installed" - exit 1 + echo >&2 "either sha256sum or shasum must be installed" + exit 1 fi # @@ -268,54 +269,54 @@ fi # # if [ ! 
-x "${VIRTUAL_ENV}/bin/kubectl" ]; then - echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.24.3" - ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" - KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" - echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} - chmod +x "${VIRTUAL_ENV}/bin/kubectl" + echo "Downloading kubectl into virtual environment" + KUBECTL_VERSION="v1.24.3" + ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" + KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" + echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} + chmod +x "${VIRTUAL_ENV}/bin/kubectl" else - echo "kubectl is already installed, but will overwrite to ensure correct version" - echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.24.3" - ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" - KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" - echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} - chmod +x "${VIRTUAL_ENV}/bin/kubectl" + echo "kubectl is already installed, but will overwrite to ensure correct version" + echo "Downloading kubectl into virtual environment" + KUBECTL_VERSION="v1.24.3" + ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" + KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" + echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} + chmod +x "${VIRTUAL_ENV}/bin/kubectl" fi # Download Pulumi CLI tooling # Regular expression and sed command from https://superuser.com/a/363878 echo "Downloading Pulumi CLI into virtual environment" PULUMI_VERSION="$(pip3 list | grep 'pulumi ' | sed -nEe 's/^[^0-9]*(([0-9]+\.)*[0-9]+).*/\1/p')" - if [ -z $PULUMI_VERSION ] ; then - echo "Failed to find Pulumi version - EXITING" - exit 5 - else - echo "Pulumi version found: $PULUMI_VERSION" - fi +if [ -z "$PULUMI_VERSION" ]; then + echo "Failed to find Pulumi version - EXITING" + exit 5 +else + echo "Pulumi version found: $PULUMI_VERSION" +fi if [[ -x "${VIRTUAL_ENV}/bin/pulumi" ]] && [[ "$(PULUMI_SKIP_UPDATE_CHECK=true "${VIRTUAL_ENV}/bin/pulumi" version)" == "v${PULUMI_VERSION}" ]]; then - echo "Pulumi version ${PULUMI_VERSION} is already installed" + echo "Pulumi version ${PULUMI_VERSION} is already installed" else - PULUMI_TARBALL_URL="https://get.pulumi.com/releases/sdk/pulumi-v${PULUMI_VERSION}-${OS}-${ARCH/amd64/x64}.tar.gz" - PULUMI_TARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) - ${download_cmd} "${PULUMI_TARBALL_URL}" > "${PULUMI_TARBALL_DEST}" - [ $? -eq 0 ] && echo "Pulumi downloaded successfully" || echo "Failed to download Pulumi" - tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DEST}" - [ $? 
-eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" - rm "${PULUMI_TARBALL_DEST}" + PULUMI_TARBALL_URL="https://get.pulumi.com/releases/sdk/pulumi-v${PULUMI_VERSION}-${OS}-${ARCH/amd64/x64}.tar.gz" + PULUMI_TARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) + ${download_cmd} "${PULUMI_TARBALL_URL}" >"${PULUMI_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Pulumi downloaded successfully" || echo "Failed to download Pulumi" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" + rm "${PULUMI_TARBALL_DEST}" fi # Digital Ocean CLI if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then - echo "Downloading Digital Ocean CLI" - DOCTL_VERSION="1.75.0" - DOCTL_TARBALL_URL="https://github.com/digitalocean/doctl/releases/download/v${DOCTL_VERSION}/doctl-${DOCTL_VERSION}-${OS}-${ARCH}.tar.gz" - DOCTL_TARBALL_DEST=$(mktemp -t doctl.tar.gz.XXXXXXXXXX) - ${download_cmd} "${DOCTL_TARBALL_URL}" > "${DOCTL_TARBALL_DEST}" - [ $? -eq 0 ] && echo "Digital Ocean CLI downloaded successfully" || echo "Failed to download Digital Ocean CLI" - tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}" - [ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI" - rm "${DOCTL_TARBALL_DEST}" + echo "Downloading Digital Ocean CLI" + DOCTL_VERSION="1.75.0" + DOCTL_TARBALL_URL="https://github.com/digitalocean/doctl/releases/download/v${DOCTL_VERSION}/doctl-${DOCTL_VERSION}-${OS}-${ARCH}.tar.gz" + DOCTL_TARBALL_DEST=$(mktemp -t doctl.tar.gz.XXXXXXXXXX) + ${download_cmd} "${DOCTL_TARBALL_URL}" >"${DOCTL_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Digital Ocean CLI downloaded successfully" || echo "Failed to download Digital Ocean CLI" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI" + rm "${DOCTL_TARBALL_DEST}" fi diff --git a/bin/start.sh b/bin/start.sh index 8e85bfc..f28e979 100755 --- a/bin/start.sh +++ b/bin/start.sh @@ -18,42 +18,42 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" # projects. # if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then - echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." - echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." - echo " " - exit 1 + echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." + echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." + echo " " + exit 1 else - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" fi if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - if ! 
command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 + echo >&2 "Python 3 must be installed to continue" + exit 1 fi # Check to see if the user is logged into Pulumi if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login + pulumi login - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi fi echo " " @@ -68,75 +68,75 @@ echo " " sleep 5 if [ -s "${script_dir}/../config/pulumi/environment" ] && grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - source "${script_dir}/../config/pulumi/environment" - echo "Environment data found for stack: ${PULUMI_STACK}" - while true; do - read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn - case $yn in - [Yy]*) # We have an environment file and they want to keep it.... - if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)" - if [ $INFRA == 'AWS' ]; then - echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - elif [ $INFRA == 'kubeconfig' ]; then - exec ${script_dir}/start_kube.sh - exit 0 - elif [ $INFRA == 'DO' ]; then - echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - elif [ $INFRA == 'LKE' ]; then - echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - else - echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." - exit 1 - fi - else - echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." - exit 1 - fi - break - ;; - [Nn]*) # They want to remove and reconfigure - rm -f ${script_dir}/../config/pulumi/environment - break - ;; - *) echo "Please answer yes or no." ;; - esac - done + source "${script_dir}"/../config/pulumi/environment + echo "Environment data found for stack: ${PULUMI_STACK}" + while true; do + read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn + case $yn in + [Yy]*) # We have an environment file, and they want to keep it.... 
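+			# (Illustrative) the environment file sourced above is a flat
+			# KEY=value file written by these scripts; typically its only
+			# content is a single line such as:
+			#   PULUMI_STACK=marademo   (example stack name)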
+ if pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then + INFRA=$(pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config) + if [ "$INFRA" == 'AWS' ]; then + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + elif [ "$INFRA" == 'kubeconfig' ]; then + exec "${script_dir}"/start_kube.sh + exit 0 + elif [ "$INFRA" == 'DO' ]; then + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + elif [ "$INFRA" == 'LKE' ]; then + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + else + echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." + exit 1 + fi + else + echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." + exit 1 + fi + break + ;; + [Nn]*) # They want to remove and reconfigure + rm -f "${script_dir}"/../config/pulumi/environment + break + ;; + *) echo "Please answer yes or no." ;; + esac + done fi while true; do - read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode? " infra - case $infra in - [Aa]*) - echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - break - ;; - [Kk]*) - echo "Calling kubeconfig startup script" - exec ${script_dir}/start_kube.sh - exit 0 - break - ;; - [Dd]*) - echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - break - ;; - [Ll]*) - echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" - exec ${script_dir}/../pulumi/python/runner - exit 0 - break - ;; - *) echo "Please answer a, d, k, or l." ;; - esac + read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode? " infra + case "$infra" in + [Aa]*) + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + [Kk]*) + echo "Calling kubeconfig startup script" + exec "${script_dir}"/start_kube.sh + exit 0 + break + ;; + [Dd]*) + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + [Ll]*) + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + *) echo "Please answer a, d, k, or l." ;; + esac done diff --git a/bin/start_kube.sh b/bin/start_kube.sh index 1a781e3..e2f4f74 100755 --- a/bin/start_kube.sh +++ b/bin/start_kube.sh @@ -12,72 +12,73 @@ export PULUMI_SKIP_CONFIRMATIONS=true script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! 
command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 + echo >&2 "Python 3 must be installed to continue" + exit 1 fi if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v node >/dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi fi if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 + echo >&2 "git must be installed to continue" + exit 1 fi if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." + echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." fi if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." + echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." fi # Check to see if the user is logged into Pulumi if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login + pulumi login - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi fi if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" + touch "${script_dir}/../config/pulumi/environment" fi if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" + read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK + echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" fi +# # Do we have the submodule source.... # # Note: We had been checking for .git, but this is not guaranteed to be @@ -85,102 +86,105 @@ fi # for the src subdirectory which should always be there. 
# if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" + echo "Submodule source found" else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 + # Error out with instructions. + echo "Bank of Sirius submodule not found" + echo " " + echo "Please run:" + echo " git submodule update --init --recursive --remote" + echo "Inside your git directory and re-run this script" + echo "" + echo >&2 "Unable to find submodule - exiting" + exit 3 fi source "${script_dir}/../config/pulumi/environment" echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" +# # Create the stack if it does not already exist # Do not change the tools directory of add-ons. +# find "${script_dir}/../pulumi" -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; +# # Show colorful fun headers if the right utils are installed and NO_COLOR is not set # function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi + if [ -z ${NO_COLOR+x} ]; then + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat + else + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" + fi } function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done + local -r -i max_attempts="$1" + shift + local -i attempt_num=1 + until "$@"; do + if ((attempt_num == max_attempts)); then + echo "Attempt ${attempt_num} failed and there are no more attempts left!" + return 1 + else + echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." + sleep $((attempt_num++)) + fi + done } function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD + PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) + echo "$PWORD" } # -# This deploy only works with the NGINX registries. +# This deployment only works with the NGINX registries. # echo " " -echo "NOTICE! Currently the deployment via kubeconfig only supports pulling images from the registry! A JWT is " +echo "NOTICE! Currently, the deployment via kubeconfig only supports pulling images from the registry! A JWT is " echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory" -echo "in the project root, in a file named jwt.token" +echo "in the project root in a file named jwt.token" echo " " echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " echo "details and examples." 
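+# (Illustrative) the expected layout is simply the raw JWT as the only line of
+# the file, e.g.:
+#   $ cat extras/jwt.token
+#   eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.<payload>.<signature>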
echo " " +# # Make sure we see it +# sleep 5 # -# TODO: Integrate this into the mainline along with logic to work with/without #80 -# -# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not -# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secert. That +# This logic takes the JWT and transforms it into a secret, so we can pull the NGINX Plus IC. If the user is not +# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secret. That # secret is not a valid secret, but it is created to make the logic easier to read/code. # if [[ -s "${script_dir}/../extras/jwt.token" ]]; then - JWT=$(cat ${script_dir}/../extras/jwt.token) - echo "Loading JWT into nginx-ingress/regcred" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml + JWT=$(cat "${script_dir}"/../extras/jwt.token) + echo "Loading JWT into nginx-ingress/regcred" + "${script_dir}"/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username="${JWT}" --docker-password=none -n nginx-ingress --dry-run=client -o yaml >"${script_dir}"/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml else - # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81 - echo "No JWT found; writing placeholder manifest" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml + echo "No JWT found; writing placeholder manifest" + "${script_dir}"/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >"${script_dir}"/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml fi -# Check for stack info.... -# TODO: Move these to use kubeconfig for the Pulumi main config (which redirects up) instead of aws/vpc #80 # - +# Check for stack info.... # We automatically set this to a kubeconfig type for infra type -# TODO: combined file should query and manage this #80 -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config kubeconfig -# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the -# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses it's own +# +pulumi config set kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config kubeconfig +# +# This is a bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the +# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own # configuration because of the encryption needed for the passwords. 
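+# (Illustrative) those Sirius values are set against the secrets project with
+# the --secret flag so they are encrypted in the stack state, e.g.:
+#   pulumi config set sirius:accounts_pwd --secret -C pulumi/python/kubernetes/secrets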
-pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius kubeconfig +# +pulumi config set kubernetes:infra_type -C "${script_dir}"/../pulumi/python/kubernetes/applications/sirius kubeconfig +# # Inform the user of what we are doing - +# echo " " echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to" echo "connect to Kubernetes properly. If you have multiple kubernetes contexts (or custom contexts)" @@ -188,115 +192,112 @@ echo "you may need to remove them and replace them with a simple ~/.kube/config echo "addressed in a future release." echo " " +# # Sleep so that this is seen... +# sleep 5 -if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Kubeconfig file found" +if pulumi config get kubernetes:kubeconfig -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then + echo "Kubeconfig file found" else - echo "Provide an absolute path to your kubeconfig file" - pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config + echo "Provide an absolute path to your kubeconfig file" + pulumi config set kubernetes:kubeconfig -C "${script_dir}"/../pulumi/python/config fi +# # Clustername -if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Clustername found" +# +if pulumi config get kubernetes:cluster_name -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then + echo "Clustername found" else - echo "Provide your clustername" - pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config + echo "Provide your clustername" + pulumi config set kubernetes:cluster_name -C "${script_dir}"/../pulumi/python/config fi +# # Connect to the cluster +# if command -v kubectl >/dev/null; then - echo "Attempting to connect to kubernetes cluster" - retry 30 kubectl version >/dev/null + echo "Attempting to connect to Kubernetes cluster" + retry 30 kubectl version >/dev/null fi -# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82 # # This version of the code forces you to add a hostname which is used to generate the cert when the application is # deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed # cert and to access the application. # -if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Hostname found for deployment" +if pulumi config get kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then + echo "Hostname found for deployment" else - echo "Create a fqdn for your deployment" - pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config + echo "Create an FQDN for your deployment" + pulumi config set kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config fi -# The bank of sirius configuration file is stored in the ./sirius/config -# directory. This is because we cannot pull secrets from different project -# directories. -# -# This work-around is expected to be obsoleted by the work described in -# https://github.com/pulumi/pulumi/issues/4604, specifically around issue -# https://github.com/pulumi/pulumi/issues/2307 # -# Check for secrets being set +# The bank of sirius secrets (and all other secrets) are stored in the "secrets" +# project.
# echo "Checking for required secrets" -if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then - echo "Configuration value found" +if pulumi config get prometheus:adminpass -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" else - echo "Please enter a password for grafana" - pulumi config set prometheus:adminpass --secret -C pulumi/python/kubernetes/secrets + echo "Please enter a password for grafana" + pulumi config set prometheus:adminpass --secret -C pulumi/python/kubernetes/secrets fi -if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then - echo "Configuration value found" +if pulumi config get sirius:accounts_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" else - echo "Please enter a password for the sirius accountsdb" - pulumi config set sirius:accounts_pwd --secret -C pulumi/python/kubernetes/secrets + echo "Please enter a password for the sirius accountsdb" + pulumi config set sirius:accounts_pwd --secret -C pulumi/python/kubernetes/secrets fi -if pulumi config get sirius:demo_login_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then - echo "Configuration value found" +if pulumi config get sirius:demo_login_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" else - echo "Please enter a password for the sirius ledgerdb" - pulumi config set sirius:demo_login_pwd --secret -C pulumi/python/kubernetes/secrets + echo "Please enter a password for the sirius ledgerdb" + pulumi config set sirius:demo_login_pwd --secret -C pulumi/python/kubernetes/secrets fi -if pulumi config get sirius:demo_login_user -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then - echo "Configuration value found" +if pulumi config get sirius:demo_login_user -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" else - echo "Please enter a username for the BoS" - pulumi config set sirius:demo_login_user --secret -C pulumi/python/kubernetes/secrets + echo "Please enter a username for the BoS" + pulumi config set sirius:demo_login_user --secret -C pulumi/python/kubernetes/secrets fi -if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then - echo "Configuration value found" +if pulumi config get sirius:ledger_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then + echo "Configuration value found" else - echo "Please enter a password for the BoS user account" - pulumi config set sirius:ledger_pwd --secret -C pulumi/python/kubernetes/secrets + echo "Please enter a password for the BoS user account" + pulumi config set sirius:ledger_pwd --secret -C pulumi/python/kubernetes/secrets fi # -# TODO: Allow startup scripts to prompt and accept additional config values #97 -# The default helm timeout for all of the projects is set at the default of 300 seconds (5 minutes) +# The default helm timeout for all the projects is set at the default of 300 seconds (5 minutes) # However, since this code path is most commonly going to be used to deploy locally we need to bump # that value up. A fix down the road will add this a prompt, but for now we are going to double this # value for all helm deploys. 
# - -pulumi config set kic-helm:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set logagent:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set logstore:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set certmgr:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set prometheus:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set kic-helm:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set logagent:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set logstore:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set certmgr:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set prometheus:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config # # Set the headers to respect the NO_COLOR variable # if [ -z ${NO_COLOR+x} ]; then - pulumi_args="--emoji --stack ${PULUMI_STACK}" + pulumi_args="--emoji --stack ${PULUMI_STACK}" else - pulumi_args="--color never --stack ${PULUMI_STACK}" + pulumi_args="--color never --stack ${PULUMI_STACK}" fi # -# Note that this is somewhat different than the other startup scripts, because at the point we run this -# here we know that we have a server so we can get the version. The other builds do not have server info +# Note that this is somewhat different from the other startup scripts, because at the point we run this +# here we know that we have a server, so we can get the version. The other builds do not have server info # at this point in time. # header "Version Info" @@ -306,9 +307,9 @@ echo "Pulumi version is: $(pulumi version)" echo "Pulumi user is: $(pulumi whoami)" echo "Python version is: $(python --version)" echo "Kubectl version information: " -echo "$(kubectl version -o json)" +kubectl version -o json echo "Python module information: " -echo "$(pip list)" +pip list echo "=====================================================================" echo " " @@ -320,7 +321,6 @@ header "Secrets" cd "${script_dir}/../pulumi/python/kubernetes/secrets" pulumi $pulumi_args up -# TODO: This is using a different project than the AWS deploy; we need to collapse those #80 header "Deploying IC" cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only" pulumi $pulumi_args up @@ -350,7 +350,7 @@ cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" pulumi $pulumi_args up header "Finished!!" 
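# Note: the two lookups below feed the summary output that follows; each one
# falls back to "Cannot Retrieve" instead of aborting the script when the
# value cannot be read.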
-THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve") +THE_FQDN=$(pulumi config get kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config || echo "Cannot Retrieve") THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve") echo " " @@ -370,4 +370,4 @@ echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" echo " " -echo "Please see the documentation in the github repository for more information" +echo "Please see the documentation in the GitHub repository for more information" diff --git a/bin/test-forward.sh b/bin/test-forward.sh index 52985f8..87d4443 100755 --- a/bin/test-forward.sh +++ b/bin/test-forward.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # # This is a simple shell script that sets up port forwards locally for -# the various benchmarking/monitoring tooling that is part of the +# the various benchmarking/monitoring tooling that is part of the # deployment. This should be run on the same machine as your web browser, # then you will be able to connect to the localhost ports to get to the # services. @@ -9,37 +9,36 @@ # This script is designed to clean itself up once a Ctrl-C is issued. # -PID01=$(mktemp) -PID02=$(mktemp) -PID03=$(mktemp) -PID04=$(mktemp) -PID05=$(mktemp) +PID01="$(mktemp)" +PID02="$(mktemp)" +PID03="$(mktemp)" +PID04="$(mktemp)" +PID05="$(mktemp)" # this function is called when Ctrl-C is sent -function trap_ctrlc () -{ - # perform cleanup here - echo "Ctrl-C caught...performing clean up" +function trap_ctrlc() { + # perform cleanup here + echo "Ctrl-C caught...performing clean up" - echo "Doing cleanup" + echo "Doing cleanup" - echo "Kill forwards" - kill $(cat $PID01) - kill $(cat $PID02) - kill $(cat $PID03) - kill $(cat $PID04) - kill $(cat $PID05) + echo "Kill forwards" + kill $(cat "$PID01") + kill $(cat "$PID02") + kill $(cat "$PID03") + kill $(cat "$PID04") + kill $(cat "$PID05") - echo "Remove temp files" - rm $PID01 - rm $PID02 - rm $PID03 - rm $PID04 - rm $PID05 + echo "Remove temp files" + rm "$PID01" + rm "$PID02" + rm "$PID03" + rm "$PID04" + rm "$PID05" - # exit shell script with error code 2 - # if omitted, shell script will continue execution - exit 2 + # exit shell script with error code 2 + # if omitted, shell script will continue execution + exit 2 } # initialise trap to call trap_ctrlc function @@ -48,23 +47,23 @@ trap "trap_ctrlc" 2 ## Kibana Tunnel kubectl port-forward service/elastic-kibana --namespace logstore 5601:5601 & -echo $! > $PID01 +echo $! >"$PID01" ## Grafana Tunnel kubectl port-forward service/prometheus-grafana --namespace prometheus 3000:80 & -echo $! > $PID02 +echo $! >"$PID02" ## Loadgenerator Tunnel kubectl port-forward service/loadgenerator --namespace bos 8089:8089 & -echo $! > $PID03 +echo $! >"$PID03" ## Prometheus Tunnel kubectl port-forward service/prometheus-kube-prometheus-prometheus --namespace prometheus 9090:9090 & -echo $! > $PID04 +echo $! >"$PID04" ## Elasticsearch Tunnel kubectl port-forward service/elastic-coordinating-only --namespace logstore 9200:9200 & -echo $! > $PID05 +echo $! >"$PID05" ## Legend echo "Connections Details" @@ -79,5 +78,3 @@ echo "" echo "Issue Ctrl-C to Exit" ## Wait... 
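# The kubectl port-forward commands above run in the background; this wait
# blocks until Ctrl-C, at which point the trap_ctrlc handler kills the
# forwards and removes the PID temp files.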
wait - - diff --git a/bin/test.py b/bin/test.py index c529609..2e4d62c 100755 --- a/bin/test.py +++ b/bin/test.py @@ -12,8 +12,10 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) TEST_FILE_PATTERN = 'test_*.py' -TestsInDir = collections.namedtuple(typename='TestsInDir', field_names=['directory', 'loader']) -RunDirectories = collections.namedtuple(typename='RunDirectories', field_names=['start_dir', 'top_level_dir']) +TestsInDir = collections.namedtuple( + typename='TestsInDir', field_names=['directory', 'loader']) +RunDirectories = collections.namedtuple( + typename='RunDirectories', field_names=['start_dir', 'top_level_dir']) test_dirs: List[TestsInDir] = [] diff --git a/bin/test_runner.sh b/bin/test_runner.sh index 5ebfab4..2efa155 100755 --- a/bin/test_runner.sh +++ b/bin/test_runner.sh @@ -12,10 +12,10 @@ set -o pipefail # don't hide errors within pipes # for docker but not GH actions # -if [ -z "$1" ] ; then - source ~/pulumi/python/venv/bin/activate - ~/pulumi/python/venv/bin/python3 ~/bin/test.py +if [ -z "$1" ]; then + source ~/pulumi/python/venv/bin/activate + ~/pulumi/python/venv/bin/python3 ~/bin/test.py else - source $1/pulumi/python/venv/bin/activate - $1/pulumi/python/venv/bin/python3 $1/bin/test.py + source "$1/pulumi/python/venv/bin/activate" + "$1/pulumi/python/venv/bin/python3" "$1/bin/test.py" fi diff --git a/bin/testcap.sh b/bin/testcap.sh deleted file mode 100755 index d8a49a3..0000000 --- a/bin/testcap.sh +++ /dev/null @@ -1,359 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Test to see if we have persistent volume support; to do this we provision a PV using the default storage class -# and then conduct a read and write test against it. -# -# Since MARA is intended as a testbed application, the performance numbers are not a particular concern, however it is -# advised that you test your PV provider for performance and concurrency if you are in production, development, or -# quality assurance testing. For example, the NFS volume support is known to potentially cause issues due to the way -# that NFS works (latency, performance). -# - -# Timeout Value -# We check in 15 second increments -TIMEOUT=15 - - -# Clean up the namespace.... -cleanitup() { - echo "Deleting testspace namespace" - echo "This should remove all test resources" - kubectl delete ns testspace - if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to remove namespace testpsace" - echo " " - exit 100 - fi -} - -echo " " -echo "IMPORTANT NOTICE!" -echo "====================================================================================================" -echo " This script is deprecated and will be removed in a future release. " -echo " " -echo " This script may not function properly in your environment; run at your own risk. " -echo " " -echo " For more information, please see Discussion #155 in the repository (nginx.com/mara)" -echo "====================================================================================================" -sleep 5 - -echo " " -echo "This script will perform testing on the current kubernetes installation using the currently active kubernetes" -echo "configuration and context." -echo " " -echo "Any failures should be investigated, as they will indicate that the installation does not meet the minimum set" -echo "of capabilities required to run MARA." -echo " " -sleep 5 - -# We need kubectl to do any of this....
-if command -v kubectl > /dev/null; then - echo "Found kubectl - continuing" -else - echo "Cannot proceed without kubectl!" - echo "Please install kubectl and ensure it is in your path." - exit 101 -fi - -# Write out the configuration so we can see it -echo "Dumping current configuration:" -kubectl config view -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to connect to dump configuration from kubeconfig file." - echo "Please check your kubeconfig file." - echo " " - exit 102 -else - echo " " -fi - -# Make sure we can connect -echo "Connecting to cluster:" -kubectl cluster-info -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to connect to cluster and pull information!" - echo "Please make sure you are able to connect to the cluster context defined in your kubeconfig file" - echo " " - exit 103 -else - echo "Success connecting to cluster" - echo " " -fi - - -# We are going to do all our testing in a dedicated namespace -echo "Test ability to create a namespace:" -kubectl create ns testspace -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to create namespace testspace!" - echo "Please make sure you are able to create namespaces in your cluster" - echo " " - exit 104 -fi -echo "Namespace testspace created" -echo " " - -# Create a PV Claim -echo "Create a persistent volume" -kubectl apply -f - << EOF -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: maratest01 - namespace: testspace -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 5G -EOF - -if [ $? -ne 0 ] ; then - echo "FAILURE! Error trying to create persistent volume!" - echo "This could be related to an error running the YAML or an issue attempting to create" - echo "a persistent volume." - echo " " - echo "Please make sure you are able to create persistent volumes in your cluster and try again." - echo " " - cleanitup - exit 105 -fi -echo "Persistent volume yaml applied" -echo " " - -# Perform a write test -echo "Test writing to the persistent volume" -kubectl apply -f - << EOF -apiVersion: batch/v1 -kind: Job -metadata: - name: write - namespace: testspace -spec: - template: - metadata: - name: write - spec: - containers: - - name: write - image: ubuntu:xenial - command: ["dd","if=/dev/zero","of=/mnt/pv/test.img","bs=1G","count=1","oflag=dsync"] - volumeMounts: - - mountPath: "/mnt/pv" - name: maratest01 - volumes: - - name: maratest01 - persistentVolumeClaim: - claimName: maratest01 - restartPolicy: Never -EOF - -WRITEJOB="FIRSTRUN" -KOUNT=1 -while [ "$WRITEJOB" != "Completed" ] && [ $KOUNT -lt $TIMEOUT ] ; do - WRITEJOB=$(kubectl get pods --selector=job-name=write --namespace testspace --output=jsonpath='{.items[*].status.containerStatuses[0].state.terminated.reason}') - echo "Attempt $KOUNT of $TIMEOUT: Waiting for job to complete..." - sleep 15 - ((KOUNT++)) -done - -if [ $KOUNT -ge $TIMEOUT ] ; then - echo "FAILURE! Unable to create or write to persistent volume!" - echo "Please make sure you are able to create and write to persistent volumes in your cluster." - cleanitup - exit 106 -elif [ "$WRITEJOB" == "Completed" ] ; then - echo "Persistent volume write test completed; logs follow:" - kubectl logs --selector=job-name=write --namespace testspace - echo " " -else - echo "Should not get here! Exiting!" 
- cleanitup - exit 107 -fi - - -# Perform a read test -echo "Test reading from the persistent volume" -kubectl apply -f - << EOF -apiVersion: batch/v1 -kind: Job -metadata: - name: read - namespace: testspace -spec: - template: - metadata: - name: read - spec: - containers: - - name: read - image: ubuntu:xenial - command: ["dd","if=/mnt/pv/test.img","of=/dev/null","bs=8k"] - volumeMounts: - - mountPath: "/mnt/pv" - name: maratest01 - volumes: - - name: maratest01 - persistentVolumeClaim: - claimName: maratest01 - restartPolicy: Never -EOF - -READJOB="FIRSTRUN" -KOUNT=1 -while [ "$READJOB" != "Completed" ] && [ $KOUNT -lt $TIMEOUT ] ; do - READJOB=$(kubectl get pods --selector=job-name=read --namespace testspace --output=jsonpath='{.items[*].status.containerStatuses[0].state.terminated.reason}') - echo "Attempt $KOUNT of $TIMEOUT: Waiting for job to complete..." - sleep 15 - ((KOUNT++)) -done - -if [ $KOUNT -ge $TIMEOUT ] ; then - echo "FAILURE! Unable to read from persistent volume!" - echo "Please make sure you are able to read from persistent volumes in your cluster" - cleanitup - exit 108 -elif [ "$READJOB" == "Completed" ] ; then - echo "Persistent volume read test completed; logs follow:" - kubectl logs --selector=job-name=read --namespace testspace - echo " " -else - echo "Should not get here! Exiting!" - cleanitup - exit 109 -fi - -# Clean up... -echo "Cleaning up read job" -kubectl --namespace testspace delete job read -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete read job!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 110 -else - echo "Complete" - echo " " -fi - -echo "Cleaning up write job" -kubectl --namespace testspace delete job write -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete write job!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 111 -else - echo "Complete" - echo " " -fi - -echo "Cleaning up persistent volume" -kubectl --namespace testspace delete pvc maratest01 -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to clean up persistent volume!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 112 -else - echo "Complete" - echo " " -fi - -echo "Deploying KUARD application" -kubectl apply -f - << EOF -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: kuard - name: kuard - namespace: testspace -spec: - replicas: 2 - selector: - matchLabels: - app: kuard - template: - metadata: - labels: - app: kuard - spec: - containers: - - image: gcr.io/kuar-demo/kuard-amd64:1 - name: kuard ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: kuard - name: kuard - namespace: testspace -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: kuard - sessionAffinity: None - type: LoadBalancer -EOF - -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to create KUARD application!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 113 -fi - -echo "Sleeping 30 to wait for IP assignment" -sleep 30 -echo "Checking for External IP address" -echo " " -EXTIP=$(kubectl get service kuard --namespace testspace --output=jsonpath='{.status.loadBalancer.ingress[*].ip}') -if [ "$EXTIP" == "" ] ; then - echo "FAILURE! Unable to pull loadBalancer IP address!" - echo "This could mean that you do not have a loadBalancer egress defined for the cluster, or it could" - echo "be misconfigured. Please remediate this issue." 
- echo " " - cleanitup - exit 114 -fi - -echo "External IP is $EXTIP" -echo " " - -echo "Deleting KUARD deployment" -kubectl --namespace testspace delete deployment kuard -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete KUARD deployment!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 115 -fi - -echo "Deleting KUARD service" -kubectl --namespace testspace delete service kuard -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete KUARD service!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 116 -fi - -# If we reached this point we are good! -cleanitup -echo " " -echo "==============================================================" -echo "| All tests passed! This system meets the basic requirements |" -echo "| to deploy MARA. |" -echo "==============================================================" - diff --git a/config/pulumi/README.md b/config/pulumi/README.md index 5e7354e..290c6b3 100644 --- a/config/pulumi/README.md +++ b/config/pulumi/README.md @@ -1,19 +1,26 @@ -## Directory +# Directory + `/config/pulumi` ## Purpose -This directory contains the yaml configuration files used for the pulumi installation. + +This directory contains the yaml configuration files used for the pulumi +installation. ## Key Files -- [`Pulumi.stackname.yaml.example`](./Pulumi.stackname.yaml.example) Contains the list of variables that -this installation understands. -- [`environmenet`](./environment) Created at runtime; this file contains details about the environment including -the stack name, and the ASW profile and region (if deploying in AWS). -- `Pulumi.YOURSTACK.yaml` Contains the list of variables associated with the stack with the name YOURSTACK. This -configuration will be created at the first run for the named stack, but it can be created in advance with an -editor. + +* [`Pulumi.stackname.yaml.example`](./Pulumi.stackname.yaml.example) Contains + the list of variables that this installation understands. +* [`environmenet`](./environment) Created at runtime; this file contains details + about the environment including the stack name, and the ASW profile and region + (if deploying in AWS). +* `Pulumi.YOURSTACK.yaml` Contains the list of variables associated with the + stack with the name YOURSTACK. This configuration will be created at the first + run for the named stack, but it can be created in advance with an editor. ## Notes -Many of the variables have defaults that are enforced through the Pulumi code for each project, however -there are certain variables that are required. When the process reaches one of these variables and it -is not set the process will abort with an error message. \ No newline at end of file + +Many of the variables have defaults that are enforced through the Pulumi code +for each project, however there are certain variables that are required. When +the process reaches one of these variables and it is not set the process will +abort with an error message. diff --git a/docker/README.md b/docker/README.md index af71fb6..efe69f6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,16 +1,18 @@ -## Directory +# Directory `/docker` ## Purpose -This directory contains the necessary code to create a docker image that can then be used to deploy MARA. Each docker -image created is self-sufficient with all necessary tools installed. 
In order to fully understand how to use these +This directory contains the necessary code to create a docker image that can +then be used to deploy MARA. Each docker image created is self-sufficient with +all necessary tools installed. In order to fully understand how to use these images, please see the [Getting Started](../docs/getting_started.md) guide. ## Key Files -- [`build_dev_docker_image.sh`](./build_dev_docker_image.sh) Controlling script for docker build process. +* [`build_dev_docker_image.sh`](./build_dev_docker_image.sh) Controlling script + for the docker build process. ## Notes diff --git a/docs/accessing_mgmt_tools.md b/docs/accessing_mgmt_tools.md index 77bc0fc..31b69c5 100644 --- a/docs/accessing_mgmt_tools.md +++ b/docs/accessing_mgmt_tools.md @@ -1,4 +1,4 @@ -## Accessing the Management Tools in MARA +# Accessing the Management Tools in MARA Currently, the management tool suite in MARA consists of: @@ -8,17 +8,20 @@ Currently, the management tool suite in MARA consists of: - [Elasticsearch](https://elastic.co) - [Kibana](https://www.elastic.co/kibana/) -Each of these tools provides an interface that can be reached through an endpoint exposed by the tool. For security -reasons these tools are not exposed to the internet, which means you will need to use some form of port forwarding to -access them. +Each of these tools provides an interface that can be reached through an +endpoint exposed by the tool. For security reasons these tools are not exposed +to the internet, which means you will need to use some form of port forwarding +to access them. -### Running MARA on your Local Workstation +## Running MARA on your Local Workstation -If you are running MARA on your local workstation, you can use the [`test-forward.sh`](../bin/test-forward.sh) -script to use [`kubectl`](https://kubernetes.io/docs/reference/kubectl/) to forward the ports on your behalf. These -ports are all forwarded to the corresponding port on localhost as shown below: +If you are running MARA on your local workstation, you can use the +[`test-forward.sh`](../bin/test-forward.sh) script to use +[`kubectl`](https://kubernetes.io/docs/reference/kubectl/) to forward the ports +on your behalf. These ports are all forwarded to the corresponding port on +localhost as shown below: -``` +```txt Connections Details ==================================== Kibana: http://localhost:5601 @@ -33,22 +36,25 @@ Issue Ctrl-C to Exit Issuing a Ctrl-C will cause the ports to close. -### Running MARA Somewhere Else +## Running MARA Somewhere Else -In the event you are running MARA somewhere else - in the cloud, on a different server, in a VM on your laptop, etc you -will need to go through an additional step. Note that this is just one way of accomplishing this, and depending on your -environment you may want or need to do this differently. +In the event you are running MARA somewhere else - in the cloud, on a different +server, in a VM on your laptop, etc., you will need to go through an additional +step. Note that this is just one way of accomplishing this, and depending on +your environment you may want or need to do this differently. -The easiest thing is to install `kubectl` on the system you want to access the MARA tooling from and then copy over the -`kubeconfig` from your MARA deployment system.
This will then allow you to copy over the `test-forward.sh` script and +The easiest thing is to install `kubectl` on the system you want to access the +MARA tooling from and then copy over the `kubeconfig` from your MARA deployment +system. This will then allow you to copy over the `test-forward.sh` script and use that to build the tunnels locally. -### Edge Cases +## Edge Cases -There are definitely cases where these solutions will not work. Please see the "More Information" section below, and if -you have one of these cases and discover a solution please open a PR so that we can add to this section. +There are definitely cases where these solutions will not work. Please see the +"More Information" section below, and if you have one of these cases and +discover a solution please open a PR so that we can add to this section. -### More Information +## More Information To learn more about Kubernetes port-forwarding, please see -[this article](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) \ No newline at end of file +[this article](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) diff --git a/docs/dir_template.md b/docs/dir_template.md deleted file mode 100644 index 4d8d41c..0000000 --- a/docs/dir_template.md +++ /dev/null @@ -1,11 +0,0 @@ -## Directory -`/` - -## Purpose -Main purpose for this directory. - -## Key Files -- [`filename`](./file-link) Draw out details for key files along with a link. - -## Notes -Any other information the user should know. diff --git a/docs/getting_started.md b/docs/getting_started.md index 5da0c20..dc1f58a 100644 --- a/docs/getting_started.md +++ b/docs/getting_started.md @@ -1,39 +1,48 @@ # Getting Started Guide -There are a few ways to get the reference architecture set up on your machine. You can install the dependencies locally -and run the project. Alternatively, the project can be run in a Docker container that you have built. +There are a few ways to get the reference architecture set up on your machine. +You can install the dependencies locally and run the project. Alternatively, +the project can be run in a Docker container that you have built. Here is a rough outline of the steps to get started: -1. Clone git repository, including the Bank of Sirius submodule. This can be done by - running `git clone --recurse-submodules https://github.com/nginxinc/kic-reference-architectures` -2. Install dependencies (install section below - python3, python venv module, git, docker, make). -3. Setup Pulumi credentials. -4. Setup AWS credentials OR Setup `kubectl` to connect to an existing cluster -5. Run `./bin/start.sh` and answer the prompts. +1. Clone git repository, including the Bank of Sirius submodule. This can be +done by running +`git clone --recurse-submodules https://github.com/nginxinc/kic-reference-architectures` + +1. Install dependencies (install section below - python3, python venv module, +git, docker, make). + +1. Setup Pulumi credentials. + +1. Setup AWS credentials OR Setup `kubectl` to connect to an existing cluster + +1. Run `./bin/start.sh` and answer the prompts. 
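Taken together, a minimal end-to-end sketch of these steps for a `kubeconfig`
deployment might look like the following (the repository URL and scripts are
the ones referenced in this guide; the exact prompts you answer will vary by
provider):

```sh
# Clone the repository together with the Bank of Sirius submodule
$ git clone --recurse-submodules https://github.com/nginxinc/kic-reference-architectures
$ cd kic-reference-architectures
# Install Python, Pulumi, and the supporting tooling into the venv directory
$ bash ./bin/setup_venv.sh
$ source ./pulumi/python/venv/bin/activate
# Authenticate against your Pulumi state store, then start the deployment
$ pulumi login
$ ./bin/start.sh
```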
## Install on macOS with HomeBrew and Docker Desktop -``` +```sh # Install Homebrew for the Mac: https://brew.sh/ -# Install Docker Toolbox for the Mac: https://docs.docker.com/docker-for-mac/install/ +# Install Docker Toolbox for the Mac: +# https://docs.docker.com/docker-for-mac/install/ $ brew install make git python3 ``` ## Install on macOS with Docker Desktop -``` -# In a terminal window with the MacOS UI, install developer tools if they haven't already -# been installed. +```sh +# In a terminal window with the macOS UI, install developer tools if they +# haven't already been installed. $ xcode-select --install $ bash ./bin/setup_venv.sh ``` ## Install with Debian/Ubuntu Linux -``` +```sh $ sudo apt-get update -$ sudo apt-get install --no-install-recommends curl ca-certificates git make python3-venv docker.io +$ sudo apt-get install --no-install-recommends curl ca-certificates git make \ +python3-venv docker.io $ sudo usermod -aG docker $USER $ newgrp docker $ bash ./bin/setup_venv.sh @@ -41,7 +50,7 @@ $ bash ./bin/setup_venv.sh ## Install with CentOS/Redhat/Rocky Linux -``` +```sh # Install Docker Yum repository $ sudo dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo $ sudo yum install python3-pip make git docker-ce @@ -55,7 +64,7 @@ $ bash ./bin/setup_venv.sh Run the following helper script to build a Debian container image. -``` +```sh $ ./docker/build_dev_docker_image.sh debian ``` @@ -65,43 +74,50 @@ $ ./docker/build_dev_docker_image.sh debian #### Python 3.7 or Newer or Prerequisites for Building Python 3.7 or Newer -In this project, Pulumi executes Python code that creates cloud and Kubernetes infrastructure. In order for it to work, -Python 3 and the [venv module](https://docs.python.org/3/library/venv.html) -must be installed. Alternative, if GNU make and the gcc compiler are installed the setup script can build and install -Python 3. +In this project, Pulumi executes Python code that creates cloud and Kubernetes +infrastructure. In order for it to work, Python 3 and the +[venv module](https://docs.python.org/3/library/venv.html) must be installed. +Alternatively, if GNU make and the gcc compiler are installed, the setup script +can build and install Python 3. Note that the minimum supported version is 3.7. #### Git -The `git` command line tool is required for checking out KIC source code from GitHub and for the KIC image build -process. +The `git` command line tool is required for checking out KIC source code from +GitHub and for the KIC image build process. #### Make -In order to build the Ingress Controller from source, GNU `make` is required to be installed on the running system. If -you are not building from source, you do not need to install `make`. By default, the build script looks for -`gmake` and then `make`. +In order to build the Ingress Controller from source, GNU `make` is required to +be installed on the running system. If you are not building from source, you do +not need to install `make`. By default, the build script looks for `gmake` and +then `make`. #### Docker -Docker is required because the Ingress Controller is a Docker image and needs Docker to generate the image. +Docker is required because the Ingress Controller is a Docker image and needs +Docker to generate the image. -**NOTE**: The kubeconfig deployment option currently only allows you to deploy from a registry.
This allows you to -deploy the NGINX IC or the NGINX Plus IC (with a JWT from your F5 account) +**NOTE**: The kubeconfig deployment option currently only allows you to deploy +from a registry. This allows you to deploy the NGINX IC or the NGINX Plus IC +(with a JWT from your F5 account). #### Kubernetes -Although not required, installing the [CLI tool `kubectl`](https://kubernetes.io/docs/tasks/tools/) -will allow you to interact with the Kubernetes cluster that you have stood up using this project. This -tool is also installed as part of the venv that is created and can be used from that directory. +Although not required, installing the +[CLI tool `kubectl`](https://kubernetes.io/docs/tasks/tools/) will allow you to +interact with the Kubernetes cluster that you have stood up using this project. +This tool is also installed as part of the venv that is created and can be used +from that directory. #### Setup -Within the project, you will need to install Python and dependent libraries into the `venv` directory. To do this is to -invoke the [`./bin/setup_venv.sh`](../bin/setup_venv.sh) -from the project root. This script will install into -the [virtual environment](https://docs.python.org/3/tutorial/venv.html) +Within the project, you will need to install Python and dependent libraries +into the `venv` directory. To do this, invoke the +[`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project root. This +script will install into the +[virtual environment](https://docs.python.org/3/tutorial/venv.html) directory: * Python 3 (via pyenv) if it is not already present @@ -109,180 +125,220 @@ directory: * AWS CLI utilities * `kubectl` -After running [`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project root, you will need to activate the newly -created virtual environment by running -`source ./pulumi/python/venv/bin/activate` from the project root. This will load the virtual environment's path and -other environment variables into the current shell. +After running [`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project +root, you will need to activate the newly created virtual environment by +running `source ./pulumi/python/venv/bin/activate` from the project root. +This will load the virtual environment's path and other environment variables +into the current shell. ## Post Install Configuration ### Stack Name -For AWS, Linode, or Digital Ocean deployments you will need to add the variable `PULUMI_STACK_NAME` to the environment -file for the deployment at [`../config/pulumi/environment`](../config/pulumi/environment). This is the name that will -be used for the provisioned Pulumi stack. +For AWS, Linode, or Digital Ocean deployments you will need to add the variable +`PULUMI_STACK_NAME` to the environment file for the deployment at +[`../config/pulumi/environment`](../config/pulumi/environment). This is the name +that will be used for the provisioned Pulumi stack. -If you are running a `kubeconfig` deployment, the process will prompt you for the value of `PULUMI_STACK_NAME` and -update the environment file for you. +If you are running a `kubeconfig` deployment, the process will prompt you for +the value of `PULUMI_STACK_NAME` and update the environment file for you.
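For illustration, the environment file is a plain `KEY=value` file; a minimal
sketch, assuming a stack named `mara-dev` (the name here is only an example),
would be:

```sh
# Seed config/pulumi/environment with the stack name (run from the project root)
$ echo "PULUMI_STACK_NAME=mara-dev" >> ./config/pulumi/environment
```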
### Kubeconfig -If you are using an existing kubernetes installation for this project, you will need to provide three pieces of -information to the installer: +If you are using an existing kubernetes installation for this project, you will +need to provide three pieces of information to the installer: -- The full path to a kubeconfig file. -- The name of the cluster you are using. -- The cluster context you are using. +* The full path to a kubeconfig file. +* The name of the cluster you are using. +* The cluster context you are using. The easiest way to test this is to run the command: `kubectl --kubeconfig="yourconfig" --cluster="yourcluster" --context="yourcontext"` ### AWS -*Note:* The AWS deployment has been updated from v1.1 and no longer uses the [`../bin/start.sh`](../bin/start.sh) -script for deployment. If you attempt to use the script to deploy to AWS you will receive an error message. Please -use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for these deployments. +*Note:* The AWS deployment has been updated from v1.1 and no longer uses the +[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to +use the script to deploy to AWS you will receive an error message. Please +use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for +these deployments. If you are using AWS as your infrastructure provider [configuring Pulumi for AWS](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/) is necessary. If you already have run the [`./bin/setup_venv.sh`](../bin/setup_venv.sh) -script, you will have the `aws` CLI tool installed in the path `../pulumi/python/venv/bin/aws` +script, you will have the `aws` CLI tool installed in the path +`../pulumi/python/venv/bin/aws` and you do not need to install it to run the steps in the Pulumi Guide. -If you want to avoid using environment variables, AWS profile and region definitions can be contained in -the `config/Pulumi..yaml` files in each project. Refer to the Pulumi documentation for details on how to do this. -When you run the [`../pulumi/python/runnner`](../pulumi/python/runner) program and select your provider you will be -prompted for all variables necessary to use that provider along with MARA specific variables. This information will -be added to the `../config/Pulumi/Pulumi..yaml` configuration file. This is the main configuration file for the -project, although there is one other configuration file used to maintain secrets in the -[`../pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) kubernetes extras functionality. -For more details on those, please see the README.md in those directories. +If you want to avoid using environment variables, AWS profile and region +definitions can be contained in the `config/Pulumi..yaml` files in each +project. Refer to the Pulumi documentation for details on how to do this. +When you run the [`../pulumi/python/runner`](../pulumi/python/runner) program +and select your provider you will be prompted for all variables necessary to +use that provider along with MARA specific variables. This information will +be added to the `../config/Pulumi/Pulumi..yaml` configuration file. This is +the main configuration file for the project, although there is one other +configuration file used to maintain secrets in the +[`../pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) +kubernetes extras functionality.
For more details on those, please see the +README.md in those directories. ### Digital Ocean -*Note:* The Digital Ocean deployment has been updated from v1.1 and no longer uses the -[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to use the script to deploy to AWS you will -receive an error message. Please use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for these -deployments. +*Note:* The Digital Ocean deployment has been updated from v1.1 and no longer +uses the [`../bin/start.sh`](../bin/start.sh) script for deployment. If you +attempt to use the script to deploy to Digital Ocean you will receive an error message. +Please use the new [`../pulumi/python/runner`](../pulumi/python/runner) program +for these deployments. You will need to create a [Digital Ocean Personal API Token](https://docs.digitalocean.com/reference/api/create-personal-access-token/) -for authentication to Digital Ocean. When you run the [`./pulumi/python/runnner`](./pulumi/python/runner) program and -select your provider you will be prompted for all variables necessary to use that provider along with MARA specific -variables. This information will be added to the `./config/Pulumi/Pulumi..yaml` configuration file. This is the -main configuration file for the project, although there is one other configuration file used to maintain secrets in the -[`./pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) kubernetes extras functionality. -For more details on those, please see the README.md in those directories. +for authentication to Digital Ocean. When you run the +[`./pulumi/python/runner`](./pulumi/python/runner) program and select your +provider you will be prompted for all variables necessary to use that provider +along with MARA specific variables. This information will be added to the +`./config/Pulumi/Pulumi..yaml` configuration file. This is the main +configuration file for the project, although there is one other configuration file +used to maintain secrets in the +[`./pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) +kubernetes extras functionality. For more details on those, please see the +README.md in those directories. ### Linode -*Note:* The Linode deployment has been updated from v1.1 and no longer uses the [`../bin/start.sh`](../bin/start.sh) -script for deployment. If you attempt to use the script to deploy to AWS you will receive an error message. Please -use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for these deployments. +*Note:* The Linode deployment has been updated from v1.1 and no longer uses the +[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to +use the script to deploy to Linode you will receive an error message. Please +use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for +these deployments. You will need to create a -[Linode API Token](https://www.linode.com/docs/products/tools/linode-api/guides/get-access-token/) for authentication -to Linode. When you run the [`./pulumi/python/runnner`](./pulumi/python/runner) program and -select your provider you will be prompted for all variables necessary to use that provider along with MARA specific -variables. This information will be added to the `./config/Pulumi/Pulumi..yaml` configuration file.
This is the -main configuration file for the project, although there is one other configuration file used to maintain secrets in the -[`./pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) kubernetes extras functionality. -For more details on those, please see the README.md in those directories. +[Linode API Token](https://www.linode.com/docs/products/tools/linode-api/guides/get-access-token/) +for authentication to Linode. When you run the +[`./pulumi/python/runner`](./pulumi/python/runner) program and select your +provider you will be prompted for all variables necessary to use that provider +along with MARA specific variables. This information will be added to the +`./config/Pulumi/Pulumi..yaml` configuration file. This is the main +configuration file for the project, although there is one other configuration file +used to maintain secrets in the +[`./pulumi/python/kubernetes/secrets`](./pulumi/python/kubernetes/secrets) +kubernetes extras functionality. For more details on those, please see the +README.md in those directories. ### Kubeconfig Deployments: MicroK8s / Minikube / K3s / Other -Deployments that use a `kubeconfig` file to access an existing K8 installation will continue to use the -[`../bin/start.sh`](../bin/start.sh) script. Additionally, these deployments are not able to build the Ingress -Controller and instead need to download from the NGINX repositories. The installation of NGINX+ is supported via the -use of a JWT, if desired. +Deployments that use a `kubeconfig` file to access an existing K8 installation +will continue to use the [`../bin/start.sh`](../bin/start.sh) script. +Additionally, these deployments are not able to build the Ingress Controller +and instead need to download from the NGINX repositories. The installation of +NGINX+ is supported via the use of a JWT, if desired. -These deployments will be moved over to use the [`../pulumi/python/runner`](../pulumi/python/runner) program in a -future release, which will bring them to parity for NGINX IC build/deployment with the other infrastructures. +These deployments will be moved over to use the +[`../pulumi/python/runner`](../pulumi/python/runner) program in a future +release, which will bring them to parity for NGINX IC build/deployment with the +other infrastructures. ### Pulumi If you already have run the [`./bin/setup_venv.sh`](../bin/setup_venv.sh) -script, you will have the `pulumi` CLI tool installed in the path `venv/bin/pulumi`. You will need to make an account -on [pulumi.com](https://pulumi.com) or alternatively use another form of state store. Next, login to pulumi from the CLI -by running the -command [`./pulumi/python/venv/bin/pulumi login`](https://www.pulumi.com/docs/reference/cli/pulumi_login/). Refer to the -Pulumi documentation for additional details regarding the command and alternative state stores. +script, you will have the `pulumi` CLI tool installed in the path +`venv/bin/pulumi`. You will need to make an account on +[pulumi.com](https://pulumi.com) or alternatively use another form of state +store. Next, log in to pulumi from the CLI by running the command +[`./pulumi/python/venv/bin/pulumi login`](https://www.pulumi.com/docs/reference/cli/pulumi_login/). +Refer to the Pulumi documentation for additional details regarding the command +and alternative state stores.
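For example, assuming the venv has been set up as described above, either of
the following works; `--local` is the stock Pulumi flag for keeping state on
the local filesystem instead of pulumi.com:

```sh
# Log in to the default pulumi.com backend
$ ./pulumi/python/venv/bin/pulumi login
# Or use the local filesystem as the state store
$ ./pulumi/python/venv/bin/pulumi login --local
```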
## Running the Project -Provided you have completed the installation steps, the easiest way to run the project is to run -[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or Digital Ocean and -[`../bin/start.sh`](../bin/start.sh) for `kubeconfig` deployments. This process will prompt you for all required -variables for your deployment type. This information will be used to populate the configuration files. +Provided you have completed the installation steps, the easiest way to run the +project is to run [`../pulumi/python/runner`](../pulumi/python/runner) for AWS, +Linode, or Digital Ocean and [`../bin/start.sh`](../bin/start.sh) for +`kubeconfig` deployments. This process will prompt you for all required +variables for your deployment type. This information will be used to populate +the configuration files. -Alternatively, you can enter into each Pulumi project directory and execute each project independently by doing -`pulumi up`. Take a look at `start.sh` and dependent scripts to get a feel for the flow. +Alternatively, you can enter into each Pulumi project directory and execute +each project independently by doing `pulumi up`. Take a look at `start.sh` and +dependent scripts to get a feel for the flow. -If you want to destroy the entire environment you can run [`../pulumi/python/runner`](../pulumi/python/runner) for AWS, -Linode, or Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments. +If you want to destroy the entire environment you can run +[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or +Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments. Detailed information and warnings are emitted by the process as it runs. ### Running the Project in a Docker Container -If you are using a Docker container to run Pulumi, you will want to run the with the docker socket mounted, like the -following command. +If you are using a Docker container to run Pulumi, you will want to run the +container with the docker socket mounted, like the following command. -``` -docker run --interactive --tty --volume /var/run/docker.sock:/var/run/docker.sock \ - kic-ref-arch-pulumi-aws: +```console +$ docker run --interactive --tty \ + --volume /var/run/docker.sock:/var/run/docker.sock \ + kic-ref-arch-pulumi-aws: ``` -If you already have set up Pulumi, kubeconfig information, and/or AWS credentials on the host machine, you can mount -them into the container using Docker with the following options. - -``` -docker run --interactive --tty \ - --volume /var/run/docker.sock:/var/run/docker.sock \ - --volume $HOME/.pulumi:/pulumi/projects/kic-reference-architectures/.pulumi \ - --volume $HOME/.aws:/pulumi/projects/kic-reference-architectures/.aws \ - --volume $HOME/.kube:/pulumi/projects/kic-reference-architectures/.kube \ - kic-ref-arch-pulumi-aws:debian +If you have already set up Pulumi, kubeconfig information, and/or AWS +credentials on the host machine, you can mount them into the container using +Docker with the following options.
+ +```console +$ docker run --interactive --tty \ + --volume /var/run/docker.sock:/var/run/docker.sock \ + --volume $HOME/.pulumi:/pulumi/projects/kic-reference-architectures/.pulumi \ + --volume $HOME/.aws:/pulumi/projects/kic-reference-architectures/.aws \ + --volume $HOME/.kube:/pulumi/projects/kic-reference-architectures/.kube \ + kic-ref-arch-pulumi-aws:debian ``` ### Accessing the Application -The final output from the startup process will provide you with detailed information on how to access your project. This -information will vary based on the K8 distribution that you are deploying against; the following output is from a -deployment against an existing K8 installation using the *kubeconfig* option: - -``` +The final output from the startup process will provide you with detailed +information on how to access your project. This information will vary based on +the K8 distribution that you are deploying against; the following output is +from a deployment against an existing K8 installation using the *kubeconfig* +option: +```console Next Steps: -1. Map the IP address (192.168.100.100) of your Ingress Controller with your FQDN (mara.example.com). -2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools. +1. Map the IP address (192.168.100.100) of your Ingress Controller with your + FQDN (mara.example.com). +2. Use the ./bin/test-forward.sh program to establish tunnels you can use to + connect to the management tools. 3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment. -To review your configuration options, including the passwords defined, you can access the pulumi secrets via the +To review your configuration options, including the passwords defined, you can +access the pulumi secrets via the following commands: -Main Configuration: pulumi config -C /jenkins/workspace/jaytest/bin/../pulumi/python/config -Bank of Sirius (Example Application) Configuration: pulumi config -C /jenkins/workspace/jaytest/bin/../pulumi/python/kubernetes/applications/sirius +Main Configuration: pulumi config -C +Bank of Sirius (Example Application) Configuration: pulumi config -C K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress -Please see the documentation in the github repository for more information - +Please see the documentation in the github repository for more information ``` ### Accessing the Management Tooling -Please see the document [Accessing Management Tools in MARA](./accessing_mgmt_tools.md) for information on how to access -these tools. +Please see the document +[Accessing Management Tools in MARA](./accessing_mgmt_tools.md) for information +on how to access these tools. ### Cleaning Up -If you want to completely remove all the resources you have provisioned, run the -[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or Digital Ocean or -[`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments. Detailed information and warnings are emitted by the +If you want to completely remove all the resources you have provisioned, +run the [`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, +or Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` +deployments. Detailed information and warnings are emitted by the process as it runs. Be careful because this will **DELETE ALL** the resources you have provisioned. 
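As a sketch, tearing down a `kubeconfig` deployment is a single command run
from the project root with the venv activated; treat it as destructive and
review the warnings it prints:

```sh
$ ./bin/destroy.sh
```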
## Other Resources -Starting with release `v1.1`, the MARA project has begun the process of transitioning the deployment logic away from -BASH scripts and instead using the [Pulumi Automation API](https://www.pulumi.com/docs/guides/automation-api/) with -Python. For more information on this, please see this [Design Document](../pulumi/python/automation/DESIGN.md). \ No newline at end of file + +Starting with release `v1.1`, the MARA project has begun the process of +transitioning the deployment logic away from BASH scripts and instead using the +[Pulumi Automation API](https://www.pulumi.com/docs/guides/automation-api/) with +Python. For more information on this, please see this +[Design Document](../pulumi/python/automation/DESIGN.md). diff --git a/docs/status-and-issues.md b/docs/status-and-issues.md index 49ac95a..e8460bb 100644 --- a/docs/status-and-issues.md +++ b/docs/status-and-issues.md @@ -1,30 +1,39 @@ # Overview -This project is a work in progress and as such there are a number of areas for improvement. As of this writing, the -development process is primarily using AWS and MicroK8s for development and testing. However, there is manual testing -being undertaken on several other K8 providers. Current information on known issues, bugs, and open feature requests can -be seen on the [Project GitHub Issue Page](https://github.com/nginxinc/kic-reference-architectures/issues). +This project is a work in progress and as such there are a number of areas for +improvement. As of this writing, the development process is primarily using AWS +and MicroK8s for development and testing. However, there is manual testing +being undertaken on several other K8 providers. Current information on known +issues, bugs, and open feature requests can be seen on the +[Project GitHub Issue Page](https://github.com/nginxinc/kic-reference-architectures/issues). Additionally, the core contributors are available for discussion on the [Project GitHub Discussion Page](https://github.com/nginxinc/kic-reference-architectures/discussions) ## Provider Status -This matrix lists out the currently tested configurations, along with any notes on that configuration. The matrix -includes the following: +This matrix lists out the currently tested configurations, along with any notes +on that configuration. The matrix includes the following: -- K8 Provider: The name of the provider -- Infrastructure Support: Does the project stand up the infrastructure with this provider? -- Ingress Controller Options: What are the options for IC deployment? -- FQDN/IP: How does the project handle the IP addressing and FQDN for the certificates? -- Notes: Any additional information on the provider / project interaction. +* K8 Provider: The name of the provider -All of these configurations use Pulumi code within Python as the Infrastructure as Code (IaaC) provider. +* Infrastructure Support: Does the project stand up the infrastructure with +this provider? + +* Ingress Controller Options: What are the options for IC deployment? + +* FQDN/IP: How does the project handle the IP addressing and FQDN for the +certificates? + +* Notes: Any additional information on the provider / project interaction. + +All of these configurations use Pulumi code within Python as the Infrastructure +as Code (IaC) provider.
| K8 Provider | Tested / Deploy Status | Infrastructure Support | IC Options | FQDN/IP | Notes | |-----------------|--------------------------------------------------------------------------------------------------------|-----------------------------|-----------------------------------|-----------------|--------------------------------------------------| | AWS EKS | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses ECR) | Provided | | | Azure AKS | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Digtal Ocean | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (Uses DO Registry) | Provided | Requires DNS delegation to DO | +| Digital Ocean | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (Uses DO Registry) | Provided | Requires DNS delegation to DO | | Google GKE | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | | Harvester/RKE2 | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer | | K3S | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | @@ -33,50 +42,81 @@ All of these configurations use Pulumi code within Python as the Infrastructure | Minikube | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | | Rancher Desktop | No | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer | -### Notes: - -1. The NGINX IC build/deploy process is currently under active development and support for IC will be standardized - across all providers. Follow [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and - [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details. Currently, for all `kubeconfig` - environments you have the option to specify either NGINX or NGINX Plus as your IC. The latter does require an active - subscription and a JWT to be included at build time. Please see the documentation for more details. -2. The process via which the IP and FQDN are created and used is currently under active development, and will be - streamlined and standardized for all providers. - Follow [#82](https://github.com/nginxinc/kic-reference-architectures/issues/82) for details. -3. The initial deployment was entirely built to work with AWS. As part of our reorganization the ability to use a - kubeconfig file was added, along with the necessary configuration to support additional standup options. This is - currently in active development and will result in this process being streamlined for these additional environments. - Please follow - [#80](https://github.com/nginxinc/kic-reference-architectures/issues/80) for details. -4. We are currently using filebeat as our logging agent. This deployment requires that the correct paths to the - container log directory are present in the deployment data. We have discovered that this differs based on the K8 - provider. Please see [#76](https://github.com/nginxinc/kic-reference-architectures/issues/76) for more detail. 
+### Notes + +1. The NGINX IC build/deploy process is currently under active development and + support for IC will be standardized across all providers. Follow + [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and + [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for + details. Currently, for all `kubeconfig` environments you have the option to + specify either NGINX or NGINX Plus as your IC. The latter does require an + active subscription and a JWT to be included at build time. Please see the + documentation for more details. + +1. The process via which the IP and FQDN are created and used is currently + under active development, and will be streamlined and standardized for all + providers. Follow + [#82](https://github.com/nginxinc/kic-reference-architectures/issues/82) for + details. + +1. The initial deployment was entirely built to work with AWS. As part of our + reorganization the ability to use a kubeconfig file was added, along with the + necessary configuration to support additional standup options. This is + currently in active development and will result in this process being + streamlined for these additional environments. Please follow + [#80](https://github.com/nginxinc/kic-reference-architectures/issues/80) + for details. + +1. We are currently using filebeat as our logging agent. This deployment + requires that the correct paths to the container log directory are present + in the deployment data. We have discovered that this differs based on the K8 + provider. Please see + [#76](https://github.com/nginxinc/kic-reference-architectures/issues/76) + for more detail. ## Known Issues / Caveats -1. Currently, the use of the Elastic tooling has shown to be problematic under heavy load, with containers falling over - and causing disruptions. Please see the [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example) - variables to adjust the number of replicas deployed for the Elastic logstore to tune to your environment. These will - need to be added/updated in the configuration for your stack, which is located in `./config/pulumi` and +1. Currently, the use of the Elastic tooling has shown to be problematic under + heavy load, with containers falling over and causing disruptions. Please see + the + [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example) + variables to adjust the number of replicas deployed for the Elastic logstore + to tune to your environment. These will need to be added/updated in the + configuration for your stack, which is located in `./config/pulumi` and is named `Pulumi.$STACK.yaml`. -2. The default Helm timeout is 5 minutes, which is acceptable for most managed clouds but tends to be too short for - single-vm or workstation deployments. Please see - the [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example) - variables to adjust the helm timeout as required for your environment. These will need to be added/updated in the - configuration for your stack, which is located in `./config/pulumi` and is named `Pulumi.$STACK.yaml`. -3. When load testing the Bank of Sirius using [Locust](https://locust.io/), you will likely see a high failure rate as - you increase the max users and spawn rate. This is "normal" and is an area we want to expose and explore for + +1. The default Helm timeout is 5 minutes, which is acceptable for most managed + clouds but tends to be too short for single-vm or workstation deployments. 
+   Please see the
+   [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example)
+   variables to adjust the helm timeout as required for your environment. These
+   will need to be added/updated in the configuration for your stack, which is
+   located in `./config/pulumi` and is named `Pulumi.$STACK.yaml`.
+
+1. When load testing the Bank of Sirius using [Locust](https://locust.io/), you
+   will likely see a high failure rate as you increase the max users and spawn
+   rate. This is "normal" and is an area we want to expose and explore for
   troubleshooting, determining which metrics/traces are helpful, etc.
+
+1. The most common failure modes for non-cloud environments tend towards the
+   following failures:
+
   1. Unable to provision persistent storage; correct by ensuring you have a
-      [persistent volume provider](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and can provision a
-      volume.
-   2. Unable to provision an External IP; correct by adding an IP provider such
+      [persistent volume provider](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+      and can provision a volume.
+
+   1. Unable to provision an External IP; correct by adding an IP provider such
      as [kubevip](https://kube-vip.chipzoller.dev/) or
      [metallb](https://metallb.org/).
-   3. Resource starvation (not enough CPU, Memory); expand the size of the VM or detune the environment.
-   4. Timeouts in helm; increase the helm timeout in the configuration file.
-5. If you are using a cloud provider with timed credentials, such as AWS, one failure mode that can arise is when the
-   credentials expire. This will result in a number of strange and seemingly confusing errors. Double check to make sure
-   that the credentials are valid.
-6. Currently, the build/test process is highly manual. This will be addressed in the future.
+
+   1. Resource starvation (not enough CPU, Memory); expand the size of the VM
+      or detune the environment.
+
+   1. Timeouts in helm; increase the helm timeout in the configuration file.
+
+1. If you are using a cloud provider with timed credentials, such as AWS, one
+   failure mode that can arise is when the credentials expire. This will result
+   in a number of strange and seemingly confusing errors. Double check to make
+   sure that the credentials are valid.
+
+1. Currently, the build/test process is highly manual. This will be addressed
+   in the future.
diff --git a/extras/README.md b/extras/README.md
index 2418a42..2e0c1ba 100644
--- a/extras/README.md
+++ b/extras/README.md
@@ -1,20 +1,23 @@
-## Directory
+# Directory

`/extras`

## Purpose

-This directory is for files that, although important, don't have a clearly defined home. Files from this directory will
-most likely be moved as the project matures.
+This directory is for files that, although important, don't have a clearly
+defined home. Files from this directory will most likely be moved as the
+project matures.

## Key Files

-- [`jwt.token`](./jwt.token) This file contains the JWT required to pull the NGINX IC from the NGINX, Inc registry.
-  See [this webpage](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret)
+* [`jwt.token`](./jwt.token) This file contains the JWT required to pull
+  the NGINX IC from the NGINX, Inc. registry. See
+  [this webpage](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret)
  for details and examples.
-- [`jenkins`](./jenkins) This directory contains sample jenkinsfiles. Note that these are not guaranteed to be production - ready. These files are named according to the specific type of build they manage; for example, AWS, K3S, MicroK8s, and - DO (Digital Ocean). +* [`jenkins`](./jenkins) This directory contains sample jenkinsfiles. Note + that these are not guaranteed to be production ready. These files are named + according to the specific type of build they manage; for example, AWS, K3S, + MicroK8s, and DO (Digital Ocean). ## Notes diff --git a/extras/jenkins/AWS/Jenkinsfile b/extras/jenkins/AWS/Jenkinsfile index c573287..69d3966 100644 --- a/extras/jenkins/AWS/Jenkinsfile +++ b/extras/jenkins/AWS/Jenkinsfile @@ -176,7 +176,7 @@ pipeline { */ sh ''' - $WORKSPACE/pulumi/python/runner -p aws up + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} up ''' } } @@ -197,7 +197,7 @@ pipeline { sh ''' $WORKSPACE/pulumi/python/venv/bin/aws ecr delete-repository --repository-name ingress-controller-marajenkaws${BUILD_NUMBER} --force - $WORKSPACE/pulumi/python/runner -p aws destroy + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } @@ -215,7 +215,7 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/pulumi/python/runner -p aws destroy || true + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} destroy || true find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/DigitalOcean/Jenkinsfile b/extras/jenkins/DigitalOcean/Jenkinsfile index 29d9093..aedc659 100644 --- a/extras/jenkins/DigitalOcean/Jenkinsfile +++ b/extras/jenkins/DigitalOcean/Jenkinsfile @@ -153,7 +153,7 @@ pipeline { steps { sh ''' - $WORKSPACE/pulumi/python/runner -p do up + $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} up ''' } } @@ -170,7 +170,7 @@ pipeline { */ sh ''' - $WORKSPACE/pulumi/python/runner -p do destroy + $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; ''' } @@ -187,9 +187,9 @@ pipeline { sh ''' # Destroy our partial build... 
-                    $WORKSPACE/pulumi/python/runner -p do destroy || true
+                    $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} destroy || true
                    find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\;
                '''
            }
        }
    }
-}
\ No newline at end of file
+}
diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile
index 638c4c6..1d8e6b6 100644
--- a/extras/jenkins/Linode/Jenkinsfile
+++ b/extras/jenkins/Linode/Jenkinsfile
@@ -153,7 +153,7 @@ pipeline {
            steps {
                sh '''
-                    $WORKSPACE/pulumi/python/runner -p linode up
+                    $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} up
                '''
            }
        }
@@ -170,7 +170,7 @@ pipeline {
*/
                sh '''
-                    $WORKSPACE/pulumi/python/runner -p linode destroy
+                    $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy
                    find $WORKSPACE -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\;
                '''
            }
@@ -187,7 +187,7 @@ pipeline {
                sh '''
                    # Destroy our partial build...
-                    $WORKSPACE/pulumi/python/runner -p linode destroy|| true
+                    $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy || true
                    # Clean up the Pulumi stack
                    find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\;
                '''
diff --git a/extras/jenkins/MicroK8s/Jenkinsfile b/extras/jenkins/MicroK8s/Jenkinsfile
index 4bce3ee..c269991 100644
--- a/extras/jenkins/MicroK8s/Jenkinsfile
+++ b/extras/jenkins/MicroK8s/Jenkinsfile
@@ -160,11 +160,11 @@ pipeline {
                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER}
                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER}
                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER}
-                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
-                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
+                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
+                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
-                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "Zf4dabEA" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
+                    $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
                '''
            }
        }
diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile
index
9d90490..dbe4c9c 100644 --- a/extras/jenkins/Minikube/Jenkinsfile +++ b/extras/jenkins/Minikube/Jenkinsfile @@ -105,7 +105,7 @@ pipeline { curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube mkdir -p /usr/local/bin/ install minikube /usr/local/bin/ - minikube start --vm-driver=docker --force --cpus 4 --memory 30000 + minikube start --vm-driver=docker --force --cpus 4 --memory 30000 --kubernetes-version=v1.23.9 ''' } } @@ -164,7 +164,7 @@ _EOF_ $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkube${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkube${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} diff --git a/extras/jenkins/README.md b/extras/jenkins/README.md index 4915455..0fd04f2 100644 --- a/extras/jenkins/README.md +++ b/extras/jenkins/README.md @@ -1,28 +1,35 @@ -## Directory +# Directory `/extras/jenkins` ## Purpose -This directory contains several subdirectories, each of which contains a -[Jenkinsfile](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/). These are designed to be used by the -[Jenkins](https://www.jenkins.io/) CI system to run builds of the MARA project. These can be used as-is from the -repository using the ability of Jenkins to pull its pipeline configuration from SCM, as described in -[this article](https://www.jenkins.io/doc/book/pipeline/getting-started/#defining-a-pipeline-in-scm ) +This directory contains several subdirectories, each of which contains a +[Jenkinsfile](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/). These are +designed to be used by the [Jenkins](https://www.jenkins.io/) CI system to run +deployments of the MARA project. These can be used as-is from the repository +using the ability of Jenkins to pull its pipeline configuration from SCM, as +described in +[this article](https://www.jenkins.io/doc/book/pipeline/getting-started/#defining-a-pipeline-in-scm) -Please note that these should be considered to be in a "draft" status, and should be reviewed and modified if you plan -on using them. As always, pull requests, issues, and comments are welcome. +Please note that these should be considered to be in a "draft" status, and +should be reviewed and modified if you plan on using them. As always, pull +requests, issues, and comments are welcome. ## Key Files -- [`AWS`](./AWS) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) to deploy to AWS. Please see the - file for additional information regarding the configuration. 
-- [`DigitalOcean`](./DigitalOcean) This directory contains the [`Jenkinsfile`](./DigitalOcean/Jenkinsfile) to deploy to
-  Digital Ocean. Please see the file for additional information regarding the configuration.
-- [`K3S`](./K3S) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) to deploy to K3S. Please see the
-  file for additional information regarding the configuration.
-- [`MicroK8s`](./MicroK8s) This directory contains the [`Jenkinsfile`](./AWS/MicroK8s) to deploy to MicroK8s. Please see
-  the file for additional information regarding the configuration.
+- [`AWS`](./AWS) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile)
+  to deploy to AWS. Please see the file for additional information regarding the
+  configuration.
+- [`DigitalOcean`](./DigitalOcean) This directory contains the
+  [`Jenkinsfile`](./DigitalOcean/Jenkinsfile) to deploy to Digital Ocean. Please
+  see the file for additional information regarding the configuration.
+- [`K3S`](./K3S) This directory contains the [`Jenkinsfile`](./K3S/Jenkinsfile)
+  to deploy to K3S. Please see the file for additional information regarding the
+  configuration.
+- [`MicroK8s`](./MicroK8s) This directory contains the
+  [`Jenkinsfile`](./MicroK8s/Jenkinsfile) to deploy to MicroK8s. Please see the
+  file for additional information regarding the configuration.

## Notes
diff --git a/pulumi/python/README.md b/pulumi/python/README.md
index 08a3066..089fb5a 100644
--- a/pulumi/python/README.md
+++ b/pulumi/python/README.md
@@ -1,10 +1,10 @@
# MARA: Pulumi / Python

This project illustrates the end-to-end stand up of the MARA project using
-[Pulumi](https://www.pulumi.com/). It is intended to be used
-as a reference when building your own Infrastructure as Code (IaC) deployments.
-As such, each discrete stage of deployment is defined as a separate Pulumi project
-that can be deployed independently of each stage. Although Pulumi supports many
+[Pulumi](https://www.pulumi.com/). It is intended to be used as a reference
+when building your own Infrastructure as Code (IaC) deployments. As such, each
+discrete stage of deployment is defined as a separate Pulumi project that can be
+deployed independently of the other stages. Although Pulumi supports many
programming languages, Python was chosen as the language for this project.
The reimplementation of the deployment here should be easily reproducible in
other languages.
@@ -22,7 +22,7 @@ Several directories, located at the root of the project, are used. These are
at the project root because they are intended to be outside the specific
IaC providers (e.g., to be used for a port to Terraform).

-```
+```console
├── bin
├── config
│   └── pulumi
├── docker
├── docs
└── extras
```

-- The [`bin`](../../bin) directory contains all the binaries and scripts that
+* The [`bin`](../../bin) directory contains all the binaries and scripts that
  are used to start/stop the project and provide additional capabilities.
-- The [`config/pulumi`](../../config/pulumi) directory holds the configuration
+* The [`config/pulumi`](../../config/pulumi) directory holds the configuration
  files for deployments, as well as a reference configuration that illustrates
  the available configuration options and their defaults.
-- The [`docker`](../../docker) directory contains Dockerfiles and a script to
+* The [`docker`](../../docker) directory contains Dockerfiles and a script to
  build a Docker-based deployment image that contains all the tooling necessary
  to deploy MARA.
-- The [`docs`](../../docs) directory contains all documentation relevant to the
+* The [`docs`](../../docs) directory contains all documentation relevant to the
  overall project.
-- The [`extras`](../../extras) directory contains additional scripts, notes,
+* The [`extras`](../../extras) directory contains additional scripts, notes,
  and configurations.

### Pulumi/Python Level

@@ -49,7 +49,7 @@
This directory contains all Pulumi/Python-based logic, which currently
consists of the following:

-```
+```console
├── automation
│   └── providers
├── config
@@ -77,25 +77,28 @@ consists of the following:
└── kic-pulumi-utils
```

-- The [`automation`](./automation) directory contains the files used to interface with the pulumi
-  automation api, including provider-specific files.
-- The [`config`](./config) directory contains files used by Pulumi to manage
+* The [`automation`](./automation) directory contains the files used to
+  interface with the Pulumi Automation API, including provider-specific files.
+* The [`config`](./config) directory contains files used by Pulumi to manage
  the configuration for this project. Note that this directory is essentially
  a redirect to the project-wide [`config`](../../config/pulumi) directory.
-- The [`infrastructure`](./infrastructure) directory contains files used to stand
-  up Kubernetes as well as to provide a common project for all of the infrastructure
-  and kubeconfig-based clusters.
-- The [`kubernetes`](./kubernetes) directory contains all of the Kubernetes-based
+* The [`infrastructure`](./infrastructure) directory contains files used to
+  stand up Kubernetes as well as to provide a common project for the
+  infrastructure and kubeconfig-based clusters.
+* The [`kubernetes`](./kubernetes) directory contains the Kubernetes-based
  deployments. There are three key subdirectories in this directory:
-  - The [`nginx`](./kubernetes/nginx) directory contains all NGINX products.
-  - The [`secrets`](./kubernetes/secrets) directory contains all encrypted secrets.
-  - The [`applications`](./kubernetes/applications) directory contains all applications
-    that have been tested for deployment with MARA.
-- The [`tools`](./tools) directory contains extra tooling for specific use cases.
-- The [`utility`](./utility) directory contains the code used to build/pull/push KIC,
-  and other projects used to support the environment.
-- The [`venv/bin`](./venv/bin) directory contains the virtual environment for Python
-  along with some key utilities, such as `pulumi`, `kubectl`, and `node`.
+  * The [`nginx`](./kubernetes/nginx) directory contains all NGINX products.
+  * The [`secrets`](./kubernetes/secrets) directory contains all encrypted
+    secrets.
+  * The [`applications`](./kubernetes/applications) directory contains all
+    applications that have been tested for deployment with MARA.
+* The [`tools`](./tools) directory contains extra tooling for specific use
+  cases.
+* The [`utility`](./utility) directory contains the code used to
+  build/pull/push KIC, and other projects used to support the environment.
+* The [`venv/bin`](./venv/bin) directory contains the virtual environment for
+  Python along with some key utilities, such as `pulumi`, `kubectl`, and
+  `node`.

## Configuration

@@ -103,7 +106,8 @@ The Pulumi configuration files are in the [`config`](../../config/pulumi)
directory. Pulumi's configuration files use the following naming convention:
`Pulumi.<stackname>.yaml`.
To create a new configuration file for your Pulumi stack, create a new file
with a name that includes the stack name. Then, refer
-to the sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
+to the sample
+[configuration file](../../config/pulumi/Pulumi.stackname.yaml.example)
for configuration entries that you want to customize and copy over the entries
that you want to modify from their defaults.

@@ -114,18 +118,18 @@ The following directories are specific to AWS.

#### VPC

Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first
-Pulumi project which is responsible for setting up the VPC and subnets used by EKS.
-The project is built so that it will attempt to create a subnet for each availability
-zone within the running region. You may want to customize this behavior, or the IP
-addressing scheme used.
+Pulumi project which is responsible for setting up the VPC and subnets used by
+EKS. The project is built so that it will attempt to create a subnet for each
+availability zone within the running region. You may want to customize this
+behavior, or the IP addressing scheme used.

#### Elastic Kubernetes Service (EKS)

-Located within the [`eks`](./infrastructure/aws/eks) directory is a project used
-to stand up a new EKS cluster on AWS. This project reads data from the previously
-executed VPC project using its VPC id and subnets. In this project you may want to
-customize the `instance_type`, `min_size`, or `max_size` parameters provided
-to the cluster.
+Located within the [`eks`](./infrastructure/aws/eks) directory is a project
+used to stand up a new EKS cluster on AWS. This project reads data from the
+previously executed VPC project using its VPC ID and subnets. In this project
+you may want to customize the `instance_type`, `min_size`, or `max_size`
+parameters provided to the cluster.

#### Elastic Container Registry (ECR)

@@ -138,20 +142,21 @@ The following directories are specific to Digital Ocean.

#### DOMK8S

-Contained within the [`domk8s`](./infrastructure/digitalocean/domk8s) directory contains the
-logic needed to stand up a Digital Ocean Managed Kubernetes cluster. There are a number of
-configuration options available to customize the build, however the defaults can be used
-to create a standard sized cluster in the SFO3 region.
+The [`domk8s`](./infrastructure/digitalocean/domk8s) directory
+contains the logic needed to stand up a Digital Ocean Managed Kubernetes
+cluster. There are a number of configuration options available to customize the
+build; however, the defaults can be used to create a standard-sized cluster in
+the SFO3 region.

#### container-registry / container-registry-credentials

-These directories contain the projects required to create and use the Digital Ocean container
-registry.
+These directories contain the projects required to create and use the Digital
+Ocean container registry.

#### dns-record

-This directory contains the project required to provision a DNS record for the Digital Ocean
-egress.
+This directory contains the project required to provision a DNS record for the
+Digital Ocean egress.

### Linode

@@ -159,14 +164,14 @@ The following directories are specific to Linode.

#### LKE

-Contained within the [`lke`](./infrastructure/linode/lke) directory contains the
-logic needed to stand up a Linode Kubernetes Engine cluster. There are a number of
-configuration options available to customize the build.
+The [`lke`](./infrastructure/linode/lke) directory contains
+the logic needed to stand up a Linode Kubernetes Engine cluster. There are a
+number of configuration options available to customize the build.

#### harbor / harbor-configuration / container-registry-credentials

-These directories contain the projects required to create and use the Harbor container registry
-with the Linode deployment.
+These directories contain the projects required to create and use the Harbor
+container registry with the Linode deployment.

### NGINX Ingress Controller Docker Image Build

@@ -183,23 +188,25 @@ image to ECR in a fully automated manner.

### NGINX Ingress Controller Helm Chart

-In the [`ingress-contoller`](./kubernetes/nginx/ingress-controller) directory, you
-will find the Pulumi project responsible for installing NGINX KIC. You may want to
-customize this project to allow for deploying different versions of KIC. This chart
-is only used for AWS deployments. All other deployments use the [`ingress-controller-
-repo-only`](./kubernetes/nginx/ingress-controller-repo-only) directory, which at this
-time **only allows the use of deployments from the NGINX repo - either NGINX IC or
-NGINX Plus IC (with a JWT)**.
-
-A sample config-map is provided in the Pulumi deployment code. This code will adjust
-the logging format to approximate the upstream NGINX KIC project which will allow for
-easier ingestion into log storage and processing systems.
-
-**Note**: This deployment uses the GA Ingress APIs. This has been tested with helm
-chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and helm charts
-can be used, but care should be taken to ensure that the helm chart version used is
-compatible with the KIC version. This information can be found in the [NGINX KIC Release
-Notes](https://docs.nginx.com/nginx-ingress-controller/releases/) for each release.
+In the [`ingress-controller`](./kubernetes/nginx/ingress-controller) directory,
+you will find the Pulumi project responsible for installing NGINX KIC. You may
+want to customize this project to allow for deploying different versions of
+KIC. This chart is only used for AWS deployments. All other deployments use the
+[`ingress-controller-repo-only`](./kubernetes/nginx/ingress-controller-repo-only)
+directory, which at this time **only allows the use of deployments from the
+NGINX repo - either NGINX IC or NGINX Plus IC (with a JWT)**.
+
+A sample config-map is provided in the Pulumi deployment code. This code will
+adjust the logging format to approximate the upstream NGINX KIC project which
+will allow for easier ingestion into log storage and processing systems.
+
+**Note**: This deployment uses the GA Ingress APIs. This has been tested with
+helm chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and
+helm charts can be used, but care should be taken to ensure that the helm chart
+version used is compatible with the KIC version. This information can be found
+in the
+[NGINX KIC Release Notes](https://docs.nginx.com/nginx-ingress-controller/releases/)
+for each release.

#### Ingress API Versions and NGINX KIC

@@ -207,8 +214,8 @@ Starting with Kubernetes version 1.22, support for the Ingress Beta API
`networking.k8s.io/v1beta` will be dropped, requiring use of the GA Ingress API
`networking.k8s.io/v1`.
However, Kubernetes versions 1.19 through 1.21 allows these two API versions to coexist and maintains compatibility for consumers of -the API – meaning, the API will respond correctly to calls to either the `v1beta` -and/or `v1` routes. +the API – meaning, the API will respond correctly to calls to either the +`v1beta` and/or `v1` routes. This project uses the NGINX KIC v2.x releases which includes full support for the GA APIs. @@ -220,17 +227,18 @@ project responsible for installing your log store. The current solution deploys [Elasticsearch and Kibana](https://www.elastic.co/elastic-stack) using the [Bitnami Elasticsearch](https://bitnami.com/stack/elasticsearch/helm) -chart. This solution can be swapped for other options as desired. This application -is deployed to the `logstore` namespace. There are several configuration options -available in the configuration file for the project in order to better tailor this -deployment to the size of the cluster being used. +chart. This solution can be swapped for other options as desired. This +application is deployed to the `logstore` namespace. There are several +configuration options available in the configuration file for the project in order +to better tailor this deployment to the size of the cluster being used. #### Notes -To access the Kibana dashboard via your web browser, you will need to set up port -forwarding for the kibana pod. This can be accomplished using the `kubectl` command: +To access the Kibana dashboard via your web browser, you will need to set up +port forwarding for the kibana pod. This can be accomplished using the +`kubectl` command: -``` +```console $ # Find the Kibana pod name $ kubectl get pods -n logstore NAME READY STATUS RESTARTS AGE @@ -262,7 +270,8 @@ as desired. This application is deployed to the `logagent` namespace. TLS is enabled via [cert-manager](https://cert-manager.io/), which is installed in the cert-manager namespace. Creation of ClusterIssuer or Issuer resources is -delegated to the individual applications and is not done as part of this deployment. +delegated to the individual applications and is not done as part of this +deployment. ### Prometheus @@ -270,53 +279,57 @@ Prometheus is deployed and configured to enable the collection of metrics for all components that have a defined service monitor. At installation time, the deployment will instantiate: -- Node Exporters -- Kubernetes Service Monitors -- Grafana preloaded with dashboards and datasources for Kubernetes management -- The NGINX Ingress Controller -- Statsd receiver +* Node Exporters +* Kubernetes Service Monitors +* Grafana preloaded with dashboards and datasources for Kubernetes management +* The NGINX Ingress Controller +* Statsd receiver The former behavior of using the `prometheus.io:scrape: true` property set in -annotations indicating pods (where metrics should be scraped) has been deprecated, -and these annotations will be removed in the near future. +annotations indicating pods (where metrics should be scraped) has been +deprecated, and these annotations will be removed in the near future. -Also, the standalone Grafana deployment has been removed from the standard deployment -scripts, as it is installed as part of this project. +Also, the standalone Grafana deployment has been removed from the standard +deployment scripts, as it is installed as part of this project. -Finally, this namespace will hold service monitors created by other projects. 
For
-example, the Bank of Sirius deployment currently deploys a service monitor for each
-of the postgres monitors that are deployed.
+Finally, this namespace will hold service monitors created by other projects.
+For example, the Bank of Sirius deployment currently deploys a service monitor
+for each of the postgres monitors that are deployed.

**Notes**:

-1. The KIC needs to be configured to expose Prometheus metrics. This is currently
-   done by default.
-2. The default address binding of the `kube-proxy` component is set to `127.0.0.1`
-   and therefore will cause errors when the canned Prometheus scrape configurations
-   are run. The fix is to set this address to `0.0.0.0`. An example manifest has been
-   provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied
-   against your installation with `kubectl apply -f ./filename`. Please only apply this
-   change once you have verified that it will work with your version of Kubernetes.
-3. The _grafana_ namespace has been maintained in the configuration file to be used by
-   the Prometheus operator-deployed version of Grafana. This version only accepts a
-   password – you can still specify a username for the admin account but it will
-   be silently ignored. This will be changed in the future.
+1. The KIC needs to be configured to expose Prometheus metrics. This is
+   currently done by default.
+2. The default address binding of the `kube-proxy` component is set to
+   `127.0.0.1` and therefore will cause errors when the canned Prometheus
+   scrape configurations are run. The fix is to set this address to `0.0.0.0`. An
+   example manifest has been provided in
+   [prometheus/extras](./kubernetes/prometheus/extras) that can be applied
+   against your installation with `kubectl apply -f ./filename`.
+   Please only apply this change once you have verified that it will work with
+   your version of Kubernetes.
+3. The _grafana_ namespace has been maintained in the configuration file to be
+   used by the Prometheus operator-deployed version of Grafana. This version
+   only accepts a password – you can still specify a username for the admin
+   account but it will be silently ignored. This will be changed in the future.

### Observability

-We deploy the [OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector)
+We deploy the
+[OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector)
along with a simple collector. There are several other configurations in the
-[observability/otel-objects](./kubernetes/observability/otel-objects) directory.
+[observability/otel-objects](./kubernetes/observability/otel-objects)
+directory.
See the [README.md](./kubernetes/observability/otel-objects/README.md) file
-in the [observability/otel-objects](./kubernetes/observability/otel-objects) for more information,
-including an explanation of the default configuration.
+in the [observability/otel-objects](./kubernetes/observability/otel-objects)
+for more information, including an explanation of the default configuration.

### Demo Application

A forked version of the Google
[_Bank of Anthos_](https://github.com/GoogleCloudPlatform/bank-of-anthos)
-application is contained in the [`sirius`](./kubernetes/applications/sirius) directory.
-The github repository for this for is at
+application is contained in the [`sirius`](./kubernetes/applications/sirius)
+directory. The GitHub repository for this fork is at
[_Bank of Sirius_](https://github.com/nginxinc/bank-of-sirius).
Normally, the `frontend` microservice is exposed via a load balancer
@@ -328,9 +341,10 @@ can configure the KIC as desired.

An additional change to the application is the conversion of several of the
standard Kubernetes deployment manifests into Pulumi code. This has been done
for the configuration maps, the ingress controller, and the JWT RSA signing key
-pair. This allows the user to take advantage Pulumi's feature set, by demonstrating
-the process of creating and deploying an RSA key pair at deployment time and using
-the project configuration file to set config variables, including secrets.
+pair. This allows the user to take advantage of Pulumi's feature set by
+demonstrating the process of creating and deploying an RSA key pair at
+deployment time and using the project configuration file to set config
+variables, including secrets.

As part of the Bank of Sirius deployment, we deploy a cluster-wide
[self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer
@@ -339,10 +353,10 @@ created to enable TLS access to the application. Note that this issuer can
be changed out by the user, for example to use the
[ACME](https://cert-manager.io/docs/configuration/acme/) issuer.
The use of the ACME issuer has been tested and works without issues, provided
-the FQDN meets the length requirements. As of this writing, the AWS ELB hostname
-is too long to work with the ACME server. Additional work in this area will be
-undertaken to provide dynamic DNS record creation as part of this process so
-legitimate certificates can be issued.
+the FQDN meets the length requirements. As of this writing, the AWS ELB
+hostname is too long to work with the ACME server. Additional work in this area
+will be undertaken to provide dynamic DNS record creation as part of this
+process so legitimate certificates can be issued.

To provide visibility into the Postgres databases that are running as part
of the application, the Prometheus Postgres data exporter will be deployed
@@ -360,4 +374,5 @@ at the first run of the startup process.

To help enable simple load testing, a script has been provided that uses
the `kubectl` command to port-forward monitoring and management connections
-to the local workstation. This command is [`test-foward.sh`](../../bin/test-forward.sh).
+to the local workstation. This command is
+[`test-forward.sh`](../../bin/test-forward.sh).
diff --git a/pulumi/python/automation/DESIGN.md b/pulumi/python/automation/DESIGN.md
index 5dc8385..07ef65c 100644
--- a/pulumi/python/automation/DESIGN.md
+++ b/pulumi/python/automation/DESIGN.md
@@ -2,17 +2,20 @@

## Problem

-When creating an infrastructure as code deployment in Pulumi, it is common to have infrastructure
-that depends on the presence of other infrastructure. If there are only few layers of dependencies,
-it is manageable. However, once you pass three layers of dependencies, it becomes quite difficult
-to manage the complexity of your deployment. This also results in deployment plans that are almost
-incomprehensible.
-
-This is the problem that was faced when using Pulumi to build MARA. Multiple infrastructure services
-must be instantiated in order to get a working Kubernetes environment. Moreover, once the Kubernetes
-is present, it needs additional components that have a web of dependencies. For example, if we use
+When creating an infrastructure as code deployment in Pulumi, it is common to
+have infrastructure that depends on the presence of other infrastructure. If
+there are only a few layers of dependencies, it is manageable. However, once you
+pass three layers of dependencies, it becomes quite difficult to manage the
+complexity of your deployment. This also results in deployment plans that are
+almost incomprehensible.
+
+This is the problem that was faced when using Pulumi to build MARA. Multiple
+infrastructure services must be instantiated in order to get a working
+Kubernetes environment. Moreover, once the Kubernetes cluster is present, it
+needs additional components that have a web of dependencies. For example, if we use
AWS, a full deployment looks something like the following:

-```
+
+```console
┌── infrastructure/aws
│   ├── vpc [VPC]
│   ├── eks [EKS]
@@ -37,69 +40,84 @@ AWS, a full deployment looks something like the following:
└── application
```

-EKS cannot be instantiated until the VPC is configured. The Ingress Controller cannot be pushed
-until a container registry is available. The application cannot be started until log management,
-certificate management, and observability services have been instantiated. A non-trivial Kubernetes
+EKS cannot be instantiated until the VPC is configured. The Ingress Controller
+cannot be pushed until a container registry is available. The application
+cannot be started until log management, certificate management, and
+observability services have been instantiated. A non-trivial Kubernetes
deployment is truly a web of dependencies!

-The above example shows the dependencies for a single infrastructure provider (AWS) that is hosting
-a Kubernetes environment and a container registry. However, if the infrastructure provider is
-changed, then the content and order of dependencies also changes. As such, this introduces a
-conditional element that needs to be managed.
+The above example shows the dependencies for a single infrastructure provider
+(AWS) that is hosting a Kubernetes environment and a container registry.
+However, if the infrastructure provider is changed, then the content and order
+of dependencies also changes. As such, this introduces a conditional element
+that needs to be managed.

## Solution

-The approach taken in MARA to mitigate the Pulumi dependency problem is to break apart Pulumi
-deployments (projects) into bite sized pieces that each did one thing. Pulumi projects pass state
-to each other by executing sequentially and using
-[stack references](https://www.pulumi.com/learn/building-with-pulumi/stack-references/).
-
-Initially, sequential execution was implemented through a bash script that would run `pulumi up`
-across a series of directories in a set order. Each directory was a Pulumi project. If a given
-project had dependent state on another project, it would use a stack reference to pull state out
-of the dependent project that was previously executed. When additional infrastructure providers
-were added, they were supported by different bash scripts that were conditionally called.
-
-This approach has proven to be unmanageable as it lacks flexibility and configurability as well
-as makes adding new infrastructure providers difficult. For example, if the content and/or ordering
-of infrastructure deployed to Kubernetes needs to change based on the infrastructure provider,
-then this is difficult or impossible with the bash script approach. Moreover, if you want to
-read configuration and change what or how things are deployed, this also becomes difficult
-using just bash scripting. Lastly, due to differences in execution environments such as
-Linux and MacOS, it is difficult to write portable bash scripting.
-
-When Pulumi released the [Automation API](https://www.pulumi.com/docs/guides/automation-api/)
-it presented an opportunity to resolve the shortcomings mentioned above. Using the Automation
-API, the MARA Runner was created to provide a framework for gluing together multiple Pulumi
-Projects such that they can all be deployed as one single unit of execution and at the
-same time allow for piecemeal deployment using `pulumi up`.
+The approach taken in MARA to mitigate the Pulumi dependency problem is to
+break apart Pulumi deployments (projects) into bite-sized pieces that each do
+one thing. Pulumi projects pass state to each other by executing sequentially
+and using
+[stack references](https://www.pulumi.com/learn/building-with-pulumi/stack-references/).
+
+Initially, sequential execution was implemented through a bash script that
+would run `pulumi up` across a series of directories in a set order. Each
+directory was a Pulumi project. If a given project had dependent state on
+another project, it would use a stack reference to pull state out of the
+dependent project that was previously executed. When additional infrastructure
+providers were added, they were supported by different bash scripts that were
+conditionally called.
+
+This approach has proven to be unmanageable as it lacks flexibility and
+configurability as well as makes adding new infrastructure providers difficult.
+For example, if the content and/or ordering of infrastructure deployed to
+Kubernetes needs to change based on the infrastructure provider, then this is
+difficult or impossible with the bash script approach. Moreover, if you want to
+read configuration and change what or how things are deployed, this also becomes
+difficult using just bash scripting. Lastly, due to differences in execution
+environments such as Linux and MacOS, it is difficult to write portable bash
+scripting.
+
+When Pulumi released the
+[Automation API](https://www.pulumi.com/docs/guides/automation-api/)
+it presented an opportunity to resolve the shortcomings mentioned above. Using
+the Automation API, the MARA Runner was created to provide a framework for
+gluing together multiple Pulumi Projects such that they can all be deployed as
+one single unit of execution and at the same time allow for piecemeal
+deployment using `pulumi up`.

The MARA Runner is a CLI program written in Python that provides the following:

-  * The selection of an infrastructure provider
-  * Configuration using configuration files that control all Pulumi projects
-  * Pulumi operations such as up, refresh, destroy to be propagated across all projects
-  * Visualizing which Pulumi projects will be executed for a given infrastructure provider
+* The selection of an infrastructure provider
+* Configuration using configuration files that control all Pulumi projects
+* Pulumi operations such as up, refresh, destroy to be propagated across all
+  projects
+* Visualizing which Pulumi projects will be executed for a given
+  infrastructure provider

## Terms

-The following terms are used repeatedly in the MARA runner. For clarity, they are defined below.
+The following terms are used repeatedly in the MARA runner. For clarity, they
+are defined below.

### Pulumi Project

-A Pulumi [Project](https://www.pulumi.com/docs/intro/concepts/project/) is a folder/directory
-that contains a `Pulumi.yaml` file. It is a stand-alone single unit of execution. Multiple
-projects execution is tied together by the MARA Runner.
+A Pulumi [Project](https://www.pulumi.com/docs/intro/concepts/project/) is a
+folder/directory that contains a `Pulumi.yaml` file. It is a stand-alone single
+unit of execution. The execution of multiple projects is tied together by the
+MARA Runner.

### Infrastructure Provider

-The term Infrastructure provider (or provider for short) within the context of the MARA Runner,
-is referring to what will be hosting a Kubernetes environment and a container registry.
-Infrastructure providers are implemented as a subclass of the
-[Provider](providers/base_provider.py) class. They contain a collection references to the
-directories of Pulumi projects which are categorized as either "infrastructure" or "kubernetes".
-The categorization of "infrastructure" means that a project is a requirement for having
-a working Kubernetes cluster and container registry.
+The term Infrastructure provider (or provider for short), within the context of
+the MARA Runner, refers to what will be hosting a Kubernetes environment
+and a container registry. Infrastructure providers are implemented as a
+subclass of the [Provider](providers/base_provider.py) class. They contain a
+collection of references to the directories of Pulumi projects which are
+categorized as either "infrastructure" or "kubernetes". The categorization of
+"infrastructure" means that a project is a requirement for having a working
+Kubernetes cluster and container registry.

### Execution

Execution is referring to the running of a Pulumi project by doing `pulumi up`.

### Environment Configuration

-The environment configuration file by default is located at: `/config/pulumi/environment`.
-It is used to define the environment variables needed when executing a Pulumi project.
-When executing Pulumi projects, the system environment is used AND the values from the
-environment configuration are appended/overwritten over the system environment.
-The file format is a simple key value mapping where each line contains a single: `<key>=<value>`.
+The environment configuration file by default is located at:
+`/config/pulumi/environment`.
+It is used to define the environment variables needed when executing a Pulumi
+project. When executing Pulumi projects, the system environment is used AND the
+values from the environment configuration are appended/overwritten over the
+system environment. The file format is a simple key value mapping where each
+line contains a single: `<key>=<value>`.

### Stack Configuration

-The stack configuration is a Pulumi native configuration file that is specific for a single
-Pulumi [Stack](https://www.pulumi.com/docs/intro/concepts/stack/). The stack configuration
-is located by default at `/config/pulumi/Pulumi.<stack name>.yaml`.
+The stack configuration is a Pulumi native configuration file that is specific for
+a single Pulumi [Stack](https://www.pulumi.com/docs/intro/concepts/stack/). The
+stack configuration is located by default at
+`/config/pulumi/Pulumi.<stack name>.yaml`.

## Design

-Below is a rough outline of the major components of the Runner and there order of execution.
+Below is a rough outline of the major components of the Runner and their order
+of execution.

-```
+```console
Validate         Prompt User for      Prompt User for
Configuration───►Provider Configuration────►Secrets
│                                    │
@@ -146,57 +168,64 @@ to Kubernetes───►Project

### Assumptions

-There are some assumptions for how Pulumi is used by the Runner that differ from what is
-possible using Pulumi directly.
- - * All Pulumi projects use the same name for their stack - * All Pulumi projects use the same stack configuration file (except the [secrets](../kubernetes/secrets) project) - * All secrets are stored encrypted in the [secrets](../kubernetes/secrets) project and loaded into Kubernetes as - secrets - * Infrastructure providers cannot be changed on a stack after the first run, and as such - a new stack will need to be made when using multiple infrastructure providers - * Stack references are used to pass state between Pulumi projects - * The configuration key `kubernetes:infra_type` contains the name of the infrastructure provider - as used in the Runner - * If there is any error running a Pulumi project, the Runner will exit, and it is up to the user - to try again or fix the issue - * The order of execution may change between different infrastructure providers - * All required external programs are installed - * The Runner is invoked from a virtual environment as set up by the - [setup_venv.sh](../../../bin/setup_venv.sh) script - * After a Kubernetes cluster is stood up, the relevant configuration files are added to the - system such that it can be managed with the `kubectl` tool +There are some assumptions for how Pulumi is used by the Runner that differ +from what is possible using Pulumi directly. + +* All Pulumi projects use the same name for their stack +* All Pulumi projects use the same stack configuration file (except the + [secrets](../kubernetes/secrets) project) +* All secrets are stored encrypted in the [secrets](../kubernetes/secrets) + project and loaded into Kubernetes as secrets +* Infrastructure providers cannot be changed on a stack after the first run, + and as such a new stack will need to be made when using multiple + infrastructure providers +* Stack references are used to pass state between Pulumi projects +* The configuration key `kubernetes:infra_type` contains the name of the + infrastructure provider as used in the Runner +* If there is any error running a Pulumi project, the Runner will exit, and it + is up to the user to try again or fix the issue +* The order of execution may change between different infrastructure providers +* All required external programs are installed +* The Runner is invoked from a virtual environment as set up by the + [setup_venv.sh](../../../bin/setup_venv.sh) script +* After a Kubernetes cluster is stood up, the relevant configuration files are + added to the system such that it can be managed with the `kubectl` tool ### Configuration -The initial phase of the Runner's execution reads, parses and validates the environment -and stack configuration files. If the stack configuration is missing or empty, it is assumed -that it is the first time starting up the environment and the user is prompted for required -configuration parameters. +The initial phase of the Runner's execution reads, parses and validates the +environment and stack configuration files. If the stack configuration is missing +or empty, it is assumed that it is the first time starting up the environment +and the user is prompted for required configuration parameters. -After configuration validation, the user is prompted to input any required secrets that are -not currently persisted. These secrets are encrypted using Pulumi's local secret handling -and stored in ciphertext in the [secrets](../kubernetes/secrets) project. +After configuration validation, the user is prompted to input any required +secrets that are not currently persisted. 
These secrets are encrypted using +Pulumi's local secret handling and stored in ciphertext in the +[secrets](../kubernetes/secrets) project. ### Provider -After configuration has completed, a provider is selected based on the options specified -by the user when invoking the Runner. This provider is used as the source of data for what -Pulumi projects are executed and in what order. When standing up an environment, the provider -executes first the Pulumi projects that are categorized as "infrastructure". Infrastructure -in this context means that these projects are required to have been executed successfully +After configuration has completed, a provider is selected based on the options +specified by the user when invoking the Runner. This provider is used as the +source of data for what Pulumi projects are executed and in what order. When +standing up an environment, the provider executes first the Pulumi projects that +are categorized as "infrastructure". Infrastructure in this context means that +these projects are required to have been executed successfully in order to have a working Kubernetes cluster and container registry. -A Pulumi project reference within a provider may optionally have an `on_success` event -registered which is run when the project executes successfully. Typically, these events -do things like add configuration for a cluster to the kubectl configuration directory. +A Pulumi project reference within a provider may optionally have an +`on_success` event registered which is run when the project executes +successfully. Typically, these events do things like add configuration for a +cluster to the kubectl configuration directory. -After the infrastructure projects have completed executing, the Runner then executes -the [secrets](../kubernetes/secrets) project which stores the locally encrypted secrets -as [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) on the -newly created Kubernetes cluster. +After the infrastructure projects have completed executing, the Runner then +executes the [secrets](../kubernetes/secrets) project which stores the locally +encrypted secrets as +[Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) +on the newly created Kubernetes cluster. -Once the required secrets are in place, the Runner then executes all the projects -categorized as "kubernetes" including the final application to be deployed. +Once the required secrets are in place, the Runner then executes all the +projects categorized as "kubernetes" including the final application to be +deployed. -At this point, the application should be deployed. \ No newline at end of file +At this point, the application should be deployed. 
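+
+### Example: Adding a Provider
+
+To make the provider contract more concrete, below is a rough skeleton of a
+hypothetical new provider module (e.g. `providers/mycloud.py`). It is a sketch
+rather than the authoritative interface: the real contract lives in
+[base_provider.py](providers/base_provider.py), the `PulumiProject` constructor
+arguments shown here are assumptions, and [aws.py](providers/aws.py) should be
+consulted for a working implementation.
+
+```python
+# Hypothetical skeleton only; method names beyond infra_type() and
+# execution_order() (both used by main.py) are illustrative guesses.
+from typing import List
+
+from .base_provider import Provider
+from .pulumi_project import PulumiProject
+
+
+def _add_cluster_to_kubeconfig(params) -> None:
+    """Example on_success event: register the new cluster with kubectl."""
+
+
+class MyCloudProvider(Provider):
+    def infra_type(self) -> str:
+        # Recorded in the stack configuration as `kubernetes:infra_type`
+        return 'MyCloud'
+
+    def execution_order(self) -> List[PulumiProject]:
+        # "infrastructure" projects run first (cluster and registry),
+        # followed by the "kubernetes" projects deployed onto the cluster.
+        return [
+            PulumiProject(path='infrastructure/mycloud',
+                          description='MyCloud K8s Cluster',
+                          on_success=_add_cluster_to_kubeconfig),
+            PulumiProject(path='kubernetes/secrets',
+                          description='Kubernetes Secrets'),
+        ]
+
+
+# main.py imports the provider module and reads this attribute
+INSTANCE = MyCloudProvider()
+```
+
+With such a module in place, the Runner would be invoked with the new provider
+and a stack name, e.g. `main.py -p mycloud -s mystack up`.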
diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index 5788096..d0f7fe1 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -14,7 +14,6 @@ """ import getopt -import getpass import importlib import importlib.util import logging @@ -42,7 +41,8 @@ # Root directory of the MARA project PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..'])) # Allowed operations - if operation is not in this list, the runner will reject it -OPERATIONS: List[str] = ['down', 'destroy', 'refresh', 'show-execution', 'up', 'validate', 'list-providers'] +OPERATIONS: List[str] = ['down', 'destroy', 'refresh', + 'show-execution', 'up', 'validate', 'list-providers'] # List of available infrastructure providers - if provider is not in this list, the runner will reject it PROVIDERS: typing.Iterable[str] = Provider.list_providers() # Types of headings available to show the difference between Pulumi projects @@ -62,6 +62,16 @@ # Use he script name as invoked rather than hard coding it script_name = os.path.basename(sys.argv[0]) + +def provider_instance(provider_name: str) -> Provider: + """Dynamically instantiates an infrastructure provider + :param provider_name: name of infrastructure provider + :return: instance of infrastructure provider + """ + module = importlib.import_module(name=f'providers.{provider_name}') + return module.INSTANCE + + def usage(): usage_text = f"""Modern Application Reference Architecture (MARA) Runner @@ -72,6 +82,7 @@ def usage(): -d, --debug Enable debug output on all of the commands executed -b, --banner-type= Banner type to indicate which project is being executed (e.g. {', '.join(BANNER_TYPES)}) -h, --help Prints help information + -s, --stack Specifies the Pulumi stack to use -p, --provider= Specifies the provider used (e.g. {', '.join(PROVIDERS)}) OPERATIONS: @@ -85,21 +96,37 @@ def usage(): print(usage_text, file=sys.stdout) -def provider_instance(provider_name: str) -> Provider: - """Dynamically instantiates an infrastructure provider - :param provider_name: name of infrastructure provider - :return: instance of infrastructure provider - """ - module = importlib.import_module(name=f'providers.{provider_name}') - return module.INSTANCE +def write_env(env_config, stack_name): + """Create a new environment file and write our stack to it""" + with open(env_config.filename, 'w') as f: + try: + print("PULUMI_STACK=" + stack_name, file=f) + msg = 'Environment configuration file not found. 
Creating new file at the path: %s'
+            RUNNER_LOG.error(msg, env_config.filename)
+        except (FileNotFoundError, PermissionError):
+            RUNNER_LOG.error("Unable to build configuration file")
+            sys.exit(2)
+
+
+def append_env(env_config, stack_name):
+    """Append our stack to the existing environment file"""
+    with open(env_config.config_path, 'a') as f:
+        try:
+            msg = 'Environment configuration file does not contain PULUMI_STACK, adding'
+            print("PULUMI_STACK=" + stack_name, file=f)
+            RUNNER_LOG.error(msg)
+        except (FileNotFoundError, PermissionError):
+            RUNNER_LOG.error("Unable to append to configuration file")
+            sys.exit(2)
 
 
 def main():
     """Entrypoint to application"""
     try:
-        shortopts = 'hdp:b:'  # single character options available
-        longopts = ["help", 'debug', 'banner-type', 'provider=']  # long form options
+        shortopts = 'hds:p:b:'  # single character options available
+        longopts = ["help", 'debug', 'banner-type=',
+                    'stack=', 'provider=']  # long form options
         opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
     except getopt.GetoptError as err:
         RUNNER_LOG.error(err)
@@ -107,6 +134,7 @@ def main():
         sys.exit(2)
 
     provider_name: Optional[str] = None
+    stack_name: Optional[str] = None
 
     global debug_on
 
@@ -118,6 +146,9 @@ def main():
         elif opt in ('-p', '--provider'):
             if value.lower() != 'none':
                 provider_name = value.lower()
+        elif opt in ('-s', '--stack'):
+            if value.lower() != 'none':
+                stack_name = value.lower()
         elif opt in ('-d', '--debug'):
             debug_on = True
         elif opt in ('-b', '--banner-type'):
@@ -151,7 +182,8 @@ def main():
 
     # Now validate providers because everything underneath here depends on them
     if not provider_name or provider_name.strip() == '':
-        RUNNER_LOG.error('No provider specified - provider is a required argument')
+        RUNNER_LOG.error(
+            'No provider specified - provider is a required argument')
         sys.exit(2)
     if provider_name not in PROVIDERS:
         RUNNER_LOG.error('Unknown provider specified: %s', provider_name)
@@ -160,7 +192,14 @@ def main():
 
     setup_loggers()
     provider = provider_instance(provider_name.lower())
-    RUNNER_LOG.debug('Using [%s] infrastructure provider', provider.infra_type())
+    RUNNER_LOG.debug(
+        'Using [%s] infrastructure provider', provider.infra_type())
+
+    # Now validate the stack name
+    if not stack_name or stack_name.strip() == '':
+        RUNNER_LOG.error(
+            'No Pulumi stack specified - Pulumi stack is a required argument')
+        sys.exit(2)
 
     # We execute the operation requested - different operations have different pre-requirements, so they are matched
     # differently. Like show-execution does not require reading the configuration files, so we just look for a match
@@ -170,19 +209,34 @@ def main():
         provider.display_execution_order(output=sys.stdout)
         sys.exit(0)
 
-    # For the other operations, we need the configuration files parsed, so we do the parsing upfront.
-
+    # We parse the environment file up front in order to have the necessary values required by this program.
+    # The logic around the PULUMI_STACK accounts for three scenarios:
+    #
+    # 1. If there is no environment file, the argument given on the CLI is used and added to the environment file.
+    # 2. If there is a difference between the CLI and the environment file, the runner logs an error and exits.
+    # 3. If there is an environment file with no PULUMI_STACK, the environment file is appended with the argument.
    try:
        env_config = env_config_parser.read()
    except FileNotFoundError as e:
-        msg = 'Environment configuration file not found. 
This file must exist at the path: %s' - RUNNER_LOG.error(msg, e.filename) + # No file, we create one and then read it back in + write_env(e, stack_name) + env_config = env_config_parser.read() + + if env_config.stack_name() is None: + # Found file, if there is no stack we append it + try: + env_config = env_config_parser.read() + except FileNotFoundError: + sys.exit(2) + append_env(env_config, stack_name) + env_config = env_config_parser.read() + elif env_config.stack_name() != stack_name: + # Found file, but stack name mismatch; bail out + msg = 'Stack "%s" given on CLI but Stack "%s" is in env file; exiting' + RUNNER_LOG.error(msg, stack_name, env_config.stack_name()) sys.exit(2) - if env_config.stack_name(): - stack_config = read_stack_config(provider=provider, env_config=env_config) - else: - stack_config = None + stack_config = read_stack_config(provider=provider, env_config=env_config) validate_with_verbosity = operation == 'validate' or debug_on try: @@ -199,7 +253,8 @@ def main(): elif operation == 'down' or operation == 'destroy': pulumi_cmd = down elif operation == 'validate': - init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + init_secrets(env_config=env_config, + pulumi_projects=provider.execution_order()) pulumi_cmd = None # validate was already run above else: @@ -210,7 +265,8 @@ def main(): # instantiated, before invoking Pulumi via the Automation API. This is required because certain Pulumi # projects need to pull secrets in order to be stood up. if pulumi_cmd: - init_secrets(env_config=env_config, pulumi_projects=provider.execution_order()) + init_secrets(env_config=env_config, + pulumi_projects=provider.execution_order()) try: pulumi_cmd(provider=provider, env_config=env_config) except Exception as e: @@ -253,14 +309,18 @@ def read_stack_config(provider: Provider, :return: data structure containing stack configuration """ try: - stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) + stack_config = stack_config_parser.read( + stack_name=env_config.stack_name()) RUNNER_LOG.debug('stack configuration file read') except FileNotFoundError as e: - RUNNER_LOG.info('stack configuration file [%s] does not exist', e.filename) - stack_config = prompt_for_stack_config(provider, env_config, e.filename) + RUNNER_LOG.info( + 'stack configuration file [%s] does not exist', e.filename) + stack_config = prompt_for_stack_config( + provider, env_config, e.filename) except stack_config_parser.EmptyConfigurationException as e: RUNNER_LOG.info('stack configuration file [%s] is empty', e.filename) - stack_config = prompt_for_stack_config(provider, env_config, e.filename) + stack_config = prompt_for_stack_config( + provider, env_config, e.filename) return stack_config @@ -303,6 +363,7 @@ def validate(provider: Provider, :param stack_config: reference to stack configuration :param verbose: flag to enable verbose output mode """ + # First, we validate that we have the right tools installed def check_path(cmd: str, fail_message: str) -> bool: cmd_path = shutil.which(cmd) @@ -340,7 +401,8 @@ def check_path(cmd: str, fail_message: str) -> bool: RUNNER_LOG.error(msg, env_config.config_path) raise e if verbose: - RUNNER_LOG.debug('environment file [%s] passed validation', env_config.config_path) + RUNNER_LOG.debug( + 'environment file [%s] passed validation', env_config.config_path) if not stack_config: RUNNER_LOG.debug('stack configuration is not available') @@ -358,10 +420,12 @@ def check_path(cmd: str, fail_message: str) -> bool: try: 
provider.validate_stack_config(stack_config, env_config) except Exception as e: - RUNNER_LOG.error('Stack configuration file [%s] at path failed validation', stack_config.config_path) + RUNNER_LOG.error( + 'Stack configuration file [%s] at path failed validation', stack_config.config_path) raise e if verbose: - RUNNER_LOG.debug('Stack configuration file [%s] passed validation', stack_config.config_path) + RUNNER_LOG.debug( + 'Stack configuration file [%s] passed validation', stack_config.config_path) RUNNER_LOG.debug('All configuration is OK') @@ -377,13 +441,14 @@ def init_secrets(env_config: env_config_parser.EnvConfig, :param env_config: reference to environment configuration :param pulumi_projects: list of pulumi project to instantiate secrets for """ - secrets_work_dir = os.path.sep.join([SCRIPT_DIR, '..', 'kubernetes', 'secrets']) + secrets_work_dir = os.path.sep.join( + [SCRIPT_DIR, '..', 'kubernetes', 'secrets']) stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( env_vars=env_config, - ), - project_name='secrets', - work_dir=secrets_work_dir) + ), + project_name='secrets', + work_dir=secrets_work_dir) for project in pulumi_projects: if not project.config_keys_with_secrets: @@ -400,7 +465,8 @@ def init_secrets(env_config: env_config_parser.EnvConfig, value = secret_config_key.default config_value = auto.ConfigValue(secret=True, value=value) - stack.set_config(secret_config_key.key_name, value=config_value) + stack.set_config(secret_config_key.key_name, + value=config_value) def build_pulumi_stack(pulumi_project: PulumiProject, @@ -411,13 +477,14 @@ def build_pulumi_stack(pulumi_project: PulumiProject, :param env_config: reference to environment configuration :return: reference to a new or existing stack """ - RUNNER_LOG.info('Project [%s] selected: %s', pulumi_project.name(), pulumi_project.abspath()) + RUNNER_LOG.info('Project [%s] selected: %s', + pulumi_project.name(), pulumi_project.abspath()) stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), opts=auto.LocalWorkspaceOptions( env_vars=env_config, - ), - project_name=pulumi_project.name(), - work_dir=pulumi_project.abspath()) + ), + project_name=pulumi_project.name(), + work_dir=pulumi_project.abspath()) return stack @@ -428,7 +495,8 @@ def refresh(provider: Provider, :param env_config: reference to environment configuration """ for pulumi_project in provider.execution_order(): - headers.render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header( + text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack.refresh_config() @@ -451,7 +519,8 @@ def up(provider: Provider, :param env_config: reference to environment configuration """ for pulumi_project in provider.execution_order(): - headers.render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header( + text=pulumi_project.description, env_config=env_config) stack = build_pulumi_stack(pulumi_project=pulumi_project, env_config=env_config) stack_up_result = stack.up(color=env_config.pulumi_color_settings(), @@ -474,7 +543,8 @@ def down(provider: Provider, :param env_config: reference to environment configuration """ for pulumi_project in reversed(provider.execution_order()): - headers.render_header(text=pulumi_project.description, env_config=env_config) + headers.render_header( + text=pulumi_project.description, env_config=env_config) stack = 
build_pulumi_stack(pulumi_project=pulumi_project,
                                    env_config=env_config)
 
         stack_down_result = stack.destroy(color=env_config.pulumi_color_settings(),
 
diff --git a/pulumi/python/config/README.md b/pulumi/python/config/README.md
index 871d5c7..0d57571 100644
--- a/pulumi/python/config/README.md
+++ b/pulumi/python/config/README.md
@@ -1,24 +1,28 @@
-## Directory
+# Directory
 
 `/pulumi/python/config`
 
 ## Purpose
 
-This directory is used for configuration management in Pulumi. In previous versions of this project, the
-`vpc` directory was used to manage writes to the configuration file. This is required because you can only run
-the `pulumi config` command if you have a `Pulumi.yaml` somewhere in your directory or above that allows you to use the
-Pulumi tooling.
+This directory is used for configuration management in Pulumi. In previous
+versions of this project, the `vpc` directory was used to manage writes to the
+configuration file. This is required because you can only run the `pulumi config`
+command if you have a `Pulumi.yaml` somewhere in your directory or above that
+allows you to use the Pulumi tooling.
 
-Why not use each stack directory as it's own configuration? Using different directories will result in failures
-encrypting/decrypting the values in the main configuration file if different stacks are used. This is a stopgap
-workaround that will be obsoleted at such time that Pulumi provides nested/included configuration files.
+Why not use each stack directory as its own configuration? Using different
+directories will result in failures encrypting/decrypting the values in the
+main configuration file if different stacks are used. This is a stopgap
+workaround that will be obsoleted at such time that Pulumi provides
+nested/included configuration files. This is also the reason why we have created
+the `secrets` project.
 
 ## Key Files
 
-- [`Pulumi.yaml`](./Pulumi.yaml) This file tells the `pulumi` command where to find it's virtual envrionment and it's
-  configuration.
+* [`Pulumi.yaml`](./Pulumi.yaml) This file tells the `pulumi` command where to
+  find its virtual environment and its configuration.
 
 ## Notes
 
-Once Pulumi adds nested configuration files to the product we should be able to remove this work-around.
-
+Once Pulumi adds nested configuration files to the product we should be able to
+remove this work-around.
diff --git a/pulumi/python/infrastructure/README.md b/pulumi/python/infrastructure/README.md
index c8bf6eb..42750d5 100644
--- a/pulumi/python/infrastructure/README.md
+++ b/pulumi/python/infrastructure/README.md
@@ -1,4 +1,4 @@
-## Directory
+# Directory
 
 `/python/pulumi/infrastructure`
 
@@ -8,14 +8,17 @@ Holds all infrastructure related files.
 
 ## Key Files
 
-- [`aws`](./aws) Files to stand up a K8 cluster in AWS using VPC, EKS, and ECR.
-- [`digitalocean`](./digitalocean) Files to stand up a K8 cluster in DigitalOcean using DO Managed K8s.
-- [`linode`](./linode) Files to stand up a K8 cluster in Linode using Linode Kubernetes Engine.
-- [`kubeconfig`](./kubeconfig) Files to allow users to connect to any kubernetes installation that can be specified via
-  a `kubeconfig` file.
+* [`aws`](./aws) Files to stand up a K8 cluster in AWS using VPC, EKS, and ECR.
+* [`digitalocean`](./digitalocean) Files to stand up a K8 cluster in
+  DigitalOcean using DO Managed K8s.
+* [`linode`](./linode) Files to stand up a K8 cluster in Linode using Linode
+  Kubernetes Engine. 
+* [`kubeconfig`](./kubeconfig) Files to allow users to connect to any kubernetes
+  installation that can be specified via a `kubeconfig` file.
 
 ## Notes
 
-The `kubeconfig` project is intended to serve as a shim between infrastructure providers and the rest of the project.
-For example, even if you use the AWS logic you will still use the logic inside the `kubeconfig` stack as part of the
+The `kubeconfig` project is intended to serve as a shim between infrastructure
+providers and the rest of the project. For example, even if you use the AWS
+logic you will still use the logic inside the `kubeconfig` stack as part of the
 process. Additional infrastructures added will need to follow this pattern.
 
diff --git a/pulumi/python/kubernetes/README.md b/pulumi/python/kubernetes/README.md
index 081a0b2..ff753d1 100644
--- a/pulumi/python/kubernetes/README.md
+++ b/pulumi/python/kubernetes/README.md
@@ -1,18 +1,19 @@
-## Directory
+# Directory
 
 `/pulumi/python/kubernetes`
 
 ## Purpose
 
-All kubernetes deployments are stored in this directory; all of these stacks will use the
-[`infrastructure/kubeconfig`](../infrastructure/kubeconfig) stack as a source of information about the kubernetes
-installation that is being used.
+All kubernetes deployments are stored in this directory; all of these stacks
+will use the [`infrastructure/kubeconfig`](../infrastructure/kubeconfig) stack as
+a source of information about the kubernetes installation that is being used.
 
 ## Key Files
 
-- [`nginx`](./nginx) NGINX related components; Ingress Controller, Service Mesh, App Protect, etc. Each in a separate
-directory.
-- [`applications`](./applications) Applications; each in it's own directory.
+* [`nginx`](./nginx) NGINX related components; Ingress Controller, Service
+  Mesh, App Protect, etc. Each in a separate directory.
+* [`applications`](./applications) Applications; each in its own directory.
 
 ## Notes
 
diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py
index 6aa6919..06736fc 100644
--- a/pulumi/python/kubernetes/applications/sirius/__main__.py
+++ b/pulumi/python/kubernetes/applications/sirius/__main__.py
@@ -21,7 +21,8 @@ def remove_status_field(obj):
 
 def project_name_from_infrastructure_dir():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    eks_project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'kubeconfig')
+    eks_project_path = os.path.join(
+        script_dir, '..', '..', '..', 'infrastructure', 'kubeconfig')
     return pulumi_config.get_pulumi_project_name(eks_project_path)
 
 
@@ -33,22 +34,26 @@ def project_name_from_kubernetes_dir(dirname: str):
 
 #
 # This is just used for the kubernetes config deploy.... 
#
-# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178
-#
+
+
 def pulumi_repo_ingress_project_name():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
+    ingress_project_path = os.path.join(
+        script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
     return pulumi_config.get_pulumi_project_name(ingress_project_path)
 
 
+
 def pulumi_ingress_project_name():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller')
+    ingress_project_path = os.path.join(
+        script_dir, '..', '..', 'nginx', 'ingress-controller')
     return pulumi_config.get_pulumi_project_name(ingress_project_path)
 
 
 def sirius_manifests_location():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    sirius_manifests_path = os.path.join(script_dir, 'src', 'kubernetes-manifests', '*.yaml')
+    sirius_manifests_path = os.path.join(
+        script_dir, 'src', 'kubernetes-manifests', '*.yaml')
     return sirius_manifests_path
 
 
@@ -60,21 +65,23 @@ def extract_password_from_k8s_secrets(secrets: Mapping[str, str], secret_name: s
     password = str(byte_data, 'utf-8')
     return password
 
-
+#
 # We will only want to be deploying one type of certificate issuer
 # as part of this application; this can (and should) be changed as
 # needed. For example, if the user is taking advantage of ACME let's encrypt
 # in order to generate certs.
+#
 
 
 def k8_manifest_location():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     k8_manifest_path = os.path.join(script_dir, 'cert', 'self-sign.yaml')
     return k8_manifest_path
 
-
+#
 # The database password is a secret, and in order to use it in a string concat
 # we need to decrypt the password with Output.unsecret() before we use it.
-# This function provides the logic to accomplish this, while still using the pulumi
-# secrets for the resulting string:
+# This function provides the logic to accomplish this, while still using the
+# pulumi secrets for the resulting string:
+#
 def create_pg_uri(password_object):
     user = str(accounts_admin)
     password = str(password_object)
@@ -103,26 +110,27 @@ def add_namespace(obj):
 secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id)
 pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets')
 
-k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig)
+k8s_provider = k8s.Provider(resource_name='ingress-controller')
 
 #
-# This logic is used to manage the kubeconfig deployments, since that uses a slightly
-# different logic path than the mainline. This will be removed once the kubeconfig deploys
-# are moved to the Pulumi Automation API.
-#
-# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178
+# This logic is used to manage the kubeconfig deployments, since that uses a
+# slightly different logic path than the mainline. This will be removed once
+# the kubeconfig deploys are moved to the Pulumi Automation API. 
# - config = pulumi.Config('kubernetes') infra_type = config.require('infra_type') if infra_type == 'kubeconfig': + # # Logic to extract the FQDN of the load balancer for Ingress + # ingress_project_name = pulumi_repo_ingress_project_name() ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}" ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id) lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') + # # Set back to kubernetes + # config = pulumi.Config('kubernetes') lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip') sirius_host = lb_ingress_hostname @@ -137,24 +145,25 @@ def add_namespace(obj): lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname') sirius_host = lb_ingress_hostname - +# # Create the namespace for Bank of Sirius +# ns = k8s.core.v1.Namespace(resource_name='bos', metadata={'name': 'bos'}, opts=pulumi.ResourceOptions(provider=k8s_provider)) -# Add Config Maps for Bank of Sirius; these are built in Pulumi in order to manage secrets and provide the option -# for users to override defaults in the configuration file. Configuration values that are required use the `require` -# method. Those that are optional use the `get` method, and have additional logic to set defaults if no value is set -# by the user. # -# Note that the Pulumi code will exit with an error message if a required variable is not defined in the configuration -# file. +# Add Config Maps for Bank of Sirius; these are built in Pulumi in order to +# manage secrets and provide the option for users to override defaults in the +# configuration file. Configuration values that are required use the `require` +# method. Those that are optional use the `get` method, and have additional +# logic to set defaults if no value is set by the user. # -# Configuration Values are stored in the configuration: -# ./config/Pulumi.STACKNAME.yaml +# Note that the Pulumi code will exit with an error message if a required +# variable is not defined in the configuration file. +# +# Configuration Values are stored in the "secrets" project # -# Note this config is specific to the sirius code! 
config = pulumi.Config('sirius') sirius_secrets = Secret.get(resource_name='pulumi-secret-sirius', @@ -180,7 +189,8 @@ def add_namespace(obj): accounts_db_uri = pulumi.Output.unsecret(accounts_pwd).apply(create_pg_uri) accounts_db_config_config_map = k8s.core.v1.ConfigMap("accounts_db_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -198,7 +208,8 @@ def add_namespace(obj): }) environment_config_config_map = k8s.core.v1.ConfigMap("environment_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -209,8 +220,10 @@ def add_namespace(obj): "LOCAL_ROUTING_NUM": "883745000", "PUB_KEY_PATH": "/root/.ssh/publickey" }) + tracing_config_config_map = k8s.core.v1.ConfigMap("tracing_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -224,7 +237,8 @@ def add_namespace(obj): }) service_api_config_config_map = k8s.core.v1.ConfigMap("service_api_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -238,12 +252,13 @@ def add_namespace(obj): "CONTACTS_API_ADDR": "contacts:8080", "USERSERVICE_API_ADDR": "userservice:8080", }) - +# # Demo data is hardcoded in the current incarnation of the bank of # sirius project, so we go along with that for now. - +# demo_data_config_config_map = k8s.core.v1.ConfigMap("demo_data_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -267,7 +282,8 @@ def add_namespace(obj): spring_url = 'jdbc:postgresql://ledger-db:5432/' + str(ledger_db) ledger_db_config_config_map = k8s.core.v1.ConfigMap("ledger_db_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -297,7 +313,8 @@ def add_namespace(obj): jwt_key_secret = k8s.core.v1.Secret("jwt_keySecret", api_version="v1", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), kind="Secret", metadata=k8s.meta.v1.ObjectMetaArgs( name="jwt-key", @@ -309,11 +326,13 @@ def add_namespace(obj): "jwtRS256.key.pub": str(encode_public, "utf-8") }) -# Create resources for the Bank of Sirius using the Kubernetes YAML manifests which have been pulled from -# the google repository. # -# Note that these have been lightly edited to remove dependencies on GCP where necessary. Additionally, the -# `frontend` service has been updated to use a ClusterIP rather than the external load balancer, as that interaction +# Create resources for the Bank of Sirius using the Kubernetes YAML manifests +# which have been pulled from the google repository. +# +# Note that these have been lightly edited to remove dependencies on GCP where +# necessary. 
Additionally, the `frontend` service has been updated to use a
+# ClusterIP rather than the external load balancer, as that interaction
 # is now handled by the NGNIX Ingress Controller
 #
 sirius_manifests = sirius_manifests_location()
 
@@ -325,10 +344,11 @@ def add_namespace(obj):
     opts=pulumi.ResourceOptions(depends_on=[tracing_config_config_map])
 )
 
+#
 # We need to create an issuer for the cert-manager (which is installed in a
 # separate project directory). This can (and should) be adjusted as required,
 # as the default issuer is self-signed.
-
+#
 k8_manifest = k8_manifest_location()
 
 selfissuer = ConfigFile(
@@ -336,13 +356,17 @@ def add_namespace(obj):
     transformations=[add_namespace],
     file=k8_manifest)
 
-# Add the Ingress controller for the Bank of Sirius application. This uses the NGINX IC that is installed
-# as part of this Pulumi stack.
+#
+# Add the Ingress controller for the Bank of Sirius application. This uses the
+# NGINX IC that is installed as part of this Pulumi stack.
 #
-# This block is responsible for creating the Ingress object for the application. This object
-# is deployed into the same namespace as the application and requires that an IngressClass
-# and Ingress controller be installed (which is done in an earlier step, deploying the KIC).
+#
+# This block is responsible for creating the Ingress object for the
+# application. This object is deployed into the same namespace as the
+# application and requires that an IngressClass and Ingress controller be
+# installed (which is done in an earlier step, deploying the KIC).
+#
 bosingress = k8s.networking.v1.Ingress("bosingress",
                                        api_version="networking.k8s.io/v1",
                                        kind="Ingress",
@@ -367,7 +391,7 @@ def add_namespace(obj):
                                            # to store the generated certificate.
                                            tls=[k8s.networking.v1.IngressTLSArgs(
                                                hosts=[sirius_host],
-                                               secret_name="sirius-secret",
+                                               secret_name="sirius-secret",  # pragma: allowlist secret
                                            )],
                                            # The block below defines the rules for traffic coming into the KIC.
                                            # In the example below, we take any traffic on the host for path /
@@ -396,11 +420,9 @@ def add_namespace(obj):
                                        ))
 
 #
-# Get the hostname for our connect URL; this logic will be collapsed once the kubeconfig
-# deployments are moved over to the automation api. Until then, we have to use a different
-# process.
-#
-# TODO: Update as part of the conversion of kubeconfig to AutomationAPI #178
+# Get the hostname for our connect URL; this logic will be collapsed once the
+# kubeconfig deployments are moved over to the automation api. Until then,
+# we have to use a different process. 
#
 
 config = pulumi.Config('kubernetes')
 
@@ -408,7 +430,6 @@ def add_namespace(obj):
 if infra_type == 'kubeconfig':
     pulumi.export('hostname', lb_ingress_hostname)
     pulumi.export('ipaddress', lb_ingress_ip)
-    #pulumi.export('application_url', f'https://{lb_ingress_hostname}')
     application_url = sirius_host.apply(lambda host: f'https://{host}')
 else:
     application_url = sirius_host.apply(lambda host: f'https://{host}')
diff --git a/pulumi/python/kubernetes/applications/sirius/verify.py b/pulumi/python/kubernetes/applications/sirius/verify.py
index 415510c..99883ec 100755
--- a/pulumi/python/kubernetes/applications/sirius/verify.py
+++ b/pulumi/python/kubernetes/applications/sirius/verify.py
@@ -8,20 +8,23 @@
 stdin_json = json.load(sys.stdin)
 
 if 'application_url' not in stdin_json:
-    raise ValueError("Missing expected key 'application_url' in STDIN json data")
+    raise ValueError(
+        "Missing expected key 'application_url' in STDIN json data")
 
 url = f"{stdin_json['application_url']}/login"
 
 payload = 'username=testuser&password=password'
 headers = {
-    'Content-Type': 'application/x-www-form-urlencoded'
+  'Content-Type': 'application/x-www-form-urlencoded'
 }
 
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-response = requests.request("POST", url, headers=headers, data=payload, verify=False)
+response = requests.request(
+    "POST", url, headers=headers, data=payload, verify=False)
 response_code = response.status_code
 
 if response_code != 200:
-    print(f'Application failed health check [url={url},response_code={response_code}', file=sys.stderr)
+    print(
+        f'Application failed health check [url={url},response_code={response_code}]', file=sys.stderr)
     sys.exit(1)
 else:
     print('Application passed health check', file=sys.stderr)
diff --git a/pulumi/python/kubernetes/observability/otel-objects/README.md b/pulumi/python/kubernetes/observability/otel-objects/README.md
index 012808c..28873e4 100644
--- a/pulumi/python/kubernetes/observability/otel-objects/README.md
+++ b/pulumi/python/kubernetes/observability/otel-objects/README.md
@@ -1,44 +1,56 @@
-## Sample Configurations
-This directory contains a number of sample configurations that can be used with the
-[OTEL kubernetes operator](https://github.com/open-telemetry/opentelemetry-operator) that is installed as part of the
-MARA project.
+# Sample Configurations
 
-Each configuration currently uses the `simplest` deployment, which uses an in-memory store for data being processed.
-This is obviously not suited to a production deployment, but it is intended to illustrate the steps required to work
-with the OTEL deployment.
+This directory contains a number of sample configurations that can be used with
+the
+[OTEL kubernetes operator](https://github.com/open-telemetry/opentelemetry-operator)
+that is installed as part of the MARA project.
+
+Each configuration currently uses the `simplest` deployment, which uses an
+in-memory store for data being processed. This is obviously not suited to a
+production deployment, but it is intended to illustrate the steps required to
+work with the OTEL deployment.
 
 ## Commonality
 
 ### Listening Ports
+
-Each of the sample files is configured to listen on the
-[OTLP protocol](https://opentelemetry.io/docs/reference/specification/protocol/otlp/). The listen ports configured are:
+Each of the sample files is configured to listen on the
+[OTLP protocol](https://opentelemetry.io/docs/reference/specification/protocol/otlp/).
+The listen ports configured are:
+
 * grpc on port 9978
 * http on port 9979
 
-### Logging
-All the examples log to the container's stdout. However, the basic configuration is configured to only show the
-condensed version of the traces being received. In order to see the full traces, you need to set the logging level to
+### Logging
+
+All the examples log to the container's stdout. However, the basic configuration
+is configured to only show the condensed version of the traces being received.
+In order to see the full traces, you need to set the logging level to
 `DEBUG`. The basic-debug object is configured to do this automatically.
 
 ## Configurations
-### `otel-collector.yaml.basic`
-This is the default collector that only listens and logs summary spans to the container's stdout.
 
 ### `otel-collector.yaml.basic`
-This is a variant of the default collector that will output full spans to the container's stdout.
+
+This is the default collector that only listens and logs summary spans to the
+container's stdout.
 
 ### `otel-collector.yaml.full`
-This is a more complex variant that contains multiple receivers, processors, and exporters. Please see the file for
-details.
+
+This is a more complex variant that contains multiple receivers, processors,
+and exporters. Please see the file for details.
 
 ### `otel-collector.yaml.lightstep`
-This configuration file deploys lightstep as an ingester. Please note you will need to have a
-[lightstep](https://lightstep.com/) account to use this option, and you will need to add your lightstep access token
-to the file in the field noted.
+
+This configuration file deploys lightstep as an ingester. Please note you will
+need to have a [lightstep](https://lightstep.com/) account to use this option,
+and you will need to add your lightstep access token to the file in the field
+noted.
 
-## Usage
-By default, the `otel-collector.yaml.basic` configuration is copied into the live `otel-collector.yaml`. The logic for
-this project runs all files ending in `.yaml` as part of the configuration, so you simply need to either rename your
-chosen file to `otel-collector.yaml` or add ensuring only the files you want to use have the `.yaml` extension.
+## Usage
+
+By default, the `otel-collector.yaml.basic` configuration is copied into the
+live `otel-collector.yaml`. The logic for this project runs all files ending in
+`.yaml` as part of the configuration, so you simply need to either rename your
+chosen file to `otel-collector.yaml` or ensure that only the files you want to
+use have the `.yaml` extension.
diff --git a/pulumi/python/kubernetes/observability/otel-operator/README.md b/pulumi/python/kubernetes/observability/otel-operator/README.md
index ed83b05..e599436 100644
--- a/pulumi/python/kubernetes/observability/otel-operator/README.md
+++ b/pulumi/python/kubernetes/observability/otel-operator/README.md
@@ -1,12 +1,16 @@
 # Directory
+
 `/pulumi/python/kubernetes/observablity/otel-operator`
 
 ## Purpose
+
 Deploys the OpenTelemetry Operator via a YAML manifest.
 
 ## Key Files
-- [`opentelemetry-operator.yaml`](./opentelemetry-operator.yaml) This file is used by the Pulumi code in the
-directory above to deploy the OTEL operator.
+
+* [`opentelemetry-operator.yaml`](./opentelemetry-operator.yaml) This file is
+  used by the Pulumi code in the directory above to deploy the OTEL operator. 
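+
+## Smoke Test
+
+As a quick way to confirm that a collector deployed from the
+[`otel-objects`](../otel-objects) samples is actually receiving traffic, the
+sketch below sends a single span to the collector's OTLP gRPC listener (port
+9978 in the sample configurations). It assumes the `opentelemetry-sdk` and
+`opentelemetry-exporter-otlp` packages are installed and that the collector
+service is reachable on localhost (for example via `kubectl port-forward`);
+adjust the endpoint for your environment.
+
+```python
+from opentelemetry import trace
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+
+# Point at the collector's OTLP gRPC listener (9978 in the sample objects)
+exporter = OTLPSpanExporter(endpoint='localhost:9978', insecure=True)
+
+provider = TracerProvider(
+    resource=Resource.create({'service.name': 'mara-smoke-test'}))
+provider.add_span_processor(BatchSpanProcessor(exporter))
+trace.set_tracer_provider(provider)
+
+tracer = trace.get_tracer(__name__)
+with tracer.start_as_current_span('otel-smoke-test'):
+    pass  # the span itself is the payload
+
+# Flush pending spans before the interpreter exits
+provider.shutdown()
+```
+
+With the basic-debug sample applied, the span should appear in full in the
+collector container's stdout.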
## Notes
 
-The OTEL operator had dependencies on [cert-manager](../../certmgr
+
+The OTEL operator has dependencies on [cert-manager](../../certmgr)
diff --git a/pulumi/python/kubernetes/prometheus/extras/README.md b/pulumi/python/kubernetes/prometheus/extras/README.md
index a485fc8..8fae4c4 100644
--- a/pulumi/python/kubernetes/prometheus/extras/README.md
+++ b/pulumi/python/kubernetes/prometheus/extras/README.md
@@ -1,13 +1,14 @@
-## Purpose
-This directory contains a manifest that can be used to change the metrics bind port
-for the kube-proxy from 127.0.0.1 to 0.0.0.0 in order to allow the metrics to be scraped
-by the prometheus service.
+# Purpose
 
-This is not being automatically applied, since it is changing the bind address that is
-being used for the metrics port. That said, this should be secure since it's internal
-to the installation and the connection is done via HTTPS.
+This directory contains a manifest that can be used to change the metrics
+bind port for the kube-proxy from 127.0.0.1 to 0.0.0.0 in order to allow the
+metrics to be scraped by the prometheus service.
+
+This is not being automatically applied, since it is changing the bind address
+that is being used for the metrics port. That said, this should be secure
+since it is internal to the installation and the connection is done via HTTPS.
+
+However, please see this
 
-However, please see this
 [github issue](https://github.com/prometheus-community/helm-charts/issues/977)
 for the full discussion of why this is required.
-
diff --git a/pulumi/python/tools/README.md b/pulumi/python/tools/README.md
index ba27c04..61e9b06 100644
--- a/pulumi/python/tools/README.md
+++ b/pulumi/python/tools/README.md
@@ -1,35 +1,40 @@
-## Directory
+# Directory
 
 `/pulumi/python/tools`
 
-## Deprecation Notice
-These tools are no longer supported by the MARA team and will be removed in a future release. They *should* work
-correctly, but this is not guaranteed. Any use is at your own risk.
+## _Deprecation Notice_
+
+These tools are no longer supported by the MARA team and will be removed in a
+future release. They *should* work correctly, but this is not guaranteed. Any
+use is at your own risk.
 
 ## Purpose
 
-This directory holds common tools that *may* be required by kubernetes installations that do not meet the minimum
-requirements of MARA as checked by the [testcap.sh](../../../bin/testcap.sh) script.
+This directory holds common tools that *may* be required by kubernetes
+installations that do not meet the minimum requirements of MARA.
 
 These tools address two main areas:
 
-- Ability to create persistent volumes.
-- Ability to obtain an external egress IP.
+* Ability to create persistent volumes.
+* Ability to obtain an external egress IP.
 
-Note that these tools are not specifically endorsed by the creators of MARA, and you should do your own determination of
-the best way to provide these capabilities. Many kubernetes distributions have recommended approaches to solving these
-problems.
+Note that these tools are not specifically endorsed by the creators of MARA, and
+you should make your own determination of the best way to provide these
+capabilities. Many kubernetes distributions have recommended approaches to
+solving these problems.
 
-To use these tools you will need to run the [kubernetes-extras.sh](../../../bin/kubernetes-extras.sh) script from the
-main `bin` directory. This will walk you through the process of setting up these tools. 
+To use these tools you will need to run the +[kubernetes-extras.sh](../../../bin/kubernetes-extras.sh) script from the +main `bin` directory. This will walk you through the process of setting up +these tools. ## Key Files -- [`common`](./common) Common directory to hold the pulumi configuration file. -- [`kubevip`](./kubevip) Install directory for the `kubevip` package. Currently WIP. -- [`metallb`](./metallb) Install directory for the `metallb` package. -- [`nfsvolumes`](./nfsvolumes) Install directory for the `nfsvolumes` package. +* [`common`](./common) Common directory to hold the pulumi configuration file. +* [`metallb`](./metallb) Install directory for the `metallb` package. +* [`nfsvolumes`](./nfsvolumes) Install directory for the `nfsvolumes` package. ## Notes -Please read the comments inside the installation script, as there are some important caveats. +Please read the comments inside the installation script, as there are some +important caveats. From cfee4be3782ad43e82df60a509320f24d17a8825 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Tue, 30 Aug 2022 14:19:23 -0600 Subject: [PATCH 61/62] fix: linode jenkinsfile and log level adjust / comment add (#194) * fix: update log level and add comment to clarify print stmt * fix: add closing braces for Linode Jenkinsfile * fix: cosmetic fix for || construct --- extras/jenkins/Linode/Jenkinsfile | 4 ++-- pulumi/python/automation/main.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile index 6c73af4..c8c8702 100644 --- a/extras/jenkins/Linode/Jenkinsfile +++ b/extras/jenkins/Linode/Jenkinsfile @@ -170,7 +170,7 @@ pipeline { */ sh ''' - $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER destroy + $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy find $WORKSPACE -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } @@ -187,7 +187,7 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER destroy|| true + $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy || true # Clean up the Pulumi stack find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py index d0f7fe1..f76c441 100755 --- a/pulumi/python/automation/main.py +++ b/pulumi/python/automation/main.py @@ -100,9 +100,10 @@ def write_env(env_config, stack_name): """Create a new environment file and write our stack to it""" with open(env_config.filename, 'w') as f: try: + # Note that we are printing to a file here, not STDOUT print("PULUMI_STACK=" + stack_name, file=f) msg = 'Environment configuration file not found. 
Creating new file at the path: %s' - RUNNER_LOG.error(msg, env_config.filename) + RUNNER_LOG.info(msg, env_config.filename) except (FileNotFoundError, PermissionError): RUNNER_LOG.error("Unable to build configuration file") sys.exit(2) @@ -113,8 +114,9 @@ def append_env(env_config, stack_name): with open(env_config.config_path, 'a') as f: try: msg = 'Environment configuration file does not contain PULUMI_STACK, adding' + # Note that we are printing to a file here, not STDOUT print("PULUMI_STACK=" + stack_name, file=f) - RUNNER_LOG.error(msg) + RUNNER_LOG.info(msg) except (FileNotFoundError, PermissionError): RUNNER_LOG.error("Unable to append to configuration file") sys.exit(2) From c5515a394c3a5efdaae961167cd607ff0fb1cfc7 Mon Sep 17 00:00:00 2001 From: Jason Schmidt Date: Tue, 30 Aug 2022 17:22:17 -0600 Subject: [PATCH 62/62] feat: add new logo to repo (#195) --- README.md | 4 +++- docs/NGINX-MARA-icon.png | Bin 0 -> 26127 bytes 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 docs/NGINX-MARA-icon.png diff --git a/README.md b/README.md index 4eea6f6..8467c0d 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,8 @@ ![MicroK8s Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=MicroK8s) ![Minikube Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Minikube) +![MARA Project](./docs/NGINX-MARA-icon.png) + This repository has the basics for a common way to deploy and manage modern apps. Over time, we'll build more example architectures using different deployment models and options – including other clouds – and you’ll be able @@ -101,4 +103,4 @@ Open source license notices for all projects in this repository can be found [here](https://app.fossa.com/reports/92595e16-c0b8-4c68-8c76-59696b6ac219). 
-[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=large)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_large) \ No newline at end of file +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=large)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_large) diff --git a/docs/NGINX-MARA-icon.png b/docs/NGINX-MARA-icon.png new file mode 100644 index 0000000000000000000000000000000000000000..bd4dde396006ee3827eaf5ce4aa66decc44445ab GIT binary patch literal 26127 zcmd?R1zTK8(=Lola1uPY2bc*E+}%AuaEIU;90r2ByL)hV4elD;Jvf6q->{$kKJPi_ zFMPwbuGO>XuIlPleV24Wh@!j%+B?E`P*6~4Qj(&|P*BkPkjnxI0rJF;mVF9xgLYJw z5QeH4|9AlT;&1Xz%2ZAc>Jy}m1O*R`4F&tw1@Z$0O$Y`5Pa6tK8k*?8ZDnY>fBV2d zK?RvZ!TsAu6LNq1L_;pf=zs69+0g$>F&pN8dPDPP!~VBz@iuHNaDW_gL$;IDaD;-w zrGC4hrIe}8A=EU?zkYN2CMU~nWNQsDFt#-`0k~S*y^Vt6b>)UMtxcQ^$XuoFbWst%C^}JK!_mGX?)UGBPq=2V+xiWl`~eheN*c zQG9oDvf~BjQJe|x<|Cf`E^Jb$E>|voE`tt)-)|@>~vk-Fn`(+m7?yraSWGieddi zf$QFiY3^xrI9SoIKFlJBXGC0MIruccrHh*DYY7sXLYgm5!<~t0adh+ESi1{Yq@$^mM5CC$M6V-y{EQ zg>em#woArq%pnDm>NamQ>ASKaopWJserz@$0v0-$t%f#ID2l+v-&r7KXQIQgu(rEy zzed{yw-{Yola=#O&8|zo6aI;p5~)da#_<>sB^K~_lB9s$<&XS}g{KNut4O*OJt~#l zvCwt5)c{{O37;QNHIMXz?=X3GuR2%y)vp>%GOQso+o>vN)Bbx-s~W1us07cvW~Ij- z<~>z4v|Vdht)HS^B%M^kDx+wgerHCzg$1^0?|(hZb)Bo9fOa>FmKg*ELn>i>oLo|n zJ~VER9nPo-uOU)RdH8a13y5~Z8WEgSjK2HSJT?8{eY>}#r}tm$Kiq18Kx!=E9ysWd z=Lx|aTU8MV2@BVQsgJ`;tn~_4HuNNCI$Pis7@|zl8?MoA_ik+TxGRfFS6-n%2fx|c znh*p8Xf`K5G`sZy9I?#OEY3*5PwEuxD0UcIfVt|;!(^@4Z})HN}++eVAx*JW@CgDaGXZYPz{)$ztBd`hYp+q6G)_x9VzIj|zevax;|@w#!|qdNQkB4M|wcwzYvMw}J<) z?!CUeUz@!x#8D1ki@wzq8Rn-E3yuQ2&!&0CtETl#GzDM?U(|@TjkZ2gcO?HU{KQ_9 z?=c+SvH$a$ypDc4Eqn!r@8Z*P8ibNrm6uyYvM6Ak7mEL{60(i}FZGxeopttF2i znBS|O^?m-iUU~s6e-ywuCuJws{A;p8j4f(Owhh&iA&fJqlYD{aIpAh|9xIAh-sWY8 zN5yyP^;4M9=*Oa7JXRf5A(4!RVuI(iFxo3TQOy-}t;7VtFt-jekR@FHk(5%1BA)UrvqOL$1r3|hh3nTRJl<6v$LuAzn$ouZ~K8J1}AyI09LrvVPrVAfXp7f{7&IJEhJLEO+0&D_eb35T-Je&H9u^tCBl_zdmgfPcp^`M&Uaeqrb9*2H@}07N9Q%N z`)cqMDvxl@pJyS;U%50nyB$(+>`I`%br>h;s$)c^^OX-q6`15u*r^%B5E&OElf|K~ z_NdqHBxtQI8dAxwSXRm{y$EJ`FetRgHk3z96Kh^2HC0m|oU7Bdm=|h7zQ?+u4Yt?y zA-J-~e*rM#ls;D$-p$?LOMCu6w`T)x)Dg=a%|O$y)9Pnm2jMH=smguMz4YrjlF^YP_T^M#jt`jeDD)>jtu{Kr1Gblw& z%Xaakx_iCpK}%jA!^&HQ`qx3Fm0d2l5?inC>fMR!xRVkq3qW{=hBDz{3LuY%!}NDv zjR(B|qKn>9(gWQuw=Ko#OId}TB~0m-+ELhg5#(j|@O?d_h1dKfJxlaRS=@N3BE0!w zCColbYQM=JNwX@RCR?ip*2)hmb4!!r?+9xI(#aKP6Hu%1eOG@>}hBON;MSc4+ zI@(P>w^sflbK_RtaV4ANbH-25{yhk*ck#BGCIw}QsFFT}u;J+Y08!l}Wu97KIL5bD zZ%QoFn|C#27~;Z;z+3=h8diEAVMC7lH6dA7$K{7>lakITIKTN}c;ecfa=EM_=D!}P z90(@v#+$+xA)1F+&|X!CSXLqq?-B2*9CL_&*rVG#KgLLIP@#t4<8Npzw02OOJjr`IHsYF#gG>)68H*jCi>)UQuz~)WD z&j1LBqD6V-^ek~h3s`)rU(v%h82)^-eBMd@u?6u6WJdwVdY=qc4f>E*ICN?ijIF10 zTWT^=ZR1)P7TcAv(WqrGAoHni8;qqGC!%vJ;xudfJW24VM~4v;7)9M z!le0!uVv{Zb&d`^tExWjJ>!^M^cNv>>P%DHg5ubfI6^>|vtp)W9_O%H4Vct<^;;sY zw9{I6o0jZ(mQ@@H4OBYQ@pY@z+EpeHf~U4VcikA0vA{e!o%n)H`kVR)FRM9;2mw0h zhtl*710JGTSLK4k;U%JxqWnlDXLiJJ4dIe%l*@UVgSfbXClilPI11RC4 z0qEBTdW05wRU&tTJu7ft62sR=j=L1HyTL4m2Pg=b5O?6NLvgE=#2=!qrlwc@mEaEx z;K1)KzYd{G%o0L;OkNqJK{)AAF}*qtGt)SG-!1QC&|(J6i(w$_RG~&b9@f==*7^F= zmiM$(z~_p0@`8~A9$mGL4yHZ`N{-rqv*nQrh!lItOAY_qgY8-M8d`0ul+n3If8dCX zBm8q-G^0=6LCKR=w5J2(u`AOTov$WBf3_`xp+Mi5Zj;ixY^Vw{7$vCr0e)D>U#E|i zdr+F6ov1rP-IF=uS#|zgb>M9TdO-Qlzejoeo~$8uckr?#mi_IH=orN1o6yv?3Na5B zpBcajQR?7?#NwdQ19yLxG1rO!Y^ 