diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 6bcce42f..066b2d92 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -14,4 +14,4 @@ A clear and concise description of what you want to happen.
 A clear and concise description of any alternative solutions or features you've considered.
 
 **Additional context**
-Add any other context or screenshots about the feature request here.
\ No newline at end of file
+Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 98a73732..023b0f73 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,12 +1,12 @@
 ### Proposed changes
 
-Describe the use case and detail of the change. If this PR addresses an issue 
-on GitHub, make sure to include a link to that issue here in this description 
+Describe the use case and detail of the change. If this PR addresses an issue
+on GitHub, make sure to include a link to that issue here in this description
 (not in the title of the PR).
 
 ### Checklist
 
 Before creating a PR, run through this checklist and mark each as complete.
 
-- [ ] I have written my commit messages in the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format. 
+- [ ] I have written my commit messages in the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format.
 - [ ] I have read the [CONTRIBUTING](/CONTRIBUTING.md) doc
 - [ ] I have added tests (when possible) that prove my fix is effective or that my feature works
 - [ ] I have checked that all unit tests pass after adding my changes
diff --git a/.gitignore b/.gitignore
index bb596ba6..2b62406c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -260,6 +260,9 @@ override.tf.json
 
 # End of https://www.toptal.com/developers/gitignore/api/python,pycharm+all,terraform
 
+# Ignore locally installed pyenv environment
+.pyenv
+
 *.pyc
 !/extras/jwt.token
 /pulumi/python/tools/common/config/*.yaml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..ef0b1654
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,48 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.3.0
+  hooks:
+  - id: check-yaml
+    args: [--allow-multiple-documents]
+  - id: check-added-large-files
+  - id: check-merge-conflict
+  - id: detect-private-key
+  - id: trailing-whitespace
+  - id: mixed-line-ending
+  - id: end-of-file-fixer
+  - id: debug-statements
+  - id: check-ast
+
+- repo: https://github.com/pre-commit/mirrors-autopep8
+  rev: v1.7.0
+  hooks:
+  - id: autopep8
+
+- repo: https://github.com/asottile/dead
+  rev: v1.5.0
+  hooks:
+  - id: dead
+
+- repo: https://github.com/jumanjihouse/pre-commit-hooks
+  rev: 3.0.0
+  hooks:
+  - id: shellcheck
+  - id: shfmt
+  - id: markdownlint
+
+- repo: https://github.com/PyCQA/flake8
+  rev: 5.0.4
+  hooks:
+  - id: flake8
+
+- repo: https://github.com/zricethezav/gitleaks
+  rev: v8.11.0
+  hooks:
+  - id: gitleaks
+
+- repo: https://github.com/Yelp/detect-secrets
+  rev: v1.3.0
+  hooks:
+  - id: detect-secrets
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 4547fd84..1396e345 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -117,13 +117,14 @@ the community.
 
 This Code of Conduct is adapted from the [Contributor Covenant][homepage],
 version 2.0, available at
-https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
 
-Community Impact Guidelines were inspired by [Mozilla's code of conduct
-enforcement ladder](https://github.com/mozilla/diversity).
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
 
 [homepage]: https://www.contributor-covenant.org
 
 For answers to common questions about this code of conduct, see the FAQ at
-https://www.contributor-covenant.org/faq. Translations are available at
-https://www.contributor-covenant.org/translations.
\ No newline at end of file
+[https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq).
+Translations are available at
+[https://www.contributor-covenant.org/translations](https://www.contributor-covenant.org/translations).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8913447a..54f05b5c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,16 +1,18 @@
 # Contributing Guidelines
 
-The following is a set of guidelines for contributing. We really appreciate that you are considering contributing!
+The following is a set of guidelines for contributing. We really appreciate
+that you are considering contributing!
 
-#### Table Of Contents
+## Table Of Contents
 
 [Ask a Question](#ask-a-question)
 
 [Contributing](#contributing)
 
 [Style Guides](#style-guides)
- * [Git Style Guide](#git-style-guide)
- * [Go Style Guide](#go-style-guide)
+
+* [Git Style Guide](#git-style-guide)
+* [Code Style Guide](#code-style-guide)
 
 [Code of Conduct](https://github.com/nginxinc/nginx-wrapper/blob/master/CODE_OF_CONDUCT.md)
 
@@ -22,33 +24,50 @@ Please open an Issue on GitHub with the label `question`.
 
 ### Report a Bug
 
-To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the issue has not already been reported.
+To report a bug, open an issue on GitHub with the label `bug` using the
+available bug report issue template. Please ensure the issue has not already
+been reported.
 
 ### Suggest an Enhancement
 
-To suggest an enhancement, please create an issue on GitHub with the label `enhancement` using the available feature issue template.
+To suggest an enhancement, please create an issue on GitHub with the label
+`enhancement` using the available feature issue template.
 
 ### Open a Pull Request
 
-* Fork the repo, create a branch, submit a PR when your changes are tested and ready for review.
+* Fork the repo, create a branch, submit a PR when your changes are tested and
+  ready for review.
* Fill in [our pull request template](/.github/PULL_REQUEST_TEMPLATE.md)
 
-Note: if you’d like to implement a new feature, please consider creating a feature request issue first to start a discussion about the feature.
+Note: if you’d like to implement a new feature, please consider creating a
+feature request issue first to start a discussion about the feature.
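Since this change set adds a `.pre-commit-config.yaml` at the repository root, a contributor would typically enable those hooks locally before opening a PR. A minimal sketch, assuming the `pre-commit` framework is installed via `pip` (any installation method works):

```bash
# Install the pre-commit framework into the active Python environment
pip install pre-commit

# From the repository root, wire the hooks defined in
# .pre-commit-config.yaml into .git/hooks so they run on every `git commit`
pre-commit install

# Optionally run every hook against the whole tree once, to surface
# pre-existing issues (shellcheck, shfmt, markdownlint, gitleaks, etc.)
pre-commit run --all-files
```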
 ## Style Guides
 
 ### Git Style Guide
 
-* Keep a clean, concise and meaningful git commit history on your branch, rebasing locally and squashing before submitting a PR
-* Use the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format when writing a commit message, so that changelogs can be automatically generated
-* Follow the guidelines of writing a good commit message as described [here](https://chris.beams.io/posts/git-commit/) and summarised in the next few points
-  * In the subject line, use the present tense ("Add feature" not "Added feature")
-  * In the subject line, use the imperative mood ("Move cursor to..." not "Moves cursor to...")
-  * Limit the subject line to 72 characters or less
-  * Reference issues and pull requests liberally after the subject line
-  * Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`)
+* Keep a clean, concise and meaningful git commit history on your branch,
+  rebasing locally and squashing before submitting a PR
+* Use the
+  [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format
+  when writing a commit message, so that changelogs can be automatically
+  generated
+* Follow the guidelines of writing a good commit message as described
+  [here](https://chris.beams.io/posts/git-commit/) and summarised in the next
+  few points
+  * In the subject line, use the present tense
+    ("Add feature" not "Added feature")
+  * In the subject line, use the imperative mood ("Move cursor to..." not
+    "Moves cursor to...")
+  * Limit the subject line to 72 characters or less
+  * Reference issues and pull requests liberally after the subject line
+  * Add more detailed description in the body of the git message
+    (`git commit -a` to give you more space and time in your text editor to
+    write a good message instead of `git commit -am`)
 
 ### Code Style Guide
 
-* Python code should conform to the [PEP-8 style guidelines](https://www.python.org/dev/peps/pep-0008/) whenever possible.
+* Python code should conform to the
+  [PEP-8 style guidelines](https://www.python.org/dev/peps/pep-0008/)
+  whenever possible.
 * Where feasible, include unit tests.
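As a concrete illustration of the Conventional Commits and subject-line rules above, a commit against this repository might look like the following; the type, scope, and wording are hypothetical:

```bash
# Subject: imperative mood, present tense, <= 72 characters, with a
# Conventional Commits type and optional scope; detail goes in the body.
git commit \
  -m 'fix(bin): quote script_dir expansions in destroy scripts' \
  -m 'Unquoted path expansions could break teardown when the checkout path
contains whitespace. Refs #81, #82.'
```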
diff --git a/README.md b/README.md index 899ee8b1..8467c0db 100644 --- a/README.md +++ b/README.md @@ -1,40 +1,47 @@ +# NGINX Modern Reference Architectures + +## Current Test Status + [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=shield)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_shield) -![AWS Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=AWS) -![DO Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=DigitalOcean) -![LKE Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Linode) +![AWS Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=AWS) +![DO Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=DigitalOcean) +![LKE Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Linode) ![K3s Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=K3s) ![MicroK8s Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=MicroK8s) -![Minikube Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Minikube) +![Minikube Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Minikube) -# NGINX Modern Reference Architectures +![MARA Project](./docs/NGINX-MARA-icon.png) -This repository has the basics for a common way to deploy and manage modern apps. Over time, we'll build more example -architectures using different deployment models and options – including other clouds – and you’ll be able to find those -here. +This repository has the basics for a common way to deploy and manage modern +apps. Over time, we'll build more example architectures using different +deployment models and options – including other clouds – and you’ll be able +to find those here. ## Nomenclature -Internally, we refer to this project as MARA for Modern Application Reference Architecture. The current repository name -reflects the humble origins of this project, as it was started with the purpose of allowing users to build custom -versions of the NGINX Ingress Controller in Kubernetes. This went so well that we expanded it to the project you're -currently viewing. +Internally, we refer to this project as MARA for Modern Application Reference +Architecture. The current repository name reflects the humble origins of this +project, as it was started with the purpose of allowing users to build custom +versions of the NGINX Ingress Controller in Kubernetes. This went so well that +we expanded it to the project you're currently viewing. ## Modern App Architectures We define modern app architectures as those driven by four characteristics: -*scalability*, *portability*, *resiliency*, and *agility*. While many different aspects of a modern architecture exist, -these are fundamental. +*scalability*, *portability*, *resiliency*, and *agility*. While many different +aspects of a modern architecture exist, these are fundamental. -* **Scalability** – Quickly and seamlessly scale up or down to accommodate spikes or reductions in demand, anywhere in - the world. +* **Scalability** – Quickly and seamlessly scale up or down to accommodate + spikes or reductions in demand, anywhere in the world. 
-* **Portability** – Easy to deploy on multiple types of devices and infrastructures, on public clouds, and on premises. +* **Portability** – Easy to deploy on multiple types of devices and + infrastructures, on public clouds, and on premises. -* **Resiliency** – Can fail over to newly spun‑up clusters or virtual environments in different availability regions, - clouds, or data centers. +* **Resiliency** – Can fail over to newly spun‑up clusters or virtual + environments in different availability regions, clouds, or data centers. -* **Agility** – Ability to update through automated CI/CD pipelines with higher code velocity and more frequent code - pushes. +* **Agility** – Ability to update through automated CI/CD pipelines with higher + code velocity and more frequent code pushes. This diagram is an example of what we mean by a **modern app architecture**: ![Modern Apps Architecture Example Diagram](docs/DIAG-NGINX-ModernAppsRefArch-NGINX-MARA-1-0-blog-1024x800.png) @@ -53,26 +60,32 @@ To satisfy the four key characteristics, many modern app architectures employ: For details on the current state of this project, please see the [readme](pulumi/python/README.md) in the [`pulumi/python`](pulumi/python) -subdirectory. This project is under active development, and the current work is using [Pulumi](https://www.pulumi.com/) -with Python. Additionally, please see -[Status and Issues](docs/status-and-issues.md) for the project's up-to-date build status and known issues. - -Subdirectories contained within the root directory separate reference architectures by infrastructure deployment tooling -with additional subdirectories as needed. For example, Pulumi allows the use of multiple languages for deployment. As we -decided to use Python in our first build, there is a `python` subdirectory under the `pulumi` directory. - -This project was started to provide a complete, stealable, easy to deploy, and standalone example of how a modern app -architecture can be built. It was driven by the necessity to be flexible and not require a long list of dependencies to -get started. It needs to provide examples of tooling used to build this sort of architecture in the real world. Most -importantly, it needs to work. Hopefully this provides a ‘jumping off’ point for someone to build their own +subdirectory. This project is under active development, and the current work is +using [Pulumi](https://www.pulumi.com/) with Python. Additionally, please see +[Status and Issues](docs/status-and-issues.md) for the project's up-to-date +build status and known issues. + +Subdirectories contained within the root directory separate reference +architectures by infrastructure deployment tooling with additional +subdirectories as needed. For example, Pulumi allows the use of multiple +languages for deployment. As we decided to use Python in our first build, there +is a `python` subdirectory under the `pulumi` directory. + +This project was started to provide a complete, stealable, easy to deploy, and +standalone example of how a modern app architecture can be built. It was driven +by the necessity to be flexible and not require a long list of dependencies to +get started. It needs to provide examples of tooling used to build this sort of +architecture in the real world. Most importantly, it needs to work. Hopefully +this provides a ‘jumping off’ point for someone to build their own infrastructure. 
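The directory layout described above can be sketched roughly as follows; this is illustrative only and shows just the paths referenced in this README and in the `bin/` scripts, not the full tree:

```text
pulumi/
└── python/             # Pulumi programs written in Python
    ├── config/         # stack configuration read by the bin/ scripts
    ├── infrastructure/ # cluster projects (aws, linode, kubeconfig, ...)
    ├── kubernetes/     # in-cluster projects (certmgr, prometheus, nginx, ...)
    ├── tools/          # optional extras (metallb, nfsvolumes)
    ├── venv/           # virtual environment created by setup_venv.sh
    └── runner          # orchestration entry point for cloud deployments
```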
 ## Deployment Tools
 
 ### Pulumi
 
-[Pulumi](https://www.pulumi.com/) is a modern Infrastructure as Code (IaC) tool that allows you to write code (node,
-Python, Go, etc.) that defines cloud infrastructure. Within the [`pulumi`](pulumi) folder are examples of the pulumi
+[Pulumi](https://www.pulumi.com/) is a modern Infrastructure as Code (IaC) tool
+that allows you to write code (node, Python, Go, etc.) that defines cloud
+infrastructure. Within the [`pulumi`](pulumi) folder are examples of Pulumi
 being used to stand up MARA.
 
 ## Contribution
@@ -87,6 +100,7 @@ All code in this repository is licensed under the
 [Apache License v2 license](LICENSE).
 
 Open source license notices for all projects in this repository can be
-found [here](https://app.fossa.com/reports/92595e16-c0b8-4c68-8c76-59696b6ac219).
+found
+[here](https://app.fossa.com/reports/92595e16-c0b8-4c68-8c76-59696b6ac219).
 
 [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git.svg?type=large)](https://app.fossa.com/projects/custom%2B5618%2Fgit%40github.com%3Anginxinc%2Fkic-reference-architectures.git?ref=badge_large)
diff --git a/bin/aws_write_creds.sh b/bin/aws_write_creds.sh
index 485539bf..000c5b4a 100755
--- a/bin/aws_write_creds.sh
+++ b/bin/aws_write_creds.sh
@@ -2,40 +2,38 @@
 set -o errexit  # abort on nonzero exit status
 set -o pipefail # don't hide errors within pipes
 
-# 
-# This script is temporary until we rewrite the AWS deployment following #81 and #82.
-# We look into the environment and if we see environment variables for the AWS
-# authentication process we move them into a credentials file. This is primarily being
-# done at this time to support Jenkins using env vars for creds
+#
+# This script is temporary until we rewrite the AWS deployment following
+# #81 and #82. We look into the environment and if we see environment
+# variables for the AWS authentication process we move them into a
+# credentials file. This is primarily being done at this time to support
+# Jenkins using env vars for creds
 #
 aws_auth_vars=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN)
 
 missing_auth_vars=()
-for i in "${aws_auth_vars[@]}"
-do
-  test -n "${!i:+y}" || missing_vars+=("$i")
+for i in "${aws_auth_vars[@]}"; do
+	test -n "${!i:+y}" || missing_vars+=("$i")
 done
 
-if [ ${#missing_auth_vars[@]} -ne 0 ]
-then
-  echo "Did not find values for:"
-  printf ' %q\n' "${missing_vars[@]}"
-  echo "Will assume they are in credentials file or not needed"
+if [ ${#missing_auth_vars[@]} -ne 0 ]; then
+	echo "Did not find values for:"
+	printf ' %q\n' "${missing_vars[@]}"
+	echo "Will assume they are in credentials file or not needed"
 else
-  echo "Creating credentials file"
-  # Create the directory....
-  mkdir -p ~/.aws
-  CREDS=~/.aws/credentials
-  echo "[default]" > $CREDS
-  echo "aws_access_key_id=$AWS_ACCESS_KEY_ID" >> $CREDS
-  echo "aws_secret_access_key=$AWS_SECRET_ACCESS_KEY" >> $CREDS
-  # This is if we have non-temp credentials...
+	echo "Creating credentials file"
+	# Create the directory....
+	mkdir -p ~/.aws
+	CREDS=~/.aws/credentials
+	echo "[default]" >$CREDS
+	echo "aws_access_key_id=$AWS_ACCESS_KEY_ID" >>$CREDS
+	echo "aws_secret_access_key=$AWS_SECRET_ACCESS_KEY" >>$CREDS
+	# This is if we have non-temp credentials...
+ if [[ -z "${AWS_SESSION_TOKEN+x}" ]]; then + echo "Variable AWS_SESSION_TOKEN was unset; not adding to credentials" + else + echo "aws_session_token=$AWS_SESSION_TOKEN" >>$CREDS + fi fi - diff --git a/bin/destroy.sh b/bin/destroy.sh index 4f38632b..58f6f5ad 100755 --- a/bin/destroy.sh +++ b/bin/destroy.sh @@ -10,10 +10,11 @@ export PULUMI_SKIP_UPDATE_CHECK=true export PULUMI_SKIP_CONFIRMATIONS=true script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based -# projects. # -if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then +# Check to see if the venv has been installed, since this is only going to be +# used to start pulumi/python based projects. +# +if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." echo " " @@ -54,10 +55,8 @@ if ! pulumi whoami --non-interactive >/dev/null 2>&1; then fi echo " " -echo "Notice! This shell script will read the config/environment file to determine which pulumi stack to destroy." -echo "Based on the type of stack it will either run the ./bin/destroy_kube.sh or the ./bin/destroy_aws.sh script." -echo "If this is not what you want to do, please abort the script by typing ctrl-c and running the appropriate " -echo "script manually." +echo "Notice! This shell script will only destroy kubeconfig based deployments; if you have deployed to AWS, " +echo "DigitalOcean, or Linode you will need to use the ./pulumi/python/runner script instead." echo " " # Sleep so we are seen... @@ -69,27 +68,28 @@ echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" # # Determine what destroy script we need to run # -if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config>/dev/null 2>&1; then +if pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)" - if [ $INFRA == 'AWS' ]; then - echo "Destroying an AWS based stack; if this is not right please type ctrl-c to abort this script." - sleep 5 - ${script_dir}/destroy_aws.sh + if [ "$INFRA" == 'AWS' ]; then + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec ${script_dir}/../pulumi/python/runner exit 0 - elif [ $INFRA == 'kubeconfig' ]; then + elif [ "$INFRA" == 'kubeconfig' ]; then echo "Destroying a kubeconfig based stack; if this is not right please type ctrl-c to abort this script." sleep 5 - ${script_dir}/destroy_kube.sh + "${script_dir}"/destroy_kube.sh exit 0 - elif [ $INFRA == 'DO' ]; then - echo "Destroying a Digital Ocean based stack; if this is not right please type ctrl-c to abort this script." + elif [ "$INFRA" == 'DO' ]; then + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner sleep 5 - ${script_dir}/destroy_do.sh + "${script_dir}"/destroy_do.sh exit 0 - elif [ $INFRA == 'LKE' ]; then - echo "Destroying a Linode LKE based stack; if this is not right please type ctrl-c to abort this script." 
+ elif [ "$INFRA" == 'LKE' ]; then + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner sleep 5 - ${script_dir}/destroy_lke.sh + "${script_dir}"/destroy_lke.sh exit 0 else print "No infrastructure set in config file; aborting!" diff --git a/bin/destroy_aws.sh b/bin/destroy_aws.sh deleted file mode 100755 index d439b449..00000000 --- a/bin/destroy_aws.sh +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -function validate_aws_credentials() { - pulumi_aws_profile="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get aws:profile || true)" - if [ "${pulumi_aws_profile}" != "" ]; then - profile_arg="--profile ${pulumi_aws_profile}" - elif [[ -n "${AWS_PROFILE+x}" ]]; then - profile_arg="--profile ${AWS_PROFILE}" - else - profile_arg="" - fi - - echo "Validating AWS credentials" - if ! "${script_dir}/../pulumi/python/venv/bin/aws" ${profile_arg} sts get-caller-identity > /dev/null; then - echo >&2 "AWS credentials have expired or are not valid" - exit 2 - fi -} - - -APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) -NGINX=(kubernetes/nginx/ingress-controller utility/kic-image-build utility/kic-image-push) -AWSINFRA=(ecr eks vpc) - -if command -v aws > /dev/null; then - validate_aws_credentials -fi - -# -# This is a temporary process until we complete the directory reorg and move the start/stop -# process into more solid code. 
-# - -# Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi -done - -# Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi -done - -# TODO: figure out a more elegant way to do the CRD removal for prometheus #83 -# This is a hack for now to remove the CRD's for prometheus-kube-stack -# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -set +o errexit # don't abort on nonzero exit status for these commands -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 -set -o errexit # abort on nonzero exit status - -# Destroy NGINX components -for project_dir in "${NGINX[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" - fi -done - -# Clean up the kubeconfig project -for project_dir in "kubeconfig" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" - fi -done - -# Destroy the infrastructure -for project_dir in "${AWSINFRA[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/aws/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/aws/${project_dir} --emoji --stack ${PULUMI_STACK}" - echo "Destroying aws/${project_dir}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/aws/${project_dir}" - fi -done \ No newline at end of file diff --git a/bin/destroy_do.sh b/bin/destroy_do.sh deleted file mode 100755 index e2f5113a..00000000 --- 
a/bin/destroy_do.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - - -APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) -NGINX=(kubernetes/nginx/ingress-controller-repo-only) -INFRA=(kubeconfig digitalocean/domk8s) - -# -# This is a temporary process until we complete the directory reorg and move the start/stop -# process into more solid code. 
-# - -# Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi -done - -# Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi -done - -# TODO: figure out a more elegant way to do the CRD removal for prometheus #83 -# This is a hack for now to remove the CRD's for prometheus-kube-stack -# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 - -# Destroy NGINX components -for project_dir in "${NGINX[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" - fi -done - -# Clean up the kubeconfig project -for project_dir in "${INFRA[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" - fi -done diff --git a/bin/destroy_kube.sh b/bin/destroy_kube.sh index c90c5e78..0f9592e7 100755 --- a/bin/destroy_kube.sh +++ b/bin/destroy_kube.sh @@ -9,59 +9,58 @@ export PULUMI_SKIP_UPDATE_CHECK=true # Run Pulumi non-interactively export PULUMI_SKIP_CONFIRMATIONS=true -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! 
command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + +if ! command -v pulumi >/dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 +if ! command -v python3 >/dev/null; then + echo >&2 "Python 3 must be installed to continue" + exit 1 fi -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi +if ! command -v node >/dev/null; then + if [ -x "${script_dir}/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/venv/bin:$PATH" + + if ! command -v node >/dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi fi # Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login +if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + pulumi login - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi + if ! 
pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi fi source "${script_dir}/../config/pulumi/environment" echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) +KUBERNETES=(secrets observability logagent logstore certmgr prometheus) NGINX=(kubernetes/nginx/ingress-controller-repo-only) INFRA=(kubeconfig digitalocean/domk8s) @@ -71,61 +70,57 @@ INFRA=(kubeconfig digitalocean/domk8s) # # Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi +for project_dir in "${APPLICATIONS[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" + fi done # Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi +for project_dir in "${KUBERNETES[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" + fi done # TODO: figure out a more elegant way to do the CRD removal for prometheus #83 # This is a hack for now to remove the CRD's for prometheus-kube-stack # See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 -kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 +kubectl delete crd alertmanagerconfigs.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd alertmanagers.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd podmonitors.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd 
probes.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd prometheuses.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd prometheusrules.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd servicemonitors.monitoring.coreos.com >/dev/null 2>&1 +kubectl delete crd thanosrulers.monitoring.coreos.com >/dev/null 2>&1 # Destroy NGINX components -for project_dir in "${NGINX[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" - fi +for project_dir in "${NGINX[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/${project_dir}" + fi done # Clean up the kubeconfig project -for project_dir in "${INFRA[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" - fi +for project_dir in "${INFRA[@]}"; do + echo "$project_dir" + if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then + pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}" + pulumi $pulumi_args destroy + else + echo >&2 "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}" + fi done - - - - diff --git a/bin/destroy_lke.sh b/bin/destroy_lke.sh deleted file mode 100755 index af3e4006..00000000 --- a/bin/destroy_lke.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - -if ! command -v pulumi > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! command -v pulumi > /dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 > /dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node > /dev/null; then - if [ -x "${script_dir}/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/venv/bin:$PATH" - - if ! 
command -v node > /dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive > /dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - - -APPLICATIONS=(sirius) -KUBERNETES=(observability logagent logstore certmgr prometheus) -NGINX=(ingress-controller-repo-only) -LINODE=(lke) -KUBECONFIG=(kubeconfig) - -# -# This is a temporary process until we complete the directory reorg and move the start/stop -# process into more solid code. -# - -# Destroy the application(s) -for project_dir in "${APPLICATIONS[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/applications/${project_dir}" - fi -done - -# Destroy other K8 resources -for project_dir in "${KUBERNETES[@]}" ; do - echo "$project_dir" - if [ -f "${script_dir}/../pulumi/python/kubernetes/${project_dir}/Pulumi.yaml" ]; then - pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/${project_dir} --emoji --stack ${PULUMI_STACK}" - pulumi ${pulumi_args} destroy - else - >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/${project_dir}" - fi -done - -# TODO: figure out a more elegant way to do the CRD removal for prometheus #83 -# This is a hack for now to remove the CRD's for prometheus-kube-stack -# See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#uninstall-chart -# This was bombing out if K8 was not responding; hence the || true... 
-kubectl delete crd alertmanagerconfigs.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd alertmanagers.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd podmonitors.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd probes.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd prometheuses.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd prometheusrules.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd servicemonitors.monitoring.coreos.com > /dev/null 2>&1 || true
-kubectl delete crd thanosrulers.monitoring.coreos.com > /dev/null 2>&1 || true
-
-# Destroy NGINX components
-for project_dir in "${NGINX[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/kubernetes/nginx/${project_dir}"
-  fi
-done
-
-#
-# We need to do a cleanup of kubernetes making sure that we get rid of our PV's so they don't hang around
-#
-for NAMESPACE in $(kubectl get namespaces) ; do
-  # Change to a namespace
-  kubectl config set-context --current --namespace=$NAMESPACE
-  # Delete all pods
-  kubectl delete pod --all
-  # Delete all volume claims
-  kubectl delete pvc --all
-  # Delete all persistent volumes
-  kubectl delete pv --all
-done
-
-# Clean up the kubeconfig project
-for project_dir in "${KUBECONFIG[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/${project_dir}"
-  fi
-done
-
-# Clean up the linode project
-for project_dir in "${LINODE[@]}" ; do
-  echo "$project_dir"
-  if [ -f "${script_dir}/../pulumi/python/infrastructure/linode/${project_dir}/Pulumi.yaml" ]; then
-    pulumi_args="--cwd ${script_dir}/../pulumi/python/infrastructure/linode/${project_dir} --emoji --stack ${PULUMI_STACK}"
-    pulumi ${pulumi_args} destroy
-  else
-    >&2 echo "Not destroying - Pulumi.yaml not found in directory: ${script_dir}/../pulumi/python/infrastructure/linode/${project_dir}"
-  fi
-done
diff --git a/bin/kubernetes-extras.sh b/bin/kubernetes-extras.sh
index 94b8ba27..ca7914b8 100755
--- a/bin/kubernetes-extras.sh
+++ b/bin/kubernetes-extras.sh
@@ -28,51 +28,47 @@ sleep 5
 
 # Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based
 # projects.
 #
-if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" > /dev/null ; then
-  echo "NOTICE! Unable to find the vnev directory. This is required for the pulumi/python deployment process."
-  echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
-  echo " "
-  exit 1
+if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then
+	echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process."
+	echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
+ echo " " + exit 1 else - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" fi if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi - function retry() { - local -r -i max_attempts="$1"; shift - local -i attempt_num=1 - until "$@" - do - if ((attempt_num==max_attempts)) - then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done + local -r -i max_attempts="$1" + shift + local -i attempt_num=1 + until "$@"; do + if ((attempt_num == max_attempts)); then + echo "Attempt ${attempt_num} failed and there are no more attempts left!" + return 1 + else + echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." + sleep $((attempt_num++)) + fi + done } - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" - +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" echo " " echo "NOTICE! The stack name provided here should be different from the stack name you use for your main" @@ -89,12 +85,12 @@ echo " " sleep 5 if [ ! -f "${script_dir}/../pulumi/python/tools/common/config/environment" ]; then - touch "${script_dir}/../pulumi/python/tools/common/config/environment" + touch "${script_dir}/../pulumi/python/tools/common/config/environment" fi if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../pulumi/python/tools/common/config/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in tool installation: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../pulumi/python/tools/common/config/environment" + read -r -e -p "Enter the name of the Pulumi stack to use in tool installation: " PULUMI_STACK + echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../pulumi/python/tools/common/config/environment" fi source "${script_dir}/../pulumi/python/tools/common/config/environment" @@ -103,7 +99,6 @@ echo "Configuring all tool installations to use the stack: ${PULUMI_STACK}" # Create the stack if it does not already exist find "${script_dir}/../pulumi/python/tools" -mindepth 2 -maxdepth 2 -type f -name Pulumi.yaml -execdir pulumi stack select --create "${PULUMI_STACK}" \; - echo " " echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to" echo "connect to Kubernetes properly. 
If you have multiple kubernetes contexts (or custom contexts)" @@ -118,27 +113,27 @@ echo " " sleep 5 if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Kubeconfig file found" + echo "Kubeconfig file found" else - echo "Provide an absolute path to your kubeconfig file" - pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common + echo "Provide an absolute path to your kubeconfig file" + pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common fi # Clustername if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Clustername found" + echo "Clustername found" else - echo "Provide your clustername" - pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common + echo "Provide your clustername" + pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common fi # Contextname # TODO: Update process to use context name as well as kubeconfig and clustername #84 if pulumi config get kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "Context name found" + echo "Context name found" else - echo "Provide your context name" - pulumi config set kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common + echo "Provide your context name" + pulumi config set kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common fi # Set our variables @@ -147,15 +142,15 @@ cluster_name="$(pulumi config get kubernetes:cluster_name -C ${script_dir}/../pu context_name="$(pulumi config get kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common)" # Show our config...based on the kubeconfig file -if command -v kubectl > /dev/null; then - echo "Attempting to connect to kubernetes cluster" - retry 30 kubectl --kubeconfig="${kubeconfig}" config view +if command -v kubectl >/dev/null; then + echo "Attempting to connect to kubernetes cluster" + retry 30 kubectl --kubeconfig="${kubeconfig}" config view fi # Connect to the cluster -if command -v kubectl > /dev/null; then - echo "Attempting to connect to kubernetes cluster" - retry 30 kubectl --kubeconfig="${kubeconfig}" --cluster="${cluster_name}" --context="${context_name}" version > /dev/null +if command -v kubectl >/dev/null; then + echo "Attempting to connect to kubernetes cluster" + retry 30 kubectl --kubeconfig="${kubeconfig}" --cluster="${cluster_name}" --context="${context_name}" version >/dev/null fi echo " " @@ -168,67 +163,73 @@ echo " " sleep 5 while true; do - read -r -e -p "Do you wish to install metallb? 
" yn - case $yn in - [Yy]* ) echo "Checking for necessary values in the configuration:" - pulumi config set metallb:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 - if pulumi config get metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "CIDR found" - else - echo "Provide your CIDR (Note: no validation is done on this data)" - pulumi config set metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common - fi - break;; - [Nn]* ) # If they don't want metallb, but have a value in there we delete it - pulumi config rm metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm metallb:enabled -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - break;; - * ) echo "Please answer yes or no.";; - esac + read -r -e -p "Do you wish to install metallb? " yn + case $yn in + [Yy]*) + echo "Checking for necessary values in the configuration:" + pulumi config set metallb:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 + if pulumi config get metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "CIDR found" + else + echo "Provide your CIDR (Note: no validation is done on this data)" + pulumi config set metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common + fi + break + ;; + [Nn]*) # If they don't want metallb, but have a value in there we delete it + pulumi config rm metallb:thecidr -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm metallb:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + break + ;; + *) echo "Please answer yes or no." ;; + esac done while true; do - read -r -e -p "Do you wish to install nfs client support for persistent volumes? " yn - case $yn in - [Yy]* ) echo "Checking for necessary values in the configuration:" - pulumi config set nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 - if pulumi config get nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "NFS Server IP found" - else - echo "Provide your NFS Server IP (Note: no validation is done on this data)" - pulumi config set nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common - fi - if pulumi config get nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "NFS Share Path found" - else - echo "Provide your NFS Share Path (Note: no validation is done on this data)" - pulumi config set nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common - fi - break;; - [Nn]* ) # If they don't want nfsvols, but have a value in there we delete it - pulumi config rm nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - pulumi config rm nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common > /dev/null 2>&1 - break;; - * ) echo "Please answer yes or no.";; - esac + read -r -e -p "Do you wish to install nfs client support for persistent volumes? 
" yn + case $yn in + [Yy]*) + echo "Checking for necessary values in the configuration:" + pulumi config set nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common enabled >/dev/null 2>&1 + if pulumi config get nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "NFS Server IP found" + else + echo "Provide your NFS Server IP (Note: no validation is done on this data)" + pulumi config set nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common + fi + if pulumi config get nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then + echo "NFS Share Path found" + else + echo "Provide your NFS Share Path (Note: no validation is done on this data)" + pulumi config set nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common + fi + break + ;; + [Nn]*) # If they don't want nfsvols, but have a value in there we delete it + pulumi config rm nfsvols:nfsserver -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm nfsvols:nfspath -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + pulumi config rm nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1 + break + ;; + *) echo "Please answer yes or no." ;; + esac done pulumi_args="--emoji " if pulumi config get metallb:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "=====================" - echo "| MetalLB |" - echo "=====================" - cd "${script_dir}/../pulumi/python/tools/metallb" - pulumi $pulumi_args up + echo "=====================" + echo "| MetalLB |" + echo "=====================" + cd "${script_dir}/../pulumi/python/tools/metallb" + pulumi $pulumi_args up fi if pulumi config get nfsvols:enabled -C ${script_dir}/../pulumi/python/tools/common >/dev/null 2>&1; then - echo "=====================" - echo "| NFSVols |" - echo "=====================" + echo "=====================" + echo "| NFSVols |" + echo "=====================" - cd "${script_dir}/../pulumi/python/tools/nfsvolumes" - pulumi $pulumi_args up -fi \ No newline at end of file + cd "${script_dir}/../pulumi/python/tools/nfsvolumes" + pulumi $pulumi_args up +fi diff --git a/bin/setup_venv.sh b/bin/setup_venv.sh index 499268c3..fbce414e 100755 --- a/bin/setup_venv.sh +++ b/bin/setup_venv.sh @@ -7,47 +7,47 @@ set -o pipefail # don't hide errors within pipes # https://stackoverflow.com/a/31939275/33611 # CC BY-SA 3.0 License: https://creativecommons.org/licenses/by-sa/3.0/ function askYesNo() { - QUESTION=$1 - DEFAULT=$2 - if [ "$DEFAULT" = true ]; then - OPTIONS="[Y/n]" - DEFAULT="y" - else - OPTIONS="[y/N]" - DEFAULT="n" - fi - if [ "${DEBIAN_FRONTEND}" != "noninteractive" ]; then - read -p "$QUESTION $OPTIONS " -n 1 -s -r INPUT - INPUT=${INPUT:-${DEFAULT}} - echo "${INPUT}" - fi - - if [ "${DEBIAN_FRONTEND}" == "noninteractive" ]; then - ANSWER=$DEFAULT - elif [[ "$INPUT" =~ ^[yY]$ ]]; then - ANSWER=true - else - ANSWER=false - fi + QUESTION=$1 + DEFAULT=$2 + if [ "$DEFAULT" = true ]; then + OPTIONS="[Y/n]" + DEFAULT="y" + else + OPTIONS="[y/N]" + DEFAULT="n" + fi + if [ "${DEBIAN_FRONTEND}" != "noninteractive" ]; then + read -p "$QUESTION $OPTIONS " -n 1 -s -r INPUT + INPUT=${INPUT:-${DEFAULT}} + echo "${INPUT}" + fi + + if [ "${DEBIAN_FRONTEND}" == "noninteractive" ]; then + ANSWER=$DEFAULT + elif [[ "$INPUT" =~ ^[yY]$ ]]; then + ANSWER=true + else + ANSWER=false + fi } # Does basic OS distribution detection for "class" of distribution, such # as debian, rhel, etc function distro_like() { - 
local like - if [ "$(uname -s)" == "Darwin" ]; then - like="darwin" - elif [ -f /etc/os-release ]; then - if grep --quiet '^ID_LIKE=' /etc/os-release; then - like="$(grep '^ID_LIKE=' /etc/os-release | cut -d'=' -f2 | tr -d \")" - else - like="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d \")" - fi - else - like="unknown" - fi - - echo "${like}" + local like + if [ "$(uname -s)" == "Darwin" ]; then + like="darwin" + elif [ -f /etc/os-release ]; then + if grep --quiet '^ID_LIKE=' /etc/os-release; then + like="$(grep '^ID_LIKE=' /etc/os-release | cut -d'=' -f2 | tr -d \")" + else + like="$(grep '^ID=' /etc/os-release | cut -d'=' -f2 | tr -d \")" + fi + else + like="unknown" + fi + + echo "${like}" } script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" @@ -56,120 +56,135 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" unset VIRTUAL_ENV if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 + echo >&2 "git must be installed to continue" + exit 1 fi +# When Python does not exist if ! command -v python3 >/dev/null; then - if ! command -v make >/dev/null; then - echo >&2 "make must be installed in order to install python with pyenv" - echo >&2 "Either install make or install Python 3 with the venv module" - exit 1 - fi - if ! command -v gcc >/dev/null; then - echo >&2 "gcc must be installed in order to install python with pyenv" - echo >&2 "Either install gcc or install Python 3 with the venv module" - exit 1 - fi - - echo "Python 3 is not installed. Adding pyenv to allow for Python installation" - echo "If development library dependencies are not installed, Python build may fail." - - # Give relevant hint for the distro - if distro_like | grep --quiet 'debian'; then - echo "You may need to install additional packages using a command like the following:" - echo " apt-get install libbz2-dev libffi-dev libreadline-dev libsqlite3-dev libssl-dev" - elif distro_like | grep --quiet 'rhel'; then - echo "You may need to install additional packages using a command like the following:" - echo " yum install bzip2-devel libffi-devel readline-devel sqlite-devel openssl-devel zlib-devel" - else - echo "required libraries: libbz2 libffi libreadline libsqlite3 libssl zlib1g" - fi - - export PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv" - - mkdir -p "${PYENV_ROOT}" - git_clone_log="$(mktemp -t pyenv_git_clone-XXXXXXX.log)" - if git clone --depth 1 --branch v2.0.3 https://github.com/pyenv/pyenv.git "${PYENV_ROOT}" 2>"${git_clone_log}"; then - rm "${git_clone_log}" - else - echo >&2 "Error cloning pyenv repository:" - cat >&2 "${git_clone_log}" - fi - - export PATH="$PYENV_ROOT/bin:$PATH" + if ! command -v make >/dev/null; then + echo >&2 "make must be installed in order to install python with pyenv" + echo >&2 "Either install make or install Python 3 with the venv module" + exit 1 + fi + if ! command -v gcc >/dev/null; then + echo >&2 "gcc must be installed in order to install python with pyenv" + echo >&2 "Either install gcc or install Python 3 with the venv module" + exit 1 + fi + + echo "Python 3 is not installed. Adding pyenv to allow for Python installation" + echo "If development library dependencies are not installed, Python build may fail." 
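# The checks above gate the pyenv bootstrap on make and gcc. A standalone
# sketch of the same idea collapsed into one pass; the tool list is an
# assumption drawn from what this script requires, not part of the patch.
missing=""
for tool in make gcc git; do
	command -v "${tool}" >/dev/null || missing="${missing} ${tool}"
done
if [ -n "${missing}" ]; then
	echo >&2 "Missing build tools:${missing} - install them or install Python 3 with the venv module"
fi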
+ + # Give relevant hint for the distro + if distro_like | grep --quiet 'debian'; then + echo "You may need to install additional packages using a command like the following:" + echo " apt-get install libbz2-dev libffi-dev libreadline-dev libsqlite3-dev libssl-dev" + elif distro_like | grep --quiet 'rhel'; then + echo "You may need to install additional packages using a command like the following:" + echo " yum install bzip2-devel libffi-devel readline-devel sqlite-devel openssl-devel zlib-devel" + else + echo "required libraries: libbz2 libffi libreadline libsqlite3 libssl zlib1g" + fi + + PYENV_ROOT="${script_dir}/../pulumi/python/.pyenv" + + mkdir -p "${PYENV_ROOT}" + git_clone_log="$(mktemp -t pyenv_git_clone-XXXXXXX.log)" + if git clone --depth 1 --branch v2.0.3 https://github.com/pyenv/pyenv.git "${PYENV_ROOT}" 2>"${git_clone_log}"; then + rm "${git_clone_log}" + else + echo >&2 "Error cloning pyenv repository:" + cat >&2 "${git_clone_log}" + fi + + PATH="$PYENV_ROOT/bin:$PATH" fi -# If pyenv is available we use a hardcoded python version +# +# If pyenv is available we use the python version as set in the +# .python-version file. This gives us a known and well tested version +# of python. +# if command -v pyenv >/dev/null; then - eval "$(pyenv init --path)" - eval "$(pyenv init -)" - pyenv install --skip-existing <"${script_dir}/../.python-version" - - # If the pyenv-virtualenv tools are installed, prompt the user if they want to - # use them. - if [ -d "${PYENV_ROOT}/plugins/pyenv-virtualenv" ]; then - askYesNo "Use pyenv-virtualenv to manage virtual environment?" true - if [ $ANSWER = true ]; then - has_pyenv_venv_plugin=1 - else - has_pyenv_venv_plugin=0 - fi - else - has_pyenv_venv_plugin=0 - fi + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + + if [ -z "${PYENV_ROOT}" ]; then + PYENV_ROOT=~/.pyenv + fi + + echo "pyenv detected in: ${PYENV_ROOT}" + pyenv install --skip-existing <"${script_dir}/../.python-version" + + # If the pyenv-virtualenv tools are installed, prompt the user if they want to + # use them. + if [ -d "${PYENV_ROOT}/plugins/pyenv-virtualenv" ]; then + askYesNo "Use pyenv-virtualenv to manage virtual environment?" true + if [ $ANSWER = true ]; then + has_pyenv_venv_plugin=1 + else + has_pyenv_venv_plugin=0 + fi + else + has_pyenv_venv_plugin=0 + fi else - has_pyenv_venv_plugin=0 + has_pyenv_venv_plugin=0 fi # if pyenv with virtual-env plugin is installed, use that if [ ${has_pyenv_venv_plugin} -eq 1 ]; then - eval "$(pyenv virtualenv-init -)" - - if ! pyenv virtualenvs --bare | grep --quiet '^ref-arch-pulumi-aws'; then - pyenv virtualenv ref-arch-pulumi-aws - fi - - if [ -z "${VIRTUAL_ENV}" ]; then - pyenv activate ref-arch-pulumi-aws - fi - - if [ -h "${script_dir}/../pulumi/python/venv" ]; then - echo "Link already exists [${script_dir}/../pulumi/python/venv] - removing and relinking" - rm "${script_dir}/../pulumi/python/venv" - elif [ -d "${script_dir}/../pulumi/python/venv" ]; then - echo "Virtual environment directory already exists" - askYesNo "Delete and replace with pyenv-virtualenv managed link?" false - if [ $ANSWER = true ]; then - echo "Deleting ${script_dir}/../pulumi/python/venv" - rm -rf "${script_dir}/../pulumi/python/venv" - else - echo >&2 "The path ${script_dir}/../pulumi/python/venv must not be a virtual environment directory when using pyenv-virtualenv" - echo >&2 "Exiting. 
Please manually remove the directory" - exit 1 - fi - fi - - echo "Linking virtual environment [${VIRTUAL_ENV}] to local directory [venv]" - ln -s "${VIRTUAL_ENV}" "${script_dir}/../pulumi/python/venv" + eval "$(pyenv virtualenv-init -)" + + if ! pyenv virtualenvs --bare | grep --quiet '^mara'; then + pyenv virtualenv mara + fi + + if [ -z "${VIRTUAL_ENV}" ]; then + pyenv activate mara + fi + + if [ -h "${script_dir}/../pulumi/python/venv" ]; then + echo "Link already exists [${script_dir}/../pulumi/python/venv] - removing and relinking" + rm "${script_dir}/../pulumi/python/venv" + elif [ -d "${script_dir}/../pulumi/python/venv" ]; then + echo "Virtual environment directory already exists" + askYesNo "Delete and replace with pyenv-virtualenv managed link?" false + if [ $ANSWER = true ]; then + echo "Deleting ${script_dir}/../pulumi/python/venv" + rm -rf "${script_dir}/../pulumi/python/venv" + else + echo >&2 "The path ${script_dir}/../pulumi/python/venv must not be a virtual environment directory when using pyenv-virtualenv" + echo >&2 "Exiting. Please manually remove the directory" + exit 1 + fi + fi + + # We create a symbolic link to the pyenv managed venv because using the + # pyenv virtual environment tooling introduces too many conditional logic paths + # in subsequent scripts/programs that need to load the virtual environment. + # Assuming that the venv directory is at a fixed known path makes things easier. + echo "Linking virtual environment [${VIRTUAL_ENV}] to local directory [venv]" + ln -s "${VIRTUAL_ENV}" "${script_dir}/../pulumi/python/venv" fi # If pyenv isn't present do everything with default python tooling -if [ ${has_pyenv_venv_plugin} -eq 0 ]; then - if [ -z "${VIRTUAL_ENV}" ]; then - VIRTUAL_ENV="${script_dir}/../pulumi/python/venv" - echo "No virtual environment already specified, defaulting to: ${VIRTUAL_ENV}" - fi - - if [ ! -d "${VIRTUAL_ENV}" ]; then - echo "Creating new virtual environment: ${VIRTUAL_ENV}" - if ! python3 -m venv "${VIRTUAL_ENV}"; then - echo "Deleting partially created virtual environment: ${VIRTUAL_ENV}" - rm -rf "${VIRTUAL_ENV}" || true - fi - fi - - source "${VIRTUAL_ENV}/bin/activate" +if [ "${has_pyenv_venv_plugin}" -eq 0 ]; then + if [ -z "${VIRTUAL_ENV}" ]; then + VIRTUAL_ENV="${script_dir}/../pulumi/python/venv" + echo "No virtual environment already specified, defaulting to: ${VIRTUAL_ENV}" + fi + + if [ ! -d "${VIRTUAL_ENV}" ]; then + echo "Creating new virtual environment: ${VIRTUAL_ENV}" + if ! python3 -m venv "${VIRTUAL_ENV}"; then + echo "Deleting partially created virtual environment: ${VIRTUAL_ENV}" + rm -rf "${VIRTUAL_ENV}" || true + fi + fi + + source "${VIRTUAL_ENV}/bin/activate" fi source "${VIRTUAL_ENV}/bin/activate" @@ -183,15 +198,17 @@ pip3 install pipenv # Install certain utility packages like `nodeenv` and `wheel` that aid # in the installation of other build tools and dependencies # required by the other python packages. +pip3 install wheel + # `pipenv sync` uses only the information in the `Pipfile.lock` ensuring repeatable builds PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipenv sync --dev # Install node.js into virtual environment so that it can be used by Python # modules that make call outs to it. if [ ! 
-x "${VIRTUAL_ENV}/bin/node" ]; then - nodeenv -p --node=lts + nodeenv -p --node=lts else - echo "Node.js version $("${VIRTUAL_ENV}/bin/node" --version) is already installed" + echo "Node.js version $("${VIRTUAL_ENV}/bin/node" --version) is already installed" fi # Install general package requirements @@ -202,9 +219,8 @@ PIPENV_VERBOSITY=-1 PIPENV_PIPFILE="${script_dir}/../pulumi/python/Pipfile" pipe pip3 install "${script_dir}/../pulumi/python/utility/kic-pulumi-utils" rm -rf "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/.eggs" \ - "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/build" \ - "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/kic_pulumi_utils.egg-info" - + "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/build" \ + "${script_dir}/../pulumi/python/utility/kic-pulumi-utils/kic_pulumi_utils.egg-info" ARCH="" case $(uname -m) in @@ -215,79 +231,92 @@ aarch64) ARCH="arm64" ;; arm64) ARCH="arm64" ;; arm) dpkg --print-architecture | grep -q "arm64" && ARCH="arm64" || ARCH="arm" ;; *) - echo >&2 "Unable to determine system architecture." - exit 1 - ;; + echo >&2 "Unable to determine system architecture." + exit 1 + ;; esac OS="$(uname -s | tr '[:upper:]' '[:lower:]')" if command -v wget >/dev/null; then - download_cmd="wget --quiet --max-redirect=12 --output-document -" + download_cmd="wget --quiet --max-redirect=12 --output-document -" elif command -v curl >/dev/null; then - download_cmd="curl --fail --silent --location" + download_cmd="curl --fail --silent --location" else - echo >&2 "either wget or curl must be installed" - exit 1 + echo >&2 "either wget or curl must be installed" + exit 1 fi if command -v sha256sum >/dev/null; then - sha256sum_cmd="sha256sum --check" + sha256sum_cmd="sha256sum --check" elif command -v shasum >/dev/null; then - sha256sum_cmd="shasum --algorithm 256 --check" + sha256sum_cmd="shasum --algorithm 256 --check" else - echo >&2 "either sha256sum or shasum must be installed" - exit 1 + echo >&2 "either sha256sum or shasum must be installed" + exit 1 fi # # This section originally pulled the most recent version of Kubectl down; however it turned out that -# was causing isues with our AWS deploy (see the issues in the repo). Addtionally, this was only +# was causing issues with our AWS deploy (see the issues in the repo). Additionally, this was only # downloading the kubectl if it did not exist; this could result in versions not being updated if the # MARA project was run in the same environment w/o a refresh. # -# The two fixes here are to hardcode (For now) to a known good version (1.23.6) and force the script to +# The two fixes here are to hardcode (For now) to a known good version (1.24.3) and force the script to # always download this version. # -# TODO: Figure out a way to not hardocde the kubectl version +# TODO: Figure out a way to not hardcode the kubectl version # TODO: Should not always download if the versions match; need a version check # # if [ ! 
-x "${VIRTUAL_ENV}/bin/kubectl" ]; then - echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.23.6" - ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" - KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" - echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} - chmod +x "${VIRTUAL_ENV}/bin/kubectl" + echo "Downloading kubectl into virtual environment" + KUBECTL_VERSION="v1.24.3" + ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" + KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" + echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} + chmod +x "${VIRTUAL_ENV}/bin/kubectl" else - echo "kubectl is already installed, but will overwrite to ensure correct version" - echo "Downloading kubectl into virtual environment" - KUBECTL_VERSION="v1.23.6" - ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" - KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" - echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} - chmod +x "${VIRTUAL_ENV}/bin/kubectl" + echo "kubectl is already installed, but will overwrite to ensure correct version" + echo "Downloading kubectl into virtual environment" + KUBECTL_VERSION="v1.24.3" + ${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl" + KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")" + echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd} + chmod +x "${VIRTUAL_ENV}/bin/kubectl" fi # Download Pulumi CLI tooling # Regular expression and sed command from https://superuser.com/a/363878 echo "Downloading Pulumi CLI into virtual environment" -PULUMI_VERSION="$(pip3 list | grep 'pulumi ' | sed -nre 's/^[^0-9]*(([0-9]+\.)*[0-9]+).*/\1/p')" - if [ -z $PULUMI_VERSION ] ; then - echo "Failed to find Pulumi version - EXITING" - exit 5 - else - echo "Pulumi version found: $PULUMI_VERSION" - fi +PULUMI_VERSION="$(pip3 list | grep 'pulumi ' | sed -nEe 's/^[^0-9]*(([0-9]+\.)*[0-9]+).*/\1/p')" +if [ -z "$PULUMI_VERSION" ]; then + echo "Failed to find Pulumi version - EXITING" + exit 5 +else + echo "Pulumi version found: $PULUMI_VERSION" +fi if [[ -x "${VIRTUAL_ENV}/bin/pulumi" ]] && [[ "$(PULUMI_SKIP_UPDATE_CHECK=true "${VIRTUAL_ENV}/bin/pulumi" version)" == "v${PULUMI_VERSION}" ]]; then - echo "Pulumi version ${PULUMI_VERSION} is already installed" + echo "Pulumi version ${PULUMI_VERSION} is already installed" else - PULUMI_TARBALL_URL="https://get.pulumi.com/releases/sdk/pulumi-v${PULUMI_VERSION}-${OS}-${ARCH/amd64/x64}.tar.gz" - PULUMI_TARBALL_DESTTARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) - ${download_cmd} "${PULUMI_TARBALL_URL}" >"${PULUMI_TARBALL_DESTTARBALL_DEST}" - [ $? -eq 0 ] && echo "Pulumi downloaded successfully" || echo "Failed to download Pulumi" - tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DESTTARBALL_DEST}" - [ $? 
-eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" - rm "${PULUMI_TARBALL_DESTTARBALL_DEST}" + PULUMI_TARBALL_URL="https://get.pulumi.com/releases/sdk/pulumi-v${PULUMI_VERSION}-${OS}-${ARCH/amd64/x64}.tar.gz" + PULUMI_TARBALL_DEST=$(mktemp -t pulumi.tar.gz.XXXXXXXXXX) + ${download_cmd} "${PULUMI_TARBALL_URL}" >"${PULUMI_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Pulumi downloaded successfully" || echo "Failed to download Pulumi" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --strip-components 1 --file "${PULUMI_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Pulumi installed successfully" || echo "Failed to install Pulumi" + rm "${PULUMI_TARBALL_DEST}" +fi + +# Digital Ocean CLI +if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then + echo "Downloading Digital Ocean CLI" + DOCTL_VERSION="1.75.0" + DOCTL_TARBALL_URL="https://github.com/digitalocean/doctl/releases/download/v${DOCTL_VERSION}/doctl-${DOCTL_VERSION}-${OS}-${ARCH}.tar.gz" + DOCTL_TARBALL_DEST=$(mktemp -t doctl.tar.gz.XXXXXXXXXX) + ${download_cmd} "${DOCTL_TARBALL_URL}" >"${DOCTL_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Digital Ocean CLI downloaded successfully" || echo "Failed to download Digital Ocean CLI" + tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}" + [ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI" + rm "${DOCTL_TARBALL_DEST}" fi diff --git a/bin/start.sh b/bin/start.sh index 92e9c62c..f28e9791 100755 --- a/bin/start.sh +++ b/bin/start.sh @@ -18,50 +18,49 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" # projects. # if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then - echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." - echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." - echo " " - exit 1 + echo "NOTICE! Unable to find the venv directory. This is required for the pulumi/python deployment process." + echo "Please run ./setup_venv.sh from this directory to install the required virtual environment." + echo " " + exit 1 else - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" fi if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 + echo >&2 "Python 3 must be installed to continue" + exit 1 fi # Check to see if the user is logged into Pulumi if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login + pulumi login - if ! 
pulumi whoami --non-interactive >/dev/null 2>&1; then
-        echo >&2 "Unable to login to Pulumi - exiting"
-        exit 2
-    fi
+	if ! pulumi whoami --non-interactive >/dev/null 2>&1; then
+		echo >&2 "Unable to login to Pulumi - exiting"
+		exit 2
+	fi
 fi

 echo " "
-echo "NOTICE! This shell script will call the appropriate helper script depending on your answer to the next question."
+echo "NOTICE! This shell script is maintained for compatibility with the kubeconfig-only deployment and will be"
+echo "deprecated once the kubeconfig deployments are fully integrated with the Automation API."
 echo " "
-echo "This script currently supports standing up AWS, Linode, and Digital Ocean kubernetes deployments, provided "
-echo "the correct credentials are supplied. It also supports the user of a kubeconfig file with a defined cluster name"
-echo "and context, which must be provided by the user."
+echo "If you are deploying AWS, DigitalOcean, or Linode-based stacks, you will need to use the runner script."
 echo " "
 echo "Please read the documentation for more details."
 echo " "
@@ -69,72 +68,75 @@ echo " "
 sleep 5

 if [ -s "${script_dir}/../config/pulumi/environment" ] && grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then
-    source "${script_dir}/../config/pulumi/environment"
-    echo "Environment data found for stack: ${PULUMI_STACK}"
-    while true; do
-        read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn
-        case $yn in
-        [Yy]*) # We have an environment file and they want to keep it....
-            if pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-                INFRA="$(pulumi config get kubernetes:infra_type -C ${script_dir}/../pulumi/python/config)"
-                if [ $INFRA == 'AWS' ]; then
-                    exec ${script_dir}/start_aws.sh
-                    exit 0
-                elif [ $INFRA == 'kubeconfig' ]; then
-                    exec ${script_dir}/start_kube.sh
-                    exit 0
-                elif [ $INFRA == 'DO' ]; then
-                    exec ${script_dir}/start_do.sh
-                    exit 0
-                elif [ $INFRA == 'LKE' ]; then
-                    exec ${script_dir}/start_lke.sh
-                    exit 0
-                else
-                    echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure."
-                    exit 1
-                fi
-            else
-                echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure."
-                exit 1
-            fi
-            break
-            ;;
-        [Nn]*) # They want to remove and reconfigure
-            rm -f ${script_dir}/../config/pulumi/environment
-            break
-            ;;
-        *) echo "Please answer yes or no." ;;
-        esac
-    done
+	source "${script_dir}"/../config/pulumi/environment
+	echo "Environment data found for stack: ${PULUMI_STACK}"
+	while true; do
+		read -r -e -p "Environment file exists and is not empty. Answer yes to use, no to delete. " yn
+		case $yn in
+		[Yy]*) # We have an environment file, and they want to keep it.... 
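# The branch that follows keys off the kubernetes:infra_type value recorded
# in the Pulumi config project. A minimal sketch of that lookup on its own,
# with illustrative echo targets standing in for the exec calls:
infra="$(pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config 2>/dev/null || true)"
case "${infra}" in
kubeconfig) echo "kubeconfig deployment - handled by start_kube.sh" ;;
AWS | DO | LKE) echo "cloud deployment - handled by ./pulumi/python/runner" ;;
*) echo "no infra_type recorded - reconfigure" ;;
esac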
+ if pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then + INFRA=$(pulumi config get kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config) + if [ "$INFRA" == 'AWS' ]; then + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + elif [ "$INFRA" == 'kubeconfig' ]; then + exec "${script_dir}"/start_kube.sh + exit 0 + elif [ "$INFRA" == 'DO' ]; then + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + elif [ "$INFRA" == 'LKE' ]; then + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + else + echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." + exit 1 + fi + else + echo "Corrupt or non-existent configuration file, please restart and delete and reconfigure." + exit 1 + fi + break + ;; + [Nn]*) # They want to remove and reconfigure + rm -f "${script_dir}"/../config/pulumi/environment + break + ;; + *) echo "Please answer yes or no." ;; + esac + done fi while true; do - read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode? " infra - case $infra in - [Aa]*) - echo "Calling AWS startup script" - exec ${script_dir}/start_aws.sh - exit 0 - break - ;; - [Kk]*) - echo "Calling kubeconfig startup script" - exec ${script_dir}/start_kube.sh - exit 0 - break - ;; - [Dd]*) - echo "Calling Digital Ocean startup script" - exec ${script_dir}/start_do.sh - exit 0 - break - ;; - [Ll]*) - echo "Calling Linode startup script" - exec ${script_dir}/start_lke.sh - exit 0 - break - ;; - *) echo "Please answer a, d, k, or l." ;; - esac + read -e -r -p "Type a for AWS, d for Digital Ocean, k for kubeconfig, l for Linode? " infra + case "$infra" in + [Aa]*) + echo "This script no longer works with AWS deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + [Kk]*) + echo "Calling kubeconfig startup script" + exec "${script_dir}"/start_kube.sh + exit 0 + break + ;; + [Dd]*) + echo "This script no longer works with DigitalOcean deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + [Ll]*) + echo "This script no longer works with Linode deployments; please use ./pulumi/python/runner instead" + exec "${script_dir}"/../pulumi/python/runner + exit 0 + break + ;; + *) echo "Please answer a, d, k, or l." ;; + esac done diff --git a/bin/start_aws.sh b/bin/start_aws.sh deleted file mode 100755 index dbf94896..00000000 --- a/bin/start_aws.sh +++ /dev/null @@ -1,396 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! 
command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 -fi - -if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" -fi - -if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" -fi - -# Do we have the submodule source.... -# -# Note: We had been checking for .git, but this is not guaranteed to be -# there if we build the docker image or use a tarball. So now we look -# for the src subdirectory which should always be there. -# -if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" -else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -# Create the stack if it does not already exist -# We skip over the tools directory, because that uses a unique stack for setup of the -# kubernetes components for installations without them. -find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; - -if [[ -z "${AWS_PROFILE+x}" ]]; then - echo "AWS_PROFILE not set" - if ! 
grep --quiet '^AWS_PROFILE=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the AWS Profile to use in all projects (leave blank for default): " AWS_PROFILE - if [[ -z "${AWS_PROFILE}" ]]; then - AWS_PROFILE=default - fi - echo "AWS_PROFILE=${AWS_PROFILE}" >>"${script_dir}/../config/pulumi/environment" - source "${script_dir}/../config/pulumi/environment" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set aws:profile "${AWS_PROFILE}" \; - fi -else - echo "Using AWS_PROFILE from environment: ${AWS_PROFILE}" -fi - -# Check for default region in environment; set if not found -# The region is set by checking the following in the order below: -# * AWS_DEFAULT_REGION environment variable -# * config/environment values of AWS_DEFAULT_REGION -# * prompt the user for a region - -if [[ -z "${AWS_DEFAULT_REGION+x}" ]]; then - echo "AWS_DEFAULT_REGION not set" - if ! grep --quiet '^AWS_DEFAULT_REGION=.*' "${script_dir}/../config/pulumi/environment"; then - # First, check the config file for our current profile. If there - # is no AWS command we assume that there is no config file, which - # may not always be a valid assumption. - if ! command -v aws >/dev/null; then - AWS_CLI_DEFAULT_REGION="us-east-1" - elif aws configure get region --profile "${AWS_PROFILE}" >/dev/null; then - AWS_CLI_DEFAULT_REGION="$(aws configure get region --profile "${AWS_PROFILE}")" - else - AWS_CLI_DEFAULT_REGION="us-east-1" - fi - - read -r -e -p "Enter the name of the AWS Region to use in all projects [${AWS_CLI_DEFAULT_REGION}]: " AWS_DEFAULT_REGION - echo "AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-${AWS_CLI_DEFAULT_REGION}}" >>"${script_dir}/../config/pulumi/environment" - source "${script_dir}/../config/pulumi/environment" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set aws:region "${AWS_DEFAULT_REGION}" \; - fi -else - echo "Using AWS_DEFAULT_REGION from environment/config: ${AWS_DEFAULT_REGION}" - pulumi config set aws:region -C "${script_dir}/../pulumi/python/config" "${AWS_DEFAULT_REGION}" -fi - -function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD -} - -# The bank of sirius configuration file is stored in the ./sirius/config -# directory. This is because we cannot pull secrets from different project -# directories. 
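# The createpw helper above feeds the secret bootstrapping below: sixteen
# characters of base64 from /dev/urandom, stored encrypted with
# `pulumi config set --secret`. A self-contained sketch of that pattern;
# the key and project path follow the sirius example and are illustrative.
pw="$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16)"
pulumi config set --secret sirius:accounts_pwd -C "${script_dir}/../pulumi/python/kubernetes/applications/sirius" "${pw}"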
-# -# This work-around is expected to be obsoleted by the work described in -# https://github.com/pulumi/pulumi/issues/4604, specifically around issue -# https://github.com/pulumi/pulumi/issues/2307 -# -# Check for secrets being set -# -echo "Checking for required secrets" - -# Sirius Accounts Database -if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - ACCOUNTS_PW=$(createpw) - pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW -fi - -# Sirius Ledger Database -if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - LEDGER_PW=$(createpw) - pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW -fi - -# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted) -# This is for the deployment that is setup as part of the the prometheus operator driven prometheus-kube-stack. -# -if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Existing password found for grafana admin user" -else - echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard" - echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text" - echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard." - pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config -fi - -# -# Show colorful fun headers if the right utils are installed and NO_COLOR is not set -# -function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi -} - -function add_kube_config() { - pulumi_region="$(pulumi ${pulumi_args} config get aws:region -C ${script_dir}/../pulumi/python/config)" - if [ "${pulumi_region}" != "" ]; then - region_arg="--region ${pulumi_region}" - else - region_arg="" - fi - pulumi_aws_profile="$(pulumi ${pulumi_args} config get aws:profile -C ${script_dir}/../pulumi/python/config)" - if [ "${pulumi_aws_profile}" != "" ]; then - echo "Using AWS profile [${pulumi_aws_profile}] from Pulumi configuration" - profile_arg="--profile ${pulumi_aws_profile}" - elif [[ -n "${AWS_PROFILE+x}" ]]; then - echo "Using AWS profile [${AWS_PROFILE}] from environment" - profile_arg="--profile ${AWS_PROFILE}" - else - profile_arg="" - fi - - cluster_name="$(pulumi ${pulumi_args} stack output cluster_name -C ${script_dir}/../pulumi/python/infrastructure/aws/eks)" - - echo "adding ${cluster_name} cluster to local kubeconfig" - "${script_dir}"/../pulumi/python/venv/bin/aws ${profile_arg} ${region_arg} eks update-kubeconfig --name ${cluster_name} -} - -function validate_aws_credentials() { - pulumi_aws_profile="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get aws:profile || true)" - if [ "${pulumi_aws_profile}" != "" ]; then - profile_arg="--profile ${pulumi_aws_profile}" - elif [[ -n "${AWS_PROFILE+x}" ]]; then - profile_arg="--profile ${AWS_PROFILE}" - else - profile_arg="" - fi - - echo "Validating AWS credentials" - if ! 
aws ${profile_arg} sts get-caller-identity >/dev/null; then - echo >&2 "AWS credentials have expired or are not valid" - exit 2 - fi -} - -function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done -} - -if command -v aws >/dev/null; then - validate_aws_credentials -fi - -# -# Set the headers to respect the NO_COLOR variable -# -if [ -z ${NO_COLOR+x} ]; then - pulumi_args="--emoji --stack ${PULUMI_STACK}" -else - pulumi_args="--color never --stack ${PULUMI_STACK}" -fi - -# We automatically set this to aws for infra type; since this is a script specific to AWS -# TODO: combined file should query and manage this -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config AWS -# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the -# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses it's own -# configuration because of the encryption needed for the passwords. -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius AWS - -header "Version Info" -echo "Version and Account Information" -echo "=====================================================================" -echo "Pulumi version is: $(pulumi version)" -echo "Pulumi user is: $(pulumi whoami)" -echo "Python version is: $(python --version)" -echo "Kubectl version information: " -echo "$(kubectl version -o json)" -echo "Python module information: " -echo "$(pip list)" -echo "=====================================================================" -echo " " - -header "AWS VPC" -cd "${script_dir}/../pulumi/python/infrastructure/aws/vpc" -pulumi $pulumi_args up - -header "AWS EKS" -cd "${script_dir}/../pulumi/python/infrastructure/aws/eks" -pulumi $pulumi_args up - -# pulumi stack output cluster_name -add_kube_config - -if command -v kubectl >/dev/null; then - echo "Attempting to connect to newly create kubernetes cluster" - retry 30 kubectl version >/dev/null -fi - -# Display the server information -echo "Kubernetes client/server version information:" -kubectl version -o json -echo " " - -# -# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this -# should abstract away for us. This way we just call the kubeconfig project to get the needed information and -# let the infrastructure specific parts do their own thing (as long as they work with this module) -# -header "Kubeconfig" -cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig" -pulumi $pulumi_args up - -header "AWS ECR" -cd "${script_dir}/../pulumi/python/infrastructure/aws/ecr" -pulumi $pulumi_args up - -header "IC Image Build" -cd "${script_dir}/../pulumi/python/utility/kic-image-build" -pulumi $pulumi_args up - -header "IC Image Push" -# If we are on MacOS and the user keychain is locked, we need to prompt the -# user to unlock it so that `docker login` will work correctly. -if command -v security >/dev/null && [[ "$(uname -s)" == "Darwin" ]]; then - if ! security show-keychain-info 2>/dev/null; then - echo "Enter in your system credentials in order to access the system keychain for storing secrets securely with Docker." 
- security unlock-keychain - fi -fi -cd "${script_dir}/../pulumi/python/utility/kic-image-push" -pulumi $pulumi_args up - -header "Deploying IC" -cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller" -pulumi $pulumi_args up - -header "Logstore" -cd "${script_dir}/../pulumi/python/kubernetes/logstore" -pulumi $pulumi_args up - -header "Logagent" -cd "${script_dir}/../pulumi/python/kubernetes/logagent" -pulumi $pulumi_args up - -header "Cert Manager" -cd "${script_dir}/../pulumi/python/kubernetes/certmgr" -pulumi $pulumi_args up - -header "Prometheus" -cd "${script_dir}/../pulumi/python/kubernetes/prometheus" -pulumi $pulumi_args up - -header "Observability" -cd "${script_dir}/../pulumi/python/kubernetes/observability" -pulumi $pulumi_args up - -header "Bank of Sirius" -cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" - -pulumi $pulumi_args up -app_url="$(pulumi ${pulumi_args} stack output --json | python3 "${script_dir}"/../pulumi/python/kubernetes/applications/sirius/verify.py)" - -header "Finished!" -echo "The startup process has finished successfully" -echo " " -echo "Next Steps:" -echo " " -echo "1. The application can now be accessed at: ${app_url}." -echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools." -echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment." -echo " " -echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" -echo "following commands:" -echo " " -echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" -echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" -echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" -echo " " -echo "Please see the documentation in the github repository for more information" diff --git a/bin/start_do.sh b/bin/start_do.sh deleted file mode 100755 index 6e8537e2..00000000 --- a/bin/start_do.sh +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -if ! 
command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 -fi - -if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" -fi - -if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" -fi - -# Do we have the submodule source.... -# -# Note: We had been checking for .git, but this is not guaranteed to be -# there if we build the docker image or use a tarball. So now we look -# for the src subdirectory which should always be there. -# -if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" -else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 -fi - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -# Create the stack if it does not already exist -# We skip over the tools directory, because that uses a unique stack for setup of the -# kubernetes components for installations without them. -find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; - -if [[ -z "${DIGITALOCEAN_TOKEN+x}" ]]; then - echo "DIGITALOCEAN_TOKEN not set" - if ! grep --quiet '^DIGITALOCEAN_TOKEN=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the Digital Ocean Token to use in all projects (leave blank for default): " DIGITALOCEAN_TOKEN - if [[ -z "${DIGITALOCEAN_TOKEN}" ]]; then - echo "No Digital Ocean token found - exiting" - exit 4 - fi - echo "DIGITALOCEAN_TOKEN=${DIGITALOCEAN_TOKEN}" >>"${script_dir}/../config/pulumi/environment" - source "${script_dir}/../config/pulumi/environment" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext digitalocean:token "${DIGITALOCEAN_TOKEN}" \; - fi -else - echo "Using DIGITALOCEAN_TOKEN from environment: ${DIGITALOCEAN_TOKEN}" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext digitalocean:token "${DIGITALOCEAN_TOKEN}" \; -fi - -function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD -} - -# The bank of sirius configuration file is stored in the ./sirius/config -# directory. 
This is because we cannot pull secrets from different project -# directories. -# -# This work-around is expected to be obsoleted by the work described in -# https://github.com/pulumi/pulumi/issues/4604, specifically around issue -# https://github.com/pulumi/pulumi/issues/2307 -# -# Check for secrets being set -# -echo "Checking for required secrets" - -# Sirius Accounts Database -if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - ACCOUNTS_PW=$(createpw) - pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW -fi - -# Sirius Ledger Database -if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - LEDGER_PW=$(createpw) - pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW -fi - -# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted) -# This is for the deployment that is setup as part of the the prometheus operator driven prometheus-kube-stack. -# -if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Existing password found for grafana admin user" -else - echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard" - echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text" - echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard." - pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config -fi - -# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82 -# -# This version of the code forces you to add a hostname which is used to generate the cert when the application is -# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed -# cert and to access the application. -# - -echo " " -echo "NOTICE! Currently we do not automatically pull the hostname of the K8 LoadBalancer with this deployment; instead" -echo "you will need to create a FQDN and map the assigned IP address to your FQDN in order to use the deployment. " -echo "You can then add this mapping to DNS, or locally to your host file" -echo " " -echo "See https://networkdynamics.com/2017/05/the-benefits-of-testing-your-website-with-a-local-hosts-file/ for details" -echo "on how this can be accomplished. " -echo " " -echo "This will be streamlined in a future release of MARA." -echo " " - -# So we can see... 
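# A sketch of the manual mapping the notice above describes: pull the
# LoadBalancer IP the same way this script does at the end of its run, then
# pair it with the FQDN you configured. The hostname is a placeholder.
ip="$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}')"
echo "${ip} mara.example.com" | sudo tee -a /etc/hosts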
-sleep 5 - -if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Hostname found for deployment" -else - echo "Create a fqdn for your deployment" - pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config -fi -# Show colorful fun headers if the right utils are installed and NO_COLOR is not set -# -function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi -} - -function add_kube_config() { - echo "adding ${cluster_name} cluster to local kubeconfig" - doctl kubernetes cluster config save ${cluster_name} -} - -function validate_do_credentials() { - pulumi_do_token="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get digitalocean:token)" - echo "Validating Digital Ocean credentials" - if ! doctl account get >/dev/null; then - echo >&2 "Digital Ocean credentials have expired or are not valid" - exit 2 - fi -} - -function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done -} - -# -# This deploy only works with the NGINX registries. -# -echo " " -echo "NOTICE! Currently the deployment for Digital Ocean only supports pulling images from the registry! A JWT is " -echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory" -echo "in the project root, in a file named jwt.token" -echo " " -echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " -echo "details and examples." -echo " " - -# Make sure we see it -sleep 5 - -# -# TODO: Integrate this into the mainline along with logic to work with/without #80 -# -# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not -# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secert. That -# secret is not a valid secret, but it is created to make the logic easier to read/code. 
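# A condensed sketch of the JWT-to-secret step described above, assuming the
# token sits in extras/jwt.token as this script expects; the output path is
# shortened for illustration.
JWT="$(cat "${script_dir}/../extras/jwt.token")"
kubectl create secret docker-registry regcred \
	--docker-server=private-registry.nginx.com \
	--docker-username="${JWT}" \
	--docker-password=none \
	--namespace nginx-ingress --dry-run=client -o yaml >regcred.yaml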
-# -if [[ -s "${script_dir}/../extras/jwt.token" ]]; then - JWT=$(cat ${script_dir}/../extras/jwt.token) - echo "Loading JWT into nginx-ingress/regcred" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml -else - # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81 - echo "No JWT found; writing placeholder manifest" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml -fi - -if command -v doctl >/dev/null; then - validate_do_credentials -fi - -# -# Set the headers to respect the NO_COLOR variable -# -if [ -z ${NO_COLOR+x} ]; then - pulumi_args="--emoji --stack ${PULUMI_STACK}" -else - pulumi_args="--color never --stack ${PULUMI_STACK}" -fi - -# We automatically set this to DO for infra type; since this is a script specific to DO -# TODO: combined file should query and manage this -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config DO -# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the -# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses it's own -# configuration because of the encryption needed for the passwords. -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius DO - -header "Version Info" -echo "Version and Account Information" -echo "=====================================================================" -echo "Pulumi version is: $(pulumi version)" -echo "Pulumi user is: $(pulumi whoami)" -echo "Python version is: $(python --version)" -echo "Kubectl version information: " -echo "$(kubectl version -o json)" -echo "Python module information: " -echo "$(pip list)" -echo "=====================================================================" -echo " " - -header "DO Kubernetes" -cd "${script_dir}/../pulumi/python/infrastructure/digitalocean/domk8s" -pulumi $pulumi_args up - -# pulumi stack output cluster_name -cluster_name=$(pulumi stack output cluster_id -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/digitalocean/domk8s) -add_kube_config - -if command -v kubectl >/dev/null; then - echo "Attempting to connect to newly create kubernetes cluster" - retry 30 kubectl version >/dev/null -fi - -# Display the server information -echo "Kubernetes client/server version information:" -kubectl version -o json -echo " " - - -# -# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this -# should abstract away for us. 
This way we just call the kubeconfig project to get the needed information and -# let the infrastructure specific parts do their own thing (as long as they work with this module) -# -header "Kubeconfig" -cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig" -pulumi $pulumi_args up - -header "Deploying IC" -cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only" -pulumi $pulumi_args up - -header "Logstore" -cd "${script_dir}/../pulumi/python/kubernetes/logstore" -pulumi $pulumi_args up - -header "Logagent" -cd "${script_dir}/../pulumi/python/kubernetes/logagent" -pulumi $pulumi_args up - -header "Cert Manager" -cd "${script_dir}/../pulumi/python/kubernetes/certmgr" -pulumi $pulumi_args up - -header "Prometheus" -cd "${script_dir}/../pulumi/python/kubernetes/prometheus" -pulumi $pulumi_args up - -header "Observability" -cd "${script_dir}/../pulumi/python/kubernetes/observability" -pulumi $pulumi_args up - -header "Bank of Sirius" -cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" -pulumi $pulumi_args up - -header "Finished!" -THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve") -THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve") - -echo " " -echo "The startup process has finished successfully" -echo " " -echo " " -echo "Next Steps:" -echo " " -echo "1. Map the IP address ($THE_IP) of your Ingress Controller with your FQDN ($THE_FQDN)." -echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools." -echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment." -echo " " -echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" -echo "following commands:" -echo " " -echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" -echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" -echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" -echo " " -echo "Please see the documentation in the github repository for more information" diff --git a/bin/start_kube.sh b/bin/start_kube.sh index 5da8786b..e2f4f746 100755 --- a/bin/start_kube.sh +++ b/bin/start_kube.sh @@ -12,72 +12,73 @@ export PULUMI_SKIP_CONFIRMATIONS=true script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v pulumi >/dev/null; then + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi + else + echo >&2 "Pulumi must be installed to continue" + exit 1 + fi fi if ! 
command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 + echo >&2 "Python 3 must be installed to continue" + exit 1 fi if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi + if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then + echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" + export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" + + if ! command -v node >/dev/null; then + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi + else + echo >&2 "NodeJS must be installed to continue" + exit 1 + fi fi if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 + echo >&2 "git must be installed to continue" + exit 1 fi if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." + echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." fi if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." + echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." fi # Check to see if the user is logged into Pulumi if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login + pulumi login - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi + if ! pulumi whoami --non-interactive >/dev/null 2>&1; then + echo >&2 "Unable to login to Pulumi - exiting" + exit 2 + fi fi if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" + touch "${script_dir}/../config/pulumi/environment" fi if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" + read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK + echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" fi +# # Do we have the submodule source.... # # Note: We had been checking for .git, but this is not guaranteed to be @@ -85,102 +86,105 @@ fi # for the src subdirectory which should always be there. # if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" + echo "Submodule source found" else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 + # Error out with instructions. 
+ echo "Bank of Sirius submodule not found" + echo " " + echo "Please run:" + echo " git submodule update --init --recursive --remote" + echo "Inside your git directory and re-run this script" + echo "" + echo >&2 "Unable to find submodule - exiting" + exit 3 fi source "${script_dir}/../config/pulumi/environment" echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" +# # Create the stack if it does not already exist # Do not change the tools directory of add-ons. +# find "${script_dir}/../pulumi" -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; +# # Show colorful fun headers if the right utils are installed and NO_COLOR is not set # function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi + if [ -z ${NO_COLOR+x} ]; then + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat + else + "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" + fi } function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done + local -r -i max_attempts="$1" + shift + local -i attempt_num=1 + until "$@"; do + if ((attempt_num == max_attempts)); then + echo "Attempt ${attempt_num} failed and there are no more attempts left!" + return 1 + else + echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." + sleep $((attempt_num++)) + fi + done } function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD + PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) + echo "$PWORD" } # -# This deploy only works with the NGINX registries. +# This deployment only works with the NGINX registries. # echo " " -echo "NOTICE! Currently the deployment via kubeconfig only supports pulling images from the registry! A JWT is " +echo "NOTICE! Currently, the deployment via kubeconfig only supports pulling images from the registry! A JWT is " echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory" -echo "in the project root, in a file named jwt.token" +echo "in the project root in a file named jwt.token" echo " " echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " echo "details and examples." echo " " +# # Make sure we see it +# sleep 5 # -# TODO: Integrate this into the mainline along with logic to work with/without #80 -# -# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not -# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secert. That +# This logic takes the JWT and transforms it into a secret, so we can pull the NGINX Plus IC. If the user is not +# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secret. 
That # secret is not a valid secret, but it is created to make the logic easier to read/code. # if [[ -s "${script_dir}/../extras/jwt.token" ]]; then - JWT=$(cat ${script_dir}/../extras/jwt.token) - echo "Loading JWT into nginx-ingress/regcred" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml + JWT=$(cat "${script_dir}"/../extras/jwt.token) + echo "Loading JWT into nginx-ingress/regcred" + "${script_dir}"/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username="${JWT}" --docker-password=none -n nginx-ingress --dry-run=client -o yaml >"${script_dir}"/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml else - # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81 - echo "No JWT found; writing placeholder manifest" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml + echo "No JWT found; writing placeholder manifest" + "${script_dir}"/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >"${script_dir}"/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml fi -# Check for stack info.... -# TODO: Move these to use kubeconfig for the Pulumi main config (which redirects up) instead of aws/vpc #80 # - +# Check for stack info.... # We automatically set this to a kubeconfig type for infra type -# TODO: combined file should query and manage this #80 -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config kubeconfig -# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the -# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses it's own +# +pulumi config set kubernetes:infra_type -C "${script_dir}"/../pulumi/python/config kubeconfig +# +# This is a bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the +# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses its own # configuration because of the encryption needed for the passwords. -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius kubeconfig +# +pulumi config set kubernetes:infra_type -C "${script_dir}"/../pulumi/python/kubernetes/applications/sirius kubeconfig +# # Inform the user of what we are doing - +# echo " " echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to" echo "connect to Kubernetes properly. If you have multiple kubernetes contexts (or custom contexts)" @@ -188,109 +192,112 @@ echo "you may need to remove them and replace them with a simple ~/.kube/config echo "addressed in a future release." echo " " +# # Sleep so that this is seen... 
+#
 sleep 5

-if pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-    echo "Kubeconfig file found"
+if pulumi config get kubernetes:kubeconfig -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then
+    echo "Kubeconfig file found"
 else
-    echo "Provide an absolute path to your kubeconfig file"
-    pulumi config set kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/config
+    echo "Provide an absolute path to your kubeconfig file"
+    pulumi config set kubernetes:kubeconfig -C "${script_dir}"/../pulumi/python/config
 fi

+#
 # Clustername
-if pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-    echo "Clustername found"
+#
+if pulumi config get kubernetes:cluster_name -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then
+    echo "Cluster name found"
 else
-    echo "Provide your clustername"
-    pulumi config set kubernetes:cluster_name -C ${script_dir}/../pulumi/python/config
+    echo "Provide your cluster name"
+    pulumi config set kubernetes:cluster_name -C "${script_dir}"/../pulumi/python/config
 fi

+#
 # Connect to the cluster
+#
 if command -v kubectl >/dev/null; then
-    echo "Attempting to connect to kubernetes cluster"
-    retry 30 kubectl version >/dev/null
+    echo "Attempting to connect to Kubernetes cluster"
+    retry 30 kubectl version >/dev/null
 fi

-# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82
 #
 # This version of the code forces you to add a hostname which is used to generate the cert when the application is
 # deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed
 # cert and to access the application.
 #
-if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-    echo "Hostname found for deployment"
+if pulumi config get kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config >/dev/null 2>&1; then
+    echo "Hostname found for deployment"
 else
-    echo "Create a fqdn for your deployment"
-    pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config
+    echo "Create an FQDN for your deployment"
+    pulumi config set kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config
 fi

-# The bank of sirius configuration file is stored in the ./sirius/config
-# directory. This is because we cannot pull secrets from different project
-# directories.
 #
-# This work-around is expected to be obsoleted by the work described in
-# https://github.com/pulumi/pulumi/issues/4604, specifically around issue
-# https://github.com/pulumi/pulumi/issues/2307
-#
-# Check for secrets being set
+# The Bank of Sirius secrets (and all other secrets) are stored in the "secrets"
+# project.
 #
 echo "Checking for required secrets"

+if pulumi config get prometheus:adminpass -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then
+    echo "Configuration value found"
+else
+    echo "Please enter a password for the Grafana admin user"
+    pulumi config set prometheus:adminpass --secret -C "${script_dir}"/../pulumi/python/kubernetes/secrets
+fi

-# Sirius Accounts Database
-if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-    true
+if pulumi config get sirius:accounts_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then
+    echo "Configuration value found"
 else
-    ACCOUNTS_PW=$(createpw)
-    pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW
+    echo "Please enter a password for the sirius accountsdb"
+    pulumi config set sirius:accounts_pwd --secret -C "${script_dir}"/../pulumi/python/kubernetes/secrets
 fi

-# Sirius Ledger Database
-if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then
-    true
+if pulumi config get sirius:demo_login_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then
+    echo "Configuration value found"
 else
-    LEDGER_PW=$(createpw)
-    pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW
+    echo "Please enter a password for the BoS demo login user"
+    pulumi config set sirius:demo_login_pwd --secret -C "${script_dir}"/../pulumi/python/kubernetes/secrets
 fi

-# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted)
-# This is for the deployment that is setup as part of the the prometheus operator driven prometheus-kube-stack.
-#
-if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then
-    echo "Existing password found for grafana admin user"
+if pulumi config get sirius:demo_login_user -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then
+    echo "Configuration value found"
+else
+    echo "Please enter a username for the BoS demo login user"
+    pulumi config set sirius:demo_login_user --secret -C "${script_dir}"/../pulumi/python/kubernetes/secrets
+fi
+
+if pulumi config get sirius:ledger_pwd -C "${script_dir}"/../pulumi/python/kubernetes/secrets >/dev/null 2>&1; then
+    echo "Configuration value found"
 else
-    echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard"
-    echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text"
-    echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard."
-    pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config
+    echo "Please enter a password for the sirius ledgerdb"
+    pulumi config set sirius:ledger_pwd --secret -C "${script_dir}"/../pulumi/python/kubernetes/secrets
 fi

 #
-# TODO: Allow startup scripts to prompt and accept additional config values #97
-# The default helm timeout for all of the projects is set at the default of 300 seconds (5 minutes)
+# The helm timeout for all the projects is set at the default of 300 seconds (5 minutes)
 # However, since this code path is most commonly going to be used to deploy locally we need to bump
 # that value up. A fix down the road will add this a prompt, but for now we are going to double this
 # value for all helm deploys.
# - -pulumi config set kic-helm:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set logagent:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set logstore:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set certmgr:helm_timeout 600 -C ${script_dir}/../pulumi/python/config -pulumi config set prometheus:helm_timeout 600 -C ${script_dir}/../pulumi/python/config +pulumi config set kic-helm:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set logagent:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set logstore:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set certmgr:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config +pulumi config set prometheus:helm_timeout 600 -C "${script_dir}"/../pulumi/python/config # # Set the headers to respect the NO_COLOR variable # if [ -z ${NO_COLOR+x} ]; then - pulumi_args="--emoji --stack ${PULUMI_STACK}" + pulumi_args="--emoji --stack ${PULUMI_STACK}" else - pulumi_args="--color never --stack ${PULUMI_STACK}" + pulumi_args="--color never --stack ${PULUMI_STACK}" fi # -# Note that this is somewhat different than the other startup scripts, because at the point we run this -# here we know that we have a server so we can get the version. The other builds do not have server info +# Note that this is somewhat different from the other startup scripts, because at the point we run this +# here we know that we have a server, so we can get the version. The other builds do not have server info # at this point in time. # header "Version Info" @@ -300,9 +307,9 @@ echo "Pulumi version is: $(pulumi version)" echo "Pulumi user is: $(pulumi whoami)" echo "Python version is: $(python --version)" echo "Kubectl version information: " -echo "$(kubectl version -o json)" +kubectl version -o json echo "Python module information: " -echo "$(pip list)" +pip list echo "=====================================================================" echo " " @@ -310,7 +317,10 @@ header "Kubeconfig" cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig" pulumi $pulumi_args up -# TODO: This is using a different project than the AWS deploy; we need to collapse those #80 +header "Secrets" +cd "${script_dir}/../pulumi/python/kubernetes/secrets" +pulumi $pulumi_args up + header "Deploying IC" cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only" pulumi $pulumi_args up @@ -340,7 +350,7 @@ cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" pulumi $pulumi_args up header "Finished!!" 
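
The five secret prompts added in the "Checking for required secrets" section above all repeat the same "pulumi config get, else prompt and pulumi config set --secret" shape; a minimal helper capturing that pattern could look like the sketch below (the ensure_secret function is illustrative only and not part of start_kube.sh):

function ensure_secret() {
    local key="$1"
    local prompt="$2"
    local secrets_dir="${script_dir}/../pulumi/python/kubernetes/secrets"
    # Only prompt if the key is not already set in the secrets project
    if pulumi config get "${key}" -C "${secrets_dir}" >/dev/null 2>&1; then
        echo "Configuration value found"
    else
        echo "${prompt}"
        # With no value on the command line, pulumi prompts interactively
        pulumi config set "${key}" --secret -C "${secrets_dir}"
    fi
}

# Example usage:
# ensure_secret "sirius:accounts_pwd" "Please enter a password for the sirius accountsdb"
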
-THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve") +THE_FQDN=$(pulumi config get kic-helm:fqdn -C "${script_dir}"/../pulumi/python/config || echo "Cannot Retrieve") THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve") echo " " @@ -360,4 +370,4 @@ echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" echo " " -echo "Please see the documentation in the github repository for more information" +echo "Please see the documentation in the GitHub repository for more information" diff --git a/bin/start_lke.sh b/bin/start_lke.sh deleted file mode 100755 index 774f67c8..00000000 --- a/bin/start_lke.sh +++ /dev/null @@ -1,388 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit # abort on nonzero exit status -set -o nounset # abort on unbound variable -set -o pipefail # don't hide errors within pipes - -# Don't pollute console output with upgrade notifications -export PULUMI_SKIP_UPDATE_CHECK=true -# Run Pulumi non-interactively -export PULUMI_SKIP_CONFIRMATIONS=true - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - -if ! command -v pulumi >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v pulumi >/dev/null; then - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi - else - echo >&2 "Pulumi must be installed to continue" - exit 1 - fi -fi - -if ! command -v python3 >/dev/null; then - echo >&2 "Python 3 must be installed to continue" - exit 1 -fi - -if ! command -v node >/dev/null; then - if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then - echo "Adding to [${script_dir}/../pulumi/python/venv/bin] to PATH" - export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH" - - if ! command -v node >/dev/null; then - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi - else - echo >&2 "NodeJS must be installed to continue" - exit 1 - fi -fi - -if ! command -v git >/dev/null; then - echo >&2 "git must be installed to continue" - exit 1 -fi - -if ! command -v make >/dev/null; then - echo >&2 "make is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -if ! command -v docker >/dev/null; then - echo >&2 "docker is not installed - it must be installed if you intend to build NGINX Kubernetes Ingress Controller from source." -fi - -# Check to see if the user is logged into Pulumi -if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - pulumi login - - if ! pulumi whoami --non-interactive >/dev/null 2>&1; then - echo >&2 "Unable to login to Pulumi - exiting" - exit 2 - fi -fi - -if [ ! -f "${script_dir}/../config/pulumi/environment" ]; then - touch "${script_dir}/../config/pulumi/environment" -fi - -if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the name of the Pulumi stack to use in all projects: " PULUMI_STACK - echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../config/pulumi/environment" -fi - -# Do we have the submodule source.... 
-# -# Note: We had been checking for .git, but this is not guaranteed to be -# there if we build the docker image or use a tarball. So now we look -# for the src subdirectory which should always be there. -# -if [[ -d "${script_dir}/../pulumi/python/kubernetes/applications/sirius/src/src" ]]; then - echo "Submodule source found" -else - # Error out with instructions. - echo "Bank of Sirius submodule not found" - echo " " - echo "Please run:" - echo " git submodule update --init --recursive --remote" - echo "Inside your git directory and re-run this script" - echo "" - echo >&2 "Unable to find submodule - exiting" - exit 3 -fi - -function createpw() { - PWORD=$(dd if=/dev/urandom count=1 2>/dev/null | base64 | head -c16) - echo $PWORD -} - -source "${script_dir}/../config/pulumi/environment" -echo "Configuring all Pulumi projects to use the stack: ${PULUMI_STACK}" - -# Create the stack if it does not already exist -# We skip over the tools directory, because that uses a unique stack for setup of the -# kubernetes components for installations without them. -find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi stack select --create "${PULUMI_STACK}" \; - -if [[ -z "${LINODE_TOKEN+x}" ]]; then - echo "LINODE_TOKEN not set" - if ! grep --quiet '^LINODE_TOKEN=.*' "${script_dir}/../config/pulumi/environment"; then - read -r -e -p "Enter the Linode Token to use in all projects (leave blank for default): " LINODE_TOKEN - if [[ -z "${LINODE_TOKEN}" ]]; then - echo "No Linode Token found - exiting" - exit 4 - fi - echo "LINODE_TOKEN=${LINODE_TOKEN}" >>"${script_dir}/../config/pulumi/environment" - source "${script_dir}/../config/pulumi/environment" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext linode:token "${LINODE_TOKEN}" \; - fi -else - echo "Using LINODE_TOKEN from environment: ${LINODE_TOKEN}" - find "${script_dir}/../pulumi/python" -mindepth 1 -maxdepth 7 -type f -name Pulumi.yaml -not -path "*/tools/*" -execdir pulumi config set --plaintext linode:token "${LINODE_TOKEN}" \; -fi - -# The bank of sirius configuration file is stored in the ./sirius/config -# directory. This is because we cannot pull secrets from different project -# directories. -# -# This work-around is expected to be obsoleted by the work described in -# https://github.com/pulumi/pulumi/issues/4604, specifically around issue -# https://github.com/pulumi/pulumi/issues/2307 -# -# Check for secrets being set -# -echo "Checking for required secrets" - -# Sirius Accounts Database -if pulumi config get sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - ACCOUNTS_PW=$(createpw) - pulumi config set --secret sirius:accounts_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $ACCOUNTS_PW -fi - -# Sirius Ledger Database -if pulumi config get sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius >/dev/null 2>&1; then - true -else - LEDGER_PW=$(createpw) - pulumi config set --secret sirius:ledger_pwd -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius $LEDGER_PW -fi - -# Admin password for grafana (see note in __main__.py in prometheus project as to why not encrypted) -# This is for the deployment that is setup as part of the the prometheus operator driven prometheus-kube-stack. 
-# -if pulumi config get prometheus:adminpass -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Existing password found for grafana admin user" -else - echo "Create a password for the grafana admin user; this password will be used to access the Grafana dashboard" - echo "This should be an alphanumeric string without any shell special characters; it is presented in plain text" - echo "due to current limitations with Pulumi secrets. You will need this password to access the Grafana dashboard." - pulumi config set prometheus:adminpass -C ${script_dir}/../pulumi/python/config -fi - -# TODO: Figure out better way to handle hostname / ip address for exposing our IC #82 -# -# This version of the code forces you to add a hostname which is used to generate the cert when the application is -# deployed, and will output the IP address and the hostname that will need to be set in order to use the self-signed -# cert and to access the application. -# - -echo " " -echo "NOTICE! Currently we do not automatically pull the hostname of the K8 LoadBalancer with this deployment; instead" -echo "you will need to create a FQDN and map the assigned IP address to your FQDN in order to use the deployment. " -echo "You can then add this mapping to DNS, or locally to your host file" -echo " " -echo "See https://networkdynamics.com/2017/05/the-benefits-of-testing-your-website-with-a-local-hosts-file/ for details" -echo "on how this can be accomplished. " -echo " " -echo "This will be streamlined in a future release of MARA." -echo " " - -# So we can see... -sleep 5 - -if pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config >/dev/null 2>&1; then - echo "Hostname found for deployment" -else - echo "Create a fqdn for your deployment" - pulumi config set kic-helm:fqdn -C ${script_dir}/../pulumi/python/config -fi - -# Show colorful fun headers if the right utils are installed and NO_COLOR is not set -# -function header() { - if [ -z ${NO_COLOR+x} ]; then - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" | "${script_dir}"/../pulumi/python/venv/bin/lolcat - else - "${script_dir}"/../pulumi/python/venv/bin/fart --no_copy -f standard "$1" - fi -} - -# -# The initial version of this tried to manage the kubernetes configuration file, but for some reason -# Linode is a bit touchy about this. -# -# So, now we just backup the existing file and slide ours in place. This will be streamlined/addressed as -# part of the rewrite... -# -function add_kube_config() { - echo "adding ${cluster_name} cluster to local kubeconfig" - mv $HOME/.kube/config $HOME/.kube/config.mara.backup || true - pulumi stack output kubeconfig -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/kubeconfig --show-secrets >$HOME/.kube/config -} - -function validate_lke_credentials() { - pulumi_lke_token="$(pulumi --cwd "${script_dir}/../pulumi/python/config" config get linode:token)" - echo "Validating Linode credentials" - if ! linode_cli account view >/dev/null; then - echo >&2 "Linode credentials have expired or are not valid" - exit 2 - fi -} - -function retry() { - local -r -i max_attempts="$1" - shift - local -i attempt_num=1 - until "$@"; do - if ((attempt_num == max_attempts)); then - echo "Attempt ${attempt_num} failed and there are no more attempts left!" - return 1 - else - echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..." - sleep $((attempt_num++)) - fi - done -} - -# -# This deploy only works with the NGINX registries. 
-# -echo " " -echo "NOTICE! Currently the deployment for Linode LKE only supports pulling images from the registry! A JWT is " -echo "required in order to access the NGINX Plus repository. This should be placed in a file in the extras directory" -echo "in the project root, in a file named jwt.token" -echo " " -echo "See https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret/ for more " -echo "details and examples." -echo " " - -# Make sure we see it -sleep 5 - -# -# TODO: Integrate this into the mainline along with logic to work with/without #80 -# -# This logic takes the JWT and transforms it into a secret so we can pull the NGINX Plus IC. If the user is not -# deploying plus (and does not have a JWT) we create a placeholder credential that is used to create a secert. That -# secret is not a valid secret, but it is created to make the logic easier to read/code. -# -if [[ -s "${script_dir}/../extras/jwt.token" ]]; then - JWT=$(cat ${script_dir}/../extras/jwt.token) - echo "Loading JWT into nginx-ingress/regcred" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=${JWT} --docker-password=none -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml -else - # TODO: need to adjust so we can deploy from an unauthenticated registry (IC OSS) #81 - echo "No JWT found; writing placeholder manifest" - ${script_dir}/../pulumi/python/venv/bin/kubectl create secret docker-registry regcred --docker-server=private-registry.nginx.com --docker-username=placeholder --docker-password=placeholder -n nginx-ingress --dry-run=client -o yaml >${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/regcred.yaml -fi - -if command -v linode_cli >/dev/null; then - validate_lke_credentials -fi - -# -# Set the headers to respect the NO_COLOR variable -# -if [ -z ${NO_COLOR+x} ]; then - pulumi_args="--emoji --stack ${PULUMI_STACK}" -else - pulumi_args="--color never --stack ${PULUMI_STACK}" -fi - -# We automatically set this to LKE for infra type; since this is a script specific to LKE -# TODO: combined file should query and manage this -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/config LKE -# Bit of a gotcha; we need to know what infra type we have when deploying our application (BoS) due to the -# way we determine the load balancer FQDN or IP. We can't read the normal config since Sirius uses it's own -# configuration because of the encryption needed for the passwords. -pulumi config set kubernetes:infra_type -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius LKE - -header "Version Info" -echo "Version and Account Information" -echo "=====================================================================" -echo "Pulumi version is: $(pulumi version)" -echo "Pulumi user is: $(pulumi whoami)" -echo "Python version is: $(python --version)" -echo "Kubectl version information: " -echo "$(kubectl version -o json)" -echo "Python module information: " -echo "$(pip list)" -echo "=====================================================================" -echo " " - -header "Linode LKE" -cd "${script_dir}/../pulumi/python/infrastructure/linode/lke" -pulumi $pulumi_args up - -# -# This is used to streamline the pieces that follow. Moving forward we can add new logic behind this and this -# should abstract away for us. 
This way we just call the kubeconfig project to get the needed information and -# let the infrastructure specific parts do their own thing (as long as they work with this module) -# -header "Kubeconfig" -cd "${script_dir}/../pulumi/python/infrastructure/kubeconfig" -pulumi $pulumi_args up - -# pulumi stack output cluster_name -cluster_name=$(pulumi stack output cluster_id -s "${PULUMI_STACK}" -C ${script_dir}/../pulumi/python/infrastructure/linode/lke) -add_kube_config - -# Display the server information -echo "Kubernetes client/server version information:" -kubectl version -o json -echo " " - -if command -v kubectl >/dev/null; then - echo "Attempting to connect to newly create kubernetes cluster" - retry 30 kubectl version >/dev/null -fi - -header "Deploying IC" -cd "${script_dir}/../pulumi/python/kubernetes/nginx/ingress-controller-repo-only" -pulumi $pulumi_args up - -header "Logstore" -cd "${script_dir}/../pulumi/python/kubernetes/logstore" -pulumi $pulumi_args up - -header "Logagent" -cd "${script_dir}/../pulumi/python/kubernetes/logagent" -pulumi $pulumi_args up - -header "Cert Manager" -cd "${script_dir}/../pulumi/python/kubernetes/certmgr" -pulumi $pulumi_args up - -header "Prometheus" -cd "${script_dir}/../pulumi/python/kubernetes/prometheus" -pulumi $pulumi_args up - -header "Observability" -cd "${script_dir}/../pulumi/python/kubernetes/observability" -pulumi $pulumi_args up - -header "Bank of Sirius" -cd "${script_dir}/../pulumi/python/kubernetes/applications/sirius" -pulumi $pulumi_args up - -header "Finished!" -THE_FQDN=$(pulumi config get kic-helm:fqdn -C ${script_dir}/../pulumi/python/config || echo "Cannot Retrieve") -THE_IP=$(kubectl get service kic-nginx-ingress --namespace nginx-ingress --output=jsonpath='{.status.loadBalancer.ingress[*].ip}' || echo "Cannot Retrieve") - -echo " " -echo "The startup process has finished successfully" -echo " " -echo " " -echo "Next Steps:" -echo " " -echo "1. Map the IP address ($THE_IP) of your Ingress Controller with your FQDN ($THE_FQDN)." -echo "2. Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools." -echo "3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment." -echo " " -echo "To review your configuration options, including the passwords defined, you can access the pulumi secrets via the" -echo "following commands:" -echo " " -echo "Main Configuration: pulumi config -C ${script_dir}/../pulumi/python/config" -echo "Bank of Sirius (Example Application) Configuration: pulumi config -C ${script_dir}/../pulumi/python/kubernetes/applications/sirius" -echo "K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress" -echo " " -echo "Please see the documentation in the github repository for more information" diff --git a/bin/test-forward.sh b/bin/test-forward.sh index 52985f8a..87d44431 100755 --- a/bin/test-forward.sh +++ b/bin/test-forward.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # # This is a simple shell script that sets up port forwards locally for -# the various benchmarking/monitoring tooling that is part of the +# the various benchmarking/monitoring tooling that is part of the # deployment. This should be run on the same machine as your web browser, # then you will be able to connect to the localhost ports to get to the # services. @@ -9,37 +9,36 @@ # This script is designed to clean itself up once a Ctrl-C is issued. 
# -PID01=$(mktemp) -PID02=$(mktemp) -PID03=$(mktemp) -PID04=$(mktemp) -PID05=$(mktemp) +PID01="$(mktemp)" +PID02="$(mktemp)" +PID03="$(mktemp)" +PID04="$(mktemp)" +PID05="$(mktemp)" # this function is called when Ctrl-C is sent -function trap_ctrlc () -{ - # perform cleanup here - echo "Ctrl-C caught...performing clean up" +function trap_ctrlc() { + # perform cleanup here + echo "Ctrl-C caught...performing clean up" - echo "Doing cleanup" + echo "Doing cleanup" - echo "Kill forwards" - kill $(cat $PID01) - kill $(cat $PID02) - kill $(cat $PID03) - kill $(cat $PID04) - kill $(cat $PID05) + echo "Kill forwards" + kill $(cat "$PID01") + kill $(cat "$PID02") + kill $(cat "$PID03") + kill $(cat "$PID04") + kill $(cat "$PID05") - echo "Remove temp files" - rm $PID01 - rm $PID02 - rm $PID03 - rm $PID04 - rm $PID05 + echo "Remove temp files" + rm "$PID01" + rm "$PID02" + rm "$PID03" + rm "$PID04" + rm "$PID05" - # exit shell script with error code 2 - # if omitted, shell script will continue execution - exit 2 + # exit shell script with error code 2 + # if omitted, shell script will continue execution + exit 2 } # initialise trap to call trap_ctrlc function @@ -48,23 +47,23 @@ trap "trap_ctrlc" 2 ## Kibana Tunnel kubectl port-forward service/elastic-kibana --namespace logstore 5601:5601 & -echo $! > $PID01 +echo $! >"$PID01" ## Grafana Tunnel kubectl port-forward service/prometheus-grafana --namespace prometheus 3000:80 & -echo $! > $PID02 +echo $! >"$PID02" ## Loadgenerator Tunnel kubectl port-forward service/loadgenerator --namespace bos 8089:8089 & -echo $! > $PID03 +echo $! >"$PID03" ## Prometheus Tunnel kubectl port-forward service/prometheus-kube-prometheus-prometheus --namespace prometheus 9090:9090 & -echo $! > $PID04 +echo $! >"$PID04" ## Elasticsearch Tunnel kubectl port-forward service/elastic-coordinating-only --namespace logstore 9200:9200 & -echo $! > $PID05 +echo $! >"$PID05" ## Legend echo "Connections Details" @@ -79,5 +78,3 @@ echo "" echo "Issue Ctrl-C to Exit" ## Wait... wait - - diff --git a/bin/test.py b/bin/test.py index 1c785dc2..2e4d62c0 100755 --- a/bin/test.py +++ b/bin/test.py @@ -8,42 +8,38 @@ import collections from typing import List -IGNORE_DIRS = ['venv', 'kic-pulumi-utils'] +IGNORE_DIRS = ['.pyenv', 'venv', 'config', 'kic-pulumi-utils'] SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +TEST_FILE_PATTERN = 'test_*.py' -TestsInDir = collections.namedtuple(typename='TestsInDir', field_names=['directory', 'loader']) -RunDirectories = collections.namedtuple(typename='RunDirectories', field_names=['start_dir', 'top_level_dir']) +TestsInDir = collections.namedtuple( + typename='TestsInDir', field_names=['directory', 'loader']) +RunDirectories = collections.namedtuple( + typename='RunDirectories', field_names=['start_dir', 'top_level_dir']) test_dirs: List[TestsInDir] = [] -def find_testable_dirs(dir_name: pathlib.Path) -> pathlib.Path: - sublisting = os.listdir(dir_name) - dir_count = 0 - last_path = None +def find_testable_dirs(dir_name: pathlib.Path) -> List[pathlib.Path]: + def is_main_file(filename: str) -> bool: + return filename == '__main__.py' or filename == 'main.py' - for item in sublisting: - path = pathlib.Path(dir_name, item) + test_dirs = [] + contains_main_file = False - # We assume we are in the starting directory for test invocation if there is a - # __main__.py file present. 
-        if path.is_file() and (path.name == '__main__.py'):
-            return dir_name
-
-        # Otherwise, we are probably in a module directory and the starting directory is
-        # one level deeper.
-        if path.is_dir():
-            dir_count += 1
-            if not last_path:
-                last_path = path
-            if dir_count > 1:
+    for item in os.listdir(dir_name):
+        name = str(item)
+        path = pathlib.Path(dir_name, name)
+        if path.is_dir() and name != '__pycache__':
+            test_dirs.extend(find_testable_dirs(path.absolute()))
+        # If there is a main file, we consider this a top-level project whose tests
+        # live under it
+        elif path.is_file() and is_main_file(name) and not contains_main_file:
+            contains_main_file = True
+            test_dirs.append(pathlib.Path(dir_name))
             break

-    # If the directory contains only a single subdirectory, we traverse down once
-    if dir_count == 1:
-        return last_path
-
-    return dir_name
+    return test_dirs


 def find_kic_util_path():
@@ -65,27 +61,24 @@ def find_kic_util_path():

     return TestsInDir(venv_start_dir, kic_util_loader)

+# We explicitly test the kic util package separately because it needs to live
+# under venv when tested. By default, we do not traverse into the venv directory.
 test_dirs.append(find_kic_util_path())

+pulumi_python_dir = os.path.join(SCRIPT_DIR, '..', 'pulumi', 'python')

-for item in os.listdir(SCRIPT_DIR):
-    if item in IGNORE_DIRS:
-        continue
-
-    directory = pathlib.Path(SCRIPT_DIR, item)
-    if not directory.is_dir():
+for item in os.listdir(pulumi_python_dir):
+    directory = pathlib.Path(pulumi_python_dir, item)
+    if not directory.is_dir() or item in IGNORE_DIRS:
         continue

-    run_dir = find_testable_dirs(directory)
-    if run_dir is None:
-        continue
-
-    start_dir = str(run_dir)
-
-    loader = unittest.defaultTestLoader.discover(
-        start_dir=start_dir,
-        top_level_dir=start_dir,
-        pattern='test_*.py')
-    test_dirs.append(TestsInDir(start_dir, loader))
+    for test_dir in find_testable_dirs(directory):
+        start_dir = str(os.path.realpath(test_dir))
+        loader = unittest.defaultTestLoader.discover(
+            start_dir=start_dir,
+            top_level_dir=start_dir,
+            pattern=TEST_FILE_PATTERN)
+        test_dirs.append(TestsInDir(start_dir, loader))

 successful = True
diff --git a/bin/test_runner.sh b/bin/test_runner.sh
index 5ebfab43..2efa1552 100755
--- a/bin/test_runner.sh
+++ b/bin/test_runner.sh
@@ -12,10 +12,10 @@ set -o pipefail # don't hide errors within pipes
 # for docker but not GH actions
 #
-if [ -z "$1" ] ; then
-    source ~/pulumi/python/venv/bin/activate
-    ~/pulumi/python/venv/bin/python3 ~/bin/test.py
+if [ -z "$1" ]; then
+    source ~/pulumi/python/venv/bin/activate
+    ~/pulumi/python/venv/bin/python3 ~/bin/test.py
 else
-    source $1/pulumi/python/venv/bin/activate
-    $1/pulumi/python/venv/bin/python3 $1/bin/test.py
+    source "$1/pulumi/python/venv/bin/activate"
+    "$1/pulumi/python/venv/bin/python3" "$1/bin/test.py"
 fi
diff --git a/bin/testcap.sh b/bin/testcap.sh
deleted file mode 100755
index d8a49a3b..00000000
--- a/bin/testcap.sh
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit  # abort on nonzero exit status
-set -o nounset  # abort on unbound variable
-set -o pipefail # don't hide errors within pipes
-
-# Test to see if we have persistent volume support; to do this we provision a PV using the default storage class
-# and then conduct a read and write test against it.
-# -# Since MARA is intended as a testbed application, the performance numbers are not a particular concern, however it is -# advised that you test your PV provider for performance and concurrency if you are in production, development, or -# quality assurance testing. For example, the NFS volume support is known to potentially cause issues due to the way -# that NFS works (latency, performance). -# - -# Timeout Value -# We check in 15 second increments -TIMEOUT=15 - - -# Clean up the namespace.... -cleanitup() { - echo "Deleting testspace namespace" - echo "This should remove all test resources" - kubectl delete ns testspace - if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to remove namespace testpsace" - echo " " - exit 100 - fi -} - -echo " " -echo "IMPORTANT NOTICE!" -echo "====================================================================================================" -echo " This script is deprecated and will be removed in a future release. " -echo " " -echo " This script may not function properly in your environment; run at your own risk. " -echo " " -echo " For more information, please see Discussion #155 in the repository (nginx.com/mara)" -echo "====================================================================================================" -sleep 5 - -echo " " -echo "This script will perform testing on the current kubernetes installation using the currently active kubernetes" -echo "configuration and context." -echo " " -echo "Any failures should be investigated, as they will indicate that the installation does not meet the minimum set" -echo "of capabilities required to run MARA." -echo " " -sleep 5 - -# We need kubectl to do any of this.... -if command -v kubectl > /dev/null; then - echo "Found kubectl - continuing" -else - echo "Cannot proceed without kubectl!" - echo "Please install kubectl and ensure it is in your path." - exit 101 -fi - -# Write out the configuration so we can see it -echo "Dumping current configuration:" -kubectl config view -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to connect to dump configuration from kubeconfig file." - echo "Please check your kubeconfig file." - echo " " - exit 102 -else - echo " " -fi - -# Make sure we can connect -echo "Connecting to cluster:" -kubectl cluster-info -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to connect to cluster and pull information!" - echo "Please make sure you are able to connect to the cluster context defined in your kubeconfig file" - echo " " - exit 103 -else - echo "Success connecting to cluster" - echo " " -fi - - -# We are going to do all our testing in a dedicated namespace -echo "Test ability to create a namespace:" -kubectl create ns testspace -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to create namespace testspace!" - echo "Please make sure you are able to create namespaces in your cluster" - echo " " - exit 104 -fi -echo "Namespace testspace created" -echo " " - -# Create a PV Claim -echo "Create a persistent volume" -kubectl apply -f - << EOF -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: maratest01 - namespace: testspace -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 5G -EOF - -if [ $? -ne 0 ] ; then - echo "FAILURE! Error trying to create persistent volume!" - echo "This could be related to an error running the YAML or an issue attempting to create" - echo "a persistent volume." - echo " " - echo "Please make sure you are able to create persistent volumes in your cluster and try again." 
- echo " " - cleanitup - exit 105 -fi -echo "Persistent volume yaml applied" -echo " " - -# Perform a write test -echo "Test writing to the persistent volume" -kubectl apply -f - << EOF -apiVersion: batch/v1 -kind: Job -metadata: - name: write - namespace: testspace -spec: - template: - metadata: - name: write - spec: - containers: - - name: write - image: ubuntu:xenial - command: ["dd","if=/dev/zero","of=/mnt/pv/test.img","bs=1G","count=1","oflag=dsync"] - volumeMounts: - - mountPath: "/mnt/pv" - name: maratest01 - volumes: - - name: maratest01 - persistentVolumeClaim: - claimName: maratest01 - restartPolicy: Never -EOF - -WRITEJOB="FIRSTRUN" -KOUNT=1 -while [ "$WRITEJOB" != "Completed" ] && [ $KOUNT -lt $TIMEOUT ] ; do - WRITEJOB=$(kubectl get pods --selector=job-name=write --namespace testspace --output=jsonpath='{.items[*].status.containerStatuses[0].state.terminated.reason}') - echo "Attempt $KOUNT of $TIMEOUT: Waiting for job to complete..." - sleep 15 - ((KOUNT++)) -done - -if [ $KOUNT -ge $TIMEOUT ] ; then - echo "FAILURE! Unable to create or write to persistent volume!" - echo "Please make sure you are able to create and write to persistent volumes in your cluster." - cleanitup - exit 106 -elif [ "$WRITEJOB" == "Completed" ] ; then - echo "Persistent volume write test completed; logs follow:" - kubectl logs --selector=job-name=write --namespace testspace - echo " " -else - echo "Should not get here! Exiting!" - cleanitup - exit 107 -fi - - -# Perform a read test -echo "Test reading from the persistent volume" -kubectl apply -f - << EOF -apiVersion: batch/v1 -kind: Job -metadata: - name: read - namespace: testspace -spec: - template: - metadata: - name: read - spec: - containers: - - name: read - image: ubuntu:xenial - command: ["dd","if=/mnt/pv/test.img","of=/dev/null","bs=8k"] - volumeMounts: - - mountPath: "/mnt/pv" - name: maratest01 - volumes: - - name: maratest01 - persistentVolumeClaim: - claimName: maratest01 - restartPolicy: Never -EOF - -READJOB="FIRSTRUN" -KOUNT=1 -while [ "$READJOB" != "Completed" ] && [ $KOUNT -lt $TIMEOUT ] ; do - READJOB=$(kubectl get pods --selector=job-name=read --namespace testspace --output=jsonpath='{.items[*].status.containerStatuses[0].state.terminated.reason}') - echo "Attempt $KOUNT of $TIMEOUT: Waiting for job to complete..." - sleep 15 - ((KOUNT++)) -done - -if [ $KOUNT -ge $TIMEOUT ] ; then - echo "FAILURE! Unable to read from persistent volume!" - echo "Please make sure you are able to read from persistent volumes in your cluster" - cleanitup - exit 108 -elif [ "$READJOB" == "Completed" ] ; then - echo "Persistent volume read test completed; logs follow:" - kubectl logs --selector=job-name=read --namespace testspace - echo " " -else - echo "Should not get here! Exiting!" - cleanitup - exit 109 -fi - -# Clean up... -echo "Cleaning up read job" -kubectl --namespace testspace delete job read -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete read job!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 110 -else - echo "Complete" - echo " " -fi - -echo "Cleaning up write job" -kubectl --namespace testspace delete job write -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete write job!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 111 -else - echo "Complete" - echo " " -fi - -echo "Cleaning up persistent volume" -kubectl --namespace testspace delete pvc maratest01 -if [ $? -ne 0 ] ; then - echo "FAILURE! 
Unable to clean up persistent volume!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 112 -else - echo "Complete" - echo " " -fi - -echo "Deploying KUARD application" -kubectl apply -f - << EOF -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: kuard - name: kuard - namespace: testspace -spec: - replicas: 2 - selector: - matchLabels: - app: kuard - template: - metadata: - labels: - app: kuard - spec: - containers: - - image: gcr.io/kuar-demo/kuard-amd64:1 - name: kuard ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: kuard - name: kuard - namespace: testspace -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 8080 - selector: - app: kuard - sessionAffinity: None - type: LoadBalancer -EOF - -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to create KUARD application!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 113 -fi - -echo "Sleeping 30 to wait for IP assignment" -sleep 30 -echo "Checking for External IP address" -echo " " -EXTIP=$(kubectl get service kuard --namespace testspace --output=jsonpath='{.status.loadBalancer.ingress[*].ip}') -if [ "$EXTIP" == "" ] ; then - echo "FAILURE! Unable to pull loadBalancer IP address!" - echo "This could mean that you do not have a loadBalancer egress defined for the cluster, or it could" - echo "be misconfigured. Please remediate this issue." - echo " " - cleanitup - exit 114 -fi - -echo "External IP is $EXTIP" -echo " " - -echo "Deleting KUARD deployment" -kubectl --namespace testspace delete deployment kuard -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete KUARD deployment!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 115 -fi - -echo "Deleting KUARD service" -kubectl --namespace testspace delete service kuard -if [ $? -ne 0 ] ; then - echo "FAILURE! Unable to delete KUARD service!" - echo "Please check your installation to determine why this is failing!" - cleanitup - exit 116 -fi - -# If we reached this point we are good! -cleanitup -echo " " -echo "==============================================================" -echo "| All tests passed! This system meets the basic requirements |" -echo "| to deploy MARA. |" -echo "==============================================================" - diff --git a/config/pulumi/Pulumi.stackname.yaml.example b/config/pulumi/Pulumi.stackname.yaml.example index bfcef90d..d261d750 100644 --- a/config/pulumi/Pulumi.stackname.yaml.example +++ b/config/pulumi/Pulumi.stackname.yaml.example @@ -16,6 +16,39 @@ ################################################################################ config: + ############################################################################ + # Bank of Sirius (Sample Application) Settings + ############################################################################ + + # These parameters define the name of the database and the database credentials + # used by the Bank of Sirius ledger application. + # + # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius + # deployment if no password is provided. + # sirius:ledger_pwd: Password # Required + sirius:ledger_admin: admin + sirius:ledger_db: postgresdb + + # This optional parameter supplies a hostname for the Bank of Sirius Ingress + # controller. If not set, the FQDN of the LB is used. 
+ #sirius:hostname: demo.example.com + + # These parameters define the name of the database and the database credentials + # used by the Bank of Sirius accounts application. + # + # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius + # deployment if no password is provided. + #sirius:accounts_pwd: Password # Required + sirius:accounts_admin: admin + sirius:accounts_db: postgresdb + + # Prometheus Configuration + sirius:chart_version: 2.3.5 + # Chart version for the Pulumi chart for prometheus + sirius:helm_repo_name: prometheus-community + # Name of the repo to pull the prometheus chart from + sirius:helm_repo_url: https://prometheus-community.github.io/helm-charts + ############################################################################ # AWS Access Settings ############################################################################ @@ -94,7 +127,7 @@ config: # Chart name for the helm chart for kic kic-helm:chart_name: nginx-ingress # Chart version for the helm chart for kic - kic-helm:chart_version: 0.13.1 + kic-helm:chart_version: 0.14.0 # Name of the repo to pull the kic chart from kic-helm:helm_repo_name: nginx-stable # URL of the chart repo to pull kic from @@ -118,12 +151,12 @@ config: # https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/ # # The following are all valid image names: - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.0 - # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.0-ot - # kic:image_name: docker.io/nginx/nginx-ingress:2.2.0 - # kic:image_name: nginx/nginx-ingress:2.2.0 - # kic:image_name: nginx/nginx-ingress:2.2.0-alpine - kic:image_name: nginx/nginx-ingress:2.2.0 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0 + # kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0-ot + # kic:image_name: docker.io/nginx/nginx-ingress:2.3.0 + # kic:image_name: nginx/nginx-ingress:2.3.0 + # kic:image_name: nginx/nginx-ingress:2.3.0-alpine + kic:image_name: nginx/nginx-ingress:2.3.0 ############################################################################ @@ -197,7 +230,7 @@ config: # Logagent Configuration logagent:chart_name: filebeat # Chart name for the helm chart for the logagent - logagent:chart_version: 7.16.3 + logagent:chart_version: 7.17.3 # Chart version for the helm chart for the logagent logagent:helm_repo_name: elastic # Name of the repo to pull the logagent from @@ -213,7 +246,7 @@ config: # Logstore Configuration logstore:chart_name: elasticsearch # Chart name for the helm chart for the logstore - logstore:chart_version: 17.6.2 + logstore:chart_version: 19.1.4 # Chart version for the helm chart for the logstore logstore:helm_repo_name: bitnami # Name of the repo to pull the logstore from @@ -244,7 +277,7 @@ config: # Cert Manager Configuration certmgr:chart_name: cert-manager # Chart hame for the helm chart for certmanager - certmgr:chart_version: v1.6.1 + certmgr:chart_version: v1.9.1 # Chart version for the helm chart for certmanager certmgr:certmgr_helm_repo_name: jetstack # Name of the repo to pull the certmanager chart from @@ -260,7 +293,7 @@ config: # Prometheus Configuration prometheus:chart_name: kube-prometheus-stack # Chart name for the helm chart for prometheus - prometheus:chart_version: 30.0.1 + prometheus:chart_version: 39.2.1 # Chart version for the helm chart for prometheus prometheus:helm_repo_name: prometheus-community # Name of the repo to 
pull the prometheus chart from
@@ -268,7 +301,7 @@ config:
   # URL of the chart repo
   prometheus:statsd_chart_name: prometheus-statsd-exporter
   # Name of the statsd chart (uses the same repo as the prom chart)
-  prometheus.statsd_chart_version: 0.4.2
+  prometheus.statsd_chart_version: 0.5.0
   # Version of the statsd chart (uses the same repo as the prom chart)
   prometheus:helm_timeout: 300
   # Timeout value for helm operations in seconds
@@ -288,28 +321,29 @@ config:
   # within that project.
   ############################################################################

   ############################################################################
-  # Digital Ocean Managed Kubernetes
+  # Digital Ocean Managed Kubernetes and Container Registry
   ############################################################################
   # This is the Kubernetes version to install using Digital Ocean K8s.
-  domk8s:k8s_version: latest
-  # Version of Kubernetes to use
-  domk8s:instance_type: s-2vcpu-4gb
+  docean:k8s_version: 1.22.12-do.0
   # This is the default instance type used by Digital Ocean K8s.
-  domk8s:node_count: 3
+  docean:instance_size: s-4vcpu-8gb
   # The desired node count of the Digital Ocean K8s cluster.
-  domk8s:region: sfo3
+  docean:node_count: 3
   # The region to deploy the cluster
-
+  docean:region: sfo3
+  # Subscription tier for container registry
+  digitalocean:container_registry_subscription_tier: starter
   ############################################################################
   # Linode Kubernetes Engine
   ############################################################################
   # This is the Kubernetes version to install using Linode K8s.
-  lke:k8s_version: 1.22
-  # Version of Kubernetes to use
-  lke:instance_type: g6-standard-8
-  # This is the default instance type used Linode K8s.
-  lke:node_count: 3
+  linode:k8s_version: 1.23
+  # This is the default instance type used by Linode Kubernetes
+  linode:instance_type: g6-standard-8
   # The desired node count of the Linode K8s cluster.
-  lke:region: us-central
+  linode:node_count: 3
   # The region to deploy the cluster
+  linode:region: us-central
+  # Flag to enable or disable HA mode for the Kubernetes cluster
+  linode:k8s_ha: true
diff --git a/config/pulumi/README.md b/config/pulumi/README.md
index 5e7354e8..290c6b34 100644
--- a/config/pulumi/README.md
+++ b/config/pulumi/README.md
@@ -1,19 +1,26 @@
-## Directory
+# Directory
+
 `/config/pulumi`

 ## Purpose
-This directory contains the yaml configuration files used for the pulumi installation.
+
+This directory contains the yaml configuration files used for the pulumi
+installation.

 ## Key Files
-- [`Pulumi.stackname.yaml.example`](./Pulumi.stackname.yaml.example) Contains the list of variables that
-this installation understands.
-- [`environmenet`](./environment) Created at runtime; this file contains details about the environment including
-the stack name, and the ASW profile and region (if deploying in AWS).
-- `Pulumi.YOURSTACK.yaml` Contains the list of variables associated with the stack with the name YOURSTACK. This
-configuration will be created at the first run for the named stack, but it can be created in advance with an
-editor.
+
+* [`Pulumi.stackname.yaml.example`](./Pulumi.stackname.yaml.example) Contains
+  the list of variables that this installation understands.
+* [`environment`](./environment) Created at runtime; this file contains details
+  about the environment including the stack name, and the AWS profile and region
+  (if deploying in AWS).
+* `Pulumi.YOURSTACK.yaml` Contains the list of variables associated with the
+  stack with the name YOURSTACK. This configuration will be created at the first
+  run for the named stack, but it can be created in advance with an editor.

 ## Notes
-Many of the variables have defaults that are enforced through the Pulumi code for each project, however
-there are certain variables that are required. When the process reaches one of these variables and it
-is not set the process will abort with an error message.
\ No newline at end of file
+
+Many of the variables have defaults that are enforced through the Pulumi code
+for each project; however, there are certain variables that are required. When
+the process reaches one of these variables and it is not set, the process will
+abort with an error message.
diff --git a/docker/README.md b/docker/README.md
index af71fb64..efe69f6d 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,16 +1,18 @@
-## Directory
+# Directory

 `/docker`

 ## Purpose

-This directory contains the necessary code to create a docker image that can then be used to deploy MARA. Each docker
-image created is self-sufficient with all necessary tools installed. In order to fully understand how to use these
+This directory contains the necessary code to create a docker image that can
+then be used to deploy MARA. Each docker image created is self-sufficient with
+all necessary tools installed. In order to fully understand how to use these
 images, please see the [Getting Started](../docs/getting_started.md) guide.

 ## Key Files

-- [`build_dev_docker_image.sh`](./build_dev_docker_image.sh) Controlling script for docker build process.
+* [`build_dev_docker_image.sh`](./build_dev_docker_image.sh) Controlling script
+  for docker build process.

 ## Notes
diff --git a/docs/NGINX-MARA-icon.png b/docs/NGINX-MARA-icon.png
new file mode 100644
index 00000000..bd4dde39
Binary files /dev/null and b/docs/NGINX-MARA-icon.png differ
diff --git a/docs/accessing_mgmt_tools.md b/docs/accessing_mgmt_tools.md
index 77bc0fca..31b69c52 100644
--- a/docs/accessing_mgmt_tools.md
+++ b/docs/accessing_mgmt_tools.md
@@ -1,4 +1,4 @@
-## Accessing the Management Tools in MARA
+# Accessing the Management Tools in MARA

 Currently, the management tool suite in MARA consists of:

@@ -8,17 +8,20 @@ Currently, the management tool suite in MARA consists of:
 - [Elasticsearch](https://elastic.co)
 - [Kibana](https://www.elastic.co/kibana/)

-Each of these tools provides an interface that can be reached through an endpoint exposed by the tool. For security
-reasons these tools are not exposed to the internet, which means you will need to use some form of port forwarding to
-access them.
+Each of these tools provides an interface that can be reached through an
+endpoint exposed by the tool. For security reasons these tools are not exposed
+to the internet, which means you will need to use some form of port forwarding
+to access them.

-### Running MARA on your Local Workstation
+## Running MARA on your Local Workstation

-If you are running MARA on your local workstation, you can use the [`test-forward.sh`](../bin/test-forward.sh)
-script to use [`kubectl`](https://kubernetes.io/docs/reference/kubectl/) to forward the ports on your behalf. These
-ports are all forwarded to the corresponding port on localhost as shown below:
+If you are running MARA on your local workstation, you can use the
+[`test-forward.sh`](../bin/test-forward.sh) script to use
+[`kubectl`](https://kubernetes.io/docs/reference/kubectl/) to forward the ports
+on your behalf. These ports are all forwarded to the corresponding port on
+localhost as shown below:

-```
+```txt
 Connections Details
 ====================================
 Kibana:        http://localhost:5601
@@ -33,22 +36,25 @@ Issue Ctrl-C to Exit

 Issuing a Ctrl-C will cause the ports to close.

-### Running MARA Somewhere Else
+## Running MARA Somewhere Else

-In the event you are running MARA somewhere else - in the cloud, on a different server, in a VM on your laptop, etc you
-will need to go through an additional step. Note that this is just one way of accomplishing this, and depending on your
-environment you may want or need to do this differently.
+In the event you are running MARA somewhere else - in the cloud, on a different
+server, in a VM on your laptop, etc. - you will need to go through an additional
+step. Note that this is just one way of accomplishing this, and depending on
+your environment you may want or need to do this differently.

-The easiest thing is to install `kubectl` on the system you want to access the MARA tooling from and then copy over the
-`kubeconfig` from your MARA deployment system. This will then allow you to copy over the `test-forward.sh` script and
+The easiest thing is to install `kubectl` on the system you want to access the
+MARA tooling from and then copy over the `kubeconfig` from your MARA deployment
+system. This will then allow you to copy over the `test-forward.sh` script and
 use that to build the tunnels locally.

-### Edge Cases
+## Edge Cases

-There are definitely cases where these solutions will not work. Please see the "More Information" section below, and if
-you have one of these cases and discover a solution please open a PR so that we can add to this section.
+There are definitely cases where these solutions will not work. Please see the
+"More Information" section below, and if you have one of these cases and
+discover a solution, please open a PR so that we can add to this section.

-### More Information
+## More Information

 To learn more about Kubernetes port-forwarding, please see
-[this article](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/)
\ No newline at end of file
+[this article](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/)
diff --git a/docs/dir_template.md b/docs/dir_template.md
deleted file mode 100644
index 4d8d41c0..00000000
--- a/docs/dir_template.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## Directory
-`/`
-
-## Purpose
-Main purpose for this directory.
-
-## Key Files
-- [`filename`](./file-link) Draw out details for key files along with a link.
-
-## Notes
-Any other information the user should know.
diff --git a/docs/getting_started.md b/docs/getting_started.md
index bd173670..dc1f58af 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -1,39 +1,48 @@
 # Getting Started Guide

-There are a few ways to get the reference architecture set up on your machine. You can install the dependencies locally
-and run the project. Alternatively, the project can be run in a Docker container that you have built.
+There are a few ways to get the reference architecture set up on your machine.
+You can install the dependencies locally and run the project. Alternatively,
+the project can be run in a Docker container that you have built.

 Here is a rough outline of the steps to get started:

-1. Clone git repository, including the Bank of Sirius submodule. This can be done by
-   running `git clone --recurse-submodules https://github.com/nginxinc/kic-reference-architectures`
-2. Install dependencies (install section below - python3, python venv module, git, docker, make).
-3. Setup Pulumi credentials.
-4. Setup AWS credentials OR Setup `kubectl` to connect to an existing cluster
-5. Run `./bin/start.sh` and answer the prompts.
+1. Clone the git repository, including the Bank of Sirius submodule. This can be
+done by running
+`git clone --recurse-submodules https://github.com/nginxinc/kic-reference-architectures`
+
+1. Install dependencies (install section below - python3, python venv module,
+git, docker, make).
+
+1. Set up Pulumi credentials.
+
+1. Set up AWS credentials OR set up `kubectl` to connect to an existing cluster
+
+1. Run `./bin/start.sh` and answer the prompts.

 ## Install on macOS with HomeBrew and Docker Desktop

-```
+```sh
 # Install Homebrew for the Mac: https://brew.sh/
-# Install Docker Toolbox for the Mac: https://docs.docker.com/docker-for-mac/install/
+# Install Docker Desktop for the Mac:
+# https://docs.docker.com/docker-for-mac/install/
 $ brew install make git python3
 ```

 ## Install on macOS with Docker Desktop

-```
-# In a terminal window with the MacOS UI, install developer tools if they haven't already
-# been installed.
+```sh
+# In a terminal window with the MacOS UI, install developer tools if they
+# haven't already been installed.
 $ xcode-select --install
 $ bash ./bin/setup_venv.sh
 ```

 ## Install with Debian/Ubuntu Linux

-```
+```sh
 $ sudo apt-get update
-$ sudo apt-get install --no-install-recommends curl ca-certificates git make python3-venv docker.io
+$ sudo apt-get install --no-install-recommends curl ca-certificates git make \
+python3-venv docker.io
 $ sudo usermod -aG docker $USER
 $ newgrp docker
 $ bash ./bin/setup_venv.sh
@@ -41,7 +50,7 @@ $ bash ./bin/setup_venv.sh

 ## Install with CentOS/Redhat/Rocky Linux

-```
+```sh
 # Install Docker Yum repository
 $ sudo dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
 $ sudo yum install python3-pip make git docker-ce
@@ -55,7 +64,7 @@ $ bash ./bin/setup_venv.sh

 Run the following helper script to build a Debian container image.

-```
+```sh
 $ ./docker/build_dev_docker_image.sh debian
 ```

@@ -65,43 +74,50 @@ $ ./docker/build_dev_docker_image.sh debian

 #### Python 3.7 or Newer or Prerequisites for Building Python 3.7 or Newer

-In this project, Pulumi executes Python code that creates cloud and Kubernetes infrastructure. In order for it to work,
-Python 3 and the [venv module](https://docs.python.org/3/library/venv.html)
-must be installed. Alternative, if GNU make and the gcc compiler are installed the setup script can build and install
-Python 3.
+In this project, Pulumi executes Python code that creates cloud and Kubernetes
+infrastructure. In order for it to work, Python 3 and the
+[venv module](https://docs.python.org/3/library/venv.html) must be installed.
+Alternatively, if GNU make and the gcc compiler are installed, the setup script
+can build and install Python 3.

 Note that the minimum supported version is 3.7.

 #### Git

-The `git` command line tool is required for checking out KIC source code from GitHub and for the KIC image build
-process.
+The `git` command line tool is required for checking out KIC source code from
+GitHub and for the KIC image build process.

 #### Make

-In order to build the Ingress Controller from source, GNU `make` is required to be installed on the running system. If
-you are not building from source, you do not need to install `make`. By default, the build script looks for
-`gmake` and then `make`.
+In order to build the Ingress Controller from source, GNU `make` is required to
+be installed on the running system. If you are not building from source, you do
+not need to install `make`. By default, the build script looks for `gmake` and
+then `make`.

 #### Docker

-Docker is required because the Ingress Controller is a Docker image and needs Docker to generate the image.
+Docker is required because the Ingress Controller is a Docker image and needs
+Docker to generate the image.

-**NOTE**: The kubeconfig deployment option currently only allows you to deploy from a registry. This allows you to
-deploy the NGINX IC or the NGINX Plus IC (with a JWT from your F5 account)
+**NOTE**: The kubeconfig deployment option currently only allows you to deploy
+from a registry. This allows you to deploy the NGINX IC or the NGINX Plus IC
+(with a JWT from your F5 account).

 #### Kubernetes

-Although not required, installing the [CLI tool `kubectl`](https://kubernetes.io/docs/tasks/tools/)
-will allow you to interact with the Kubernetes cluster that you have stood up using this project. This
-tool is also installed as part of the venv that is created and can be used from that directory.
+Although not required, installing the
+[CLI tool `kubectl`](https://kubernetes.io/docs/tasks/tools/) will allow you to
+interact with the Kubernetes cluster that you have stood up using this project.
+This tool is also installed as part of the venv that is created and can be used
+from that directory.

 #### Setup

-Within the project, you will need to install Python and dependent libraries into the `venv` directory. To do this is to
-invoke the [`./bin/setup_venv.sh`](../bin/setup_venv.sh)
-from the project root. This script will install into
-the [virtual environment](https://docs.python.org/3/tutorial/venv.html)
+Within the project, you will need to install Python and dependent libraries
+into the `venv` directory. To do this, invoke the
+[`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project root. This
+script will install into the
+[virtual environment](https://docs.python.org/3/tutorial/venv.html)
 directory:

 * Python 3 (via pyenv) if it is not already present
@@ -109,159 +125,220 @@ directory:
 * AWS CLI utilities
 * `kubectl`

-After running [`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project root, you will need to activate the newly
-created virtual environment by running
-`source ./pulumi/python/venv/bin/activate` from the project root. This will load the virtual environment's path and
-other environment variables into the current shell.
+After running [`./bin/setup_venv.sh`](../bin/setup_venv.sh) from the project
+root, you will need to activate the newly created virtual environment by
+running `source ./pulumi/python/venv/bin/activate` from the project root.
+This will load the virtual environment's path and other environment variables
+into the current shell.

 ## Post Install Configuration

-### Kubeconfig
-
-If you are using an existing kubernetes installation for this project, you will need to provide three pieces of
-information to the installer:
-
-- The full path to a kubeconfig file.
-- The name of the cluster you are using.
-- The cluster context you are using.
+### Stack Name -The easiest way to test this is to run the command: -`kubectl --kubeconfig="yourconfig" --cluster="yourcluster" --context="yourcontext"` +For AWS, Linode, or Digital Ocean deployments you will need to add the variable +`PULUMI_STACK_NAME` to the environment file for the deployment at +[`../config/pulumi/environment`](../config/pulumi/environment). This is the name +that will be used for the provisioned Pulumi stack. -Once you have verified you can connect to the cluster you will need to test to make sure your cluster supports the -minimum required capabilities for MARA. You can test this by running the [`./bin/testcap.sh`](../bin/testcap.sh) -script. +If you are running a `kubeconfig` deployment, the process will prompt you for +the value of `PULUMI_STACK_NAME` and update the environment file for you. -This script does several things: +### Kubeconfig -1. Creates a namespace -2. Creates a persistent volume claim -3. Deploys a pod to test the persistent volume -4. Writes to the persistent volume -5. Reads from the persistent volume -6. Destroys the pod -7. Destroys the persistent volume -8. Deploys a service and attempts to provision a `LoadBalancer` to obtain an egress IP address -9. Destroys the service -10. Destroys the namespace +If you are using an existing kubernetes installation for this project, you will +need to provide three pieces of information to the installer: -If any of these tests fails the script exits with notes on the failure. These failures need to be remediated before MARA -can be installed. +* The full path to a kubeconfig file. +* The name of the cluster you are using. +* The cluster context you are using. -There are several utilities under the `./pulumi/python/tools` directory that are intended for use to add the necessary -capabilities to a Kubernetes cluster. Note that these are not extensively tested with MARA, but are included for -convenience. Please see the [README.md](../pulumi/python/tools/README.md) in that directory for additional information. -Note that these tools can be installed via the [kubernetes-extras.sh](../bin/kubernetes-extras.sh) -script. +The easiest way to test this is to run the command: +`kubectl --kubeconfig="yourconfig" --cluster="yourcluster" --context="yourcontext"` ### AWS +*Note:* The AWS deployment has been updated from v1.1 and no longer uses the +[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to +use the script to deploy to AWS you will receive an error message. Please +use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for +these deployments. + If you are using AWS as your infrastructure provider [configuring Pulumi for AWS](https://www.pulumi.com/docs/intro/cloud-providers/aws/setup/) -is necessary. If you already have run the [`./bin/setup_venv.sh`](../bin/setup_venv.sh) -script, you will have the `aws` CLI tool installed in the path `./pulumi/python/venv/bin/aws` +is necessary. If you already have run the +[`./bin/setup_venv.sh`](../bin/setup_venv.sh) +script, you will have the `aws` CLI tool installed in the path +`../pulumi/python/venv/bin/aws` and you do not need to install it to run the steps in the Pulumi Guide. -If you want to avoid using environment variables, AWS profile and region definitions can be contained in -the `config/Pulumi..yaml` -files in each project. Refer to the Pulumi documentation for details on how to do this. 
When you run the
-script [`./bin/start.sh`](../bin/start.sh) and select an AWS installation, you will be prompted to add the AWS region
-and profile values that will then be added to the `./config/Pulumi/Pulumi..yaml`. This is the main configuration
-file for the project, although there are two other configuration files kept for the application standup and the
-kubernetes extras functionality. For more details on those, please see the README.md in those directories.
+If you want to avoid using environment variables, AWS profile and region
+definitions can be contained in the `config/Pulumi..yaml` files in each
+project. Refer to the Pulumi documentation for details on how to do this.
+When you run the [`../pulumi/python/runner`](../pulumi/python/runner) program
+and select your provider, you will be prompted for all variables necessary to
+use that provider along with MARA-specific variables. This information will
+be added to the `../config/Pulumi/Pulumi..yaml` configuration file. This is
+the main configuration file for the project, although there is one other
+configuration file used to maintain secrets in the
+[`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets)
+project. For more details, please see the README.md in that directory.

 ### Digital Ocean

-If you are using Digital Ocean as your infrastructure provider
-[configuring Pulumi for Digital Ocean](https://www.pulumi.com/registry/packages/digitalocean/) is necessary. The first
-step is to install the [`doctl`](https://docs.digitalocean.com/reference/doctl/how-to/install/) utility to interact with
-your Digital Ocean account.
+*Note:* The Digital Ocean deployment has been updated from v1.1 and no longer
+uses the [`../bin/start.sh`](../bin/start.sh) script for deployment. If you
+attempt to use the script to deploy to Digital Ocean you will receive an
+error message. Please
+use the new [`../pulumi/python/runner`](../pulumi/python/runner) program
+for these deployments.
+
+You will need to create a
+[Digital Ocean Personal API Token](https://docs.digitalocean.com/reference/api/create-personal-access-token/)
+for authentication to Digital Ocean. When you run the
+[`../pulumi/python/runner`](../pulumi/python/runner) program and select your
+provider, you will be prompted for all variables necessary to use that provider
+along with MARA-specific variables. This information will be added to the
+`../config/Pulumi/Pulumi..yaml` configuration file. This is the main
+configuration file for the project, although there is one other configuration
+file used to maintain secrets in the
+[`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets)
+project. For more details, please see the README.md in that directory.
+
+### Linode
+
+*Note:* The Linode deployment has been updated from v1.1 and no longer uses the
+[`../bin/start.sh`](../bin/start.sh) script for deployment. If you attempt to
+use the script to deploy to Linode you will receive an error message. Please
+use the new [`../pulumi/python/runner`](../pulumi/python/runner) program for
+these deployments.
+
+You will need to create a
+[Linode API Token](https://www.linode.com/docs/products/tools/linode-api/guides/get-access-token/)
+for authentication to Linode. When you run the
+[`../pulumi/python/runner`](../pulumi/python/runner) program and select your
+provider, you will be prompted for all variables necessary to use that provider
+along with MARA-specific variables. This information will be added to the
+`../config/Pulumi/Pulumi..yaml` configuration file. This is the main
+configuration file for the project, although there is one other configuration
+file used to maintain secrets in the
+[`../pulumi/python/kubernetes/secrets`](../pulumi/python/kubernetes/secrets)
+project. For more details, please see the README.md in that directory.
+
+### Kubeconfig Deployments: MicroK8s / Minikube / K3s / Other
+
+Deployments that use a `kubeconfig` file to access an existing K8 installation
+will continue to use the [`../bin/start.sh`](../bin/start.sh) script.
+Additionally, these deployments are not able to build the Ingress Controller
+and instead need to download it from the NGINX repositories. The installation
+of NGINX+ is supported via the use of a JWT, if desired.
+
+These deployments will be moved over to use the
+[`../pulumi/python/runner`](../pulumi/python/runner) program in a future
+release, which will bring them to parity for NGINX IC build/deployment with the
+other infrastructures.

 ### Pulumi

 If you already have run the [`./bin/setup_venv.sh`](../bin/setup_venv.sh)
-script, you will have the `pulumi` CLI tool installed in the path `venv/bin/pulumi`. You will need to make an account
-on [pulumi.com](https://pulumi.com) or alternatively use another form of state store. Next, login to pulumi from the CLI
-by running the
-command [`./pulumi/python/venv/bin/pulumi login`](https://www.pulumi.com/docs/reference/cli/pulumi_login/). Refer to the
-Pulumi documentation for additional details regarding the command and alternative state stores.
+script, you will have the `pulumi` CLI tool installed in the path
+`venv/bin/pulumi`. You will need to make an account on
+[pulumi.com](https://pulumi.com) or alternatively use another form of state
+store. Next, log in to Pulumi from the CLI by running the command
+[`./pulumi/python/venv/bin/pulumi login`](https://www.pulumi.com/docs/reference/cli/pulumi_login/).
+Refer to the Pulumi documentation for additional details regarding the command
+and alternative state stores.

 ## Running the Project

-The easiest way to run the project is to run [`start.sh`](../bin/start.sh)
-after you have completed the installation steps. When doing so, be sure to choose the same
-[Pulumi stack name](https://www.pulumi.com/docs/intro/concepts/stack/)
-for all of your projects. Additionally, this script will prompt you for infrastructure specific configuration values.
-This information will be used to populate the `./config/pulumi/Pulumi..yaml` file.
+Provided you have completed the installation steps, the easiest way to run the
+project is to run [`../pulumi/python/runner`](../pulumi/python/runner) for AWS,
+Linode, or Digital Ocean and [`../bin/start.sh`](../bin/start.sh) for
+`kubeconfig` deployments. This process will prompt you for all required
+variables for your deployment type. This information will be used to populate
+the configuration files.
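+
+As a quick illustration, the runner takes a provider, a stack name, and an
+operation. The flags below mirror the invocations in the sample Jenkinsfiles
+under `extras/jenkins`; the stack name `mara-test` is only a placeholder:
+
+```console
+$ ./pulumi/python/runner -p aws -s mara-test up
+$ ./pulumi/python/runner -p aws -s mara-test destroy
+```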
-Alternatively, you can enter into each Pulumi project directory and execute each project independently by doing
-`pulumi up`. Take a look at `start.sh` and dependent scripts to get a feel for the flow.
+Alternatively, you can enter into each Pulumi project directory and execute
+each project independently by doing `pulumi up`. Take a look at `start.sh` and
+dependent scripts to get a feel for the flow.

-If you want to destroy the entire environment you can run [`destroy.sh`](../bin/destroy.sh). This script calls the
-correct destroy script based on the information stored in the `./config/Pulumi/Pulumi..yaml` configuration file.
-Detailed information and warnings are emitted by the script as it runs.
+If you want to destroy the entire environment you can run
+[`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, or
+Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` deployments.
+Detailed information and warnings are emitted by the process as it runs.

 ### Running the Project in a Docker Container

-If you are using a Docker container to run Pulumi, you will want to run the with the docker socket mounted, like the
-following command.
+If you are using a Docker container to run Pulumi, you will want to run it
+with the docker socket mounted, as in the following command.
+
+```console
+$ docker run --interactive --tty \
+  --volume /var/run/docker.sock:/var/run/docker.sock \
+  kic-ref-arch-pulumi-aws:
 ```
-docker run --interactive --tty --volume /var/run/docker.sock:/var/run/docker.sock \
-    kic-ref-arch-pulumi-aws:
-```
-
-If you already have set up Pulumi, kubeconfig information, and/or AWS credentials on the host machine, you can mount
-them into the container using Docker with the following options.
-```
-docker run --interactive --tty \
-    --volume /var/run/docker.sock:/var/run/docker.sock \
-    --volume $HOME/.pulumi:/pulumi/projects/kic-reference-architectures/.pulumi \
-    --volume $HOME/.aws:/pulumi/projects/kic-reference-architectures/.aws \
-    --volume $HOME/.kube:/pulumi/projects/kic-reference-architectures/.kube \
-    kic-ref-arch-pulumi-aws:debian
+If you already have set up Pulumi, kubeconfig information, and/or AWS
+credentials on the host machine, you can mount them into the container using
+Docker with the following options.
+
+```console
+$ docker run --interactive --tty \
+  --volume /var/run/docker.sock:/var/run/docker.sock \
+  --volume $HOME/.pulumi:/pulumi/projects/kic-reference-architectures/.pulumi \
+  --volume $HOME/.aws:/pulumi/projects/kic-reference-architectures/.aws \
+  --volume $HOME/.kube:/pulumi/projects/kic-reference-architectures/.kube \
+  kic-ref-arch-pulumi-aws:debian
 ```

 ### Accessing the Application

-The final output from the startup process will provide you with detailed information on how to access your project. This
-information will vary based on the K8 distribution that you are deploying against; the following output is from a
-deployment against an existing K8 installation using the *kubeconfig* option:
-
-```
+The final output from the startup process will provide you with detailed
+information on how to access your project. This information will vary based on
+the K8 distribution that you are deploying against; the following output is
+from a deployment against an existing K8 installation using the *kubeconfig*
+option:
+
+```console
 Next Steps:
-1. Map the IP address (192.168.100.100) of your Ingress Controller with your FQDN (mara.example.com).
-2.
Use the ./bin/test-forward.sh program to establish tunnels you can use to connect to the management tools. +1. Map the IP address (192.168.100.100) of your Ingress Controller with your + FQDN (mara.example.com). +2. Use the ./bin/test-forward.sh program to establish tunnels you can use to + connect to the management tools. 3. Use kubectl, k9s, or the Kubernetes dashboard to explore your deployment. -To review your configuration options, including the passwords defined, you can access the pulumi secrets via the +To review your configuration options, including the passwords defined, you can +access the pulumi secrets via the following commands: -Main Configuration: pulumi config -C /jenkins/workspace/jaytest/bin/../pulumi/python/config -Bank of Sirius (Example Application) Configuration: pulumi config -C /jenkins/workspace/jaytest/bin/../pulumi/python/kubernetes/applications/sirius +Main Configuration: pulumi config -C +Bank of Sirius (Example Application) Configuration: pulumi config -C K8 Loadbalancer IP: kubectl get services --namespace nginx-ingress -Please see the documentation in the github repository for more information - +Please see the documentation in the github repository for more information ``` ### Accessing the Management Tooling -Please see the document [Accessing Management Tools in MARA](./accessing_mgmt_tools.md) for information on how to access -these tools. +Please see the document +[Accessing Management Tools in MARA](./accessing_mgmt_tools.md) for information +on how to access these tools. ### Cleaning Up -If you want to completely remove all the resources you have provisioned, run the -script: [`./bin/destroy.sh`](../bin/destroy.sh). +If you want to completely remove all the resources you have provisioned, +run the [`../pulumi/python/runner`](../pulumi/python/runner) for AWS, Linode, +or Digital Ocean or [`destroy.sh`](../bin/destroy.sh) for `kubeconfig` +deployments. Detailed information and warnings are emitted by the +process as it runs. Be careful because this will **DELETE ALL** the resources you have provisioned. + +## Other Resources + +Starting with release `v1.1`, the MARA project has begun the process of +transitioning the deployment logic away from BASH scripts and instead using the +[Pulumi Automation API](https://www.pulumi.com/docs/guides/automation-api/) with +Python. For more information on this, please see this +[Design Document](../pulumi/python/automation/DESIGN.md). diff --git a/docs/status-and-issues.md b/docs/status-and-issues.md index 6f915e7b..e8460bb4 100644 --- a/docs/status-and-issues.md +++ b/docs/status-and-issues.md @@ -1,82 +1,122 @@ # Overview -This project is a work in progress and as such there are a number of areas for improvement. As of this writing, the -development process is primarily using AWS and MicroK8s for development and testing. However, there is manual testing -being undertaken on several other K8 providers. Current information on known issues, bugs, and open feature requests can -be seen on the [Project GitHub Issue Page](https://github.com/nginxinc/kic-reference-architectures/issues). +This project is a work in progress and as such there are a number of areas for +improvement. As of this writing, the development process is primarily using AWS +and MicroK8s for development and testing. However, there is manual testing +being undertaken on several other K8 providers. 
Current information on known +issues, bugs, and open feature requests can be seen on the +[Project GitHub Issue Page](https://github.com/nginxinc/kic-reference-architectures/issues). Additionally, the core contributors are available for discussion on the [Project GitHub Discussion Page](https://github.com/nginxinc/kic-reference-architectures/discussions) ## Provider Status -This matrix lists out the currently tested configurations, along with any notes on that configuration. The matrix -includes the following: - -- K8 Provider: The name of the provider -- Infrastructure Support: Does the project stand up the infrastructure with this provider? -- Ingress Controller Options: What are the options for IC deployment? -- FQDN/IP: How does the project handle the IP addressing and FQDN for the certificates? -- Notes: Any additional information on the provider / project interaction. - -All of these configurations use Pulumi code within Python as the Infrastructure as Code (IaaC) provider. - -| K8 Provider | Tested / Deploy Status | Infrastructure Support | IC Options | FQDN/IP | Notes | -|-----------------|--------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------|-----------------|--------------------------------------------------| -| AWS EKS | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=Deploy) | Full Infrastructure Standup | Build, Pull (uses ECR) | Provided | | -| Azure AKS | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Digtal Ocean | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy) | Full Infrastructure Standup | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Google GKE | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Harvester/RKE2 | Yes | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer | -| K3S | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Linode | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Deploy) | Full Infrastructure Standup | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| MicroK8s | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) | -| Minikube | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Deploy) | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | | -| Rancher Desktop | No | Kubeconfig Only (3) | NGINX / NGINX Plus (w/ JWT) (1) | Manual FQDN (2) | Needs Storage, K8 LoadBalancer | - -### Notes: - -1. The NGINX IC build/deploy process is currently under active development and support for IC will be standardized - across all providers. Follow [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and - [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for details. Currently, for all non-AWS - environments you have the option to specify either NGINX or NGINX Plus as your IC. The latter does require an active - subscription and a JWT to be included at build time. 
Please see the documentation for more details.
-2. The process via which the IP and FQDN are created and used is currently under active development, and will be
-   streamlined and standardized for all providers.
-   Follow [#82](https://github.com/nginxinc/kic-reference-architectures/issues/82) for details.
-3. The initial deployment was entirely built to work with AWS. As part of our reorganization the ability to use a
-   kubeconfig file was added, along with the necessary configuration to support additional standup options. This is
-   currently in active development and will result in this process being streamlined for these additional environments.
-   Please follow
-   [#80](https://github.com/nginxinc/kic-reference-architectures/issues/80) for details.
-4. We are currently using filebeat as our logging agent. This deployment requires that the correct paths to the
-   container log directory are present in the deployment data. We have discovered that this differs based on the K8
-   provider. Please see [#76](https://github.com/nginxinc/kic-reference-architectures/issues/76) for more detail.
+This matrix lists out the currently tested configurations, along with any notes
+on that configuration. The matrix includes the following:
+
+* K8 Provider: The name of the provider
+
+* Infrastructure Support: Does the project stand up the infrastructure with
+this provider?
+
+* Ingress Controller Options: What are the options for IC deployment?
+
+* FQDN/IP: How does the project handle the IP addressing and FQDN for the
+certificates?
+
+* Notes: Any additional information on the provider / project interaction.
+
+All of these configurations use Pulumi code within Python as the Infrastructure
+as Code (IaC) provider.
+
+| K8 Provider     | Tested / Deploy Status                                                                                  | Infrastructure Support      | IC Options                        | FQDN/IP         | Notes                                            |
+|-----------------|---------------------------------------------------------------------------------------------------------|-----------------------------|-----------------------------------|-----------------|--------------------------------------------------|
+| AWS EKS         | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_aws_prod&subject=Deploy)         | Full Infrastructure Standup | Build, Pull (uses ECR)            | Provided        |                                                  |
+| Azure AKS       | Yes                                                                                                       | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) |                                                  |
+| Digital Ocean   | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_do_prod&subject=Deploy)          | Full Infrastructure Standup | Build, Pull (Uses DO Registry)    | Provided        | Requires DNS delegation to DO                    |
+| Google GKE      | Yes                                                                                                       | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) |                                                  |
+| Harvester/RKE2  | Yes                                                                                                       | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+| K3S             | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_k3s_prod&subject=Deploy)         | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) |                                                  |
+| Linode          | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_lke_prod&subject=Deploy)         | Full Infrastructure Standup | Build, Pull (uses Harbor install) | Provided        |                                                  |
+| MicroK8s        | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_mk8s_prod&subject=Deploy)        | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) | Storage, DNS, and Metallb need to be Enabled (4) |
+| Minikube        | ![Deploy Status](https://jenkins.mantawang.com/buildStatus/icon?job=mara_minikube_prod&subject=Deploy)    | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) |                                                  |
+| Rancher Desktop | No                                                                                                        | Kubeconfig Only (3)         | NGINX / NGINX Plus (w/ JWT) (1)   | Manual FQDN (2) | Needs Storage, K8 LoadBalancer                   |
+
+### Notes
+
+1. The NGINX IC build/deploy process is currently under active development and
+   support for IC will be standardized across all providers. Follow
+   [#81](https://github.com/nginxinc/kic-reference-architectures/issues/81) and
+   [#86](https://github.com/nginxinc/kic-reference-architectures/issues/86) for
+   details. Currently, for all `kubeconfig` environments you have the option to
+   specify either NGINX or NGINX Plus as your IC. The latter does require an
+   active subscription and a JWT to be included at build time. Please see the
+   documentation for more details.
+
+1. The process via which the IP and FQDN are created and used is currently
+   under active development, and will be streamlined and standardized for all
+   providers. Follow
+   [#82](https://github.com/nginxinc/kic-reference-architectures/issues/82) for
+   details.
+
+1. The initial deployment was entirely built to work with AWS. As part of our
+   reorganization the ability to use a kubeconfig file was added, along with the
+   necessary configuration to support additional standup options. This is
+   currently in active development and will result in this process being
+   streamlined for these additional environments. Please follow
+   [#80](https://github.com/nginxinc/kic-reference-architectures/issues/80)
+   for details.
+
+1. We are currently using filebeat as our logging agent. This deployment
+   requires that the correct paths to the container log directory are present
+   in the deployment data. We have discovered that this differs based on the K8
+   provider. Please see
+   [#76](https://github.com/nginxinc/kic-reference-architectures/issues/76)
+   for more detail.

 ## Known Issues / Caveats

-1. Currently, the use of the Elastic tooling has shown to be problematic under heavy load, with containers falling over
-   and causing disruptions. Please see the [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example)
-   variables to adjust the number of replicas deployed for the Elastic logstore to tune to your environment. These will
-   need to be added/updated in the configuration for your stack, which is located in `./config/pulumi` and
+1. Currently, the use of the Elastic tooling has proven to be problematic under
+   heavy load, with containers falling over and causing disruptions. Please see
+   the
+   [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example)
+   variables to adjust the number of replicas deployed for the Elastic logstore
+   to tune to your environment. These will need to be added/updated in the
+   configuration for your stack, which is located in `./config/pulumi` and
   is named `Pulumi.$STACK.yaml`.
-2. The default Helm timeout is 5 minutes, which is acceptable for most managed clouds but tends to be too short for
-   single-vm or workstation deployments. Please see
-   the [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example)
-   variables to adjust the helm timeout as required for your environment. These will need to be added/updated in the
-   configuration for your stack, which is located in `./config/pulumi` and is named `Pulumi.$STACK.yaml`.
-3. When load testing the Bank of Sirius using [Locust](https://locust.io/), you will likely see a high failure rate as
-   you increase the max users and spawn rate. This is "normal" and is an area we want to expose and explore for
+
+1. The default Helm timeout is 5 minutes, which is acceptable for most managed
+   clouds but tends to be too short for single-vm or workstation deployments.
+   Please see the
+   [example configuration file](../config/pulumi/Pulumi.stackname.yaml.example)
+   variables to adjust the helm timeout as required for your environment. These
+   will need to be added/updated in the configuration for your stack, which is
+   located in `./config/pulumi` and is named `Pulumi.$STACK.yaml`.
+
+1. When load testing the Bank of Sirius using [Locust](https://locust.io/), you
+   will likely see a high failure rate as you increase the max users and spawn
+   rate. This is "normal" and is an area we want to expose and explore for
    troubleshooting, determining which metrics/traces are helpful, etc.
-4. The most common failure modes for non-cloud environments tend towards the following failures:
+
+1. The most common failure modes for non-cloud environments tend towards the
+   following failures:
+
    1. Unable to provision persistent storage; correct by ensuring you have a
-      [persistent volume provider](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and can provision a
-      volume.
+      [persistent volume provider](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)
+      and can provision a volume.
+
-   2. Unable to provision an External IP; correct by adding an IP provider such
+   1. Unable to provision an External IP; correct by adding an IP provider such
       as [kubevip](https://kube-vip.chipzoller.dev/) or
       [metallb](https://metallb.org/).
-   3. Resource starvation (not enough CPU, Memory); expand the size of the VM or detune the environment.
-   4. Timeouts in helm; increase the helm timeout in the configuration file.
-5. If you are using a cloud provider with timed credentials, such as AWS, one failure mode that can arise is when the
-   credentials expire. This will result in a number of strange and seemingly confusing errors. Double check to make
-   sure that the credentials are valid.
-6. Currently, the build/test process is highly manual. This will be addressed in the future.
+
+   1. Resource starvation (not enough CPU, Memory); expand the size of the VM
+      or detune the environment.
+
+   1. Timeouts in helm; increase the helm timeout in the configuration file.
+
+1. If you are using a cloud provider with timed credentials, such as AWS, one
+   failure mode that can arise is when the credentials expire. This will result
+   in a number of strange and seemingly confusing errors. Double check to make
+   sure that the credentials are valid.
+
+1. Currently, the build/test process is highly manual. This will be addressed
+   in the future.
diff --git a/extras/README.md b/extras/README.md
index bda08f0f..2e0c1bae 100644
--- a/extras/README.md
+++ b/extras/README.md
@@ -1,20 +1,23 @@
-## Directory
+# Directory

 `/extras`

 ## Purpose

-This directory is for files that, although important, don't have a clearly defined home. Files from this directory will
-most likely be moved as the project matures.
+This directory is for files that, although important, don't have a clearly
+defined home. Files from this directory will most likely be moved as the
+project matures.

 ## Key Files

-- [`jwt.token`](./jwt.token) This file contains the JWT required to pull the NGINX IC from the NGINX, Inc registry.
-  See [this webpage](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret)
+* [`jwt.token`](./jwt.token) This file contains the JWT required to pull
+  the NGINX IC from the NGINX, Inc. registry. See
+  [this webpage](https://docs.nginx.com/nginx-ingress-controller/installation/using-the-jwt-token-docker-secret)
   for details and examples.
-- [`jenkins`](./jenkins) This directory contains sample jenkinsfiles. Note that these are not guaranteed to be production
-  ready. These files are named according to the specific type of build they manage; for example, AWS, K3S, MicroK8s, Linode, Minikube and
-  DO (Digital Ocean).
+* [`jenkins`](./jenkins) This directory contains sample jenkinsfiles. Note
+  that these are not guaranteed to be production ready. These files are named
+  according to the specific type of build they manage; for example, AWS, K3S,
+  MicroK8s, Linode, Minikube, and DO (Digital Ocean).

 ## Notes
diff --git a/extras/jenkins/AWS/Jenkinsfile b/extras/jenkins/AWS/Jenkinsfile
index bc82f5bc..9759b280 100644
--- a/extras/jenkins/AWS/Jenkinsfile
+++ b/extras/jenkins/AWS/Jenkinsfile
@@ -29,9 +29,10 @@ pipeline {
         AWS_ACCESS_KEY_ID = credentials('AWS_ACCESS_KEY_ID')
         AWS_SECRET_ACCESS_KEY = credentials('AWS_SECRET_ACCESS_KEY')
         AWS_SESSION_TOKEN = credentials('AWS_SESSION_TOKEN')
-        NGINX_JWT = credentials('NGINX_JWT')
         NO_COLOR = "TRUE"
         PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN')
+        MARA_PASSWORD = credentials('MARA_PASSWORD')
+
     }
@@ -86,15 +87,11 @@ pipeline {
       steps {

        /*
-        * Run a find and check for any stacks that currently exist with our generated stack name; this should not
-        * happen in normal operation, but could potentially happen if things break so better safe than sorry.
-        *
         * Other cleanup related functions can be placed here as well.
        */

        sh '''
-       # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know.
- find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; + true ''' } } @@ -145,17 +142,28 @@ pipeline { echo "AWS_PROFILE=${AWS_PROFILE}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/config $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkaws${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "AWS" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "aws" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "marajenkaws${BUILD_NUMBER}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:profile "${AWS_PROFILE}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set aws:region "${AWS_DEFAULT_REGION}" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:k8s_version "1.22" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:instance_type "t2.large" -C 
pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:min_size "3" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:max_size "12" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set eks:desired_capacity "3" -C pulumi/python/config -s marajenkaws${BUILD_NUMBER} ''' } } @@ -164,13 +172,11 @@ pipeline { steps { /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the credentials file if we have environment variables set. Finally, it moves the JWT into the correct location. + * */ sh ''' - echo "${NGINX_JWT}" > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_aws.sh + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} up ''' } } @@ -182,12 +188,16 @@ pipeline { * Clean up the environment; this includes running the destroy script to remove our pulumi resources and * destroy the deployed infrastructure in AWS * + * AWS will not remove a registry that contains images, so we do a force removal here; this should ultimately + * be fixed in the code. + * * After that completes, we remove the pulumi stack from the project with the find command; this is because * we need to delete the stack in each project it's been instantiated in. */ sh ''' - $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/venv/bin/aws ecr delete-repository --repository-name ingress-controller-marajenkaws${BUILD_NUMBER} --force + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } @@ -205,7 +215,7 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p aws -s marajenkaws${BUILD_NUMBER} destroy || true find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkaws${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/DigitalOcean/Jenkinsfile b/extras/jenkins/DigitalOcean/Jenkinsfile index a5b3cbce..3005cdbe 100644 --- a/extras/jenkins/DigitalOcean/Jenkinsfile +++ b/extras/jenkins/DigitalOcean/Jenkinsfile @@ -21,8 +21,8 @@ pipeline { environment { DIGITALOCEAN_TOKEN = credentials('DIGITALOCEAN_TOKEN') - NGINX_JWT = credentials('NGINX_JWT') PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') NO_COLOR = "TRUE" DEBIAN_FRONTEND = "noninteractive" } @@ -84,15 +84,11 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. - * * Other cleanup related functions can be placed here as well. */ sh ''' - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. 
- find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; + true ''' } @@ -128,18 +124,26 @@ pipeline { echo "PULUMI_STACK=marajenkdo${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment echo "DO_TOKEN=${DO_TOKEN}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkdo${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "DO" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "601" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set digitalocean:token "${DO_TOKEN}" --plaintext -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set domk8s:k8s_version "latest" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:instance_size "s-4vcpu-8gb" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:k8s_version "latest" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:node_count "3" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:region "sfo3" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "mara${BUILD_NUMBER}.docean.mantawang.com" -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set docean:token "${DO_TOKEN}" --plaintext -C pulumi/python/config -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C 
pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkdo${BUILD_NUMBER} + ''' } @@ -148,14 +152,8 @@ pipeline { stage('Deploying Pulumi') { steps { - /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Digital Ocean. - */ - sh ''' - echo $NGINX_JWT > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_do.sh + $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} up ''' } } @@ -172,7 +170,7 @@ pipeline { */ sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} destroy find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkdo${BUILD_NUMBER} --force --yes \\; ''' } @@ -189,7 +187,7 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p do -s marajenkdo${BUILD_NUMBER} destroy || true find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/K3S/Jenkinsfile b/extras/jenkins/K3S/Jenkinsfile index 36b09877..4ef4f190 100644 --- a/extras/jenkins/K3S/Jenkinsfile +++ b/extras/jenkins/K3S/Jenkinsfile @@ -15,18 +15,16 @@ pipeline { } /* - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. - * - * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around - * for the time being and will be addressed in the future. - */ + * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the + * open source IC. + */ environment { NGINX_JWT = credentials('NGINX_JWT') - POSTRUN_CMD = credentials('POSTRUN_CMD') PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') NO_COLOR = "TRUE" + MARA_PASSWORD = credentials('MARA_PASSWORD') + } stages { @@ -79,24 +77,11 @@ pipeline { steps { /* - * Run a find and check for any stacks that currently exist with our generated stack name; this should not - * happen in normal operation, but could potentially happen if things break so better safe than sorry. - * - * This function also tries to remove both K3S and Microk8s if they are found on the host; this is because we - * will be installing K3S and we want to both make sure we are removing any previous installations as well as - * ensuring this Jenkins Agent does not already have a microk8s installation on it. + * Any pre-run cleanup can be put here... */ sh ''' - # Reset our K3S Environment - /usr/local/bin/k3s-killall.sh || true - /usr/local/bin/k3s-uninstall.sh || true - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - sudo snap remove microk8s || true - # Clean up the Pulumi stack if it exists for our run - which it shouldn\'t, but you never know. 
- find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + true ''' } } @@ -112,7 +97,7 @@ pipeline { steps { sh ''' # Is this super safe? No, but we’re going to roll with it for now. - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik" sh - + curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable=traefik" INSTALL_K3S_VERSION="v1.23.9+k3s1" sh - ''' } } @@ -157,20 +142,24 @@ pipeline { */ sh ''' - echo "PULUMI_STACK=marajenk${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenk${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius - $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenk${BUILD_NUMBER} + echo "PULUMI_STACK=marajenkk3s${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkk3s${BUILD_NUMBER} -C pulumi/python/config + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkk3s${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets + $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s 
marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkk3s${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "default" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkk3s${BUILD_NUMBER} ''' } } @@ -202,10 +191,10 @@ pipeline { steps { sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/bin/destroy.sh /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true - find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenkk3s${BUILD_NUMBER} --force --yes \\; ''' } } @@ -227,7 +216,7 @@ pipeline { # Reset our K3S Environment /usr/local/bin/k3s-killall.sh || true /usr/local/bin/k3s-uninstall.sh || true - find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenk${BUILD_NUMBER} --force --yes \\; + find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkk3s${BUILD_NUMBER} --force --yes \\; ''' } } diff --git a/extras/jenkins/Linode/Jenkinsfile b/extras/jenkins/Linode/Jenkinsfile index f6a5ee3c..c8c87023 100644 --- a/extras/jenkins/Linode/Jenkinsfile +++ b/extras/jenkins/Linode/Jenkinsfile @@ -14,16 +14,14 @@ pipeline { /* * The Linode token is passed into the process via a credential in Jenkins. If this is not found the * process will fail out. - * - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. */ environment { LINODE_TOKEN = credentials('LINODE_TOKEN') - NGINX_JWT = credentials('NGINX_JWT') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') + } stages { @@ -114,23 +112,37 @@ pipeline { * of the manual deployment if required. * * This will likely evolve further as the project does, and we may reach a point where these defaults are assumed - * for a given development type. + * for a given development type. 
kubernetes:cluster_name */ sh ''' echo "PULUMI_STACK=marajenklke${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment echo "LINODE_TOKEN=${LINODE_TOKEN}" >> $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenklke${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenks${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "DO" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "lke" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "marajenklke${BUILD_NUMBER}" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_db_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:harbor_sudo_user_password "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set 
linode:instance_type "g6-standard-8" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:k8s_ha "true" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:k8s_version "1.23" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:node_count "3" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:region "us-central" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:soa_email "qdzlug@gmail.com" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set linode:token "${LINODE_TOKEN}" --plaintext -C pulumi/python/config -s marajenklke${BUILD_NUMBER} ''' @@ -140,14 +152,8 @@ pipeline { stage('Deploying Pulumi') { steps { - /* - * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Linode. - */ - sh ''' - echo $NGINX_JWT > $WORKSPACE/extras/jwt.token - $WORKSPACE/bin/start_lke.sh + $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} up ''' } } @@ -164,7 +170,7 @@ pipeline { */ sh ''' - PATH=$WORKSPACE/pulumi/python/venv/bin:$PATH $WORKSPACE/bin/destroy.sh + $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy find $WORKSPACE -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } @@ -181,7 +187,8 @@ pipeline { sh ''' # Destroy our partial build... - $WORKSPACE/bin/destroy.sh || true + $WORKSPACE/pulumi/python/runner -p linode -s marajenklke${BUILD_NUMBER} destroy || true + # Clean up the Pulumi stack find $WORKSPACE -mindepth 2 -maxdepth 7 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenklke${BUILD_NUMBER} --force --yes \\; ''' } diff --git a/extras/jenkins/MicroK8s/Jenkinsfile b/extras/jenkins/MicroK8s/Jenkinsfile index 5476fce0..7dfc88f4 100644 --- a/extras/jenkins/MicroK8s/Jenkinsfile +++ b/extras/jenkins/MicroK8s/Jenkinsfile @@ -15,18 +15,15 @@ pipeline { } /* - * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the - * open source IC. - * - * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around - * for the time being and will be addressed in the future. - */ + * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the + * open source IC. 
+ */ environment { NGINX_JWT = credentials('NGINX_JWT') - POSTRUN_CMD = credentials('POSTRUN_CMD') NO_COLOR = "TRUE" PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN') + MARA_PASSWORD = credentials('MARA_PASSWORD') } stages { @@ -152,18 +149,22 @@ pipeline { sh ''' echo "PULUMI_STACK=marajenkmk8s${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmk8s${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmk8ss${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmk8s${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C 
pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
+                   $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmk8s${BUILD_NUMBER}
                 '''
             }
         }
diff --git a/extras/jenkins/Minikube/Jenkinsfile b/extras/jenkins/Minikube/Jenkinsfile
index c4016967..30fdb95d 100644
--- a/extras/jenkins/Minikube/Jenkinsfile
+++ b/extras/jenkins/Minikube/Jenkinsfile
@@ -1,9 +1,9 @@
 pipeline {
     agent {
         /*
-         * Nodes that are configured for Microk8s are tagged as "mk8s". Unlike the deployment to cloud providers, this logic
-         * will install Microk8s on the Jenkins Agent. This means that the agent should have sufficient resources available
-         * to run Microk8s. A minimum of 16GB RAM, 2 vCPU, and 20GB of disk is recommended. Testing is done with 20GB of RAM,
+         * Nodes that are configured for minikube are tagged as "minikube". Unlike the deployment to cloud providers, this logic
+         * will install minikube on the Jenkins Agent. This means that the agent should have sufficient resources available
+         * to run minikube. A minimum of 16GB RAM, 2 vCPU, and 20GB of disk is recommended. Testing is done with 20GB of RAM,
          * 4 vCPU, and 64GB of disk.
          *
          * This has been
         * tested on Ubuntu 20.04. Be sure to check that your Agent has the necessary components installed
@@ -15,18 +15,16 @@ pipeline {
     }

     /*
-     * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the
-     * open source IC.
-     *
-     * The POSTRUN_CMD is used to execute an arbitrary command following the cleanup process; this is just a work-around
-     * for the time being and will be addressed in the future.
-     */
+     * The JWT for using NGINX Plus is passed in via a variable; if the JWT is not found the process will deploy the
+     * open source IC.
+     */

     environment {
         NGINX_JWT = credentials('NGINX_JWT')
-        POSTRUN_CMD = credentials('POSTRUN_CMD')
         NO_COLOR = "TRUE"
         PULUMI_ACCESS_TOKEN = credentials('PULUMI_ACCESS_TOKEN')
+        MARA_PASSWORD = credentials('MARA_PASSWORD')
+
     }

     stages {
@@ -95,7 +93,7 @@ pipeline {

         stage('Minikube Setup') {

             /*
-             * This step installs Microk8s. This assumes you have the snap store installed and configured properly. Note that
+             * This step installs minikube. This assumes you have the snap store installed and configured properly. Note that
              * the snap store will always pull the latest version of the software so you may end up with a deployment that
              * does not work as expected; if this happens please check back with the github repository and verify the known
              * working configurations.
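The comment above notes that an unpinned install can drift between runs. For agents where reproducibility matters more than freshness, here is a minimal sketch of a pinned download as an alternative to the `latest` URL used below; the version shown is an arbitrary example, not one these pipelines are known to have been tested with:

```bash
# Sketch: pin the minikube release instead of pulling "latest" so repeated
# agent builds stay reproducible. v1.26.1 is an assumed example version.
MINIKUBE_VERSION="v1.26.1"
curl -Lo minikube "https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64"
chmod +x minikube
sudo install minikube /usr/local/bin/
```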
@@ -107,7 +105,7 @@ pipeline { curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube mkdir -p /usr/local/bin/ install minikube /usr/local/bin/ - minikube start --vm-driver=none + minikube start --vm-driver=docker --force --cpus 4 --memory 30000 --kubernetes-version=v1.23.9 ''' } } @@ -159,18 +157,23 @@ _EOF_ sh ''' echo "PULUMI_STACK=marajenkmkube${BUILD_NUMBER}" > $WORKSPACE/config/pulumi/environment $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/config - $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/kubernetes/applications/sirius + $WORKSPACE/pulumi/python/venv/bin/pulumi stack select --create marajenkmkube${BUILD_NUMBER} -C pulumi/python/kubernetes/secrets $WORKSPACE/pulumi/python/venv/bin/pulumi config set certmgr:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkubes${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logagent:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set logstore:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "password" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} - $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:helm_timeout "600" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kic-helm:fqdn "marajenkmkube${BUILD_NUMBER}.zathras.io" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:cluster_name "microk8s-cluster" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:infra_type "kubeconfig" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set kubernetes:kubeconfig "$HOME/.kube/config" -C pulumi/python/config -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set prometheus:adminpass "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:accounts_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_pwd "password" --secret -C pulumi/python/kubernetes/secrets -s 
marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:demo_login_user "testuser" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER} + $WORKSPACE/pulumi/python/venv/bin/pulumi config set sirius:ledger_pwd "${MARA_PASSWORD}" --secret -C pulumi/python/kubernetes/secrets -s marajenkmkube${BUILD_NUMBER} + ''' } } @@ -179,7 +182,7 @@ _EOF_ /* * This step echoes the JWT into the correct file for the startup to find it and then calls the script to build - * the MARA deployment in Microk8s + * the MARA deployment in minikube */ steps { @@ -194,7 +197,7 @@ _EOF_ /* * Clean up the environment; this includes running the destroy script to remove our pulumi resources and - * destroy the deployed Microk8s installation. + * destroy the deployed minikube installation. * * After that completes, we remove the pulumi stack from the project with the find command; this is because * we need to delete the stack in each project it's been instantiated in. @@ -203,17 +206,8 @@ _EOF_ steps { sh ''' $WORKSPACE/bin/destroy.sh - # Reset our Microk8s Environment; true if it’s not there - microk8s reset --destroy-storage || true - # True if it’s not there… - snap remove microk8s || true + minikube delete || true find . -mindepth 2 -maxdepth 6 -type f -name Pulumi.yaml -execdir $WORKSPACE/pulumi/python/venv/bin/pulumi stack rm marajenkmkube${BUILD_NUMBER} --force --yes \\; - # This is a hack to allow additional commands to be issued following cleanup. This is needed because the VMs - # that currently run as agents for K3S and Microk8s deployments need to be rebooted following some number of - # runs due to zombie processes and other issues. Long term we want to deploy these VM's via IaaC so the only - # exist for the lifetime of the project. We do it this way in order to provide some flexibility for the - # jenkins configuration. - ${POSTRUN_CMD- true} ''' } diff --git a/extras/jenkins/README.md b/extras/jenkins/README.md index 4915455c..0fd04f2e 100644 --- a/extras/jenkins/README.md +++ b/extras/jenkins/README.md @@ -1,28 +1,35 @@ -## Directory +# Directory `/extras/jenkins` ## Purpose -This directory contains several subdirectories, each of which contains a -[Jenkinsfile](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/). These are designed to be used by the -[Jenkins](https://www.jenkins.io/) CI system to run builds of the MARA project. These can be used as-is from the -repository using the ability of Jenkins to pull its pipeline configuration from SCM, as described in -[this article](https://www.jenkins.io/doc/book/pipeline/getting-started/#defining-a-pipeline-in-scm ) +This directory contains several subdirectories, each of which contains a +[Jenkinsfile](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/). These are +designed to be used by the [Jenkins](https://www.jenkins.io/) CI system to run +deployments of the MARA project. These can be used as-is from the repository +using the ability of Jenkins to pull its pipeline configuration from SCM, as +described in +[this article](https://www.jenkins.io/doc/book/pipeline/getting-started/#defining-a-pipeline-in-scm) -Please note that these should be considered to be in a "draft" status, and should be reviewed and modified if you plan -on using them. As always, pull requests, issues, and comments are welcome. +Please note that these should be considered to be in a "draft" status, and +should be reviewed and modified if you plan on using them. As always, pull +requests, issues, and comments are welcome. 
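Each of these pipelines follows the same basic shape: create a per-build Pulumi stack in both the `config` and `kubernetes/secrets` projects, then store passwords with `pulumi config set --secret` so they are encrypted in the stack configuration rather than kept as plaintext. A minimal sketch of that shared pattern, assuming `BUILD_NUMBER` and `MARA_PASSWORD` are supplied by the Jenkins environment (the latter via a credential binding):

```bash
# Sketch of the per-build stack and secret pattern shared by these
# Jenkinsfiles; BUILD_NUMBER and MARA_PASSWORD are assumed to come from
# the Jenkins environment.
STACK="marajenkaws${BUILD_NUMBER}"
PULUMI="$WORKSPACE/pulumi/python/venv/bin/pulumi"

# The stack must be created in every Pulumi project that takes part in the run.
"$PULUMI" stack select --create "$STACK" -C pulumi/python/config
"$PULUMI" stack select --create "$STACK" -C pulumi/python/kubernetes/secrets

# --secret stores the value encrypted in the stack configuration.
"$PULUMI" config set prometheus:adminpass "$MARA_PASSWORD" --secret \
    -C pulumi/python/kubernetes/secrets -s "$STACK"
```

This keeps application and infrastructure passwords out of plaintext stack files, which is why each provider's pipeline below gained a `kubernetes/secrets` stack.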
## Key Files -- [`AWS`](./AWS) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) to deploy to AWS. Please see the - file for additional information regarding the configuration. -- [`DigitalOcean`](./DigitalOcean) This directory contains the [`Jenkinsfile`](./DigitalOcean/Jenkinsfile) to deploy to - Digital Ocean. Please see the file for additional information regarding the configuration. -- [`K3S`](./K3S) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) to deploy to K3S. Please see the - file for additional information regarding the configuration. -- [`MicroK8s`](./MicroK8s) This directory contains the [`Jenkinsfile`](./AWS/MicroK8s) to deploy to MicroK8s. Please see - the file for additional information regarding the configuration. +- [`AWS`](./AWS) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) + to deploy to AWS. Please see the file for additional information regarding the + configuration. +- [`DigitalOcean`](./DigitalOcean) This directory contains the + [`Jenkinsfile`](./DigitalOcean/Jenkinsfile) to deploy to Digital Ocean. Please + see the file for additional information regarding the configuration. +- [`K3S`](./K3S) This directory contains the [`Jenkinsfile`](./AWS/Jenkinsfile) + to deploy to K3S. Please see the file for additional information regarding the + configuration. +- [`MicroK8s`](./MicroK8s) This directory contains the + [`Jenkinsfile`](./AWS/MicroK8s) to deploy to MicroK8s. Please see the file for + additional information regarding the configuration. ## Notes diff --git a/pulumi/python/Pipfile b/pulumi/python/Pipfile index de3b95c8..482308e4 100644 --- a/pulumi/python/Pipfile +++ b/pulumi/python/Pipfile @@ -4,15 +4,15 @@ verify_ssl = true name = "pypi" [packages] -awscli = "~=1.22.101" +awscli = "~=1.25.35" grpcio = "==1.43.0" fart = "~=0.1.5" lolcat = "~=1.4" passlib = "~=1.7.4" -pulumi-aws = ">=4.37.5" +pulumi-aws = ">=4.39.0" pulumi-docker = "==3.1.0" -pulumi-eks = "==0.39.0" -pulumi-kubernetes = "==3.19.1" +pulumi-eks = ">=0.41.2" +pulumi-kubernetes = "==3.20.1" pycryptodome = "~=3.14.0" requests = "~=2.27.1" setuptools-git-versioning = "==1.9.2" @@ -20,8 +20,9 @@ yamlreader = "==3.0.4" pulumi-digitalocean = "==4.12.0" pulumi-linode = "==3.7.1" linode-cli = "~=5.17.2" -pulumi = "~=3.32.0" +pulumi = "~=3.36.0" PyYAML = "~=5.4.1" +nodeenv = "~=1.6.0" [dev-packages] wheel = "~=0.37.1" diff --git a/pulumi/python/Pipfile.lock b/pulumi/python/Pipfile.lock index ab6af085..f5f99156 100644 --- a/pulumi/python/Pipfile.lock +++ b/pulumi/python/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "26ad2e064332a5855c06569e11375440a111043957d9186d9098a3e1a0122ae4" + "sha256": "177455c15d31187879995d736c67fd353a973ace31fd79906ad499f668a09900" }, "pipfile-spec": 6, "requires": { @@ -33,27 +33,27 @@ }, "awscli": { "hashes": [ - "sha256:3a7d9260ecb44e677f04640fd9959fb4310189e39ef0a42fbb652888843890a3", - "sha256:54772140fa9fe72c36f1214cd8f2a210af420940983d8f663f5cdf4b103b7e58" + "sha256:1b3adbc9cfb9aad7d0f6abc4cb0a5b95eb640afb77486885d3c4ff0cbc28f494", + "sha256:8883c357165a1e1866636c19a264876e9a3938af4f25425d587255698162535f" ], "index": "pypi", - "version": "==1.22.101" + "version": "==1.25.35" }, "botocore": { "hashes": [ - "sha256:663d8f02b98641846eb959c54c840cc33264d5f2dee5b8fc09ee8adbef0f8dcf", - "sha256:89a203bba3c8f2299287e48a9e112e2dbe478cf67eaac26716f0e7f176446146" + "sha256:9949d61959476b5a34408881bdb98f54b0642238ffb217c5260124ec58fb0c72", + "sha256:d2e708dd766b21c8e20a57ce1a90e98d324f871f81215efbc2dddaa42d13c551" ], - 
"markers": "python_version >= '3.6'", - "version": "==1.24.46" + "markers": "python_version >= '3.7'", + "version": "==1.27.35" }, "certifi": { "hashes": [ - "sha256:9c5705e395cd70084351dd8ad5c41e65655e08ce46f2ec9cf6c2c08390f71eb7", - "sha256:f1d53542ee8cbedbe2118b5686372fb33c297fcd6379b050cca0ef13a597382a" + "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d", + "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412" ], "markers": "python_version >= '3.6'", - "version": "==2022.5.18.1" + "version": "==2022.6.15" }, "charset-normalizer": { "hashes": [ @@ -65,11 +65,11 @@ }, "colorama": { "hashes": [ - "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff", - "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1" + "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", + "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.4.3" + "version": "==0.4.4" }, "dill": { "hashes": [ @@ -81,12 +81,11 @@ }, "docutils": { "hashes": [ - "sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0", - "sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827", - "sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99" + "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af", + "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.15.2" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==0.16" }, "fart": { "hashes": [ @@ -156,11 +155,11 @@ }, "jmespath": { "hashes": [ - "sha256:a490e280edd1f57d6de88636992d05b71e97d69a26a19f058ecf7d304474bf5e", - "sha256:e8dcd576ed616f14ec02eed0005c85973b5890083313860136657e24784e4c04" + "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", + "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe" ], "markers": "python_version >= '3.7'", - "version": "==1.0.0" + "version": "==1.0.1" }, "linode-cli": { "hashes": [ @@ -177,6 +176,14 @@ "index": "pypi", "version": "==1.4" }, + "nodeenv": { + "hashes": [ + "sha256:3ef13ff90291ba2a4a7a4ff9a979b63ffdd00a464dbe04acf0ea6471517a4c2b", + "sha256:621e6b7076565ddcacd2db0294c0381e01fd28945ab36bcf00f41c5daf63bef7" + ], + "index": "pypi", + "version": "==1.6.0" + }, "packaging": { "hashes": [ "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb", @@ -203,47 +210,37 @@ }, "protobuf": { "hashes": [ - "sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf", - "sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f", - "sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f", - "sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7", - "sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996", - "sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067", - "sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c", - "sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7", - "sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9", - "sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c", - 
"sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739", - "sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91", - "sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c", - "sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153", - "sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9", - "sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388", - "sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e", - "sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab", - "sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde", - "sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531", - "sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8", - "sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7", - "sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20", - "sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3" + "sha256:174bc835cc639c82164bbce4e28e2af5aa7821285d7fde3162afbe5e226a5a73", + "sha256:382c01e2ce14dcc3b4d25b8839f2139cc09c8a4006ad678579dc4080f6be1b29", + "sha256:5330df7650785c7ffdd1199c04933668d5e2dfefb62250e2b03ec1c1d20e7c2e", + "sha256:64fd63629f8952d58a41150b242f1c1c30c5062c9b0de8e420c6d3b360ec5d89", + "sha256:75aaa6d76a76a6f41f02645f6ebd255d738e9bb14c4d9d8269c676e65d0e0c7c", + "sha256:7dfc160de830b96b2c92c10d8f60e582e92252701bf6640ae75dfdecf6fdeb7a", + "sha256:8a2b4976872b71ea56cd3d55d320751d36a53f10220cc6075517806076cf4829", + "sha256:9130759e719bee1e6d05ca6a3037f7eff66d7a7ff6ba25871917dc40e8f3fbb6", + "sha256:9f510e743462899b1e296ac19bbaf4212d3106cdc51260ecde59ee6063f743f9", + "sha256:cebfd1fb899180c0523955d5cae0e764210961b12dfc39fd96af8fc81fe71ac7", + "sha256:d367e7385cd808ad33b580155bf9694881dd711c4271fe7b6f4e5270a01980b7", + "sha256:dec4cb439e25058518e2cd469c5eb0f4e634b113eb0b1343b55ba9303ab1ad38", + "sha256:df5a126706bd1d5072a6a0f6895c633ede67ea6cd679b4268eecce6b438bbe69", + "sha256:fa22e2413f6fd98ec1b388686aadef5420ea8205e37b35cad825adea7c019625" ], "markers": "python_version >= '3.7'", - "version": "==3.20.1" + "version": "==4.21.3" }, "pulumi": { "hashes": [ - "sha256:570654c82f8dbf8584447218db4de537bc417aa181ab8f888fe523b2b5f6bc7a" + "sha256:86acb1e0921619d49123d1a4ce43bfa7dc2dae9723266e21c24a11632f3231d9" ], "index": "pypi", - "version": "==3.32.1" + "version": "==3.36.0" }, "pulumi-aws": { "hashes": [ - "sha256:06f63aaa3bc36f9ef6a563fe397d8a13883757aca7f2d4cd433fbc0835bd08aa" + "sha256:e82655bd961447167e1bb2839032e93ba73c37cf2f048ed2447de67dc73e9fd5" ], "index": "pypi", - "version": "==5.4.0" + "version": "==5.10.0" }, "pulumi-digitalocean": { "hashes": [ @@ -261,17 +258,17 @@ }, "pulumi-eks": { "hashes": [ - "sha256:9ec4a19976b76a4f141e9b469be7ea65940ac546cec192e3c96435d3038532a0" + "sha256:d8f7dafa71eaaab4d8f115691c80fe63df5ac5df07df643c3977f2dc1e9b0cf4" ], "index": "pypi", - "version": "==0.39.0" + "version": "==0.41.2" }, "pulumi-kubernetes": { "hashes": [ - "sha256:c1c6b0c75716fa421282b85cf1a4fbc93a1b895558c580ad91479bcc353445b9" + "sha256:4fe4fcc19be7f3834e06e2baecafaa2bc3fcd7d3af192d7d7d67986c6699096a" ], "index": "pypi", - "version": "==3.19.1" + "version": "==3.20.1" }, "pulumi-linode": { "hashes": [ @@ -409,11 +406,11 @@ }, "s3transfer": { "hashes": [ - "sha256:7a6f4c4d1fdb9a2b640244008e142cbc2cd3ae34b386584ef044dd0f27101971", - "sha256:95c58c194ce657a5f4fb0b9e60a84968c808888aed628cd98ab8771fe1db98ed" + 
"sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd", + "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947" ], - "markers": "python_version >= '3.6'", - "version": "==0.5.2" + "markers": "python_version >= '3.7'", + "version": "==0.6.0" }, "semver": { "hashes": [ @@ -425,11 +422,11 @@ }, "setuptools": { "hashes": [ - "sha256:68e45d17c9281ba25dc0104eadd2647172b3472d9e01f911efa57965e8d51a36", - "sha256:a43bdedf853c670e5fed28e5623403bad2f73cf02f9a2774e91def6bda8265a7" + "sha256:0d33c374d41c7863419fc8f6c10bfe25b7b498aa34164d135c622e52580c6b16", + "sha256:c04b44a57a6265fe34a4a444e965884716d34bae963119a76353434d6f18e450" ], "markers": "python_version >= '3.7'", - "version": "==62.3.2" + "version": "==63.2.0" }, "setuptools-git-versioning": { "hashes": [ @@ -465,11 +462,11 @@ }, "urllib3": { "hashes": [ - "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14", - "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e" + "sha256:8298d6d56d39be0e3bc13c1c97d133f9b45d797169a0e11cdd0e0489d786f7ec", + "sha256:879ba4d1e89654d9769ce13121e0f94310ea32e8d2f8cf587b77c08bbcdb30d6" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==1.26.9" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_version < '4'", + "version": "==1.26.10" }, "yamlreader": { "hashes": [ diff --git a/pulumi/python/README.md b/pulumi/python/README.md index 25dcc548..089fb5a2 100644 --- a/pulumi/python/README.md +++ b/pulumi/python/README.md @@ -1,13 +1,12 @@ # MARA: Pulumi / Python -This project illustrates the end-to-end stand up of an AWS VPC cluster, Elastic -Kubernetes Service (EKS), NGINX Kubernetes Ingress Controller (KIC), and a sample -application using [Pulumi](https://www.pulumi.com/). It is intended to be used -as a reference when building your own Infrastructure as Code (IaC) deployments. -As such, each discrete stage of deployment is defined as a separate Pulumi project -that can be deployed independently of each stage. Although Pulumi supports many -programming languages, Python was chosen as the language for this project. -The reimplementation of the deployment here should be easily reproducible +This project illustrates the end-to-end stand up of the MARA project using +[Pulumi](https://www.pulumi.com/). It is intended to be used as a reference +when building your own Infrastructure as Code (IaC) deployments. As such, each +discrete stage of deployment is defined as a separate Pulumi project that can be +deployed independently of each stage. Although Pulumi supports many +programming languages, Python was chosen as the language for this project. +The reimplementation of the deployment here should be easily reproducible in other languages. ## Getting Started @@ -19,11 +18,11 @@ For instructions on running the project, refer to the ### Top Level -Several directories, located at the root of the project, are used. These are -at the project root because they are intended to be outside the specific +Several directories, located at the root of the project, are used. These are +at the project root because they are intended to be outside the specific IaC providers (e.g., to be used for a port to Terraform). -``` +```console ├── bin ├── config │ └── pulumi @@ -32,86 +31,84 @@ IaC providers (e.g., to be used for a port to Terraform). 
└── extras ``` -- The [`bin`](../../bin) directory contains all the binaries and scripts that - are used to start/stop the project, as well as perform capabilities testing - and deployment of extra functionality. -- The [`config`](../../config) directory holds the `requirements.txt` for the - venv needed for this project. -- The [`config/pulumi`](../../config/pulumi) directory holds the configuration - files for deployments, as well as a reference configuration that illustrates +* The [`bin`](../../bin) directory contains all the binaries and scripts that + are used to start/stop the project and provide additional capabilities. +* The [`config/pulumi`](../../config/pulumi) directory holds the configuration + files for deployments, as well as a reference configuration that illustrates the available configuration options and their defaults. -- The [`docker`](../../docker) directory contains Dockerfiles and a script to - build a Docker-based deployment image that contains all the tooling necessary +* The [`docker`](../../docker) directory contains Dockerfiles and a script to + build a Docker-based deployment image that contains all the tooling necessary to deploy MARA. -- The [`docs`](../../docs) directory contains all documentation relevant to the +* The [`docs`](../../docs) directory contains all documentation relevant to the overall project. -- The [`extras`](../../extras) directory contains additional scripts, notes, +* The [`extras`](../../extras) directory contains additional scripts, notes, and configurations. ### Pulumi/Python Level -This directory contains all Pulumi/Python-based logic, which currently +This directory contains all Pulumi/Python-based logic, which currently consists of the following: -``` +```console +├── automation +│   └── providers ├── config ├── infrastructure -│ ├── aws -│ ├── digitalocean -│ └── kubeconfig +│   ├── aws +│   ├── digitalocean +│   ├── kubeconfig +│   └── linode ├── kubernetes -│ ├── applications -│ ├── certmgr -│ ├── logagent -│ ├── logstore -│ ├── nginx -│ ├── observability -│ ├── prometheus -│ └── venv +│   ├── applications +│   ├── certmgr +│   ├── logagent +│   ├── logstore +│   ├── nginx +│   ├── observability +│   ├── prometheus +│   └── secrets ├── tools -│ ├── common -│ ├── kubevip -│ ├── metallb -│ └── nfsvolumes -├── utility -│ ├── kic-image-build -│ ├── kic-image-push -│ └── kic-pulumi-utils -└── venv - ├── bin - ├── include - ├── lib - ├── lib64 -> lib - ├── share - └── src +│   ├── common +│   ├── metallb +│   └── nfsvolumes +└── utility + ├── kic-image-build + ├── kic-image-push + └── kic-pulumi-utils ``` -- The [`config`](./config) directory contains files used by Pulumi to manage - the configuration for this project. Note that this directory is essentially +* The [`automation`](./automation) directory contains the files used to + interface with the pulumi automation api, including provider-specific files. +* The [`config`](./config) directory contains files used by Pulumi to manage + the configuration for this project. Note that this directory is essentially a redirect to the project-wide [`config`](../../config/pulumi) directory. -- The [`infrastructure`](./infrastructure) directory contains files used to stand - up Kubernetes as well as to provide a common project for all of the infrastructure - and kubeconfig-based clusters. 
-- The [`kubernetes`](./kubernetes) directory contains all of the Kubernetes-based +* The [`infrastructure`](./infrastructure) directory contains files used to + stand up Kubernetes as well as to provide a common project for the + infrastructure and kubeconfig-based clusters. +* The [`kubernetes`](./kubernetes) directory contains the Kubernetes-based deployments. There are two key subdirectories in this directory: - - The [`nginx`](./kubernetes/nginx) directory contains all NGINX products. - - The [`applications`](./kubernetes/applications) directory contains all applications - that have been tested for deployment with MARA. -- The [`tools`](./tools) directory contains projects that are used with the - `kubernetes-extras.sh` script found in the bin directory. -- The [`utility`](./utility) directory contains the code used to build/pull/push KIC, - and other projects used to support the environment. -- The [`venv/bin`](./venv/bin) directory contains the virtual environment for Python - along with some key utilities, such as `pulumi`, `kubectl`, and `node`. + * The [`nginx`](./kubernetes/nginx) directory contains all NGINX products. + * The [`secrets`](./kubernetes/secrets) directory contains all encrypted + secrets. + * The [`applications`](./kubernetes/applications) directory contains all + applications that have been tested for deployment with MARA. +* The [`tools`](./tools) directory contains extra tooling for specific use + cases. +* The [`utility`](./utility) directory contains the code used to + build/pull/push KIC, and other projects used to support the environment. +* The [`venv/bin`](./venv/bin) directory contains the virtual environment for + Python along with some key utilities, such as `pulumi`, `kubectl`, and `node` + . ## Configuration -The Pulumi configuration files are in the [`config`](../../config/pulumi) -directory. Pulumi's configuration files use the following naming convention: -`Pulumi..yaml`. To create a new configuration file for your Pulumi -stack, create a new file with a name that includes the stack name. Then, refer -to the sample [configuration file](../../config/pulumi/Pulumi.stackname.yaml.example) -for configuration entries that you want to customize and copy over the entries +The Pulumi configuration files are in the [`config`](../../config/pulumi) +directory. Pulumi's configuration files use the following naming convention: +`Pulumi..yaml`. To create a new configuration file for your Pulumi +stack, create a new file with a name that includes the stack name. Then, refer +to the sample +[configuration file](../../config/pulumi/Pulumi.stackname.yaml.example) +for configuration entries that you want to customize and copy over the entries that you want to modify from their defaults. ### AWS @@ -120,23 +117,23 @@ The following directories are specific to AWS. #### VPC -Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first -Pulumi project which is responsible for setting up the VPC and subnets used by EKS. -The project is built so that it will attempt to create a subnet for each availability -zone within the running region. You may want to customize this behavior, or the IP -addressing scheme used. +Contained within the [`vpc`](./infrastructure/aws/vpc) directory is the first +Pulumi project which is responsible for setting up the VPC and subnets used by +EKS. The project is built so that it will attempt to create a subnet for each +availability zone within the running region. 
You may want to customize this
+behavior or the IP addressing scheme used.

 #### Elastic Kubernetes Service (EKS)

-Located within the [`eks`](./infrastructure/aws/eks) directory is a project used 
-to stand up a new EKS cluster on AWS. This project reads data from the previously 
-executed VPC project using its VPC id and subnets. In this project you may want to 
-customize the `instance_type`, `min_size`, or `max_size` parameters provided 
-to the cluster. 
+Located within the [`eks`](./infrastructure/aws/eks) directory is a project
+used to stand up a new EKS cluster on AWS. This project reads data from the
+previously executed VPC project using its VPC id and subnets. In this project
+you may want to customize the `instance_type`, `min_size`, or `max_size`
+parameters provided to the cluster.

 #### Elastic Container Registry (ECR)

-The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and 
+The [`ecr`](./infrastructure/aws/ecr) project is responsible for installing and
 configuring ECR for use with the previously created EKS cluster.

 ### Digital Ocean

@@ -145,74 +142,103 @@ The following directories are specific to Digital Ocean.

 #### DOMK8S

-Contained within the [`domk8s`](./infrastructure/digitalocean/domk8s) directory contains the 
-logic needed to stand up a Digital Ocean Managed Kubernetes cluster. There are a number of 
-configuration options available to customize the build, however the defaults can be used 
-to create a standard sized cluster in the SFO3 region. 
+The [`domk8s`](./infrastructure/digitalocean/domk8s) directory contains the
+logic needed to stand up a Digital Ocean Managed Kubernetes cluster. There are
+a number of configuration options available to customize the build; however,
+the defaults can be used to create a standard-sized cluster in the SFO3 region.
+
+#### container-registry / container-registry-credentials
+
+These directories contain the projects required to create and use the Digital
+Ocean container registry.
+
+#### dns-record
+
+This directory contains the project required to provision a DNS record for the
+Digital Ocean egress.
+
+### Linode
+
+The following directories are specific to Linode.
+
+#### LKE
+
+The [`lke`](./infrastructure/linode/lke) directory contains the logic needed
+to stand up a Linode Kubernetes Engine cluster. There are a number of
+configuration options available to customize the build.
+
+#### harbor / harbor-configuration / container-registry-credentials
+
+These directories contain the projects required to create and use the Harbor
+container registry with the Linode deployment.

 ### NGINX Ingress Controller Docker Image Build

-Within the [`kic-image-build`](./utility/kic-image-build) directory, there is 
-a Pulumi project that will allow you to build a new KIC from source. Download 
-of source, compilation, and image creation are fully automated. This project 
+Within the [`kic-image-build`](./utility/kic-image-build) directory, there is
+a Pulumi project that will allow you to build a new KIC from source. Download
+of source, compilation, and image creation are fully automated. This project
 can be customized to build different flavors of KIC.

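+As a sketch of the piecemeal workflow (assuming the virtual environment set up
+by `setup_venv.sh` is active and a stack has already been configured), this
+project can also be stood up on its own with `pulumi up`:
+
+```console
+$ cd pulumi/python/utility/kic-image-build
+$ pulumi up
+```
+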
### NGINX Ingress Controller Docker Image Push

-Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a 
-Pulumi project that will allow you to push the previously created KIC Docker 
+Within the [`kic-image-push`](./utility/kic-image-push) directory, there is a
+Pulumi project that will allow you to push the previously created KIC Docker
 image to ECR in a fully automated manner.

 ### NGINX Ingress Controller Helm Chart

-In the [`ingress-contoller`](./kubernetes/nginx/ingress-controller) directory, you 
-will find the Pulumi project responsible for installing NGINX KIC. You may want to 
-customize this project to allow for deploying different versions of KIC. This chart 
-is only used for AWS deployments. All other deployments use the [`ingress-controller- 
-repo-only`](./kubernetes/nginx/ingress-controller-repo-only) directory, which at this 
-time **only allows the use of deployments from the NGINX repo - either NGINX IC or 
-NGINX Plus IC (with a JWT)**. 
-
-A sample config-map is provided in the Pulumi deployment code. This code will adjust 
-the logging format to approximate the upstream NGINX KIC project which will allow for 
-easier ingestion into log storage and processing systems. 
-
-**Note**: This deployment uses the GA Ingress APIs. This has been tested with helm 
-chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and helm charts 
-can be used, but care should be taken to ensure that the helm chart version used is 
-compatible with the KIC version. This information can be found in the [NGINX KIC Release 
-Notes](https://docs.nginx.com/nginx-ingress-controller/releases/) for each release. 
+In the [`ingress-controller`](./kubernetes/nginx/ingress-controller) directory,
+you will find the Pulumi project responsible for installing NGINX KIC. You may
+want to customize this project to allow for deploying different versions of
+KIC. This chart is only used for AWS deployments. All other deployments use the
+[`ingress-controller-repo-only`](./kubernetes/nginx/ingress-controller-repo-only)
+directory, which at this time **only allows the use of deployments from the
+NGINX repo - either NGINX IC or NGINX Plus IC (with a JWT)**.
+
+A sample config-map is provided in the Pulumi deployment code. This code will
+adjust the logging format to approximate the upstream NGINX KIC project, which
+will allow for easier ingestion into log storage and processing systems.
+
+**Note**: This deployment uses the GA Ingress APIs. This has been tested with
+helm chart version 0.11.1 and NGINX KIC 2.0.2. Older versions of the KIC and
+helm charts can be used, but care should be taken to ensure that the helm chart
+version used is compatible with the KIC version. This information can be found
+in the
+[NGINX KIC Release Notes](https://docs.nginx.com/nginx-ingress-controller/releases/)
+for each release.

 #### Ingress API Versions and NGINX KIC

-Starting with Kubernetes version 1.22, support for the Ingress Beta API 
-`networking.k8s.io/v1beta` will be dropped, requiring use of the GA Ingress API 
-`networking.k8s.io/v1`. However, Kubernetes versions 1.19 through 1.21 allows 
-these two API versions to coexist and maintains compatibility for consumers of 
-the API – meaning, the API will respond correctly to calls to either the `v1beta` 
-and/or `v1` routes. 
+Starting with Kubernetes version 1.22, support for the Ingress Beta API
+`networking.k8s.io/v1beta` will be dropped, requiring use of the GA Ingress API
+`networking.k8s.io/v1`. 
However, Kubernetes versions 1.19 through 1.21 allow
+these two API versions to coexist and maintain compatibility for consumers of
+the API – meaning, the API will respond correctly to calls to either the
+`v1beta` or `v1` routes.

-This project uses the NGINX KIC v2.x releases which includes full support 
+This project uses the NGINX KIC v2.x releases, which include full support
 for the GA APIs.

 ### Log Store

-In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi 
+In the [`logstore`](./kubernetes/logstore) directory, you will find the Pulumi
 project responsible for installing your log store. The current solution deploys
 [Elasticsearch and Kibana](https://www.elastic.co/elastic-stack)
 using the [Bitnami Elasticsearch](https://bitnami.com/stack/elasticsearch/helm)
-chart. This solution can be swapped for other options as desired. This application 
-is deployed to the `logstore` namespace. There are several configuration options 
-available in the configuration file for the project in order to better tailor this 
-deployment to the size of the cluster being used. 
+chart. This solution can be swapped for other options as desired. This
+application is deployed to the `logstore` namespace. There are several
+configuration options available in the project's configuration file to better
+tailor this deployment to the size of the cluster being used.

 #### Notes

-To access the Kibana dashboard via your web browser, you will need to set up port 
-forwarding for the kibana pod. This can be accomplished using the `kubectl` command: 
+To access the Kibana dashboard via your web browser, you will need to set up
+port forwarding for the Kibana pod. This can be accomplished using the
+`kubectl` command:

-``` 
+```console
 $ # Find the Kibana pod name
 $ kubectl get pods -n logstore
 NAME                                            READY   STATUS    RESTARTS   AGE
@@ -234,114 +260,119 @@ Handling connection for 5601

 ### Log Agent

-In the [`logagent`](./logagent) directory, you will find the Pulumi project 
-responsible for installing your log agent. The current solution deploys 
-[`Filebeat`](https://www.elastic.co/beats/), which connects to the logstore 
-deployed in the previous step. This solution can be swapped for other options 
+In the [`logagent`](./logagent) directory, you will find the Pulumi project
+responsible for installing your log agent. The current solution deploys
+[`Filebeat`](https://www.elastic.co/beats/), which connects to the logstore
+deployed in the previous step. This solution can be swapped for other options
 as desired. This application is deployed to the `logagent` namespace.

 ### Certificate Management

-TLS is enabled via [cert-manager](https://cert-manager.io/), which is installed 
-in the cert-manager namespace. Creation of ClusterIssuer or Issuer resources is 
-delegated to the individual applications and is not done as part of this deployment. 
+TLS is enabled via [cert-manager](https://cert-manager.io/), which is installed
+in the cert-manager namespace. Creation of ClusterIssuer or Issuer resources is
+delegated to the individual applications and is not done as part of this
+deployment.

 ### Prometheus

-Prometheus is deployed and configured to enable the collection of metrics for 
-all components that have a defined service monitor. At installation time, the 
+Prometheus is deployed and configured to enable the collection of metrics for
+all components that have a defined service monitor. 
At installation time, the
+deployment will instantiate:

-- Node Exporters
-- Kubernetes Service Monitors
-- Grafana preloaded with dashboards and datasources for Kubernetes management
-- The NGINX Ingress Controller
-- Statsd receiver
+* Node Exporters
+* Kubernetes Service Monitors
+* Grafana preloaded with dashboards and datasources for Kubernetes management
+* The NGINX Ingress Controller
+* Statsd receiver

-The former behavior of using the `prometheus.io:scrape: true` property set in 
-annotations indicating pods (where metrics should be scraped) has been deprecated, 
-and these annotations will be removed in the near future. 
+The former behavior, where the `prometheus.io/scrape: true` annotation on a
+pod indicated that its metrics should be scraped, has been deprecated, and
+these annotations will be removed in the near future.

-Also, the standalone Grafana deployment has been removed from the standard deployment 
-scripts, as it is installed as part of this project. 
+Also, the standalone Grafana deployment has been removed from the standard
+deployment scripts, as it is installed as part of this project.

-Finally, this namespace will hold service monitors created by other projects. For 
-example, the Bank of Sirius deployment currently deploys a service monitor for each 
-of the postgres monitors that are deployed. 
+Finally, this namespace will hold service monitors created by other projects.
+For example, the Bank of Sirius deployment currently deploys a service monitor
+for each of the Postgres exporters that are deployed.

 **Notes**:

-1. The KIC needs to be configured to expose Prometheus metrics. This is currently 
-   done by default. 
-2. The default address binding of the `kube-proxy` component is set to `127.0.0.1` 
-   and therefore will cause errors when the canned Prometheus scrape configurations 
-   are run. The fix is to set this address to `0.0.0.0`. An example manifest has been 
-   provided in [prometheus/extras](./kubernetes/prometheus/extras) that can be applied 
-   against your installation with `kubectl apply -f ./filename`. Please only apply this 
-   change once you have verified that it will work with your version of Kubernetes. 
-3. The _grafana_ namespace has been maintained in the configuration file to be used by 
-   the Prometheus operator-deployed version of Grafana. This version only accepts a 
-   password – you can still specify a username for the admin account but it will 
-   be silently ignored. This will be changed in the future. 
+1. The KIC needs to be configured to expose Prometheus metrics. This is
+   currently done by default.
+2. The default address binding of the `kube-proxy` component is set to
+   `127.0.0.1` and therefore will cause errors when the canned Prometheus
+   scrape configurations are run. The fix is to set this address to `0.0.0.0`.
+   An example manifest has been provided in
+   [prometheus/extras](./kubernetes/prometheus/extras) that can be applied
+   against your installation with `kubectl apply -f ./filename`.
+   Please only apply this change once you have verified that it will work with
+   your version of Kubernetes.
+3. The _grafana_ namespace has been maintained in the configuration file to be
+   used by the Prometheus operator-deployed version of Grafana. This version
+   only accepts a password – you can still specify a username for the admin
+   account but it will be silently ignored. This will be changed in the future.

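+As an illustration of note 2, the manifest can be applied once you have
+verified it against your Kubernetes version (the manifest name below is a
+placeholder - use the actual file found in the
+[prometheus/extras](./kubernetes/prometheus/extras) directory):
+
+```console
+$ kubectl apply -f ./kubernetes/prometheus/extras/<manifest>.yaml
+```
+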
### Observability

-We deploy the [OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector) 
-along with a simple collector. There are several other configurations in the 
-[observability/otel-objects](./kubernetes/observability/otel-objects) directory. 
-See the [README.md](./kubernetes/observability/otel-objects/README.md) file 
-in the [observability/otel-objects](./kubernetes/observability/otel-objects) for more information, 
-including an explanation of the default configuration. 
+We deploy the
+[OTEL Collector Operator](https://github.com/open-telemetry/opentelemetry-collector)
+along with a simple collector. There are several other configurations in the
+[observability/otel-objects](./kubernetes/observability/otel-objects)
+directory.
+See the [README.md](./kubernetes/observability/otel-objects/README.md) file
+in the [observability/otel-objects](./kubernetes/observability/otel-objects)
+for more information, including an explanation of the default configuration.

 ### Demo Application

 A forked version of the Google
 [_Bank of Anthos_](https://github.com/GoogleCloudPlatform/bank-of-anthos)
-application is contained in the [`sirius`](./kubernetes/applications/sirius) directory. 
-The github repository for this for is at 
+application is contained in the [`sirius`](./kubernetes/applications/sirius)
+directory. The GitHub repository for this fork is at
 [_Bank of Sirius_](https://github.com/nginxinc/bank-of-sirius).

-Normally, the `frontend` microservice is exposed via a load balancer 
-for traffic management. This deployment has been modified to use the NGINX 
-or NGINX Plus KIC to manage traffic to the `frontend` microservice. The NGINX 
-or NGINX Plus KIC is integrated into the cluster logging system, and the user 
+Normally, the `frontend` microservice is exposed via a load balancer
+for traffic management. This deployment has been modified to use the NGINX
+or NGINX Plus KIC to manage traffic to the `frontend` microservice. The NGINX
+or NGINX Plus KIC is integrated into the cluster logging system, and the user
 can configure the KIC as desired.

-An additional change to the application is the conversion of several of the 
-standard Kubernetes deployment manifests into Pulumi code. This has been done 
+An additional change to the application is the conversion of several of the
+standard Kubernetes deployment manifests into Pulumi code. This has been done
 for the configuration maps, the ingress controller, and the JWT RSA signing key
-pair. This allows the user to take advantage Pulumi's feature set, by demonstrating 
-the process of creating and deploying an RSA key pair at deployment time and using 
-the project configuration file to set config variables, including secrets. 
+pair. This allows the user to take advantage of Pulumi's feature set by
+demonstrating the process of creating and deploying an RSA key pair at
+deployment time and using the project configuration file to set config
+variables, including secrets.

 As part of the Bank of Sirius deployment, we deploy a cluster-wide
-[self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer 
-using the cert-manager deployed above. This is then used by the ingress object 
-created to enable TLS access to the application. Note that this issuer can be 
-changed out by the user, for example to use the 
-[ACME](https://cert-manager.io/docs/configuration/acme/) issuer. 
-The use of the ACME issuer has been tested and works without issues, provided 
-the FQDN meets the length requirements. 
As of this writing, the AWS ELB hostname 
-is too long to work with the ACME server. Additional work in this area will be 
-undertaken to provide dynamic DNS record creation as part of this process so 
-legitimate certificates can be issued. 
-
-To provide visibility into the Postgres databases that are running as part 
-of the application, the Prometheus Postgres data exporter will be deployed 
+[self-signed](https://cert-manager.io/docs/configuration/selfsigned/) issuer
+using the cert-manager deployed above. This is then used by the ingress object
+created to enable TLS access to the application. Note that this issuer can be
+changed out by the user, for example to use the
+[ACME](https://cert-manager.io/docs/configuration/acme/) issuer.
+The use of the ACME issuer has been tested and works without issues, provided
+the FQDN meets the length requirements. As of this writing, the AWS ELB
+hostname is too long to work with the ACME server. Additional work in this area
+will be undertaken to provide dynamic DNS record creation as part of this
+process so legitimate certificates can be issued.
+
+To provide visibility into the Postgres databases that are running as part
+of the application, the Prometheus Postgres data exporter will be deployed
 into the same namespace as the application and will be configured to be scraped
 by the Prometheus server installed earlier.

-**Note**: Due to the way that Pulumi currently handles secrets, 
-the [sirius](./kubernetes/applications/sirius) directory contains its own 
-configuration directory [sirius/config](./kubernetes/applications/sirius/config). 
-This directory contains an example configuration file that can be copied over 
-and used. The user will be prompted to add passwords to the configuration file 
-at the first run of the [start.sh](../../bin/start_all.sh) script. This is a 
-workaround that will be retired as Pulumi provides better tools 
-for hierarchical configuration files. 
+**Note**: Due to the way that Pulumi currently handles secrets,
+the [secrets](./kubernetes/secrets) directory contains its own
+configuration directory [secrets/config](./kubernetes/secrets/config).
+This directory contains an example configuration file that can be copied over
+and used. The user will be prompted to add passwords to the configuration file
+at the first run of the startup process.

 ## Simple Load Testing

 To help enable simple load testing, a script has been provided that uses the
-`kubectl` command to port-forward monitoring and management connections 
-to the local workstation. This command is [`test-foward.sh`](../../bin/test-forward.sh). 
+`kubectl` command to port-forward monitoring and management connections
+to the local workstation. This command is
+[`test-forward.sh`](../../bin/test-forward.sh).
diff --git a/pulumi/python/automation/DESIGN.md b/pulumi/python/automation/DESIGN.md
new file mode 100644
index 00000000..07ef65c5
--- /dev/null
+++ b/pulumi/python/automation/DESIGN.md
@@ -0,0 +1,231 @@
+# MARA Runner Design
+
+## Problem
+
+When creating an infrastructure-as-code deployment in Pulumi, it is common to
+have infrastructure that depends on the presence of other infrastructure. If
+there are only a few layers of dependencies, it is manageable. However, once
+you pass three layers of dependencies, it becomes quite difficult to manage the
+complexity of your deployment. This also results in deployment plans that are
+almost incomprehensible.
+
+This is the problem that was faced when using Pulumi to build MARA. 
Multiple
+infrastructure services must be instantiated in order to get a working
+Kubernetes environment. Moreover, once the Kubernetes cluster is present, it
+needs additional components that have a web of dependencies. For example, if we
+use AWS, a full deployment looks something like the following:
+
+```console
+ ┌── infrastructure/aws
+ │   ├── vpc [VPC]
+ │   ├── eks [EKS]
+ │   ├── ecr [ECR]
+ ├── infrastructure
+ │   └── kubeconfig [Kubeconfig]
+ ├── kubernetes
+ │   └── secrets [Secrets]
+ ├── utility
+ │   ├── kic-image-build [KIC Image Build]
+ │   ├── kic-image-push [KIC Image Push]
+ ├── kubernetes/nginx
+ │   ├── ingress-controller-namespace [K8S Ingress NS]
+ │   ├── ingress-controller [Ingress Controller]
+ ├── kubernetes
+ │   ├── logstore [Logstore]
+ │   ├── logagent [Log Agent]
+ │   ├── certmgr [Cert Manager]
+ │   ├── prometheus [Prometheus]
+ │   ├── observability [Observability]
+ └── kubernetes/applications
+     └── application
+```
+
+EKS cannot be instantiated until the VPC is configured. The Ingress Controller
+cannot be pushed until a container registry is available. The application
+cannot be started until log management, certificate management, and
+observability services have been instantiated. A non-trivial Kubernetes
+deployment is truly a web of dependencies!
+
+The above example shows the dependencies for a single infrastructure provider
+(AWS) that is hosting a Kubernetes environment and a container registry.
+However, if the infrastructure provider is changed, then the content and order
+of dependencies also changes. As such, this introduces a conditional element
+that needs to be managed.
+
+## Solution
+
+The approach taken in MARA to mitigate the Pulumi dependency problem is to
+break apart Pulumi deployments (projects) into bite-sized pieces that each do
+one thing. Pulumi projects pass state to each other by executing sequentially
+and using
+[stack references](https://www.pulumi.com/learn/building-with-pulumi/stack-references/).
+
+Initially, sequential execution was implemented through a bash script that
+would run `pulumi up` across a series of directories in a set order. Each
+directory was a Pulumi project. If a given project had dependent state on
+another project, it would use a stack reference to pull state out of the
+dependent project that was previously executed. When additional infrastructure
+providers were added, they were supported by different bash scripts that were
+conditionally called.
+
+This approach has proven to be unmanageable as it lacks flexibility and
+configurability and makes adding new infrastructure providers difficult.
+For example, if the content and/or ordering of infrastructure deployed to
+Kubernetes needs to change based on the infrastructure provider, then this is
+difficult or impossible with the bash script approach. Moreover, if you want to
+read configuration and change what or how things are deployed, this also becomes
+difficult using just bash scripting. Lastly, due to differences in execution
+environments such as Linux and macOS, it is difficult to write portable bash
+scripts.
+
+When Pulumi released the
+[Automation API](https://www.pulumi.com/docs/guides/automation-api/),
+it presented an opportunity to resolve the shortcomings mentioned above. Using
+the Automation API, the MARA Runner was created to provide a framework for
+gluing together multiple Pulumi Projects such that they can all be deployed as
+one single unit of execution while still allowing for piecemeal
+deployment using `pulumi up`.
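+
+As a minimal sketch of the stack reference pattern described above (the
+project and output names here are hypothetical, not necessarily the exact ones
+used by MARA), a downstream Pulumi project pulls state from an upstream project
+like this:
+
+```python
+import pulumi
+
+# Reference the stack of a previously executed project. The fully qualified
+# stack name ('<project>/<stack>') is illustrative and depends on the backend.
+vpc_ref = pulumi.StackReference(f'vpc/{pulumi.get_stack()}')
+
+# Outputs exported by the upstream project become inputs to this project.
+vpc_id = vpc_ref.get_output('vpc_id')
+```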
+
+The MARA Runner is a CLI program written in Python that provides the following:
+
+* The selection of an infrastructure provider
+* Configuration using configuration files that control all Pulumi projects
+* Propagation of Pulumi operations such as up, refresh, and destroy across all
+  projects
+* Visualization of which Pulumi projects will be executed for a given
+  infrastructure provider
+
+## Terms
+
+The following terms are used repeatedly in the MARA runner. For clarity, they
+are defined below.
+
+### Pulumi Project
+
+A Pulumi [Project](https://www.pulumi.com/docs/intro/concepts/project/) is a
+folder/directory that contains a `Pulumi.yaml` file. It is a stand-alone single
+unit of execution. The execution of multiple projects is tied together by the
+MARA Runner.
+
+### Infrastructure Provider
+
+The term infrastructure provider (or provider for short), within the context of
+the MARA Runner, refers to what will host a Kubernetes environment
+and a container registry. Infrastructure providers are implemented as a
+subclass of the [Provider](providers/base_provider.py) class. They contain a
+collection of references to the directories of Pulumi projects which are
+categorized as either "infrastructure" or "kubernetes". The categorization of
+"infrastructure" means that a project is a requirement for having a working
+Kubernetes cluster and container registry.
+
+### Execution
+
+Execution refers to the running of a Pulumi project by doing `pulumi up`.
+
+### Environment Configuration
+
+By default, the environment configuration file is located at:
+`<project root>/config/pulumi/environment`.
+It is used to define the environment variables needed when executing a Pulumi
+project. When executing Pulumi projects, the system environment is used AND the
+values from the environment configuration are appended/overwritten over the
+system environment. The file format is a simple key-value mapping where each
+line contains a single `<key>=<value>` pair (for example,
+`PULUMI_STACK=mystack`).
+
+### Stack Configuration
+
+The stack configuration is a Pulumi native configuration file that is specific
+to a single Pulumi [Stack](https://www.pulumi.com/docs/intro/concepts/stack/).
+The stack configuration is located by default at
+`<project root>/config/pulumi/Pulumi.<stack name>.yaml`.
+
+## Design
+
+Below is a rough outline of the major components of the Runner and their order
+of execution.
+
+```console
+Validate         Prompt User for             Prompt User for
+Configuration───►Provider Configuration────►Secrets │
+                                                    │
+┌─────────────────────────────────────────────────────────┘
+▼
+Provider         Provider     Infrastructure
+Selection ──────►Execution───►Project
+                              Execution───────────────────────┐
+                 │                                            │
+                 └─►Infrastructure Project(s)...              │
+                                                              │
+┌─────────────────────────────────────────────────────────────┘
+▼
+Write Secrets    Kubernetes
+to Kubernetes───►Project
+                 Execution
+                 │
+                 └─►Kubernetes Project(s)...
+```
+
+### Assumptions
+
+There are some assumptions for how Pulumi is used by the Runner that differ
+from what is possible using Pulumi directly. 
+
+* All Pulumi projects use the same name for their stack
+* All Pulumi projects use the same stack configuration file (except the
+  [secrets](../kubernetes/secrets) project)
+* All secrets are stored encrypted in the [secrets](../kubernetes/secrets)
+  project and loaded into Kubernetes as secrets
+* Infrastructure providers cannot be changed on a stack after the first run,
+  and as such a new stack will need to be made when using multiple
+  infrastructure providers
+* Stack references are used to pass state between Pulumi projects
+* The configuration key `kubernetes:infra_type` contains the name of the
+  infrastructure provider as used in the Runner
+* If there is any error running a Pulumi project, the Runner will exit, and it
+  is up to the user to try again or fix the issue
+* The order of execution may change between different infrastructure providers
+* All required external programs are installed
+* The Runner is invoked from a virtual environment as set up by the
+  [setup_venv.sh](../../../bin/setup_venv.sh) script
+* After a Kubernetes cluster is stood up, the relevant configuration files are
+  added to the system such that it can be managed with the `kubectl` tool
+
+### Configuration
+
+The initial phase of the Runner's execution reads, parses, and validates the
+environment and stack configuration files. If the stack configuration is
+missing or empty, it is assumed that it is the first time starting up the
+environment and the user is prompted for required configuration parameters.
+
+After configuration validation, the user is prompted to input any required
+secrets that are not currently persisted. These secrets are encrypted using
+Pulumi's local secret handling and stored in ciphertext in the
+[secrets](../kubernetes/secrets) project.
+
+### Provider
+
+After configuration has completed, a provider is selected based on the options
+specified by the user when invoking the Runner. This provider is used as the
+source of data for what Pulumi projects are executed and in what order. When
+standing up an environment, the provider first executes the Pulumi projects
+that are categorized as "infrastructure". Infrastructure in this context means
+that these projects are required to have been executed successfully
+in order to have a working Kubernetes cluster and container registry.
+
+A Pulumi project reference within a provider may optionally have an
+`on_success` event registered, which is run when the project executes
+successfully. Typically, these events do things like add configuration for a
+cluster to the kubectl configuration directory.
+
+After the infrastructure projects have completed executing, the Runner then
+executes the [secrets](../kubernetes/secrets) project, which stores the locally
+encrypted secrets as
+[Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)
+on the newly created Kubernetes cluster.
+
+Once the required secrets are in place, the Runner then executes all the
+projects categorized as "kubernetes", including the final application to be
+deployed.
+
+At this point, the application should be deployed.
diff --git a/pulumi/python/automation/colorize.py b/pulumi/python/automation/colorize.py
new file mode 100644
index 00000000..bd3e78a0
--- /dev/null
+++ b/pulumi/python/automation/colorize.py
@@ -0,0 +1,62 @@
+"""
+This file provides two functions println_nocolor and println_color - println_color will be redirected to
+println_nocolor if the execution environment does not support color output. 
If the environment does support
+color output, then the string specified for println_color will be rendered in rainbow colors using the lolcat
+library.
+"""
+
+import collections
+import os
+import random
+import sys
+import typing
+from importlib.machinery import SourceFileLoader
+
+
+def println_nocolor(text: str, output: typing.TextIO = sys.stdout):
+    """Prints a new line to the console without using color
+    :param text: text to print
+    :param output: output destination
+    """
+    print(text, file=output)
+
+
+if os.environ.get('NO_COLOR'):
+    PRINTLN_FUNC = println_nocolor
+else:
+    lolcat_fields = ['animate', 'duration', 'force', 'freq', 'mode', 'speed', 'spread', 'os']
+    LolCatOptions = collections.namedtuple('LolCatOptions', lolcat_fields)
+
+    # Initialize the module reference up front so the check below cannot raise
+    # a NameError when no virtual environment (or no lolcat executable) exists.
+    lolcat = None
+
+    # Unfortunately, we do the below hack to load the lolcat code because it was not written
+    # such that it could be easily consumable as a library, for it was a stand-alone executable.
+    if os.environ.get('VIRTUAL_ENV'):
+        venv = os.environ.get('VIRTUAL_ENV')
+        lolcat_path = os.path.sep.join([venv, 'bin', 'lolcat'])
+        if os.path.exists(lolcat_path):
+            loader = SourceFileLoader('lolcat', lolcat_path)
+            lolcat = loader.load_module()
+
+    if lolcat:
+        options = LolCatOptions(animate=False,
+                                duration=12,
+                                freq=0.1,
+                                os=random.randint(0, 256),
+                                mode=lolcat.detect_mode(),
+                                speed=-1.0,
+                                spread=0.5,
+                                force=False)
+
+        def println_color(text: str, output: typing.TextIO = sys.stdout):
+            """Prints a new line to the console using rainbow colors
+            :param text: text to print
+            :param output: output destination
+            """
+            colorizer = lolcat.LolCat(mode=options.mode, output=output)
+            colorizer.println_plain(text, options)
+            output.write('\x1b[0m')
+            output.flush()
+
+        PRINTLN_FUNC = println_color
+    else:
+        PRINTLN_FUNC = println_nocolor
+
diff --git a/pulumi/python/automation/env_config_parser.py b/pulumi/python/automation/env_config_parser.py
new file mode 100644
index 00000000..73817197
--- /dev/null
+++ b/pulumi/python/automation/env_config_parser.py
@@ -0,0 +1,76 @@
+"""
+This file defines a data structure containing the environment variables that have been written to a file
+(`config/pulumi/environment`). The values stored there are used to specify the environment when executing
+operations using the Pulumi Automation API. 
+"""
+
+import os
+from typing import Optional, Mapping
+from configparser import ConfigParser
+
+import stack_config_parser
+
+# Directory in which script is located
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+# Default path to the MARA environment file
+DEFAULT_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi', 'environment']))
+
+# Default environment variables set for all Pulumi executions invoked by the Automation API
+DEFAULT_ENV_VARS = {
+    'PULUMI_SKIP_UPDATE_CHECK': 'true'
+}
+
+
+class EnvConfig(dict):
+    """Object containing environment variables used when executing operations with the Pulumi Automation API"""
+
+    _stack_config: Optional[stack_config_parser.PulumiStackConfig] = None
+    config_path: Optional[str] = None
+
+    def __init__(self,
+                 env_vars: Mapping[str, str],
+                 file_vars: Mapping[str, str],
+                 stack_config: Optional[stack_config_parser.PulumiStackConfig] = None,
+                 config_path: Optional[str] = None) -> None:
+        super().__init__()
+        self.update(DEFAULT_ENV_VARS)
+        self.update(env_vars)
+        self.update(file_vars)
+        self._stack_config = stack_config
+        self.config_path = config_path
+
+    def stack_name(self) -> Optional[str]:
+        """Returns the stack name used in the environment, or None if it is not set"""
+        return self.get('PULUMI_STACK')
+
+    def no_color(self) -> bool:
+        """Returns True when console color output is disabled (the NO_COLOR variable is set)"""
+        return self.get('NO_COLOR') is not None
+
+    def pulumi_color_settings(self) -> str:
+        """Returns a string indicating if console colors should be auto-detected or just disabled"""
+        if self.no_color():
+            return 'never'
+        else:
+            return 'auto'
+
+
+def read(config_file_path: str = DEFAULT_PATH) -> EnvConfig:
+    """Reads the contents of the specified file path into a new instance of `EnvConfig`.
+    :param config_file_path: path to environment variable file
+    :return: new instance of EnvConfig
+    """
+    config_parser = ConfigParser()
+    config_parser.optionxform = lambda option: option
+
+    with open(config_file_path, 'r') as f:
+        # The Python configparser library is used to parse the file because it supports the KEY=VALUE syntax of the
+        # environment file. However, there is one exception; it requires the presence of a [main] section using the
+        # ini format style. In order to avoid having to add a "[main]" string to the environment file, we spoof the
+        # presence of that section with this line below. It just prepends the string "[main]" before the contents of
+        # the environment file.
+        content = f'[main]{os.linesep}{f.read()}'
+
+        config_parser.read_string(content)
+
+    return EnvConfig(env_vars=os.environ, file_vars=config_parser['main'], config_path=config_file_path)
diff --git a/pulumi/python/automation/headers.py b/pulumi/python/automation/headers.py
new file mode 100644
index 00000000..12339f70
--- /dev/null
+++ b/pulumi/python/automation/headers.py
@@ -0,0 +1,30 @@
+"""
+This file defines the functions needed to render headers that are displayed before each Pulumi project is executed.
+These headers provide a useful visual distinction between each step taken to set up an environment. 
+"""
+import logging
+
+import colorize
+import env_config_parser
+from fart import fart
+
+LOG = logging.getLogger('runner')
+FART_FONT = fart.load_font('standard')
+banner_type = 'fabulous'
+
+
+def render_header(text: str, env_config: env_config_parser.EnvConfig):
+    """Renders the given text to a header displayed in the console - this header could be large ascii art
+    :param text: header text to render
+    :param env_config: reference to environment configuration
+    """
+    global banner_type
+
+    if banner_type == 'fabulous':
+        header = fart.render_fart(text=text, font=FART_FONT)
+        if not env_config.no_color():
+            colorize.PRINTLN_FUNC(header)
+    elif banner_type == 'log':
+        LOG.info('[%s] started', text)
+    else:
+        print(f'* {text}')
diff --git a/pulumi/python/automation/main.py b/pulumi/python/automation/main.py
new file mode 100755
index 00000000..f76c441d
--- /dev/null
+++ b/pulumi/python/automation/main.py
@@ -0,0 +1,562 @@
+#!/usr/bin/env python3
+
+"""
+This file is the entrypoint for the Modern Application Reference Architecture (MARA) Runner.
+
+This Python script ties together all of the different Pulumi projects needed to set up a
+Kubernetes environment on a given infrastructure provider (like AWS), configures it,
+installs required services in the Kubernetes environment, and deploys an application to
+Kubernetes.
+
+The runner functions as a simple CLI application that can be run just like any other program
+as long as the virtual environment for it (python-venv) is set up. This environment can be
+set up using the bin/setup_venv.sh script.
+"""
+
+import getopt
+import importlib
+import importlib.util
+import logging
+import os
+import shutil
+import sys
+import typing
+
+import yaml
+
+import env_config_parser
+import headers
+from typing import List, Optional
+from getpass import getpass
+
+from providers.base_provider import Provider, InvalidConfigurationException
+from providers.pulumi_project import PulumiProject, PulumiProjectEventParams
+from pulumi import automation as auto
+from typing import Any, Hashable, Dict, Union
+
+import stack_config_parser
+
+# Directory in which script is located
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+# Root directory of the MARA project
+PROJECT_ROOT = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..']))
+# Allowed operations - if operation is not in this list, the runner will reject it
+OPERATIONS: List[str] = ['down', 'destroy', 'refresh',
+                         'show-execution', 'up', 'validate', 'list-providers']
+# List of available infrastructure providers - if provider is not in this list, the runner will reject it
+PROVIDERS: typing.Iterable[str] = Provider.list_providers()
+# Types of headings available to show the difference between Pulumi projects
+# fabulous: a large rainbow covered banner
+# boring: a single line of text uncolored
+# log: writes the header to the same logger as Pulumi output
+BANNER_TYPES: List[str] = ['fabulous', 'boring', 'log']
+# Logger instance
+PULUMI_LOG = logging.getLogger('pulumi')
+RUNNER_LOG = logging.getLogger('runner')
+
+# We default to a fabulous banner of course
+banner_type = BANNER_TYPES[0]
+# Debug flag that will trigger additional output
+debug_on = False
+
+# Use the script name as invoked rather than hard coding it
+script_name = os.path.basename(sys.argv[0])
+
+
+def provider_instance(provider_name: str) -> Provider:
+    """Dynamically instantiates an infrastructure provider
+    :param provider_name: name of infrastructure provider
+    :return: instance of infrastructure provider
+    """
+    module = importlib.import_module(name=f'providers.{provider_name}')
+    return module.INSTANCE
+
+
+def usage():
+    usage_text = f"""Modern Application Reference Architecture (MARA) Runner
+
+USAGE:
+    {script_name} [FLAGS] [OPERATION]
+
+FLAGS:
+    -d, --debug        Enable debug output on all of the commands executed
+    -b, --banner-type= Banner type to indicate which project is being executed (e.g. {', '.join(BANNER_TYPES)})
+    -h, --help         Prints help information
+    -s, --stack=       Specifies the Pulumi stack to use
+    -p, --provider=    Specifies the provider used (e.g. {', '.join(PROVIDERS)})
+
+OPERATIONS:
+    down/destroy       Destroys all provisioned infrastructure
+    list-providers     Lists all of the supported providers
+    refresh            Refreshes the Pulumi state of all provisioned infrastructure
+    show-execution     Displays the execution order of the Pulumi projects used to provision
+    up                 Provisions all configured infrastructure
+    validate           Validates that the environment and configuration are correct
+"""
+    print(usage_text, file=sys.stdout)
+
+
+def write_env(env_config, stack_name):
+    """Create a new environment file and write our stack to it"""
+    # env_config here is the FileNotFoundError raised on the first read attempt;
+    # its filename attribute holds the expected path of the environment file.
+    try:
+        with open(env_config.filename, 'w') as f:
+            # Note that we are printing to a file here, not STDOUT
+            print("PULUMI_STACK=" + stack_name, file=f)
+        msg = 'Environment configuration file not found. Creating new file at the path: %s'
+        RUNNER_LOG.info(msg, env_config.filename)
+    except (FileNotFoundError, PermissionError):
+        RUNNER_LOG.error("Unable to build configuration file")
+        sys.exit(2)
+
+
+def append_env(env_config, stack_name):
+    """Append our stack to the existing environment file"""
+    try:
+        with open(env_config.config_path, 'a') as f:
+            # Note that we are printing to a file here, not STDOUT
+            print("PULUMI_STACK=" + stack_name, file=f)
+        RUNNER_LOG.info('Environment configuration file does not contain PULUMI_STACK, adding')
+    except (FileNotFoundError, PermissionError):
+        RUNNER_LOG.error("Unable to append to configuration file")
+        sys.exit(2)
+
+
+def main():
+    """Entrypoint to application"""
+
+    try:
+        shortopts = 'hds:p:b:'  # single character options available
+        longopts = ['help', 'debug', 'banner-type=',
+                    'stack=', 'provider=']  # long form options
+        opts, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
+    except getopt.GetoptError as err:
+        RUNNER_LOG.error(err)
+        usage()
+        sys.exit(2)
+
+    provider_name: Optional[str] = None
+    stack_name: Optional[str] = None
+
+    global debug_on
+
+    # First, we parse the flags given to the CLI runner
+    for opt, value in opts:
+        if opt in ('-h', '--help'):
+            usage()
+            sys.exit(0)
+        elif opt in ('-p', '--provider'):
+            if value.lower() != 'none':
+                provider_name = value.lower()
+        elif opt in ('-s', '--stack'):
+            if value.lower() != 'none':
+                stack_name = value.lower()
+        elif opt in ('-d', '--debug'):
+            debug_on = True
+        elif opt in ('-b', '--banner-type'):
+            if value in BANNER_TYPES:
+                headers.banner_type = value
+
+    # Next, we validate to make sure the input to the runner was correct
+
+    # Make sure we got an operation - it is the last string passed as an argument
+    if len(args) == 1:
+        operation = args[0]
+    elif len(args) > 1:
+        RUNNER_LOG.error('Only one operation per invocation allowed')
+        usage()
+        sys.exit(2)
+    else:
+        RUNNER_LOG.error('No operation specified')
+        usage()
+        sys.exit(2)
+
+    if operation not in OPERATIONS:
+        RUNNER_LOG.error('Unknown operation specified: %s', operation)
+        usage()
+        sys.exit(2)
+
+    # Start processing operations; first we process those that do not depend on providers
+    if operation == 'list-providers':
+        for provider in PROVIDERS:
+            print(provider, file=sys.stdout)
+        sys.exit(0)
+
+    # Now validate providers because everything underneath here depends on them
+    if not provider_name or provider_name.strip() == '':
+        RUNNER_LOG.error(
+            'No provider specified - provider is a required argument')
+        sys.exit(2)
+    if provider_name not in PROVIDERS:
+        RUNNER_LOG.error('Unknown provider specified: %s', provider_name)
+        sys.exit(2)
+
+    setup_loggers()
+
+    provider = provider_instance(provider_name.lower())
+    RUNNER_LOG.debug(
+        'Using [%s] infrastructure provider', provider.infra_type())
+
+    # Now validate the stack name
+    if not stack_name or stack_name.strip() == '':
+        RUNNER_LOG.error(
+            'No Pulumi stack specified - Pulumi stack is a required argument')
+        sys.exit(2)
+
+    # We execute the operation requested - different operations have different pre-requirements, so they are matched
+    # differently. For example, show-execution does not require reading the configuration files, so we just look for
+    # a match for it right away, and if matched, we run and exit.
+
+    if operation == 'show-execution':
+        provider.display_execution_order(output=sys.stdout)
+        sys.exit(0)
+
+    # We parse the environment file up front in order to have the necessary values required by this program.
+    # The logic around the PULUMI_STACK accounts for three scenarios:
+    #
+    # 1. If there is no environment file, the argument given on the CLI is used and added to the environment file.
+    # 2. If the CLI argument and the environment file disagree, the runner logs an error and exits.
+    # 3. If there is an environment file with no PULUMI_STACK, the environment file is appended with the argument.
+    try:
+        env_config = env_config_parser.read()
+    except FileNotFoundError as e:
+        # No file, we create one and then read it back in
+        write_env(e, stack_name)
+        env_config = env_config_parser.read()
+
+    if env_config.stack_name() is None:
+        # Found file, if there is no stack we append it
+        try:
+            env_config = env_config_parser.read()
+        except FileNotFoundError:
+            sys.exit(2)
+        append_env(env_config, stack_name)
+        env_config = env_config_parser.read()
+    elif env_config.stack_name() != stack_name:
+        # Found file, but stack name mismatch; bail out
+        msg = 'Stack "%s" given on CLI but Stack "%s" is in env file; exiting'
+        RUNNER_LOG.error(msg, stack_name, env_config.stack_name())
+        sys.exit(2)
+
+    stack_config = read_stack_config(provider=provider, env_config=env_config)
+
+    validate_with_verbosity = operation == 'validate' or debug_on
+    try:
+        validate(provider=provider, env_config=env_config, stack_config=stack_config,
+                 verbose=validate_with_verbosity)
+    except Exception as e:
+        RUNNER_LOG.error('Validation failed: %s', e)
+        sys.exit(3)
+
+    if operation == 'refresh':
+        pulumi_cmd = refresh
+    elif operation == 'up':
+        pulumi_cmd = up
+    elif operation == 'down' or operation == 'destroy':
+        pulumi_cmd = down
+    elif operation == 'validate':
+        init_secrets(env_config=env_config,
+                     pulumi_projects=provider.execution_order())
+        pulumi_cmd = None
+        # validate was already run above
+    else:
+        RUNNER_LOG.error('Unknown operation: %s', operation)
+        sys.exit(2)
+
+    # Lastly, if the operation involves the execution of a Pulumi command, we make sure that secrets have been
+    # instantiated before invoking Pulumi via the Automation API. This is required because certain Pulumi
+    # projects need to pull secrets in order to be stood up. 
+ if pulumi_cmd: + init_secrets(env_config=env_config, + pulumi_projects=provider.execution_order()) + try: + pulumi_cmd(provider=provider, env_config=env_config) + except Exception as e: + logging.error('Error running Pulumi operation [%s] with provider [%s] for stack [%s]', + operation, provider_name, env_config.stack_name()) + raise e + + +def setup_loggers(): + """Configures two loggers: 1) For the MARA Runner itself 2) For Pulumi output""" + global debug_on + + if debug_on: + level = logging.DEBUG + else: + level = logging.INFO + + # Pulumi output goes to STDOUT + PULUMI_LOG.setLevel(level=level) + pulumi_ch = logging.StreamHandler(stream=sys.stdout) + pulumi_ch.setLevel(level=level) + formatter = logging.Formatter('%(message)s') + pulumi_ch.setFormatter(formatter) + PULUMI_LOG.addHandler(pulumi_ch) + + # Runner output goes to STDERR + RUNNER_LOG.setLevel(level=level) + runner_ch = logging.StreamHandler(stream=sys.stderr) + runner_ch.setLevel(level=level) + formatter = logging.Formatter('%(message)s') + runner_ch.setFormatter(formatter) + RUNNER_LOG.addHandler(runner_ch) + + +def read_stack_config(provider: Provider, + env_config: env_config_parser.EnvConfig) -> stack_config_parser.PulumiStackConfig: + """Load and parse the Pulumi stack configuration file. In MARA, this is a globally shared file. + :param provider: reference to infrastructure provider + :param env_config: reference to environment configuration + :return: data structure containing stack configuration + """ + try: + stack_config = stack_config_parser.read( + stack_name=env_config.stack_name()) + RUNNER_LOG.debug('stack configuration file read') + except FileNotFoundError as e: + RUNNER_LOG.info( + 'stack configuration file [%s] does not exist', e.filename) + stack_config = prompt_for_stack_config( + provider, env_config, e.filename) + except stack_config_parser.EmptyConfigurationException as e: + RUNNER_LOG.info('stack configuration file [%s] is empty', e.filename) + stack_config = prompt_for_stack_config( + provider, env_config, e.filename) + + return stack_config + + +def prompt_for_stack_config(provider: Provider, + env_config: env_config_parser.EnvConfig, + filename: str) -> stack_config_parser.PulumiStackConfig: + """Prompts user via tty for required configuration values when the stack config is empty or missing. + :param provider: reference to infrastructure provider + :param env_config: reference to environment configuration + :param filename: location to write stack config file to + :return: data structure containing stack configuration + """ + RUNNER_LOG.info('creating new configuration based on user input') + + stack_defaults_path = os.path.sep.join([os.path.dirname(filename), + 'Pulumi.stackname.yaml.example']) + + stack_defaults: Union[Dict[Hashable, Any], list, None] + with open(stack_defaults_path, 'r') as f: + stack_defaults = yaml.safe_load(stream=f) + + stack_config_values = { + 'config': provider.new_stack_config(env_config=env_config, defaults=stack_defaults['config']) + } + with open(filename, 'w') as f: + yaml.safe_dump(data=stack_config_values, stream=f) + stack_config = stack_config_parser.read(stack_name=env_config.stack_name()) + return stack_config + + +def validate(provider: Provider, + env_config: env_config_parser.EnvConfig, + stack_config: Optional[stack_config_parser.PulumiStackConfig], + verbose: Optional[bool] = False): + """Validates that the runtime environment for MARA is correct. Will validate that external tools are present and + configurations are correct. 
If validation fails, an exception will be raised. + :param provider: reference to infrastructure provider + :param env_config: reference to environment configuration + :param stack_config: reference to stack configuration + :param verbose: flag to enable verbose output mode + """ + + # First, we validate that we have the right tools installed + def check_path(cmd: str, fail_message: str) -> bool: + cmd_path = shutil.which(cmd) + if cmd_path: + RUNNER_LOG.debug('[%s] found at path: %s', cmd, cmd_path) + return True + else: + RUNNER_LOG.error('[%s] is not installed - %s', cmd, fail_message) + return False + + success = True + + # Validate presence of required tools + if not check_path('make', 'it must be installed if you intend to build NGINX Ingress Controller from source'): + success = False + if not check_path('docker', 'it must be installed if you intend to build NGINX Ingress Controller from source'): + success = False + if not check_path('node', 'NodeJS is required to run required Pulumi modules, install in order to continue'): + success = False + + if not success: + sys.exit(3) + + # Next, we validate that the environment file has the required values + try: + provider.validate_env_config(env_config) + except InvalidConfigurationException as e: + if e.key == 'PULUMI_STACK': + msg = 'Environment file [%s] does not contain the required key PULUMI_STACK. This key specifies the ' \ + 'name of the Pulumi Stack (https://www.pulumi.com/docs/intro/concepts/stack/) that is used ' \ + 'globally across Pulumi projects in MARA.' + else: + msg = 'Environment file [%s] failed validation' + + RUNNER_LOG.error(msg, env_config.config_path) + raise e + if verbose: + RUNNER_LOG.debug( + 'environment file [%s] passed validation', env_config.config_path) + + if not stack_config: + RUNNER_LOG.debug('stack configuration is not available') + return False + + if 'kubernetes:infra_type' in stack_config['config']: + previous_provider = stack_config['config']['kubernetes:infra_type'] + if previous_provider.lower() != provider.infra_type().lower(): + RUNNER_LOG.error('Stack has already been used with the provider [%s], so it cannot ' + 'be run with the specified provider [%s]. Destroy all resources ' + 'and remove the kubernetes:infra_type key from the stack configuration.', + previous_provider, provider.infra_type()) + sys.exit(3) + + try: + provider.validate_stack_config(stack_config, env_config) + except Exception as e: + RUNNER_LOG.error( + 'Stack configuration file [%s] at path failed validation', stack_config.config_path) + raise e + if verbose: + RUNNER_LOG.debug( + 'Stack configuration file [%s] passed validation', stack_config.config_path) + + RUNNER_LOG.debug('All configuration is OK') + + +def init_secrets(env_config: env_config_parser.EnvConfig, + pulumi_projects: List[PulumiProject]): + """Goes through a list of Pulumi projects and prompts the user for secrets required by each project that have not + already been stored. Each secret is encrypted using Pulumi's secret management and stored in the stack configuration + for the Pulumi project kubernetes/secrets and *not* in the global stack configuration. When the secrets Pulumi + project is stood up, it adds the secrets that were encrypted in its stack configuration to the running Kubernetes + cluster as a Kubernetes Secret. This approach is taken because Pulumi does not support sharing secrets across + projects. 
+ :param env_config: reference to environment configuration + :param pulumi_projects: list of pulumi project to instantiate secrets for + """ + secrets_work_dir = os.path.sep.join( + [SCRIPT_DIR, '..', 'kubernetes', 'secrets']) + stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), + opts=auto.LocalWorkspaceOptions( + env_vars=env_config, + ), + project_name='secrets', + work_dir=secrets_work_dir) + + for project in pulumi_projects: + if not project.config_keys_with_secrets: + continue + for secret_config_key in project.config_keys_with_secrets: + if secret_config_key.key_name not in stack.get_all_config().keys(): + if secret_config_key.default: + prompt = f'{secret_config_key.prompt} [{secret_config_key.default}]: ' + else: + prompt = f'{secret_config_key.prompt}: ' + + value = getpass(prompt) + if secret_config_key.default and value.strip() == '': + value = secret_config_key.default + + config_value = auto.ConfigValue(secret=True, value=value) + stack.set_config(secret_config_key.key_name, + value=config_value) + + +def build_pulumi_stack(pulumi_project: PulumiProject, + env_config: env_config_parser.EnvConfig) -> auto.Stack: + """Uses the Pulumi Automation API to do a `pulumi stack init` for the given project. If the stack already exists, it + will select it as the stack to use. + :param pulumi_project: reference to Pulumi project + :param env_config: reference to environment configuration + :return: reference to a new or existing stack + """ + RUNNER_LOG.info('Project [%s] selected: %s', + pulumi_project.name(), pulumi_project.abspath()) + stack = auto.create_or_select_stack(stack_name=env_config.stack_name(), + opts=auto.LocalWorkspaceOptions( + env_vars=env_config, + ), + project_name=pulumi_project.name(), + work_dir=pulumi_project.abspath()) + return stack + + +def refresh(provider: Provider, + env_config: env_config_parser.EnvConfig): + """Execute `pulumi refresh` for the given project using the Pulumi Automation API. + :param provider: reference to infrastructure provider + :param env_config: reference to environment configuration + """ + for pulumi_project in provider.execution_order(): + headers.render_header( + text=pulumi_project.description, env_config=env_config) + stack = build_pulumi_stack(pulumi_project=pulumi_project, + env_config=env_config) + stack.refresh_config() + try: + stack.refresh(color=env_config.pulumi_color_settings(), + on_output=write_pulumi_output) + except auto.CommandError as e: + msg = str(e).strip() + if msg.endswith('no previous deployment'): + logging.warning("Cannot refresh project that has no previous deployment for stack [%s]", + env_config.stack_name()) + else: + raise e + + +def up(provider: Provider, + env_config: env_config_parser.EnvConfig): + """Execute `pulumi up` for the given project using the Pulumi Automation API. + :param provider: reference to infrastructure provider + :param env_config: reference to environment configuration + """ + for pulumi_project in provider.execution_order(): + headers.render_header( + text=pulumi_project.description, env_config=env_config) + stack = build_pulumi_stack(pulumi_project=pulumi_project, + env_config=env_config) + stack_up_result = stack.up(color=env_config.pulumi_color_settings(), + on_output=write_pulumi_output) + + # If the project is instantiated without problems, then the on_success event + # as specified in the provider is run. This event is often used to do additional + # configuration, clean up, or to run external tools after a project is stood up. 
+        if pulumi_project.on_success:
+            params = PulumiProjectEventParams(stack_outputs=stack_up_result.outputs,
+                                              config=stack.get_all_config(),
+                                              env_config=env_config)
+            pulumi_project.on_success(params)
+
+
+def down(provider: Provider,
+         env_config: env_config_parser.EnvConfig):
+    """Execute `pulumi destroy` for each project in the provider's execution order (in reverse) using the Pulumi
+    Automation API.
+    :param provider: reference to infrastructure provider
+    :param env_config: reference to environment configuration
+    """
+    for pulumi_project in reversed(provider.execution_order()):
+        headers.render_header(
+            text=pulumi_project.description, env_config=env_config)
+        stack = build_pulumi_stack(pulumi_project=pulumi_project,
+                                   env_config=env_config)
+        stack.destroy(color=env_config.pulumi_color_settings(),
+                      on_output=write_pulumi_output)
+
+
+def write_pulumi_output(text: str):
+    """Handles output from Pulumi invocations via the Automation API"""
+    PULUMI_LOG.info(text)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/pulumi/python/automation/providers/aws.py b/pulumi/python/automation/providers/aws.py
new file mode 100644
index 00000000..24087bf6
--- /dev/null
+++ b/pulumi/python/automation/providers/aws.py
@@ -0,0 +1,210 @@
+"""
+File containing the AWS infrastructure provider for the MARA runner.
+"""
+
+import json
+import logging
+import os
+import sys
+
+from kic_util import external_process
+from typing import List, Optional, Union, Hashable, Dict, Any, Mapping
+
+from .base_provider import PulumiProject, Provider, InvalidConfigurationException
+from .pulumi_project import PulumiProjectEventParams
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+RUNNER_LOG = logging.getLogger('runner')
+AUTH_ERR_MSG = '''Unable to authenticate to AWS with provided credentials. Are the settings in your ~/.aws/credentials
+correct? Error: %s
+'''
+
+
+class AwsProviderException(Exception):
+    pass
+
+
+class AwsCli:
+    """AWS CLI execution helper class"""
+    region: Optional[str]
+    profile: Optional[str]
+
+    def __init__(self, region: Optional[str] = None, profile: Optional[str] = None):
+        super().__init__()
+        self.region = region
+        self.profile = profile
+
+    def base_cmd(self) -> str:
+        """
+        :return: returns the base command and any required flags
+        """
+        cmd = 'aws '
+        if self.region and self.region != '':
+            cmd += f'--region {self.region} '
+        if self.profile and self.profile != '':
+            cmd += f'--profile {self.profile} '
+        return cmd.strip()
+
+    def update_kubeconfig_cmd(self, cluster_name: str) -> str:
+        """
+        Returns the command used to update the kubeconfig with the passed cluster name
+        :param cluster_name: name of the cluster to add to the kubeconfig
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} eks update-kubeconfig --name {cluster_name}'
+
+    def validate_credentials_cmd(self) -> str:
+        """
+        Returns the command used to verify that AWS has valid credentials
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} sts get-caller-identity'
+
+    def list_azs_cmd(self) -> str:
+        """
+        Returns the command that provides a list of the AWS availability zones that can be provisioned to by the
+        current user.
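+        The JSON output of this command is parsed by the provider when prompting the user to select zones.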
+        :return: command to be executed
+        """
+        return f"{self.base_cmd()} ec2 describe-availability-zones --filter " \
+               f"'Name=state,Values=available' --zone-ids"
+
+
+class AwsProvider(Provider):
+    """AWS infrastructure provider"""
+    def infra_type(self) -> str:
+        return 'AWS'
+
+    def infra_execution_order(self) -> List[PulumiProject]:
+        return [
+            PulumiProject(path='infrastructure/aws/vpc', description='VPC'),
+            PulumiProject(path='infrastructure/aws/eks', description='EKS',
+                          on_success=AwsProvider._update_kubeconfig),
+            PulumiProject(path='infrastructure/aws/ecr', description='ECR')
+        ]
+
+    def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> Union[
+            Dict[Hashable, Any], list, None]:
+        config = super().new_stack_config(env_config, defaults)
+
+        # AWS region
+        if 'AWS_DEFAULT_REGION' in env_config:
+            default_region = env_config['AWS_DEFAULT_REGION']
+        else:
+            default_region = defaults['aws:region']
+
+        aws_region = input(f'AWS region to use [{default_region}]: ').strip() or default_region
+        config['aws:region'] = aws_region
+        print(f"AWS region: {config['aws:region']}")
+
+        # AWS profile
+        if 'AWS_PROFILE' in env_config:
+            default_profile = env_config['AWS_PROFILE']
+        else:
+            default_profile = 'none'
+        aws_profile = input(
+            f'AWS profile to use [{default_profile}] (enter "none" for none): ').strip() or default_profile
+        print(f'AWS profile: {aws_profile}')
+
+        if aws_profile != 'none':
+            config['aws:profile'] = aws_profile
+
+        aws_cli = AwsCli(region=aws_region, profile=aws_profile)
+
+        _, err = external_process.run(cmd=aws_cli.validate_credentials_cmd(), suppress_error=True)
+        if err:
+            RUNNER_LOG.error(AUTH_ERR_MSG, err.lstrip())
+            sys.exit(3)
+
+        # AWS availability zones
+        az_data, _ = external_process.run(aws_cli.list_azs_cmd())
+        zones = []
+        for zone in json.loads(az_data)['AvailabilityZones']:
+            if zone['ZoneType'] == 'availability-zone':
+                zones.append(zone['ZoneName'])
+
+        def validate_selected_azs(selected: List[str]) -> bool:
+            for az in selected:
+                if az not in zones:
+                    print(f'[{az}] is not a known availability zone')
+                    return False
+            return True
+
+        selected_azs = []
+        while len(selected_azs) == 0 or not validate_selected_azs(selected_azs):
+            default_azs = ', '.join(zones)
+            azs = input(
+                f'AWS availability zones to use with VPC [{default_azs}] (separate with commas): ') or default_azs
+            selected_azs = [x.strip() for x in azs.split(',')]
+
+        config['vpc:azs'] = list(selected_azs)
+        print(f"AWS availability zones: {', '.join(config['vpc:azs'])}")
+
+        # EKS version
+        default_version = defaults['eks:k8s_version'] or '1.21'
+        config['eks:k8s_version'] = input(f'EKS Kubernetes version [{default_version}]: ').strip() or default_version
+        print(f"EKS Kubernetes version: {config['eks:k8s_version']}")
+
+        # EKS instance type
+        default_inst_type = defaults['eks:instance_type'] or 't2.large'
+        config['eks:instance_type'] = input(f'EKS instance type [{default_inst_type}]: ').strip() or default_inst_type
+        print(f"EKS instance type: {config['eks:instance_type']}")
+
+        # Minimum number of compute instances for cluster
+        default_min_size = defaults['eks:min_size'] or 3
+        while 'eks:min_size' not in config:
+            min_size = input('Minimum number of compute instances for EKS cluster '
+                             f'[{default_min_size}]: ').strip() or default_min_size
+            if type(min_size) == int or min_size.isdigit():
+                config['eks:min_size'] = int(min_size)
+        print(f"EKS minimum cluster size: {config['eks:min_size']}")
+
+        # Maximum number of compute instances for cluster
+        default_max_size = defaults['eks:max_size'] or 12
+        while 'eks:max_size' not in config:
+            max_size = input('Maximum number of compute instances for EKS cluster '
+                             f'[{default_max_size}]: ').strip() or default_max_size
+            if type(max_size) == int or max_size.isdigit():
+                config['eks:max_size'] = int(max_size)
+        print(f"EKS maximum cluster size: {config['eks:max_size']}")
+
+        # Desired capacity of compute instances
+        default_desired_capacity = config['eks:min_size']
+        while 'eks:desired_capacity' not in config:
+            desired_capacity = input('Desired number of compute instances for EKS cluster '
+                                     f'[{default_desired_capacity}]: ').strip() or default_desired_capacity
+            if type(desired_capacity) == int or desired_capacity.isdigit():
+                config['eks:desired_capacity'] = int(desired_capacity)
+        print(f"EKS desired cluster capacity: {config['eks:desired_capacity']}")
+
+        return config
+
+    def validate_stack_config(self,
+                              stack_config: Union[Dict[Hashable, Any], list, None],
+                              env_config: Mapping[str, str]):
+        super().validate_stack_config(stack_config=stack_config, env_config=env_config)
+        config = stack_config['config']
+
+        if 'aws:region' not in config:
+            raise InvalidConfigurationException('When using the AWS provider, the region [aws:region] '
+                                                'must be specified')
+
+        # Use .get() because the profile key is optional in the stack configuration
+        aws_cli = AwsCli(region=config['aws:region'], profile=config.get('aws:profile'))
+        _, err = external_process.run(cmd=aws_cli.validate_credentials_cmd(), suppress_error=True)
+        if err:
+            RUNNER_LOG.error(AUTH_ERR_MSG, err.lstrip())
+            sys.exit(3)
+
+    @staticmethod
+    def _update_kubeconfig(params: PulumiProjectEventParams):
+        if 'cluster_name' not in params.stack_outputs:
+            raise AwsProviderException('Cannot find key [cluster_name] in stack output')
+
+        # The profile may not be present in the stack configuration, so guard the lookup
+        profile = params.config.get('aws:profile')
+        aws_cli = AwsCli(region=params.config.get('aws:region').value,
+                         profile=profile.value if profile else None)
+        cluster_name = params.stack_outputs['cluster_name'].value
+        cmd = aws_cli.update_kubeconfig_cmd(cluster_name)
+        res, _ = external_process.run(cmd)
+        print(res)
+
+
+INSTANCE = AwsProvider()
diff --git a/pulumi/python/automation/providers/base_provider.py b/pulumi/python/automation/providers/base_provider.py
new file mode 100644
index 00000000..4f502aa6
--- /dev/null
+++ b/pulumi/python/automation/providers/base_provider.py
@@ -0,0 +1,169 @@
+"""
+This file provides the super class for all infrastructure providers.
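+Concrete providers (such as aws, do, and linode) subclass Provider and are discovered by filename in this directory.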
+""" + +import abc +import os +import pathlib +import sys +from typing import List, Mapping, Iterable, TextIO, Union, Dict, Any, Hashable, Optional + +from .pulumi_project import PulumiProject, SecretConfigKey + +# Directory in which script is located +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class InvalidConfigurationException(Exception): + key: Optional[str] + + def __init__(self, msg: str, key: Optional[str] = None) -> None: + super().__init__(msg) + self.key = key + + +class Provider: + """Super class for all infrastructure providers""" + @staticmethod + def list_providers() -> Iterable[str]: + """returns an iterable of the providers available derived from the files in the providers directory + :return all the usable providers""" + def is_provider(file: pathlib.Path) -> bool: + # Filter out the non-provider files + return file.is_file() and \ + not file.stem.endswith('base_provider') and \ + not file.stem.endswith('pulumi_project') and \ + not file.stem.endswith('update_kubeconfig') + + path = pathlib.Path(SCRIPT_DIR) + return [os.path.splitext(file.stem)[0] for file in path.iterdir() if is_provider(file)] + + @staticmethod + def validate_env_config_required_keys(required_keys: List[str], config: Mapping[str, str]): + """Validates that the required environment variables as defined by file or runtime environment are present""" + + for key in required_keys: + if key not in config.keys(): + raise InvalidConfigurationException(msg=f'Required configuration key [{key}] not found', key=key) + + @abc.abstractmethod + def infra_type(self) -> str: + """ + :return string representing the type of underlying infrastructure used to stand up Kubernetes + """ + pass + + @abc.abstractmethod + def infra_execution_order(self) -> List[PulumiProject]: + """Pulumi infrastructure (not Kubernetes) projects to be executed in sequential order""" + pass + + def new_stack_config(self, env_config: Mapping[str, str], + defaults: Union[Dict[Hashable, Any], list, None]) -> Union[Dict[Hashable, Any], list, None]: + """Creates a new Pulumi stack configuration""" + config = { + 'kubernetes:infra_type': self.infra_type() + } + return config + + def validate_env_config(self, env_config: Mapping[str, str]): + """Validates that the passed environment variables are correct""" + Provider.validate_env_config_required_keys(['PULUMI_STACK'], env_config) + + def validate_stack_config(self, + stack_config: Union[Dict[Hashable, Any], list, None], + env_config: Mapping[str, str]): + """Validates that the passed stack configuration is correct""" + pass + + def k8s_execution_order(self) -> List[PulumiProject]: + """Pulumi Kubernetes projects to be executed in sequential order""" + return [ + PulumiProject(path='infrastructure/kubeconfig', description='Kubeconfig'), + PulumiProject(path='kubernetes/secrets', description='Secrets'), + PulumiProject(path='utility/kic-image-build', description='KIC Image Build'), + PulumiProject(path='utility/kic-image-push', description='KIC Image Push'), + PulumiProject(path='kubernetes/nginx/ingress-controller-namespace', + description='K8S Ingress NS'), + PulumiProject(path='kubernetes/nginx/ingress-controller', description='Ingress Controller'), + PulumiProject(path='kubernetes/logstore', description='Logstore'), + PulumiProject(path='kubernetes/logagent', description='Log Agent'), + PulumiProject(path='kubernetes/certmgr', description='Cert Manager'), + PulumiProject(path='kubernetes/prometheus', description='Prometheus', + 
config_keys_with_secrets=[SecretConfigKey(key_name='prometheus:adminpass',
+                                                                    prompt='Prometheus administrator password')]),
+            PulumiProject(path='kubernetes/observability', description='Observability'),
+            PulumiProject(path='kubernetes/applications/sirius', description='Bank of Sirius',
+                          config_keys_with_secrets=[SecretConfigKey(key_name='sirius:accounts_pwd',
+                                                                    prompt='Bank of Sirius Accounts Database password'),
+                                                    SecretConfigKey(key_name='sirius:ledger_pwd',
+                                                                    prompt='Bank of Sirius Ledger Database password'),
+                                                    SecretConfigKey(key_name='sirius:demo_login_user',
+                                                                    prompt='Bank of Sirius demo site login username',
+                                                                    default='testuser'),
+                                                    SecretConfigKey(key_name='sirius:demo_login_pwd',
+                                                                    prompt='Bank of Sirius demo site login password',
+                                                                    default='password')])
+        ]
+
+    def execution_order(self) -> List[PulumiProject]:
+        """Full list of Pulumi projects to be executed in sequential order (including both infrastructure and
+        Kubernetes)."""
+        return self.infra_execution_order() + self.k8s_execution_order()
+
+    def display_execution_order(self, output: TextIO = sys.stdout):
+        """Writes the execution order of Pulumi projects in a visual tree to an output stream"""
+        execution_order = self.execution_order()
+        last_prefix = ''
+
+        for index, pulumi_project in enumerate(execution_order):
+            path_parts = pulumi_project.path.split(os.path.sep)
+            project = f'{path_parts[-1]} [{pulumi_project.description}]'
+            prefix = os.path.sep.join(path_parts[:-1])
+
+            # First item in the list
+            if last_prefix != prefix and index == 0:
+                print(f' ┌── {prefix}', file=output)
+                print(f' │   ├── {project}', file=output)
+            # Last item in the list with a new prefix
+            elif last_prefix != prefix and index == len(execution_order) - 1:
+                print(f' └── {prefix}', file=output)
+                print(f'     └── {project}', file=output)
+            # Any other item with a new prefix
+            elif last_prefix != prefix and index != 0:
+                print(f' ├── {prefix}', file=output)
+
+                peek = execution_order[index + 1]
+                splitted = peek.path.split(f'{prefix}{os.path.sep}')[0]
+                # item is not the last item with the prefix
+                if os.path.sep not in splitted:
+                    print(f' │   ├── {project}', file=output)
+                # item is the last item with the prefix
+                else:
+                    print(f' │   └── {project}', file=output)
+            # Note: the more specific last-item condition must be checked before the
+            # general same-prefix condition, otherwise it can never be reached
+            elif last_prefix == prefix and index == len(execution_order) - 1:
+                print(f' │   └── {project}', file=output)
+            elif last_prefix == prefix:
+                print(f' │   ├── {project}', file=output)
+
+            if last_prefix != prefix:
+                last_prefix = prefix
+
+    @staticmethod
+    def _find_position_of_project_by_path(path: str, k8s_execution_order: List[PulumiProject]) -> int:
+        for index, project in enumerate(k8s_execution_order):
+            if project.path == path:
+                return index
+        return -1
+
+    @staticmethod
+    def _insert_project(project_path_to_insert_after: str,
+                        project: PulumiProject,
+                        k8s_execution_order: List[PulumiProject]):
+        project_position = Provider._find_position_of_project_by_path(project_path_to_insert_after,
+                                                                      k8s_execution_order)
+
+        if project_position < 0:
+            raise ValueError(f'Could not find project at path {project_path_to_insert_after}')
+
+        k8s_execution_order.insert(project_position + 1, project)
diff --git a/pulumi/python/automation/providers/do.py b/pulumi/python/automation/providers/do.py
new file mode 100644
index 00000000..0de22746
--- /dev/null
+++ b/pulumi/python/automation/providers/do.py
@@ -0,0 +1,234 @@
+"""
+File containing the Digital Ocean infrastructure provider for the MARA runner.
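+This provider depends on the doctl CLI and requires a Digital Ocean API token.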
+"""
+
+import json
+import sys
+from typing import List, Dict, Hashable, Any, Union, MutableMapping, Optional, Mapping
+
+import yaml
+from pulumi import automation as auto
+from kic_util import external_process
+
+from .base_provider import PulumiProject, Provider, InvalidConfigurationException
+from .pulumi_project import PulumiProjectEventParams
+
+
+class DigitalOceanProviderException(Exception):
+    pass
+
+
+class DoctlCli:
+    """Digital Ocean CLI execution helper class"""
+    access_token: str
+    region: Optional[str]
+
+    def __init__(self, access_token: str, region: Optional[str] = None):
+        self.access_token = access_token
+        self.region = region
+
+    def base_cmd(self) -> str:
+        """
+        :return: returns the base command and any required flags
+        """
+        cmd = 'doctl'
+        cmd += f' --access-token "{self.access_token}" '
+        return cmd.strip()
+
+    def validate_credentials_cmd(self) -> str:
+        """
+        Returns the command that validates if the doctl command can authenticate correctly.
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} account get'
+
+    def save_kubernetes_cluster_cmd(self, cluster_name: str) -> str:
+        """
+        Returns the command used to update the kubeconfig with the passed cluster name
+        :param cluster_name: name of the cluster to add to the kubeconfig
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} kubernetes cluster config save {cluster_name}'
+
+    def get_kubernetes_versions_json(self) -> str:
+        """
+        Returns the command that lists the Kubernetes versions available.
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} kubernetes options versions --output json'
+
+    def get_kubernetes_regions_json(self) -> str:
+        """
+        Returns the command that lists the regions available to run Kubernetes.
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} kubernetes options regions --output json'
+
+    def get_kubernetes_instance_sizes_json(self) -> str:
+        """
+        Returns the command that lists the instance sizes available for Kubernetes nodes.
+        :return: command to be executed
+        """
+        return f'{self.base_cmd()} kubernetes options sizes --output json'
+
+
+class DigitalOceanProvider(Provider):
+    """Digital Ocean infrastructure provider"""
+    def infra_type(self) -> str:
+        return 'DO'
+
+    def infra_execution_order(self) -> List[PulumiProject]:
+        return [
+            PulumiProject(path='infrastructure/digitalocean/container-registry', description='DO Container Registry'),
+            PulumiProject(path='infrastructure/digitalocean/domk8s', description='DO Kubernetes',
+                          on_success=DigitalOceanProvider._update_kubeconfig),
+        ]
+
+    def k8s_execution_order(self) -> List[PulumiProject]:
+        # The default Kubernetes Pulumi project instantiation order must be modified because
+        # the Digital Ocean Container Registry login credentials *must* be added under the
+        # Ingress Controller's namespace. As such, we insert a Digital Ocean specific
+        # Pulumi project that gets the credentials and adds them to the Kubernetes cluster
+        # under the appropriate namespace.
+ original_order = super().k8s_execution_order() + new_order = original_order.copy() + + # Add container registry credentials project after ingress controller namespace project + add_credentials_project = PulumiProject(path='infrastructure/digitalocean/container-registry-credentials', + description='Registry Credentials') + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller-namespace', + project=add_credentials_project, + k8s_execution_order=new_order) + + # Add DNS record project after ingress controller project + dns_record_project = PulumiProject(path='infrastructure/digitalocean/dns-record', description='DNS Record') + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller', + project=dns_record_project, + k8s_execution_order=new_order) + + return new_order + + def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ + Union[Dict[Hashable, Any], list, None]: + config = super().new_stack_config(env_config, defaults) + + if 'DIGITALOCEAN_TOKEN' not in env_config: + config['docean:token'] = input("Digital Ocean API token (this is stored in plain-text - " + "alternatively this can be specified as the environment variable " + "DIGITALOCEAN_TOKEN): ") + + token = DigitalOceanProvider.token(stack_config={'config': config}, env_config=env_config) + do_cli = DoctlCli(access_token=token) + + # FQDN + config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + + # Kubernetes versions + k8s_versions_json_str, _ = external_process.run(do_cli.get_kubernetes_versions_json()) + k8s_versions_json = json.loads(k8s_versions_json_str) + k8s_version_slugs = [version['slug'] for version in k8s_versions_json] + + print('Supported Kubernetes versions:') + for slug in k8s_version_slugs: + print(f' {slug}') + default_version = defaults['docean:k8s_version'] or k8s_version_slugs[0] + config['docean:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version + print(f"Kubernetes version: {config['docean:k8s_version']}") + + # Kubernetes regions + k8s_regions_json_str, _ = external_process.run(do_cli.get_kubernetes_regions_json()) + k8s_regions_json = json.loads(k8s_regions_json_str) + default_region = defaults['docean:region'] or k8s_regions_json[-1]['slug'] + + print('Supported Regions:') + for item in k8s_regions_json: + print(f" {item['name']}: {item['slug']}") + config['docean:region'] = input(f'Region [{default_region}]: ').strip() or default_region + print(f"Region: {config['docean:region']}") + + # Kubernetes instance size + k8s_sizes_json_str, _ = external_process.run(do_cli.get_kubernetes_instance_sizes_json()) + k8s_sizes_json = json.loads(k8s_sizes_json_str) + k8s_sizes_slugs = [size['slug'] for size in k8s_sizes_json] + default_size = defaults['docean:instance_size'] or 's-2vcpu-4gb' + + print('Supported Instance Sizes:') + for slug in k8s_sizes_slugs: + print(f' {slug}') + + config['docean:instance_size'] = input(f'Instance size [{default_size}]: ').strip() or default_size + print(f"Instance size: {config['docean:instance_size']}") + + # Kubernetes instance count + default_node_count = defaults['docean:node_count'] or 3 + while 'docean:node_count' not in config: + node_count = input('Node count for Kubernetes cluster ' + f'[{default_node_count}]: ').strip() or default_node_count + if type(node_count) == int or node_count.isdigit(): + config['docean:node_count'] = int(node_count) + print(f"Node count: 
{config['docean:node_count']}") + + return config + + def validate_stack_config(self, + stack_config: Union[Dict[Hashable, Any], list, None], + env_config: Mapping[str, str]): + super().validate_stack_config(stack_config=stack_config, env_config=env_config) + token = DigitalOceanProvider.token(stack_config=stack_config, env_config=env_config) + do_cli = DoctlCli(access_token=token) + _, err = external_process.run(cmd=do_cli.validate_credentials_cmd()) + if err: + print(f'Digital Ocean authentication error: {err}', file=sys.stderr) + sys.exit(3) + + @staticmethod + def _update_kubeconfig(params: PulumiProjectEventParams): + if 'cluster_name' not in params.stack_outputs: + raise DigitalOceanProviderException('Cannot find key [cluster_name] in stack output') + + kubeconfig = yaml.safe_load(params.stack_outputs['kubeconfig'].value) + full_cluster_name = kubeconfig['clusters'][0]['name'] + + res, _ = external_process.run('kubectl config get-clusters') + clusters = filter(lambda cluster: cluster != 'NAME', res.splitlines()) + + if full_cluster_name in clusters: + print(f'Local kubectl configuration already has credentials for cluster {full_cluster_name}') + else: + print(f'Adding credentials for cluster {full_cluster_name} to local kubectl configuration') + cluster_name = params.stack_outputs['cluster_name'].value + token = DigitalOceanProvider.token(stack_config=params.config, env_config=params.env_config) + do_cli = DoctlCli(access_token=token) + + res, _ = external_process.run(do_cli.save_kubernetes_cluster_cmd(cluster_name)) + if res: + print(res) + + @staticmethod + def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, auto._config.ConfigValue]], + env_config: Mapping[str, str]) -> str: + """Looks into multiple configuration sources for a valid Digital Ocean authentication token. + :param stack_config: reference to stack configuration + :param env_config: reference to environment configuration + :return: authentication token + """ + # Token is in an environment variable or the environment variable file + if 'DIGITALOCEAN_TOKEN' in env_config: + return env_config['DIGITALOCEAN_TOKEN'] + + # We were given a reference to a StackConfigParser object + if 'config' in stack_config and 'docean:token' in stack_config['config']: + return stack_config['config']['docean:token'] + + # We were given a reference to a Pulumi Stack configuration + if 'docean:token' in stack_config: + return stack_config['docean:token'].value + + # Otherwise + msg = 'When using the Digital Ocean provider, an API token must be specified - ' \ + 'this token can be specified with the Pulumi config parameter docean:token ' \ + 'or the environment variable DIGITALOCEAN_TOKEN' + raise InvalidConfigurationException(msg) + + +INSTANCE = DigitalOceanProvider() diff --git a/pulumi/python/automation/providers/linode.py b/pulumi/python/automation/providers/linode.py new file mode 100644 index 00000000..11eb1669 --- /dev/null +++ b/pulumi/python/automation/providers/linode.py @@ -0,0 +1,190 @@ +""" +File containing the Linode infrastructure provider for the MARA runner. 
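+In addition to standing up LKE, this provider deploys and configures a Harbor container registry for the cluster.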
+""" + +import base64 +from typing import List, Union, Dict, Hashable, Any, Mapping, MutableMapping + +import yaml +from pulumi import automation as auto + +from kic_util import external_process + +from .base_provider import PulumiProject, Provider, InvalidConfigurationException +from .pulumi_project import PulumiProjectEventParams, SecretConfigKey + +from .update_kubeconfig import update_kubeconfig + + +class LinodeProviderException(Exception): + pass + + +class LinodeCli: + """Linode CLI execution helper class""" + + def base_cmd(self) -> str: + return 'linode-cli' + + def get_regions(self) -> str: + return f'{self.base_cmd()} regions list --suppress-warnings' + + def get_k8s_versions(self) -> str: + return f'{self.base_cmd()} lke versions-list --suppress-warnings' + + def get_instance_sizes(self) -> str: + return f'{self.base_cmd()} linodes types --suppress-warnings' + + +class LinodeProvider(Provider): + def infra_type(self) -> str: + return 'LKE' + + def infra_execution_order(self) -> List[PulumiProject]: + return [ + PulumiProject(path='infrastructure/linode/lke', description='LKE', + on_success=LinodeProvider._update_kubeconfig), + ] + + def k8s_execution_order(self) -> List[PulumiProject]: + original_order = super().k8s_execution_order() + new_order = original_order.copy() + + harbor_secrets = [SecretConfigKey(key_name='linode:harbor_password', + prompt='Harbor administrator password'), + SecretConfigKey(key_name='linode:harbor_db_password', + prompt='Harbor database password'), + SecretConfigKey(key_name='linode:harbor_sudo_user_password', + prompt='Harbor instance sudo user password')] + harbor_project = PulumiProject(path='infrastructure/linode/harbor', + description='Harbor', + config_keys_with_secrets=harbor_secrets) + + Provider._insert_project(project_path_to_insert_after='kubernetes/secrets', + project=harbor_project, + k8s_execution_order=new_order) + + # Add container registry credentials project after ingress controller namespace project + # Harbor is configured some time after it is stood up in order to give it time to + # instantiate. 
+ add_credentials_project = PulumiProject(path='infrastructure/linode/container-registry-credentials', + description='Registry Credentials') + Provider._insert_project(project_path_to_insert_after='kubernetes/nginx/ingress-controller-namespace', + project=add_credentials_project, + k8s_execution_order=new_order) + + # Add project that configures Harbor for use in the cluster + harbor_config_project = PulumiProject(path='infrastructure/linode/harbor-configuration', + description='Harbor Config') + Provider._insert_project(project_path_to_insert_after='utility/kic-image-build', + project=harbor_config_project, + k8s_execution_order=new_order) + + return new_order + + def new_stack_config(self, env_config, defaults: Union[Dict[Hashable, Any], list, None]) -> \ + Union[Dict[Hashable, Any], list, None]: + config = super().new_stack_config(env_config, defaults) + + if 'LINODE_TOKEN' not in env_config: + config['linode:token'] = input('Linode API token (this is stored in plain-text - ' + 'alternatively this can be specified as the environment variable ' + 'LINODE_TOKEN): ') + + token = LinodeProvider.token(stack_config={'config': config}, env_config=env_config) + linode_cli = LinodeCli() + + cli_env = {} + cli_env.update(env_config) + cli_env['LINODE_CLI_TOKEN'] = token + + # FQDN + config['kic-helm:fqdn'] = input(f'Fully qualified domain name (FQDN) for application: ') + print(f"FQDN: {config['kic-helm:fqdn']}") + + # SOA Email + config['linode:soa_email'] = input(f'DNS Start of Authority (SOA) email address for container registry domain: ').strip() + print(f"SOA email address: {config['linode:soa_email']}") + + # Kubernetes versions + k8s_version_list, _ = external_process.run(cmd=linode_cli.get_k8s_versions(), + env=cli_env) + print(f'Supported Kubernetes versions:\n{k8s_version_list}') + default_version = defaults['linode:k8s_version'] or '1.22' + config['linode:k8s_version'] = input(f'Kubernetes version [{default_version}]: ').strip() or default_version + print(f"Kubernetes version: {config['linode:k8s_version']}") + + # Region + regions_list, _ = external_process.run(cmd=linode_cli.get_regions(), + env=cli_env) + print(f'Supported regions:\n{regions_list}') + default_region = defaults['linode:region'] or 'us-central' + config['linode:region'] = input(f'Region [{default_region}]: ').strip() or default_region + print(f"Region: {config['linode:region']}") + + # Instance Type + instance_type_list, _ = external_process.run(cmd=linode_cli.get_instance_sizes(), + env=cli_env) + print(f'Supported instance types:\n{instance_type_list}') + default_type = defaults['linode:instance_type'] or 'g6-standard-8' + config['linode:instance_type'] = input(f'Instance type [{default_type}]: ').strip() or default_type + print(f"Instance type: {config['linode:instance_type']}") + + # Node Count + default_node_count = defaults['linode:node_count'] or 3 + while 'linode:node_count' not in config: + node_count = input('Node count for Kubernetes cluster ' + f'[{default_node_count}]: ').strip() or default_node_count + if type(node_count) == int or node_count.isdigit(): + config['linode:node_count'] = int(node_count) + print(f"Node count: {config['linode:node_count']}") + + # HA Enabled + k8s_ha_input = input('Enable Kubernetes HA mode [Y]: ').strip().lower() + k8s_ha = k8s_ha_input in ['', 'y', 'yes', 't', 'true', '1'] + config['linode:k8s_ha'] = k8s_ha + print(f'HA mode enabled: {k8s_ha}') + + return config + + @staticmethod + def token(stack_config: Union[Mapping[str, Any], MutableMapping[str, 
auto._config.ConfigValue]], + env_config: Mapping[str, str]) -> str: + """Looks into multiple configuration sources for a valid Linode authentication token. + :param stack_config: reference to stack configuration + :param env_config: reference to environment configuration + :return: authentication token + """ + + # Token is in an environment variable or the environment variable file + if 'LINODE_TOKEN' in env_config: + return env_config['LINODE_TOKEN'] + + # We were given a reference to a StackConfigParser object + if 'config' in stack_config and 'linode:token' in stack_config['config']: + return stack_config['config']['linode:token'] + + # We were given a reference to a Pulumi Stack configuration + if 'linode:token' in stack_config: + return stack_config['linode:token'].value + + # Otherwise + msg = 'When using the Linode provider, an API token must be specified - ' \ + 'this token can be specified with the Pulumi config parameter linode:token ' \ + 'or the environment variable LINODE_TOKEN' + raise InvalidConfigurationException(msg) + + @staticmethod + def _update_kubeconfig(params: PulumiProjectEventParams): + if 'cluster_name' not in params.stack_outputs: + raise LinodeProviderException('Cannot find key [cluster_name] in stack output') + + cluster_name = params.stack_outputs['cluster_name'].value + kubeconfig_encoded = params.stack_outputs['kubeconfig'].value + kubeconfig_bytes = base64.b64decode(kubeconfig_encoded) + kubeconfig = yaml.safe_load(kubeconfig_bytes) + + update_kubeconfig(env=params.env_config, cluster_name=cluster_name, kubeconfig=kubeconfig) + + +INSTANCE = LinodeProvider() diff --git a/pulumi/python/automation/providers/pulumi_project.py b/pulumi/python/automation/providers/pulumi_project.py new file mode 100644 index 00000000..538dfe69 --- /dev/null +++ b/pulumi/python/automation/providers/pulumi_project.py @@ -0,0 +1,91 @@ +""" +This file contains classes related to modeling Pulumi projects as discrete directories that +are invoked individually in sequence by the Pulumi Automation API. +""" + +import os.path +from typing import Optional, Callable, Mapping, List, MutableMapping +import yaml +from pulumi import automation as auto + +# Directory in which script is located +SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class PulumiConfigException(Exception): + """Generic exception thrown when Pulumi configuration errors are encountered""" + pass + + +class SecretConfigKey: + """ + Class representing a secret that the user will be prompted to enter and subsequently stored in the Pulumi + secrets store. + """ + key_name: str + prompt: str + default: Optional[str] + + def __init__(self, key_name: str, prompt: str, default: Optional[str] = None) -> None: + super().__init__() + self.key_name = key_name + self.prompt = prompt + self.default = default + + +class PulumiProject: + """ + Class representing a Pulumi project that is associated with a directory and containing properties regarding the + secrets used, description and the operation to run when it is successfully stood up. 
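+    Each project corresponds to a directory containing a Pulumi.yaml file, from which the project's name is read.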
+ """ + path: str + description: str + config_keys_with_secrets: List[SecretConfigKey] + on_success: Optional[Callable] = None + _config_data: Optional[Mapping[str, str]] = None + + def __init__(self, + path: str, + description: str, + config_keys_with_secrets: Optional[List[SecretConfigKey]] = None, + on_success: Optional[Callable] = None) -> None: + super().__init__() + self.path = path + self.description = description + self.config_keys_with_secrets = config_keys_with_secrets or [] + self.on_success = on_success + + def abspath(self) -> str: + relative_path = os.path.sep.join([SCRIPT_DIR, '..', '..', self.path]) + return os.path.abspath(relative_path) + + def config(self) -> Mapping[str, str]: + if not self._config_data: + config_path = os.path.sep.join([self.abspath(), 'Pulumi.yaml']) + with open(config_path, 'r') as f: + self._config_data = yaml.safe_load(f) + + return self._config_data + + def name(self) -> str: + config_data = self.config() + + if 'name' not in config_data.keys(): + raise PulumiConfigException('Pulumi configuration did not contain required "name" key') + + return config_data['name'] + + +class PulumiProjectEventParams: + """Object containing the state passed to an on_success event after the successful stand up of a Pulumi project.""" + stack_outputs: MutableMapping[str, auto._output.OutputValue] + config: MutableMapping[str, auto._config.ConfigValue] + env_config: Mapping[str, str] + + def __init__(self, + stack_outputs: MutableMapping[str, auto._output.OutputValue], + config: MutableMapping[str, auto._config.ConfigValue], + env_config: Mapping[str, str]) -> None: + self.stack_outputs = stack_outputs + self.config = config + self.env_config = env_config diff --git a/pulumi/python/automation/providers/update_kubeconfig.py b/pulumi/python/automation/providers/update_kubeconfig.py new file mode 100644 index 00000000..e88f0c15 --- /dev/null +++ b/pulumi/python/automation/providers/update_kubeconfig.py @@ -0,0 +1,433 @@ +# This code is derived from code within the AWS SDK licensed under the +# Apache 2.0 License. +# +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# Copyright 2022 F5, Inc. All Rights Reserved. +# +# This file is licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +# CONDITIONS OF ANY KIND, either express or implied. See the License for +# the specific language governing permissions and limitations under +# the License. + +""" +This file contains functions that allow for merging in a new kubeconfig into the existing +kubectl config files contained in a user's home directory or path specified by KUBECONFIG. +""" + +import os +import logging +import errno +from collections import OrderedDict +from typing import Mapping, Any +import yaml + +# Default path to user's kubectl config files +DEFAULT_PATH = os.path.expanduser("~/.kube/config") +LOG = logging.getLogger('runner') + + +def update_kubeconfig(cluster_name: str, env: Mapping[str, str], kubeconfig: Mapping[str, Any]): + """Merge the passed kubeconfig for the given cluster into the existing kubectl config files. 
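+    If one of the kubeconfig files referenced by KUBECONFIG already has an entry for the cluster, that file is
+    updated in place; otherwise the first file is used.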
+    :param cluster_name: name of cluster associated with kubeconfig
+    :param env: mapping of environment variables from which KUBECONFIG is read
+    :param kubeconfig: contents of kubeconfig
+    """
+
+    cluster = kubeconfig['clusters'][0]
+    user = kubeconfig['users'][0]
+    alias = kubeconfig['contexts'][0]['name']
+
+    config_selector = KubeconfigSelector(env_variable=env.get('KUBECONFIG', ''),
+                                         path_in=None)
+    config = config_selector.choose_kubeconfig(cluster_name)
+
+    # Check for an existing entry before inserting so that the log message below is accurate
+    cluster_already_present = config.has_cluster(cluster_name)
+
+    appender = KubeconfigAppender()
+    new_context_dict = appender.insert_cluster_user_pair(config=config,
+                                                         cluster=cluster,
+                                                         user=user,
+                                                         alias=alias)
+
+    writer = KubeconfigWriter()
+    writer.write_kubeconfig(config)
+
+    if cluster_already_present:
+        LOG.info('Updated context %s in %s', new_context_dict["name"], config.path)
+    else:
+        LOG.info('Added new context %s to %s', new_context_dict["name"], config.path)
+
+
+# Everything after this line is sourced from the AWS SDK
+
+
+class KubeconfigError(RuntimeError):
+    """ Base class for all kubeconfig errors."""
+
+
+class KubeconfigCorruptedError(KubeconfigError):
+    """ Raised when a kubeconfig cannot be parsed."""
+
+
+class KubeconfigInaccessableError(KubeconfigError):
+    """ Raised when a kubeconfig cannot be opened for read/writing."""
+
+
+class SafeOrderedDumper(yaml.SafeDumper):
+    """ Safely dump an OrderedDict as yaml."""
+
+
+def _ordered_representer(dumper, data):
+    return dumper.represent_mapping(
+        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+        data.items())
+
+
+SafeOrderedDumper.add_representer(OrderedDict, _ordered_representer)
+
+
+def ordered_yaml_dump(to_dump, stream=None):
+    """
+    Dump an OrderedDict object to yaml.
+
+    :param to_dump: The OrderedDict to dump
+    :type to_dump: OrderedDict
+
+    :param stream: The file to dump to
+    If not given or if None, only return the value
+    :type stream: file
+    """
+    return yaml.dump(to_dump, stream,
+                     SafeOrderedDumper, default_flow_style=False)
+
+
+class SafeOrderedLoader(yaml.SafeLoader):
+    """ Safely load a yaml file into an OrderedDict."""
+
+
+def _ordered_constructor(loader, node):
+    loader.flatten_mapping(node)
+    return OrderedDict(loader.construct_pairs(node))
+
+
+SafeOrderedLoader.add_constructor(
+    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
+    _ordered_constructor)
+
+
+def ordered_yaml_load(stream):
+    """ Load an OrderedDict object from a yaml stream."""
+    return yaml.load(stream, SafeOrderedLoader)
+
+
+def _get_new_kubeconfig_content():
+    return OrderedDict([
+        ("apiVersion", "v1"),
+        ("clusters", []),
+        ("contexts", []),
+        ("current-context", ""),
+        ("kind", "Config"),
+        ("preferences", OrderedDict()),
+        ("users", [])
+    ])
+
+
+class KubeconfigSelector(object):
+
+    def __init__(self, env_variable, path_in, validator=None, loader=None):
+        """
+        Parse KUBECONFIG into a list of absolute paths.
+ Also replace the empty list with DEFAULT_PATH + + :param env_variable: KUBECONFIG as a long string + :type env_variable: string + + :param path_in: The path passed in through the CLI + :type path_in: string or None + """ + if validator is None: + validator = KubeconfigValidator() + self._validator = validator + + if loader is None: + loader = KubeconfigLoader(validator) + self._loader = loader + + if path_in is not None: + # Override environment variable + self._paths = [self._expand_path(path_in)] + else: + # Get the list of paths from the environment variable + if env_variable == "": + env_variable = DEFAULT_PATH + self._paths = [self._expand_path(element) + for element in env_variable.split(os.pathsep) + if len(element.strip()) > 0] + if len(self._paths) == 0: + self._paths = [DEFAULT_PATH] + + def choose_kubeconfig(self, cluster_name): + """ + Choose which kubeconfig file to read from. + If name is already an entry in one of the $KUBECONFIG files, + choose that one. + Otherwise choose the first file. + + :param cluster_name: The name of the cluster which is going to be added + :type cluster_name: String + + :return: a chosen Kubeconfig based on above rules + :rtype: Kubeconfig + """ + # Search for an existing entry to update + for candidate_path in self._paths: + try: + loaded_config = self._loader.load_kubeconfig(candidate_path) + + if loaded_config.has_cluster(cluster_name): + LOG.debug("Found entry to update at {0}".format( + candidate_path + )) + return loaded_config + except KubeconfigError as e: + LOG.warning("Passing {0}:{1}".format(candidate_path, e)) + + # No entry was found, use the first file in KUBECONFIG + # + # Note: This could raise KubeconfigErrors if paths[0] is corrupted + return self._loader.load_kubeconfig(self._paths[0]) + + def _expand_path(self, path): + """ A helper to expand a path to a full absolute path. """ + return os.path.abspath(os.path.expanduser(path)) + + +class Kubeconfig(object): + def __init__(self, path, content=None): + self.path = path + if content is None: + content = _get_new_kubeconfig_content() + self.content = content + + def dump_content(self): + """ Return the stored content in yaml format. """ + return ordered_yaml_dump(self.content) + + def has_cluster(self, name): + """ + Return true if this kubeconfig contains an entry + For the passed cluster name. 
+ """ + if 'clusters' not in self.content: + return False + return name in [cluster['name'] + for cluster in self.content['clusters']] + + +class KubeconfigValidator(object): + def __init__(self): + # Validation_content is an empty Kubeconfig + # It is used as a way to know what types different entries should be + self._validation_content = Kubeconfig(None, None).content + + def validate_config(self, config): + """ + Raises KubeconfigCorruptedError if the passed content is invalid + + :param config: The config to validate + :type config: Kubeconfig + """ + if not isinstance(config, Kubeconfig): + raise KubeconfigCorruptedError("Internal error: " + "Not a Kubeconfig object.") + self._validate_config_types(config) + self._validate_list_entry_types(config) + + def _validate_config_types(self, config): + """ + Raises KubeconfigCorruptedError if any of the entries in config + are the wrong type + + :param config: The config to validate + :type config: Kubeconfig + """ + if not isinstance(config.content, dict): + raise KubeconfigCorruptedError("Content not a dictionary.") + for key, value in self._validation_content.items(): + if (key in config.content and + config.content[key] is not None and + not isinstance(config.content[key], type(value))): + raise KubeconfigCorruptedError( + "{0} is wrong type:{1} " + "(Should be {2})".format( + key, + type(config.content[key]), + type(value) + ) + ) + + def _validate_list_entry_types(self, config): + """ + Raises KubeconfigCorruptedError if any lists in config contain objects + which are not dictionaries + + :param config: The config to validate + :type config: Kubeconfig + """ + for key, value in self._validation_content.items(): + if (key in config.content and + type(config.content[key]) == list): + for element in config.content[key]: + if not isinstance(element, OrderedDict): + raise KubeconfigCorruptedError( + "Entry in {0} not a dictionary.".format(key)) + + +class KubeconfigLoader(object): + def __init__(self, validator=None): + if validator is None: + validator = KubeconfigValidator() + self._validator = validator + + def load_kubeconfig(self, path): + """ + Loads the kubeconfig found at the given path. + If no file is found at the given path, + Generate a new kubeconfig to write back. + If the kubeconfig is valid, loads the content from it. + If the kubeconfig is invalid, throw the relevant exception. + + :param path: The path to load a kubeconfig from + :type path: string + + :raises KubeconfigInaccessableError: if the kubeconfig can't be opened + :raises KubeconfigCorruptedError: if the kubeconfig is invalid + + :return: The loaded kubeconfig + :rtype: Kubeconfig + """ + try: + with open(path, "r") as stream: + loaded_content = ordered_yaml_load(stream) + except IOError as e: + if e.errno == errno.ENOENT: + loaded_content = None + else: + raise KubeconfigInaccessableError( + "Can't open kubeconfig for reading: {0}".format(e)) + except yaml.YAMLError as e: + raise KubeconfigCorruptedError( + "YamlError while loading kubeconfig: {0}".format(e)) + + loaded_config = Kubeconfig(path, loaded_content) + self._validator.validate_config(loaded_config) + + return loaded_config + + +class KubeconfigWriter(object): + def write_kubeconfig(self, config): + """ + Write config to disk. + OK if the file doesn't exist. 
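+        The file is created with mode 0o600, since kubeconfig files can contain credentials.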
+ + :param config: The kubeconfig to write + :type config: Kubeconfig + + :raises KubeconfigInaccessableError: if the kubeconfig + can't be opened for writing + """ + directory = os.path.dirname(config.path) + + try: + os.makedirs(directory) + except OSError as e: + if e.errno != errno.EEXIST: + raise KubeconfigInaccessableError( + "Can't create directory for writing: {0}".format(e)) + try: + with os.fdopen( + os.open( + config.path, + os.O_CREAT | os.O_RDWR | os.O_TRUNC, + 0o600), + "w+") as stream: + ordered_yaml_dump(config.content, stream) + except (IOError, OSError) as e: + raise KubeconfigInaccessableError( + "Can't open kubeconfig for writing: {0}".format(e)) + + +class KubeconfigAppender(object): + def insert_entry(self, config, key, entry): + """ + Insert entry into the array at content[key] + Overwrite an existing entry if they share the same name + + :param config: The kubeconfig to insert an entry into + :type config: Kubeconfig + """ + if key not in config.content: + config.content[key] = [] + array = config.content[key] + if not isinstance(array, list): + raise KubeconfigError("Tried to insert into {0}," + "which is a {1} " + "not a {2}".format(key, + type(array), + list)) + found = False + for counter, existing_entry in enumerate(array): + if "name" in existing_entry and\ + "name" in entry and\ + existing_entry["name"] == entry["name"]: + array[counter] = entry + found = True + + if not found: + array.append(entry) + + config.content[key] = array + return config + + def _make_context(self, cluster, user, alias=None): + """ Generate a context to associate cluster and user with a given alias.""" + return OrderedDict([ + ("context", OrderedDict([ + ("cluster", cluster["name"]), + ("user", user["name"]) + ])), + ("name", alias or user["name"]) + ]) + + def insert_cluster_user_pair(self, config, cluster, user, alias=None): + """ + Insert the passed cluster entry and user entry, + then make a context to associate them + and set current-context to be the new context. 
Returns the new context
+
+        :param config: the Kubeconfig to insert the pair into
+        :type config: Kubeconfig
+
+        :param cluster: the cluster entry
+        :type cluster: OrderedDict
+
+        :param user: the user entry
+        :type user: OrderedDict
+
+        :param alias: the alias for the context; defaults to the user entry name
+        :type alias: str
+
+        :return: The generated context
+        :rtype: OrderedDict
+        """
+        context = self._make_context(cluster, user, alias=alias)
+        self.insert_entry(config, "clusters", cluster)
+        self.insert_entry(config, "users", user)
+        self.insert_entry(config, "contexts", context)
+
+        config.content["current-context"] = context["name"]
+
+        return context
diff --git a/pulumi/python/automation/stack_config_parser.py b/pulumi/python/automation/stack_config_parser.py
new file mode 100644
index 00000000..48f5af8b
--- /dev/null
+++ b/pulumi/python/automation/stack_config_parser.py
@@ -0,0 +1,77 @@
+import json
+import os
+from typing import Optional, MutableMapping
+
+from pulumi.automation import ConfigValue
+
+import yaml
+
+# Directory in which script is located
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+# Default path to the directory containing the global MARA Pulumi stack configuration file
+DEFAULT_DIR_PATH = os.path.abspath(os.path.sep.join([SCRIPT_DIR, '..', '..', '..', 'config', 'pulumi']))
+
+
+class EmptyConfigurationException(RuntimeError):
+    filename: str
+
+    def __init__(self, filename: str, *args: object) -> None:
+        super().__init__(*args)
+        self.filename = filename
+
+
+class PulumiStackConfig(dict):
+    """Object containing the configuration parameters used by Pulumi to stand up projects. When this file is loaded by
+    Pulumi within the context of a project execution, it is *not* loaded into this object. This object is used only by
+    the MARA runner for the Pulumi Automation API."""
+
+    config_path: Optional[str] = None
+
+    def to_pulumi_config_value(self) -> MutableMapping[str, ConfigValue]:
+        if 'config' not in self:
+            return {}
+
+        config = self.get('config')
+
+        pulumi_config = {}
+        for key, val in config.items():
+            if type(val) in [str, int, float]:
+                pulumi_config[key] = ConfigValue(value=val)
+            elif type(val) is dict and 'secure' in val:
+                pulumi_config[key] = ConfigValue(value=val['secure'], secret=True)
+            else:
+                json_val = json.dumps(val)
+                pulumi_config[key] = ConfigValue(value=json_val)
+
+        return pulumi_config
+
+
+def _stack_config_path(stack_name: str) -> str:
+    """Path to the stack configuration file on the file system"""
+    return os.path.sep.join([DEFAULT_DIR_PATH, f'Pulumi.{stack_name}.yaml'])
+
+
+def _read(config_file_path: str) -> PulumiStackConfig:
+    """Reads the stack configuration file from the specified path, parses it, and loads it into the PulumiStackConfig
+    data structure."""
+
+    # Raise an exception for empty config files
+    if os.path.getsize(config_file_path) == 0:
+        raise EmptyConfigurationException(filename=config_file_path)
+
+    with open(config_file_path, 'r') as f:
+        stack_config = PulumiStackConfig()
+        stack_config.config_path = config_file_path
+        stack_config.update(yaml.safe_load(f))
+        return stack_config
+
+
+def read(stack_name: str) -> PulumiStackConfig:
+    """Generates the configuration file path based on the stack name, reads the stack configuration file, parses it,
+    and loads it into the PulumiStackConfig data structure.
+
+    :param stack_name: stack name to read configuration for
+    :return: new instance of PulumiStackConfig
+    """
+    stack_config_path = _stack_config_path(stack_name)
+    return _read(stack_config_path)
diff --git a/pulumi/python/config/README.md b/pulumi/python/config/README.md
index 871d5c7e..0d575716 100644
--- a/pulumi/python/config/README.md
+++ b/pulumi/python/config/README.md
@@ -1,24 +1,28 @@
-## Directory
+# Directory

 `/pulumi/python/config`

 ## Purpose

-This directory is used for configuration management in Pulumi. In previous versions of this project, the
-`vpc` directory was used to manage writes to the configuration file. This is required because you can only run
-the `pulumi config` command if you have a `Pulumi.yaml` somewhere in your directory or above that allows you to use the
-Pulumi tooling.
+This directory is used for configuration management in Pulumi. In previous
+versions of this project, the `vpc` directory was used to manage writes to the
+configuration file. This is required because you can only run the `pulumi config`
+command if you have a `Pulumi.yaml` somewhere in your directory or above that
+allows you to use the Pulumi tooling.

-Why not use each stack directory as it's own configuration? Using different directories will result in failures
-encrypting/decrypting the values in the main configuration file if different stacks are used. This is a stopgap
-workaround that will be obsoleted at such time that Pulumi provides nested/included configuration files.
+Why not use each stack directory as its own configuration? Using different
+directories will result in failures encrypting/decrypting the values in the
+main configuration file if different stacks are used. This is a stopgap
+workaround that will be obsoleted at such time that Pulumi provides
+nested/included configuration files. This is also the reason why we have created
+the `secrets` project.

 ## Key Files

-- [`Pulumi.yaml`](./Pulumi.yaml) This file tells the `pulumi` command where to find it's virtual envrionment and it's
-  configuration.
+* [`Pulumi.yaml`](./Pulumi.yaml) This file tells the `pulumi` command where to
+  find its virtual environment and its configuration.

 ## Notes

-Once Pulumi adds nested configuration files to the product we should be able to remove this work-around.
-
+Once Pulumi adds nested configuration files to the product we should be able to
+remove this work-around.
diff --git a/pulumi/python/infrastructure/README.md b/pulumi/python/infrastructure/README.md
index c8bf6eb3..42750d5e 100644
--- a/pulumi/python/infrastructure/README.md
+++ b/pulumi/python/infrastructure/README.md
@@ -1,4 +1,4 @@
-## Directory
+# Directory

 `/python/pulumi/infrastructure`

@@ -8,14 +8,17 @@ Holds all infrastructure related files.

 ## Key Files

-- [`aws`](./aws) Files to stand up a K8 cluster in AWS using VPC, EKS, and ECR.
-- [`digitalocean`](./digitalocean) Files to stand up a K8 cluster in DigitalOcean using DO Managed K8s.
-- [`linode`](./linode) Files to stand up a K8 cluster in Linode using Linode Kubernetes Engine.
-- [`kubeconfig`](./kubeconfig) Files to allow users to connect to any kubernetes installation that can be specified via
-  a `kubeconfig` file.
+* [`aws`](./aws) Files to stand up a K8 cluster in AWS using VPC, EKS, and ECR.
+* [`digitalocean`](./digitalocean) Files to stand up a K8 cluster in
+  DigitalOcean using DO Managed K8s.
+* [`linode`](./linode) Files to stand up a K8 cluster in Linode using Linode
+  Kubernetes Engine.
+* [`kubeconfig`](./kubeconfig) Files to allow users to connect to any kubernetes + installation that can be specified via a `kubeconfig` file. ## Notes -The `kubeconfig` project is intended to serve as a shim between infrastructure providers and the rest of the project. -For example, even if you use the AWS logic you will still use the logic inside the `kubeconfig` stack as part of the +The `kubeconfig` project is intended to serve as a shim between infrastructure +providers and the rest of the project. For example, even if you use the AWS +logic you will still use the logic inside the `kubeconfig` stack as part of the process. Additional infrastructures added will need to follow this pattern. diff --git a/pulumi/python/infrastructure/aws/ecr/__main__.py b/pulumi/python/infrastructure/aws/ecr/__main__.py index 86febaa3..1c29377a 100644 --- a/pulumi/python/infrastructure/aws/ecr/__main__.py +++ b/pulumi/python/infrastructure/aws/ecr/__main__.py @@ -8,6 +8,7 @@ ecr_repo = ecr.Repository(name=f'ingress-controller-{stack_name}', resource_name=f'nginx-ingress-repository-{stack_name}', image_tag_mutability="MUTABLE", + force_delete=False, tags={"Project": project_name, "Stack": stack_name}) pulumi.export('repository_url', ecr_repo.repository_url) diff --git a/pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml new file mode 100644 index 00000000..44fec005 --- /dev/null +++ b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/Pulumi.yaml @@ -0,0 +1,7 @@ +name: container-registry-credentials +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Adds container registry login credentials to the k8s cluster diff --git a/pulumi/python/infrastructure/digitalocean/container-registry-credentials/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/__main__.py new file mode 100644 index 00000000..d94686fc --- /dev/null +++ b/pulumi/python/infrastructure/digitalocean/container-registry-credentials/__main__.py @@ -0,0 +1,57 @@ +import os + +import pulumi +from pulumi import StackReference +from pulumi_digitalocean import ContainerRegistryDockerCredentials +from kic_util import pulumi_config +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() +script_dir = os.path.dirname(os.path.abspath(__file__)) + + +def project_name_from_same_parent(directory: str): + project_path = os.path.join(script_dir, '..', directory) + return pulumi_config.get_pulumi_project_name(project_path) + + +def project_name_of_namespace_project(): + project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller-namespace') + return pulumi_config.get_pulumi_project_name(project_path) + + +k8_project_name = project_name_from_same_parent('domk8s') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) + +container_registry_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('container-registry')}/{stack_name}" +cr_stack_ref = StackReference(container_registry_stack_ref_id) +container_registry_output = cr_stack_ref.require_output('container_registry') 
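+# The registry name output is used below to generate Docker credentials scoped to that registry.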
+registry_name_output = cr_stack_ref.require_output('container_registry_name')
+
+namespace_stack_ref_id = f"{pulumi_user}/{project_name_of_namespace_project()}/{stack_name}"
+ns_stack_ref = StackReference(namespace_stack_ref_id)
+namespace_name_output = ns_stack_ref.require_output('ingress_namespace_name')
+
+fifty_years_in_seconds = 1_576_800_000
+registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_k8s_docker_credentials',
+                                                          expiry_seconds=fifty_years_in_seconds,
+                                                          registry_name=registry_name_output,
+                                                          write=False)
+docker_credentials = registry_credentials.docker_credentials
+
+k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig)
+
+secret = Secret(resource_name='ingress-controller-registry-secret',
+                args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials},
+                                    type='kubernetes.io/dockerconfigjson',
+                                    metadata={'namespace': namespace_name_output,
+                                              'name': 'ingress-controller-registry'}),
+                opts=pulumi.ResourceOptions(provider=k8s_provider))
+
+pulumi.export('ingress-controller-registry-secret', secret)
diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml
new file mode 100644
index 00000000..9039d8eb
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/container-registry/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: container-registry
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new Digital Ocean Container Registry
diff --git a/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py
new file mode 100644
index 00000000..f326c632
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/container-registry/__main__.py
@@ -0,0 +1,48 @@
+import os
+
+import pulumi
+import pulumi_digitalocean as docean
+
+from kic_util import external_process
+
+config = pulumi.Config('docean')
+# valid values: starter, basic, professional
+subscription_tier = config.get('container_registry_subscription_tier')
+if not subscription_tier:
+    subscription_tier = 'starter'
+region = config.get('region')
+if not region:
+    region = 'sfo3'
+
+
+def token():
+    if config.get('token'):
+        return config.get('token')
+    if config.get_secret('token'):
+        return config.get_secret('token')
+    if 'DIGITALOCEAN_TOKEN' in os.environ:
+        return os.environ['DIGITALOCEAN_TOKEN']
+    raise RuntimeError('No valid token for Digital Ocean found')
+
+
+stack_name = pulumi.get_stack()
+
+# Digital Ocean allows only a single container registry per user. This means that we need to use doctl
+# to check to see if a registry already exists, and if so use it. We must do this using an external
+# command because Pulumi does not support the model of checking to see if a resource created outside of
+# Pulumi already exists and thereby forking logic.
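+# On success the doctl query below is expected to print just the registry
+# name (for example "my-registry"); when no registry exists yet it returns
+# an error, and we fall through to creating a new one.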
+registry_name_query_cmd = f'doctl --access-token {token()} registry get --format Name --no-header --output text'
+registry_name, err = external_process.run(cmd=registry_name_query_cmd, suppress_error=True)
+registry_name = registry_name.strip()
+if not err and registry_name and not registry_name.startswith('shared-global-container-registry-'):
+    pulumi.log.info(f'Using already existing global Digital Ocean container registry: {registry_name}')
+    container_registry = docean.ContainerRegistry.get(registry_name, id=registry_name)
+else:
+    pulumi.log.info('Creating new global Digital Ocean container registry')
+    container_registry = docean.ContainerRegistry('shared-global-container-registry',
+                                                  subscription_tier_slug=subscription_tier,
+                                                  region=region)
+
+pulumi.export('container_registry_id', container_registry.id)
+pulumi.export('container_registry_name', container_registry.name)
+pulumi.export('container_registry', container_registry)
diff --git a/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml b/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml
new file mode 100644
index 00000000..de744a25
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/dns-record/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: dns-record
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new DNS record for Ingress Controller
diff --git a/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py
new file mode 100644
index 00000000..9c826190
--- /dev/null
+++ b/pulumi/python/infrastructure/digitalocean/dns-record/__main__.py
@@ -0,0 +1,45 @@
+import os
+
+import pulumi
+from pulumi import StackReference
+import pulumi_digitalocean as docean
+
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+script_dir = os.path.dirname(os.path.abspath(__file__))
+
+
+def project_name_of_ingress_controller_project():
+    project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def extract_ip_address(lb_ingress):
+    return lb_ingress['load_balancer']['ingress'][0]['ip']
+
+
+ingress_stack_ref_id = f"{pulumi_user}/{project_name_of_ingress_controller_project()}/{stack_name}"
+ingress_stack_ref = StackReference(ingress_stack_ref_id)
+ip = ingress_stack_ref.require_output('lb_ingress').apply(extract_ip_address)
+
+config = pulumi.Config('kic-helm')
+fqdn = config.require('fqdn')
+
+#
+# Split our hostname off the domain name to build the DNS records
+#
+hostname, domainname = fqdn.split('.', 1)
+
+ingress_domain = docean.Domain.get(resource_name='ingress-domain', id=domainname, name=domainname)
+ingress_a_record = docean.DnsRecord(resource_name='ingress-a-record',
+                                    name=hostname,
+                                    domain=ingress_domain.id,
+                                    type="A",
+                                    ttl=1800,
+                                    value=ip)
+
+pulumi.export('ingress_domain', ingress_domain)
+pulumi.export('ingress_a_record', ingress_a_record)
diff --git a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py
index 30521c2d..c82a734d 100644
--- a/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py
+++ b/pulumi/python/infrastructure/digitalocean/domk8s/__main__.py
@@ -1,41 +1,52 @@
+import os
+
 import pulumi
-import pulumi_digitalocean as docean
-from kic_util import pulumi_config
+from pulumi_digitalocean import KubernetesCluster, KubernetesClusterNodePoolArgs +from kic_util import pulumi_config # Configuration details for the K8 cluster -config = pulumi.Config('domk8s') +config = pulumi.Config('docean') instance_size = config.get('instance_size') if not instance_size: - instance_size = 's-2vcpu-4gb' + instance_size = 's-4vcpu-8gb' region = config.get('region') if not region: region = 'sfo3' -node_count = config.get('node_count') +node_count = config.get_int('node_count') if not node_count: node_count = 3 k8s_version = config.get('k8s_version') if not k8s_version: - k8s_version = 'latest' + k8s_version = '1.22.8-do.1' stack_name = pulumi.get_stack() project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() + +def container_registry_project_name(): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', 'container-registry') + return pulumi_config.get_pulumi_project_name(project_path) + + # Derive our names for the cluster and the pool -resource_name = "do-" + stack_name + "-cluster" -pool_name = "do-" + stack_name + "-pool" +resource_name = f'do-{stack_name}-cluster' +pool_name = f'do-{stack_name}-pool' # Create a digital ocean cluster -cluster = docean.KubernetesCluster(resource_name=resource_name, - region=region, - version=k8s_version, - node_pool=docean.KubernetesClusterNodePoolArgs( - name=pool_name, - size=instance_size, - node_count=node_count, - )) +cluster = KubernetesCluster(resource_name=resource_name, + region=region, + version=k8s_version, + node_pool=KubernetesClusterNodePoolArgs( + name=pool_name, + size=instance_size, + node_count=node_count + )) + +kubeconfig = cluster.kube_configs[0].raw_config # Export the clusters' kubeconfig -pulumi.export("cluster_name", resource_name) +pulumi.export("cluster_name", cluster.name) pulumi.export("cluster_id", cluster.id) -pulumi.export("kubeconfig", pulumi.Output.unsecret(cluster.kube_configs[0].raw_config)) +pulumi.export("kubeconfig", pulumi.Output.unsecret(kubeconfig)) diff --git a/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml b/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml new file mode 100644 index 00000000..44fec005 --- /dev/null +++ b/pulumi/python/infrastructure/linode/container-registry-credentials/Pulumi.yaml @@ -0,0 +1,7 @@ +name: container-registry-credentials +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Adds container registry login credentials to the k8s cluster diff --git a/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py b/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py new file mode 100644 index 00000000..32420919 --- /dev/null +++ b/pulumi/python/infrastructure/linode/container-registry-credentials/__main__.py @@ -0,0 +1,81 @@ +import json +import os +import base64 +from typing import List + +import pulumi +from pulumi import StackReference +from kic_util import pulumi_config +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() +script_dir = os.path.dirname(os.path.abspath(__file__)) + + +def project_name_from_kubeconfig(): + project_path = os.path.join(script_dir, '..', '..', 'kubeconfig') + return pulumi_config.get_pulumi_project_name(project_path) + + +def 
project_name_from_same_parent(directory: str):
+    project_path = os.path.join(script_dir, '..', directory)
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def project_name_of_namespace_project():
+    project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', 'nginx', 'ingress-controller-namespace')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+k8_project_name = project_name_from_kubeconfig()
+k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
+k8_stack_ref = pulumi.StackReference(k8_stack_ref_id)
+kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c))
+
+container_registry_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('harbor')}/{stack_name}"
+harbor_stack_ref = StackReference(container_registry_stack_ref_id)
+harbor_hostname_output = harbor_stack_ref.require_output('harbor_hostname')
+harbor_user_output = harbor_stack_ref.require_output('harbor_user')
+harbor_password_output = harbor_stack_ref.require_output('harbor_password')
+
+namespace_stack_ref_id = f"{pulumi_user}/{project_name_of_namespace_project()}/{stack_name}"
+ns_stack_ref = StackReference(namespace_stack_ref_id)
+namespace_name_output = ns_stack_ref.require_output('ingress_namespace_name')
+
+
+def build_docker_credentials(params: List[str]):
+    registry_host = params[0]
+    username = params[1]
+    password = params[2]
+    auth_string = f'{username}:{password}'
+    # b64encode (rather than encodebytes) keeps the auth value free of
+    # embedded newlines, which would break the .dockerconfigjson entry
+    auth_base64 = str(base64.b64encode(auth_string.encode('ascii')), 'ascii')
+
+    data = {
+        'auths': {
+            registry_host: {
+                'auth': auth_base64
+            }
+        }
+    }
+
+    return json.dumps(data)
+
+
+docker_credentials = pulumi.Output.all(harbor_hostname_output,
+                                       harbor_user_output,
+                                       harbor_password_output).apply(build_docker_credentials)
+
+k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig)
+
+secret = Secret(resource_name='ingress-controller-registry-secret',
+                args=SecretInitArgs(string_data={'.dockerconfigjson': docker_credentials},
+                                    type='kubernetes.io/dockerconfigjson',
+                                    metadata={'namespace': namespace_name_output,
+                                              'name': 'ingress-controller-registry'}),
+                opts=pulumi.ResourceOptions(provider=k8s_provider))
+
+pulumi.export('ingress-controller-registry-secret', secret)
diff --git a/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml b/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml
new file mode 100644
index 00000000..4f6722b7
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor-configuration/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: harbor-configuration
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Configures Harbor Container Registry
diff --git a/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py b/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py
new file mode 100644
index 00000000..38b42e37
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor-configuration/__main__.py
@@ -0,0 +1,96 @@
+import base64
+import json
+import urllib.request
+import urllib.error
+import os
+import time
+from typing import List
+
+import pulumi
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+
+
+def project_name_from_harbor_dir():
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    project_path = os.path.join(script_dir, '..', 'harbor')
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+harbor_project_name = project_name_from_harbor_dir()
+stack_ref_id = f"{pulumi_user}/{harbor_project_name}/{stack_name}"
+stack_ref = pulumi.StackReference(stack_ref_id)
+harbor_hostname_output = stack_ref.require_output('harbor_hostname')
+harbor_user_output = stack_ref.require_output('harbor_user')
+harbor_password_output = stack_ref.require_output('harbor_password')
+
+
+def configure_harbor(params: List[str]) -> bool:
+    hostname = params[0]
+    user = params[1]
+    password = params[2]
+    base_url = f'https://{hostname}/api/v2.0'
+    base64creds = str(base64.b64encode(f'{user}:{password}'.encode('ascii')), 'ascii')
+    max_retries = 12
+    retries = 0
+    timeout = 1000
+
+    def is_harbor_up() -> bool:
+        url = f'{base_url}/health'
+        request = urllib.request.Request(url=url, method='GET')
+        request.add_header(key='Authorization', val=f'Basic {base64creds}')
+
+        try:
+            with urllib.request.urlopen(url=request, timeout=timeout) as context:
+                if context.getcode() != 200:
+                    return False
+
+                health_check = json.load(context)
+                components = health_check['components']
+                for component in components:
+                    if component['status'] != 'healthy':
+                        pulumi.log.info(f"Harbor component [{component['name']}] is not healthy")
+                        return False
+
+                return True
+        except urllib.error.URLError as e:
+            # Don't retry for name resolution failures
+            if e.errno == -3:
+                raise e
+
+            pulumi.log.info(f'Unable to connect to Harbor [try {retries+1} of {max_retries}]: {e}')
+            return False
+
+    def modify_default_project_registry():
+        url = f'{base_url}/projects/library/metadatas/public'
+        request = urllib.request.Request(url=url, method='PUT')
+        request.add_header(key='Authorization', val=f'Basic {base64creds}')
+        request.add_header(key='Content-Type', val='application/json')
+        body = {
+            'public': 'false'
+        }
+        body_json = json.dumps(body)
+        request.data = body_json.encode('utf-8')
+        urllib.request.urlopen(url=request, timeout=timeout)
+
+    while not is_harbor_up():
+        retries += 1
+        if retries >= max_retries:
+            raise RuntimeError(f'Harbor has not come up after {retries} retries')
+
+        timeout = 1000 * retries
+        time.sleep(timeout)
+
+    pulumi.log.info('Harbor is up, modifying default registry')
+    modify_default_project_registry()
+
+    return True
+
+
+harbor_is_alive = pulumi.Output.all(harbor_hostname_output, harbor_user_output, harbor_password_output)\
+    .apply(configure_harbor)
+
+pulumi.export('harbor_is_alive', harbor_is_alive)
diff --git a/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml b/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml
new file mode 100644
index 00000000..28dabbcf
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: harbor
+runtime:
+  name: python
+  options:
+    virtualenv: ../../../venv
+config: ../../../../../config/pulumi
+description: Creates new Harbor Container Registry
diff --git a/pulumi/python/infrastructure/linode/harbor/__main__.py b/pulumi/python/infrastructure/linode/harbor/__main__.py
new file mode 100644
index 00000000..3aa914aa
--- /dev/null
+++ b/pulumi/python/infrastructure/linode/harbor/__main__.py
@@ -0,0 +1,154 @@
+import base64
+import os
+from typing import Mapping
+from collections import namedtuple
+
+import pulumi
+import pulumi_linode as linode
+import pulumi_kubernetes as k8s
+from pulumi_kubernetes.core.v1 import Secret
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+
+# Configuration details for the K8 cluster
+config = pulumi.Config('linode')
+
+api_token = config.get('token') or \
+    config.get_secret('token') or \
+    os.getenv('LINODE_TOKEN') or \
+    os.getenv('LINODE_CLI_TOKEN')
+
+# For whatever reason, the Linode provider does not pick up the token from the
+# stack configuration or from the environment variables, so we do that work
+# here.
+provider = linode.Provider(resource_name='linode_provider', token=api_token)
+
+instance_type = config.get('harbor_instance_type') or 'g6-nanode-1'
+region = config.require('region')
+
+# harbor_api_token = linode.Token(resource_name='harbor_token',
+#                                 scopes='domains:read_write',
+#                                 expiry=None,
+#                                 label='Token used by Harbor to create DNS records',
+#                                 opts=pulumi.ResourceOptions(provider=provider))

+# This is the internal Linode ID used to refer to the StackScript
+# (https://www.linode.com/products/stackscripts/) that backs the
+# Harbor marketplace image.
+harbor_stackscript_id = 912262
+# Valid options are: linode/ubuntu20.04 and linode/debian11
+
+harbor_os_image = 'linode/ubuntu20.04'
+
+
+def project_name_from_kubernetes_dir(dirname: str):
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    project_path = os.path.join(script_dir, '..', '..', '..', 'kubernetes', dirname)
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+def project_name_from_infrastructure_dir(dirname: str):
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    project_path = os.path.join(script_dir, '..', '..', dirname)
+    return pulumi_config.get_pulumi_project_name(project_path)
+
+
+k8_project_name = project_name_from_infrastructure_dir('kubeconfig')
+k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
+k8_stack_ref = pulumi.StackReference(k8_stack_ref_id)
+kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c))
+k8s_provider = k8s.Provider(resource_name='lke-provider',
+                            kubeconfig=kubeconfig)
+
+secrets_project_name = project_name_from_kubernetes_dir('secrets')
+secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}"
+secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id)
+pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets')
+
+harbor_k8s_secrets = Secret.get(resource_name='pulumi-secret-linode',
+                                id=pulumi_secrets['linode'],
+                                opts=pulumi.ResourceOptions(provider=k8s_provider)).data
+
+HarborSecrets = namedtuple('HarborSecrets',
+                           ['harbor_password', 'harbor_db_password', 'harbor_sudo_user_password'])
+
+
+def extract_secrets(secrets: Mapping[str, str]) -> HarborSecrets:
+    def decode_k8s_secret(key: str):
+        base64_string = secrets[key]
+        byte_data = base64.b64decode(base64_string)
+        password = str(byte_data, 'utf-8')
+        return password
+
+    return HarborSecrets(harbor_password=decode_k8s_secret('harbor_password'),
+                         harbor_db_password=decode_k8s_secret('harbor_db_password'),
+                         harbor_sudo_user_password=decode_k8s_secret('harbor_sudo_user_password'))
+
+
+def build_stackscript_data(params) -> Mapping[str, str]:
+    # token: linode.Token = params[0]
+    secrets: HarborSecrets = params[0]
+
+    # Read a public key into memory if specified in the config
+    pubkey_path = config.get('harbor_ssh_key_path')
+    if pubkey_path and os.path.exists(pubkey_path):
+        with open(pubkey_path, 'r') as fp:
+            pubkey = fp.readline()
+    else:
+        pubkey = None
+
+    return {
+        # The Harbor admin password
+        'harbor_password': secrets.harbor_password,
+        # The Harbor database password
+        'harbor_db_password': secrets.harbor_db_password,
+        # Admin Email for the Harbor server
+        'soa_email_address': config.require('soa_email'),
+        # The subdomain for the Linode's DNS record (Requires API token)
+        'subdomain': 'registry',
+        # The limited sudo user to be created for the Linode
+        'username': 'harbor',
+        # The password for the limited sudo user
+        'password': secrets.harbor_sudo_user_password,
+        # The SSH Public Key that will be used to access the Linode
+        'pubkey': pubkey,
+        # Disable root access over SSH? (Yes/No)
+        'disable_root': 'Yes'
+    }
+
+
+harbor_user = 'admin'
+harbor_secrets = pulumi.Output.unsecret(harbor_k8s_secrets).apply(extract_secrets)
+stackscript_data = pulumi.Output.all(harbor_secrets).apply(build_stackscript_data)
+
+instance = linode.Instance(resource_name='harbor',
+                           region=region,
+                           image=harbor_os_image,
+                           stackscript_id=harbor_stackscript_id,
+                           stackscript_data=stackscript_data,
+                           type=instance_type,
+                           private_ip=False,
+                           opts=pulumi.ResourceOptions(provider=provider))
+
+
+def build_hostname(ip_address: str) -> str:
+    ip_parts = ip_address.split(sep='.')
+    hostname = ''
+    for i, part in enumerate(ip_parts):
+        hostname += part
+        if i != len(ip_parts) - 1:
+            hostname += '-'
+
+    hostname += '.ip.linodeusercontent.com'
+    return hostname
+
+
+harbor_hostname = instance.ip_address.apply(build_hostname)
+
+pulumi.export('harbor_instance', instance)
+pulumi.export('harbor_hostname', harbor_hostname)
+pulumi.export('harbor_user', pulumi.Output.secret(harbor_user))
+pulumi.export('harbor_password', pulumi.Output.secret(harbor_secrets.harbor_password))
diff --git a/pulumi/python/infrastructure/linode/lke/__main__.py b/pulumi/python/infrastructure/linode/lke/__main__.py
index f9b7d538..886d09cc 100644
--- a/pulumi/python/infrastructure/linode/lke/__main__.py
+++ b/pulumi/python/infrastructure/linode/lke/__main__.py
@@ -1,44 +1,42 @@
+import os
 import pulumi
 import pulumi_linode as linode
-from kic_util import pulumi_config

 # Configuration details for the K8 cluster
-config = pulumi.Config('lke')
-instance_size = config.get('instance_size')
-if not instance_size:
-    instance_size = 'g6-standard-4'
-region = config.get('region')
-if not region:
-    region = 'us-west'
-node_count = config.get('node_count')
-if not node_count:
-    node_count = 3
-k8s_version = config.get('k8s_version')
-if not k8s_version:
-    k8s_version = '1.22'
-k8s_ha = config.get('k8s_ha')
-if not k8s_ha:
-    k8s_ha = True
+config = pulumi.Config('linode')

-stack_name = pulumi.get_stack()
-project_name = pulumi.get_project()
-pulumi_user = pulumi_config.get_pulumi_user()
+api_token = config.get('token') or \
+    config.get_secret('token') or \
+    os.getenv('LINODE_TOKEN') or \
+    os.getenv('LINODE_CLI_TOKEN')

-# Derive our names for the cluster and the pool
-resource_name = "lke-" + stack_name + "-cluster"
+# For whatever reason, the Linode provider does not pick up the token from the
+# stack configuration or from the environment variables, so we do that work
+# here.
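+# (The token can be supplied with, for example,
+# `pulumi config set --secret linode:token <TOKEN>`, or by exporting
+# LINODE_TOKEN before running Pulumi.)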
+provider = linode.Provider(resource_name='linode_provider', token=api_token)
+
+instance_type = config.require('instance_type')
+region = config.require('region')
+node_count = config.require_int('node_count')
+k8s_version = config.require('k8s_version')
+k8s_ha = config.require_bool('k8s_ha')
+
+stack = pulumi.get_stack()
+resource_name = f'lke-{stack}-cluster'

 # Create a linode cluster
-cluster = linode.LkeCluster(resource_name,
+cluster = linode.LkeCluster(resource_name=resource_name,
                             k8s_version=k8s_version,
                             control_plane=linode.LkeClusterControlPlaneArgs(
                                 high_availability=k8s_ha),
-                            label=resource_name,
+                            label=f'MARA [{stack}]',
                             pools=[linode.LkeClusterPoolArgs(
                                 count=node_count,
-                                type=instance_size,
+                                type=instance_type,
                             )],
                             region=region,
-                            tags=["mara"])
+                            tags=["mara"],
+                            opts=pulumi.ResourceOptions(provider=provider))

 # Export the clusters' kubeconfig
 pulumi.export("cluster_name", resource_name)
diff --git a/pulumi/python/kubernetes/README.md b/pulumi/python/kubernetes/README.md
index 081a0b2e..ff753d16 100644
--- a/pulumi/python/kubernetes/README.md
+++ b/pulumi/python/kubernetes/README.md
@@ -1,18 +1,19 @@
-## Directory
+# Directory

 `/pulumi/python/kubernetes`

 ## Purpose

-All kubernetes deployments are stored in this directory; all of these stacks will use the
-[`infrastructure/kubeconfig`](../infrastructure/kubeconfig) stack as a source of information about the kubernetes
-installation that is being used.
+All kubernetes deployments are stored in this directory; all of these stacks
+will use the [`infrastructure/kubeconfig`](../infrastructure/kubeconfig) stack as
+a source of information about the kubernetes installation that is being used
+(a sketch of that lookup follows the file list below).

 ## Key Files

-- [`nginx`](./nginx) NGINX related components; Ingress Controller, Service Mesh, App Protect, etc. Each in a separate
+* [`nginx`](./nginx) NGINX related components; Ingress Controller, Service
+  Mesh, App Protect, etc. Each in a separate
   directory.
-- [`applications`](./applications) Applications; each in it's own directory.
+* [`applications`](./applications) Applications; each in its own directory.
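+
+For example, a stack in this directory locates the cluster roughly as follows
+(a sketch: the project name is shown as `kubeconfig` for illustration, while
+the real code resolves it with `pulumi_config.get_pulumi_project_name(...)`):
+
+```python
+import pulumi
+from kic_util import pulumi_config
+
+stack_name = pulumi.get_stack()
+pulumi_user = pulumi_config.get_pulumi_user()
+
+# Reference the kubeconfig stack and read the cluster's kubeconfig output
+ref = pulumi.StackReference(f'{pulumi_user}/kubeconfig/{stack_name}')
+kubeconfig = ref.require_output('kubeconfig')
+```
+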
## Notes diff --git a/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml b/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml index de0bf82d..e1928210 100644 --- a/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml +++ b/pulumi/python/kubernetes/applications/sirius/Pulumi.yaml @@ -3,5 +3,5 @@ runtime: name: python options: virtualenv: ../../../venv -config: ./config +config: ../../../../../config/pulumi description: Creates the Bank of Sirius App diff --git a/pulumi/python/kubernetes/applications/sirius/__main__.py b/pulumi/python/kubernetes/applications/sirius/__main__.py index bdc471f7..06736fca 100644 --- a/pulumi/python/kubernetes/applications/sirius/__main__.py +++ b/pulumi/python/kubernetes/applications/sirius/__main__.py @@ -1,9 +1,10 @@ import base64 import os - +from typing import Mapping import pulumi import pulumi_kubernetes as k8s from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs +from pulumi_kubernetes.core.v1 import Secret from Crypto.PublicKey import RSA from pulumi_kubernetes.yaml import ConfigFile from pulumi_kubernetes.yaml import ConfigGroup @@ -18,43 +19,69 @@ def remove_status_field(obj): del obj['status'] -def pulumi_k8_project_name(): +def project_name_from_infrastructure_dir(): script_dir = os.path.dirname(os.path.abspath(__file__)) - eks_project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'kubeconfig') + eks_project_path = os.path.join( + script_dir, '..', '..', '..', 'infrastructure', 'kubeconfig') return pulumi_config.get_pulumi_project_name(eks_project_path) -def pulumi_ingress_project_name(): +def project_name_from_kubernetes_dir(dirname: str): script_dir = os.path.dirname(os.path.abspath(__file__)) - ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller') - return pulumi_config.get_pulumi_project_name(ingress_project_path) + project_path = os.path.join(script_dir, '..', '..', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + +# +# This is just used for the kubernetes config deploy.... 
+#
+
+
 def pulumi_repo_ingress_project_name():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
+    ingress_project_path = os.path.join(
+        script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
+    return pulumi_config.get_pulumi_project_name(ingress_project_path)
+
+
+def pulumi_ingress_project_name():
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    ingress_project_path = os.path.join(
+        script_dir, '..', '..', 'nginx', 'ingress-controller')
     return pulumi_config.get_pulumi_project_name(ingress_project_path)


 def sirius_manifests_location():
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    sirius_manifests_path = os.path.join(script_dir, 'src', 'kubernetes-manifests', '*.yaml')
+    sirius_manifests_path = os.path.join(
+        script_dir, 'src', 'kubernetes-manifests', '*.yaml')
     return sirius_manifests_path


-# We will only want to be deploying one type of cerficate issuer
+def extract_password_from_k8s_secrets(secrets: Mapping[str, str], secret_name: str) -> str:
+    if secret_name not in secrets:
+        raise KeyError(f'Secret [{secret_name}] not found in Kubernetes secret store')
+    base64_string = secrets[secret_name]
+    byte_data = base64.b64decode(base64_string)
+    password = str(byte_data, 'utf-8')
+    return password
+
+#
+# We will only want to be deploying one type of certificate issuer
 # as part of this application; this can (and should) be changed as
 # needed. For example, if the user is taking advantage of ACME let's encrypt
 # in order to generate certs.
+#


 def k8_manifest_location():
     script_dir = os.path.dirname(os.path.abspath(__file__))
     k8_manifest_path = os.path.join(script_dir, 'cert', 'self-sign.yaml')
     return k8_manifest_path

-
+#
 # The database password is a secret, and in order to use it in a string concat
 # we need to decrypt the password with Output.unsecret() before we use it.
-# This function provides the logic to accomplish this, while still using the pulumi
-# secrets for the resulting string:
+# This function provides the logic to accomplish this, while still using the
+# pulumi secrets for the resulting string:
+#
 def create_pg_uri(password_object):
     user = str(accounts_admin)
     password = str(password_object)
@@ -69,7 +96,7 @@ def add_namespace(obj):

 stack_name = pulumi.get_stack()
 project_name = pulumi.get_project()
-k8_project_name = pulumi_k8_project_name()
+k8_project_name = project_name_from_infrastructure_dir()
 pulumi_user = pulumi_config.get_pulumi_user()

 k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
@@ -78,80 +105,78 @@ def add_namespace(obj):
 k8_stack_ref.get_output('cluster_name').apply(
     lambda s: pulumi.log.info(f'Cluster name: {s}'))

-k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig)
+secrets_project_name = project_name_from_kubernetes_dir('secrets')
+secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}"
+secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id)
+pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets')
+
+k8s_provider = k8s.Provider(resource_name='ingress-controller')

-# TODO: Streamline the logic for FQDN/IP into something a bit more sane and scalable #82
-#
-# Currently, if we are doing an AWS deployment we use the AWS IC deployment, which uses the ELB hostname
-# as part of the certificate (self-signed).
 #
-# If we are using a kubeconfig file (ie, not type AWS) we expect we are going to get an IP address and not
-# a hostname in return. So we use the hostname variable to create the certificate we need, and then we use
-# the IP address in output to the user to tell them to setup DNS or a hostfile.
+# This logic is used to manage the kubeconfig deployments, since that uses a
+# slightly different logic path than the mainline. This will be removed once
+# the kubeconfig deploys are moved to the Pulumi Automation API.
 #
-
-# We use the kubernetes namespace for this
 config = pulumi.Config('kubernetes')
 infra_type = config.require('infra_type')
-if infra_type == 'AWS':
-    # Logic to extract the FQDN of the load balancer for Ingress
-    ingress_project_name = pulumi_ingress_project_name()
-    ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
-    ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
-    lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
-    sirius_host = lb_ingress_hostname
-elif infra_type == 'kubeconfig':
-    # Logic to extract the FQDN of the load balancer for Ingress
-    ingress_project_name = pulumi_repo_ingress_project_name()
-    ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
-    ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
-    lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
-    # Set back to kubernetes
-    config = pulumi.Config('kubernetes')
-    lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
-    sirius_host = lb_ingress_hostname
-elif infra_type == 'DO':
+if infra_type == 'kubeconfig':
+    # Logic to extract the FQDN of the load balancer for Ingress
+    ingress_project_name = pulumi_repo_ingress_project_name()
     ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
     ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
     lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
+    # Set back to kubernetes
+    # config = pulumi.Config('kubernetes')
     lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
     sirius_host = lb_ingress_hostname
-elif infra_type == 'LKE':
-    # Logic to extract the FQDN of the load balancer for Ingress
-    ingress_project_name = pulumi_repo_ingress_project_name()
+else:
+    #
+    # We use the hostname to set the value for our FQDN, which drives the cert
+    # process as well.
+    #
+    ingress_project_name = pulumi_ingress_project_name()
     ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
     ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
     lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
-    # Set back to kubernetes
-    config = pulumi.Config('kubernetes')
-    lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
     sirius_host = lb_ingress_hostname
-
+#
 # Create the namespace for Bank of Sirius
+#
 ns = k8s.core.v1.Namespace(resource_name='bos',
                            metadata={'name': 'bos'},
                            opts=pulumi.ResourceOptions(provider=k8s_provider))

-# Add Config Maps for Bank of Sirius; these are built in Pulumi in order to manage secrets and provide the option
-# for users to override defaults in the configuration file. Configuration values that are required use the `require`
-# method. Those that are optional use the `get` method, and have additional logic to set defaults if no value is set
-# by the user.
 #
-# Note that the Pulumi code will exit with an error message if a required variable is not defined in the configuration
-# file.
+# Add Config Maps for Bank of Sirius; these are built in Pulumi in order to +# manage secrets and provide the option for users to override defaults in the +# configuration file. Configuration values that are required use the `require` +# method. Those that are optional use the `get` method, and have additional +# logic to set defaults if no value is set by the user. # -# Configuration Values are stored in the configuration: -# ./config/Pulumi.STACKNAME.yaml +# Note that the Pulumi code will exit with an error message if a required +# variable is not defined in the configuration file. +# +# Configuration Values are stored in the "secrets" project # -# Note this config is specific to the sirius code! config = pulumi.Config('sirius') -accounts_pwd = config.require_secret('accounts_pwd') + +sirius_secrets = Secret.get(resource_name='pulumi-secret-sirius', + id=pulumi_secrets['sirius'], + opts=pulumi.ResourceOptions(provider=k8s_provider)).data +accounts_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'accounts_pwd')) +ledger_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'ledger_pwd')) +demo_login_user = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'demo_login_user')) +demo_login_pwd = pulumi.Output.unsecret(sirius_secrets).apply( + lambda secrets: extract_password_from_k8s_secrets(secrets, 'demo_login_pwd')) accounts_admin = config.get('accounts_admin') if not accounts_admin: @@ -164,7 +189,8 @@ def add_namespace(obj): accounts_db_uri = pulumi.Output.unsecret(accounts_pwd).apply(create_pg_uri) accounts_db_config_config_map = k8s.core.v1.ConfigMap("accounts_db_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -182,7 +208,8 @@ def add_namespace(obj): }) environment_config_config_map = k8s.core.v1.ConfigMap("environment_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -193,8 +220,10 @@ def add_namespace(obj): "LOCAL_ROUTING_NUM": "883745000", "PUB_KEY_PATH": "/root/.ssh/publickey" }) + tracing_config_config_map = k8s.core.v1.ConfigMap("tracing_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -208,7 +237,8 @@ def add_namespace(obj): }) service_api_config_config_map = k8s.core.v1.ConfigMap("service_api_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -222,12 +252,13 @@ def add_namespace(obj): "CONTACTS_API_ADDR": "contacts:8080", "USERSERVICE_API_ADDR": "userservice:8080", }) - +# # Demo data is hardcoded in the current incarnation of the bank of # sirius project, so we go along with that for now. 
- +# demo_data_config_config_map = k8s.core.v1.ConfigMap("demo_data_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -236,15 +267,10 @@ def add_namespace(obj): ), data={ "USE_DEMO_DATA": "True", - "DEMO_LOGIN_USERNAME": "testuser", - "DEMO_LOGIN_PASSWORD": "password" + "DEMO_LOGIN_USERNAME": demo_login_user, + "DEMO_LOGIN_PASSWORD": demo_login_pwd }) -# Configuration Values are stored in the configuration: -# ./config/Pulumi.STACKNAME.yaml -config = pulumi.Config('sirius') -ledger_pwd = config.require_secret('ledger_pwd') - ledger_admin = config.get('ledger_admin') if not ledger_admin: ledger_admin = 'admin' @@ -256,7 +282,8 @@ def add_namespace(obj): spring_url = 'jdbc:postgresql://ledger-db:5432/' + str(ledger_db) ledger_db_config_config_map = k8s.core.v1.ConfigMap("ledger_db_configConfigMap", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), api_version="v1", kind="ConfigMap", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -286,7 +313,8 @@ def add_namespace(obj): jwt_key_secret = k8s.core.v1.Secret("jwt_keySecret", api_version="v1", - opts=pulumi.ResourceOptions(depends_on=[ns]), + opts=pulumi.ResourceOptions( + depends_on=[ns]), kind="Secret", metadata=k8s.meta.v1.ObjectMetaArgs( name="jwt-key", @@ -298,11 +326,13 @@ def add_namespace(obj): "jwtRS256.key.pub": str(encode_public, "utf-8") }) -# Create resources for the Bank of Sirius using the Kubernetes YAML manifests which have been pulled from -# the google repository. # -# Note that these have been lightly edited to remove dependencies on GCP where necessary. Additionally, the -# `frontend` service has been updated to use a ClusterIP rather than the external load balancer, as that interaction +# Create resources for the Bank of Sirius using the Kubernetes YAML manifests +# which have been pulled from the google repository. +# +# Note that these have been lightly edited to remove dependencies on GCP where +# necessary. Additionally, the `frontend` service has been updated to use a +# ClusterIP rather than the external load balancer, as that interaction # is now handled by the NGNIX Ingress Controller # sirius_manifests = sirius_manifests_location() @@ -314,10 +344,11 @@ def add_namespace(obj): opts=pulumi.ResourceOptions(depends_on=[tracing_config_config_map]) ) +# # We need to create an issuer for the cert-manager (which is installed in a # separate project directory). This can (and should) be adjusted as required, # as the default issuer is self-signed. - +# k8_manifest = k8_manifest_location() selfissuer = ConfigFile( @@ -325,13 +356,17 @@ def add_namespace(obj): transformations=[add_namespace], file=k8_manifest) -# Add the Ingress controller for the Bank of Sirius application. This uses the NGINX IC that is installed -# as part of this Pulumi stack. +# +# Add the Ingress controller for the Bank of Sirius application. This uses the +# NGINX IC that is installed as part of this Pulumi stack. # -# This block is responsible for creating the Ingress object for the application. This object -# is deployed into the same namespace as the application and requires that an IngressClass -# and Ingress controller be installed (which is done in an earlier step, deploying the KIC). +# +# This block is responsible for creating the Ingress object for the +# application. 
This object is deployed into the same namespace as the
+# application and requires that an IngressClass and Ingress controller be
+# installed (which is done in an earlier step, deploying the KIC).
+#
 bosingress = k8s.networking.v1.Ingress("bosingress",
                                        api_version="networking.k8s.io/v1",
                                        kind="Ingress",
@@ -356,7 +391,7 @@ def add_namespace(obj):
                                        # to store the generated certificate.
                                        tls=[k8s.networking.v1.IngressTLSArgs(
                                            hosts=[sirius_host],
-                                           secret_name="sirius-secret",
+                                           secret_name="sirius-secret",  # pragma: allowlist secret
                                        )],
                                        # The block below defines the rules for traffic coming into the KIC.
                                        # In the example below, we take any traffic on the host for path /
@@ -384,22 +419,21 @@ def add_namespace(obj):
                                        )],
                                        ))

-# We use the kubernetes namespace for this
+#
+# Get the hostname for our connect URL; this logic will be collapsed once the
+# kubeconfig deployments are moved over to the Automation API. Until then,
+# we have to use a different process.
+#
+
 config = pulumi.Config('kubernetes')
 infra_type = config.require('infra_type')
-if infra_type == 'AWS':
-    application_url = sirius_host.apply(lambda host: f'https://{host}')
-    pulumi.export('application_url', application_url)
-elif infra_type == 'kubeconfig':
+if infra_type == 'kubeconfig':
     pulumi.export('hostname', lb_ingress_hostname)
     pulumi.export('ipaddress', lb_ingress_ip)
-    #pulumi.export('application_url', f'https://{lb_ingress_hostname}')
     application_url = sirius_host.apply(lambda host: f'https://{host}')
-elif infra_type == 'DO':
-    pulumi.export('hostname', lb_ingress_hostname)
-    pulumi.export('ipaddress', lb_ingress_ip)
-    #pulumi.export('application_url', f'https://{lb_ingress_hostname}')
+else:
     application_url = sirius_host.apply(lambda host: f'https://{host}')
+    pulumi.export('application_url', application_url)

 #
 # Get the chart values for both monitoring charts, switch back to the Sirius
@@ -429,11 +463,11 @@ def add_namespace(obj):
     namespace=ns,

     # Values from Chart's parameters specified hierarchically,
-    values = {
+    values={
         "serviceMonitor": {
            "enabled": True,
            "namespace": "prometheus"
-            },
+        },
         "config": {
             "datasource": {
                 "host": "accounts-db",
@@ -476,11 +510,11 @@ def add_namespace(obj):
     namespace=ns,

     # Values from Chart's parameters specified hierarchically,
-    values = {
+    values={
         "serviceMonitor": {
            "enabled": True,
            "namespace": "prometheus"
-            },
+        },
         "config": {
             "datasource": {
                 "host": "ledger-db",
diff --git a/pulumi/python/kubernetes/applications/sirius/config/.gitignore b/pulumi/python/kubernetes/applications/sirius/config/.gitignore
deleted file mode 100644
index 2a616051..00000000
--- a/pulumi/python/kubernetes/applications/sirius/config/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.yaml
\ No newline at end of file
diff --git a/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example b/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example
deleted file mode 100644
index b5a09bea..00000000
--- a/pulumi/python/kubernetes/applications/sirius/config/Pulumi.stackname.yaml.example
+++ /dev/null
@@ -1,29 +0,0 @@
-config:
-  # These parameters define the name of the database and the database credentials
-  # used by the Bank of Sirius ledger application.
-  #
-  # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius
-  # deployment if no password is provided.
- # sirius:ledger_pwd: Password # Required - sirius:ledger_admin: admin - sirius:ledger_db: postgresdb - - # This optional parameter supplies a hostname for the Bank of Sirius Ingress - # controller. If not set, the FQDN of the LB is used. - #sirius:hostname: demo.example.com - - # These parameters define the name of the database and the database credentials - # used by the Bank of Sirius accounts application. - # - # Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius - # deployment if no password is provided. - #sirius:accounts_pwd: Password # Required - sirius:accounts_admin: admin - sirius:accounts_db: postgresdb - - # Prometheus Configuration - sirius:chart_version: 2.3.5 - # Chart version for the Pulumi chart for prometheus - sirius:helm_repo_name: prometheus-community - # Name of the repo to pull the prometheus chart from - sirius:helm_repo_url: https://prometheus-community.github.io/helm-charts diff --git a/pulumi/python/kubernetes/applications/sirius/verify.py b/pulumi/python/kubernetes/applications/sirius/verify.py index 415510c0..99883ec6 100755 --- a/pulumi/python/kubernetes/applications/sirius/verify.py +++ b/pulumi/python/kubernetes/applications/sirius/verify.py @@ -8,20 +8,23 @@ stdin_json = json.load(sys.stdin) if 'application_url' not in stdin_json: - raise ValueError("Missing expected key 'application_url' in STDIN json data") + raise ValueError( + "Missing expected key 'application_url' in STDIN json data") url = f"{stdin_json['application_url']}/login" payload = 'username=testuser&password=password' headers = { - 'Content-Type': 'application/x-www-form-urlencoded' + 'Content-Type': 'application/x-www-form-urlencoded' } urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) -response = requests.request("POST", url, headers=headers, data=payload, verify=False) +response = requests.request( + "POST", url, headers=headers, data=payload, verify=False) response_code = response.status_code if response_code != 200: - print(f'Application failed health check [url={url},response_code={response_code}', file=sys.stderr) + print( + f'Application failed health check [url={url},response_code={response_code}', file=sys.stderr) sys.exit(1) else: print('Application passed health check', file=sys.stderr) diff --git a/pulumi/python/kubernetes/certmgr/__main__.py b/pulumi/python/kubernetes/certmgr/__main__.py index 6ed1d41f..49507c84 100644 --- a/pulumi/python/kubernetes/certmgr/__main__.py +++ b/pulumi/python/kubernetes/certmgr/__main__.py @@ -8,12 +8,6 @@ from kic_util import pulumi_config -def crd_deployment_manifest(): - script_dir = os.path.dirname(os.path.abspath(__file__)) - crd_deployment_path = os.path.join(script_dir, 'manifests', 'cert-manager.crds.yaml') - return crd_deployment_path - - def project_name_from_project_dir(dirname: str): script_dir = os.path.dirname(os.path.abspath(__file__)) project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) @@ -40,24 +34,13 @@ def add_namespace(obj): metadata={'name': 'cert-manager'}, opts=pulumi.ResourceOptions(provider=k8s_provider)) -# Config Manifests -crd_deployment = crd_deployment_manifest() - -crd_dep = ConfigFile( - 'crd-dep', - file=crd_deployment, - transformations=[add_namespace], # Need to review w/ operator - opts=pulumi.ResourceOptions(depends_on=[ns]) -) - - config = pulumi.Config('certmgr') chart_name = config.get('chart_name') if not chart_name: chart_name = 'cert-manager' chart_version = config.get('chart_version') if not 
chart_version: - chart_version = 'v1.7.0' + chart_version = 'v1.9.1' helm_repo_name = config.get('certmgr_helm_repo_name') if not helm_repo_name: helm_repo_name = 'jetstack' @@ -81,6 +64,9 @@ def add_namespace(obj): ), version=chart_version, namespace=ns.metadata.name, + values={ + "installCRDs": "True" + }, # Configure the timeout value. timeout=helm_timeout, # By default Release resource will wait till all created resources @@ -96,7 +82,7 @@ def add_namespace(obj): # Force update if required force_update=True) -certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=crd_dep)) +certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=ns)) status = certmgr_release.status -pulumi.export("certmgr_status", status) +pulumi.export("certmgr_status", status) \ No newline at end of file diff --git a/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml b/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml index 1df1e068..5b3f0621 100644 --- a/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml +++ b/pulumi/python/kubernetes/certmgr/manifests/cert-manager.crds.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 The cert-manager Authors. +# Copyright 2021 The cert-manager Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,20 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. ---- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificaterequests.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -134,7 +131,7 @@ spec: description: Usages is the set of x509 usages that are requested for the certificate. If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + description: 'KeyUsage specifies valid usage contexts for keys. 
See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' type: string enum: - signing @@ -205,6 +202,9 @@ spec: type: description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map failureTime: description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. type: string @@ -212,19 +212,17 @@ spec: served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: certificates.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -284,7 +282,7 @@ spec: - secretName properties: additionalOutputFormats: - description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option. + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. type: array items: description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. @@ -388,6 +386,9 @@ spec: name: description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string privateKey: description: Options to control private keys used for the Certificate. type: object @@ -408,6 +409,9 @@ spec: rotationPolicy: description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. 
If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. type: string + enum: + - Never + - Always size: description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. type: integer @@ -486,7 +490,7 @@ spec: description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. type: array items: - description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' + description: 'KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 Valid KeyUsage values are as follows: "signing", "digital signature", "content commitment", "key encipherment", "key agreement", "data encipherment", "cert sign", "crl sign", "encipher only", "decipher only", "any", "server auth", "client auth", "code signing", "email protection", "s/mime", "ipsec end system", "ipsec tunnel", "ipsec user", "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"' type: string enum: - signing @@ -550,6 +554,12 @@ spec: type: description: Type of the condition, known values are (`Ready`, `Issuing`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer lastFailureTime: description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. 
type: string @@ -575,19 +585,17 @@ spec: served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: challenges.acme.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: acme.cert-manager.io names: @@ -908,8 +916,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -920,7 +940,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -956,10 +976,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. 
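The Route53 hunks above add `accessKeyIDSecretRef`, so the key ID, like the secret key, can be pulled from a Kubernetes Secret. A hedged sketch of a consuming resource in this repository's Pulumi-Python style; the issuer name, Secret name, and keys are hypothetical, not taken from this changeset:

```python
import pulumi_kubernetes as k8s

# Hypothetical names: 'route53-credentials' is an assumed Secret holding
# both halves of an AWS credential pair.
issuer = k8s.apiextensions.CustomResource(
    'letsencrypt-route53',
    api_version='cert-manager.io/v1',
    kind='ClusterIssuer',
    spec={
        'acme': {
            'server': 'https://acme-v02.api.letsencrypt.org/directory',
            'privateKeySecretRef': {'name': 'letsencrypt-account-key'},
            'solvers': [{
                'dns01': {
                    'route53': {
                        'region': 'us-east-1',
                        # Mutually exclusive with the inline accessKeyID field.
                        'accessKeyIDSecretRef': {
                            'name': 'route53-credentials',
                            'key': 'access-key-id',
                        },
                        'secretAccessKeySecretRef': {
                            'name': 'route53-credentials',
                            'key': 'secret-access-key',
                        },
                    },
                },
            }],
        },
    })
```

If neither field is set, the descriptions above still promise the ambient-credential fallback: env vars, shared credentials file, or AWS instance metadata.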
+ description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -1187,7 +1246,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. 
null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1217,7 +1276,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1268,7 +1327,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1298,7 +1357,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1356,7 +1415,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1386,7 +1445,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1437,7 +1496,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -1467,7 +1526,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -1573,19 +1632,17 @@ spec: subresources: status: {} --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: clusterissuers.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -1941,8 +1998,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. 
If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -1953,7 +2022,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -1989,10 +2058,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. 
This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -2220,7 +2328,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
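Backing up to the `gatewayHTTPRoute` changes above: the HTTP-01 solver now attaches its temporary HTTPRoute via explicit `parentRefs` rather than matching Gateway labels. A sketch under the same Pulumi-Python conventions, with a hypothetical Gateway name and namespace:

```python
import pulumi_kubernetes as k8s

# Sketch only: 'shared-gateway' and 'gateway-system' are assumed names.
issuer = k8s.apiextensions.CustomResource(
    'acme-http01-gateway',
    api_version='cert-manager.io/v1',
    kind='Issuer',
    spec={
        'acme': {
            'server': 'https://acme-staging-v02.api.letsencrypt.org/directory',
            'privateKeySecretRef': {'name': 'acme-staging-account-key'},
            'solvers': [{
                'http01': {
                    'gatewayHTTPRoute': {
                        # The HTTPRoute cert-manager creates for the challenge
                        # is attached to each Gateway listed here; group and
                        # kind default to gateway.networking.k8s.io/Gateway.
                        'parentRefs': [{
                            'name': 'shared-gateway',
                            'namespace': 'gateway-system',
                        }],
                    },
                },
            }],
        },
    })
```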
type: object properties: matchExpressions: @@ -2250,7 +2358,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2301,7 +2409,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2331,7 +2439,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2389,7 +2497,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2419,7 +2527,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. 
null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2470,7 +2578,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -2500,7 +2608,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -2780,22 +2888,23 @@ spec: type: description: Type of the condition, known values are (`Ready`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: issuers.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: cert-manager.io names: @@ -3151,8 +3260,20 @@ spec: - region properties: accessKeyID: - description: 'The AccessKeyID is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + description: 'The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: string + accessKeyIDSecretRef: + description: 'The SecretAccessKey is used for authentication. 
If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: 'Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string hostedZoneID: description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. type: string @@ -3163,7 +3284,7 @@ spec: description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata type: string secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication. If not set we fall-back to using env vars, shared credentials file or AWS Instance metadata https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials + description: 'The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials' type: object required: - name @@ -3199,10 +3320,49 @@ spec: type: object properties: labels: - description: The labels that cert-manager will use when creating the temporary HTTPRoute needed for solving the HTTP-01 challenge. These labels must match the label selector of at least one Gateway. + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. type: object additionalProperties: type: string + parentRefs: + description: 'When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute/#attaching-to-gateways' + type: array + items: + description: "ParentRef identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. 
\n Support: Core (Gateway) Support: Custom (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified (or empty string), this refers to the local namespace of the Route. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ serviceType: description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. type: string @@ -3430,7 +3590,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3460,7 +3620,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3511,7 +3671,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3541,7 +3701,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3599,7 +3759,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3629,7 +3789,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3680,7 +3840,7 @@ spec: additionalProperties: type: string namespaceSelector: - description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled. 
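The many `namespaceSelector`/`namespaces` description tweaks above all restate one selection rule. A small pure-Python illustration of that rule exactly as written, not the Kubernetes implementation:

```python
from typing import Optional, Set

def affinity_term_namespaces(pod_namespace: str,
                             namespaces: Optional[Set[str]],
                             selector_matches: Optional[Set[str]]) -> Set[str]:
    # A null selector plus a null or empty namespaces list means "this pod's
    # namespace"; otherwise the term applies to the union of both sources.
    # An empty selector ({}) would arrive here as the set of all namespaces.
    if not namespaces and selector_matches is None:
        return {pod_namespace}
    return (namespaces or set()) | (selector_matches or set())

print(affinity_term_namespaces('cert-manager', None, None))
# {'cert-manager'}
print(affinity_term_namespaces('cert-manager', {'a'}, {'b'}))
# {'a', 'b'}
```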
+ description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. type: object properties: matchExpressions: @@ -3710,7 +3870,7 @@ spec: additionalProperties: type: string namespaces: - description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace" + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". type: array items: type: string @@ -3990,22 +4150,23 @@ spec: type: description: Type of the condition, known values are (`Ready`). type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map served: true storage: true --- -# Source: cert-manager/templates/templates.out +# Source: cert-manager/templates/crd-templates.yaml apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: orders.acme.cert-manager.io - annotations: - cert-manager.io/inject-ca-from-secret: 'cert-manager/cert-manager-webhook-ca' labels: app: 'cert-manager' app.kubernetes.io/name: 'cert-manager' app.kubernetes.io/instance: 'cert-manager' # Generated labels - app.kubernetes.io/version: "v1.7.0" + app.kubernetes.io/version: "v1.9.1" spec: group: acme.cert-manager.io names: diff --git a/pulumi/python/kubernetes/logagent/__main__.py b/pulumi/python/kubernetes/logagent/__main__.py index fdd85769..4b022311 100644 --- a/pulumi/python/kubernetes/logagent/__main__.py +++ b/pulumi/python/kubernetes/logagent/__main__.py @@ -13,7 +13,7 @@ chart_name = 'filebeat' chart_version = config.get('chart_version') if not chart_version: - chart_version = '7.16.3' + chart_version = '7.17.3' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'elastic' diff --git a/pulumi/python/kubernetes/logstore/__main__.py b/pulumi/python/kubernetes/logstore/__main__.py index 874c0b22..a736500a 100644 --- a/pulumi/python/kubernetes/logstore/__main__.py +++ b/pulumi/python/kubernetes/logstore/__main__.py @@ -13,7 +13,7 @@ chart_name = 'elasticsearch' chart_version = config.get('chart_version') if not chart_version: - chart_version = '17.6.2' + chart_version = '19.1.4' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'bitnami' @@ -131,7 +131,7 @@ def project_name_from_project_dir(dirname: str): elastic_rname = elastic_release.status.name -elastic_fqdn = Output.concat(elastic_rname, "-coordinating-only.logstore.svc.cluster.local") +elastic_fqdn = Output.concat(elastic_rname, "-elasticsearch.logstore.svc.cluster.local") kibana_fqdn = Output.concat(elastic_rname, "-kibana.logstore.svc.cluster.local") pulumi.export('elastic_hostname', pulumi.Output.unsecret(elastic_fqdn)) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml new file mode 100644 index 00000000..e3d81e21 --- /dev/null +++ 
b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/Pulumi.yaml @@ -0,0 +1,7 @@ +name: ingress-controller-namespace +runtime: + name: python + options: + virtualenv: ../../../venv +config: ../../../../../config/pulumi +description: Creates the NGINX Kubernetes Ingress Controller Namespace diff --git a/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py new file mode 100644 index 00000000..388cb7cc --- /dev/null +++ b/pulumi/python/kubernetes/nginx/ingress-controller-namespace/__main__.py @@ -0,0 +1,38 @@ +import os + +import pulumi +import pulumi_kubernetes as k8s + +from kic_util import pulumi_config + + +def infrastructure_project_name_from_project_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() + +k8_project_name = infrastructure_project_name_from_project_dir('kubeconfig') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) +cluster_name = k8_stack_ref.require_output('cluster_name').apply(lambda c: str(c)) + +k8s_provider = k8s.Provider(resource_name=f'ingress-controller', + kubeconfig=kubeconfig) + +namespace_name = 'nginx-ingress' + +ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', + metadata={'name': namespace_name, + 'labels': { + 'prometheus': 'scrape'} + }, + opts=pulumi.ResourceOptions(provider=k8s_provider)) + +pulumi.export('ingress_namespace', ns) +pulumi.export('ingress_namespace_name', namespace_name) diff --git a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py index 8a05e07a..0cb4ba95 100644 --- a/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py +++ b/pulumi/python/kubernetes/nginx/ingress-controller/__main__.py @@ -1,29 +1,32 @@ import os -import typing -from typing import Dict +from typing import Dict, Mapping, Any, Optional import pulumi -from pulumi import Output +from pulumi import Output, StackReference import pulumi_kubernetes as k8s from pulumi_kubernetes.core.v1 import Service -import pulumi_kubernetes.helm.v3 as helm from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs from kic_util import pulumi_config +script_dir = os.path.dirname(os.path.abspath(__file__)) + config = pulumi.Config('kic-helm') chart_name = config.get('chart_name') if not chart_name: chart_name = 'nginx-ingress' chart_version = config.get('chart_version') if not chart_version: - chart_version = '0.13.0' + chart_version = '0.14.0' helm_repo_name = config.get('helm_repo_name') if not helm_repo_name: helm_repo_name = 'nginx-stable' helm_repo_url = config.get('helm_repo_url') if not helm_repo_url: helm_repo_url = 'https://helm.nginx.com/stable' + +pulumi.log.info(f'NGINX Ingress Controller will be deployed with the Helm Chart [{chart_name}@{chart_version}]') + # # Allow the user to set timeout per helm chart; otherwise # we default to 5 minutes. 
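The comment above refers to a per-chart Helm timeout with a five-minute default, but the hunk elides the lookup itself. A plausible reconstruction using the same `pulumi.Config` pattern seen elsewhere in this file; the `helm_timeout` key name is an assumption:

```python
import pulumi

config = pulumi.Config('kic-helm')

# Assumed shape of the elided logic: an optional per-chart timeout in
# seconds, defaulting to five minutes when unset.
helm_timeout = config.get_int('helm_timeout')
if not helm_timeout:
    helm_timeout = 300
```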
@@ -33,19 +36,22 @@ helm_timeout = 300 -def aws_project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) +def infrastructure_project_name_from_project_dir(dirname: str): project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname) return pulumi_config.get_pulumi_project_name(project_path) -def project_name_from_project_dir(dirname: str): - script_dir = os.path.dirname(os.path.abspath(__file__)) +def project_name_from_utility_dir(dirname: str): project_path = os.path.join(script_dir, '..', '..', '..', 'utility', dirname) return pulumi_config.get_pulumi_project_name(project_path) -def find_image_tag(repository: dict) -> typing.Optional[str]: +def project_name_from_same_parent(directory: str): + project_path = os.path.join(script_dir, '..', directory) + return pulumi_config.get_pulumi_project_name(project_path) + + +def find_image_tag(repository: dict) -> Optional[str]: """ Inspect the repository dictionary as returned from a stack reference for a valid image_tag_alias or image_tag. If found, return the image_tag_alias or image_tag if found, otherwise return None @@ -62,8 +68,8 @@ def find_image_tag(repository: dict) -> typing.Optional[str]: return None -def build_chart_values(repository: dict) -> helm.ChartOpts: - values: Dict[str, Dict[str, typing.Any]] = { +def build_chart_values(repo_push: dict) -> Mapping[str, Any]: + values: Dict[str, Dict[str, Any]] = { 'controller': { 'healthStatus': True, 'appprotect': { @@ -78,6 +84,12 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: '$upstream_bytes_sent $upstream_response_time $upstream_status $request_id ' } }, + 'serviceAccount': { + # This references the name of the secret used to pull the ingress container image + # from a remote repository. When using EKS on AWS, authentication to ECR happens + # via a different mechanism, so this value is ignored. 
+ 'imagePullSecretName': 'ingress-controller-registry', + }, 'service': { 'annotations': { 'co.elastic.logs/module': 'nginx' @@ -117,18 +129,18 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: "opentracing": True } - image_tag = find_image_tag(repository) + image_tag = find_image_tag(repo_push) if not image_tag: pulumi.log.debug('No image_tag or image_tag_alias found') - if 'repository_url' in repository and image_tag: - repository_url = repository['repository_url'] + if 'repository_url' in repo_push and image_tag: + repository_url = repo_push['repository_url'] if 'image' not in values['controller']: values['controller']['image'] = {} if repository_url and image_tag: - pulumi.log.info(f"Using ingress controller image: {repository_url}:{image_tag}") + pulumi.log.info(f"Using Ingress Controller image: {repository_url}:{image_tag}") values['controller']['image'].update({ 'repository': repository_url, 'tag': image_tag @@ -147,28 +159,34 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() -k8_project_name = aws_project_name_from_project_dir('kubeconfig') +k8_project_name = infrastructure_project_name_from_project_dir('kubeconfig') k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" -k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +k8_stack_ref = StackReference(k8_stack_ref_id) kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) cluster_name = k8_stack_ref.require_output('cluster_name').apply(lambda c: str(c)) -image_push_project_name = project_name_from_project_dir('kic-image-push') +namespace_stack_ref_id = f"{pulumi_user}/{project_name_from_same_parent('ingress-controller-namespace')}/{stack_name}" +ns_stack_ref = StackReference(namespace_stack_ref_id) +ns_name_output = ns_stack_ref.require_output('ingress_namespace_name') + +image_push_project_name = project_name_from_utility_dir('kic-image-push') image_push_ref_id = f"{pulumi_user}/{image_push_project_name}/{stack_name}" -image_push_ref = pulumi.StackReference(image_push_ref_id) -ecr_repository = image_push_ref.get_output('ecr_repository') +image_push_ref = StackReference(image_push_ref_id) +container_repo_push = image_push_ref.get_output('container_repo_push') k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) -ns = k8s.core.v1.Namespace(resource_name='nginx-ingress', - metadata={'name': 'nginx-ingress', - 'labels': { - 'prometheus': 'scrape'} - }, - opts=pulumi.ResourceOptions(provider=k8s_provider)) -chart_values = ecr_repository.apply(build_chart_values) +def namespace_by_name(name): + return k8s.core.v1.Namespace.get(resource_name=name, + id=name, + opts=pulumi.ResourceOptions(provider=k8s_provider)) + + +ns = ns_name_output.apply(namespace_by_name) + +chart_values = container_repo_push.apply(build_chart_values) kic_release_args = ReleaseArgs( chart=chart_name, @@ -196,16 +214,34 @@ def build_chart_values(repository: dict) -> helm.ChartOpts: # Force update if required force_update=True) -kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns])) +kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns], + provider=k8s_provider)) pstatus = kic_chart.status -srv = Service.get("nginx-ingress", - Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress")) +srv = Service.get(resource_name="nginx-ingress", + id=Output.concat("nginx-ingress", "/", pstatus.name, 
"-nginx-ingress"), + opts=pulumi.ResourceOptions(provider=k8s_provider)) ingress_service = srv.status -pulumi.export('lb_ingress_hostname', pulumi.Output.unsecret(ingress_service.load_balancer.ingress[0].hostname)) + +def ingress_hostname(_ingress_service): + # Attempt to get the hostname as returned from the helm chart + if 'load_balancer' in _ingress_service: + load_balancer = _ingress_service['load_balancer'] + if 'ingress' in load_balancer and len(load_balancer['ingress']) > 0: + first_ingress = load_balancer['ingress'][0] + if 'hostname' in first_ingress: + return first_ingress['hostname'] + + # If we can't get the hostname, then use the FQDN coded in the config file + fqdn = config.require('fqdn') + return fqdn + + +pulumi.export('lb_ingress_hostname', pulumi.Output.unsecret(ingress_service).apply(ingress_hostname)) +pulumi.export('lb_ingress', pulumi.Output.unsecret(ingress_service)) # Print out our status pulumi.export("kic_status", pstatus) pulumi.export('nginx_plus', pulumi.Output.unsecret(chart_values['controller']['nginxplus'])) diff --git a/pulumi/python/kubernetes/observability/otel-objects/README.md b/pulumi/python/kubernetes/observability/otel-objects/README.md index 012808cd..28873e43 100644 --- a/pulumi/python/kubernetes/observability/otel-objects/README.md +++ b/pulumi/python/kubernetes/observability/otel-objects/README.md @@ -1,44 +1,56 @@ -## Sample Configurations -This directory contains a number of sample configurations that can be used with the -[OTEL kubernetes operator](https://github.com/open-telemetry/opentelemetry-operator) that is installed as part of the -MARA project. +# Sample Configurations -Each configuration currently uses the `simplest` deployment, which uses an in-memory store for data being processed. -This is obviously not suited to a production deployment, but it is intended to illustrate the steps required to work -with the OTEL deployment. +This directory contains a number of sample configurations that can be used with +the +[OTEL kubernetes operator](https://github.com/open-telemetry/opentelemetry-operator) +that is installed as part of the MARA project. + +Each configuration currently uses the `simplest` deployment, which uses an +in-memory store for data being processed. This is obviously not suited to a +production deployment, but it is intended to illustrate the steps required to +work with the OTEL deployment. ## Commonality ### Listening Ports -Each of the sample files is configured to listen on the -[OTLP protocol](https://opentelemetry.io/docs/reference/specification/protocol/otlp/). The listen ports configured are: + +Each of the sample files is configured to listen on the +[OTLP protocol](https://opentelemetry.io/docs/reference/specification/protocol/otlp/) +. The listen ports configured are: + * grpc on port 9978 * http on port 9979 -### Logging -All the examples log to the container's stdout. However, the basic configuration is configured to only show the -condensed version of the traces being received. In order to see the full traces, you need to set the logging level to +### Logging + +All the examples log to the container's stdout. However, the basic configuration +is configured to only show the condensed version of the traces being received. +In order to see the full traces, you need to set the logging level to `DEBUG`. The basic-debug object is configured to do this automatically. ## Configurations -### `otel-collector.yaml.basic` -This is the default collector that only listens and logs summary spans to the container's stdout. 
### `otel-collector.yaml.basic` -This is a variant of the default collector that will output full spans to the container's stdout. + +This is the default collector that only listens and logs summary spans to the +container's stdout. + +### `otel-collector.yaml.basic-debug` + +This is a variant of the default collector that will output full spans to the +container's stdout. ### `otel-collector.yaml.full` -This is a more complex variant that contains multiple receivers, processors, and exporters. Please see the file for -details. + +This is a more complex variant that contains multiple receivers, processors, +and exporters. Please see the file for details. ### `otel-collector.yaml.lightstep` -This configuration file deploys lightstep as an ingester. Please note you will need to have a -[lightstep](https://lightstep.com/) account to use this option, and you will need to add your lightstep access token -to the file in the field noted. -## Usage -By default, the `otel-collector.yaml.basic` configuration is copied into the live `otel-collector.yaml`. The logic for -this project runs all files ending in `.yaml` as part of the configuration, so you simply need to either rename your -chosen file to `otel-collector.yaml` or add ensuring only the files you want to use have the `.yaml` extension. +This configuration file deploys lightstep as an ingester. Please note you will +need to have a [lightstep](https://lightstep.com/) account to use this option, +and you will need to add your lightstep access token to the file in the field +noted. +## Usage +By default, the `otel-collector.yaml.basic` configuration is copied into the +live `otel-collector.yaml`. The logic for this project runs all files ending in +`.yaml` as part of the configuration, so you simply need to either rename your +chosen file to `otel-collector.yaml` or ensure that only the files you want to +use have the `.yaml` extension. diff --git a/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml b/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml index a7069297..fed12a4f 100644 --- a/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml +++ b/pulumi/python/kubernetes/observability/otel-objects/otel-collector.yaml @@ -12,20 +12,17 @@ spec: endpoint: 0.0.0.0:9978 http: endpoint: 0.0.0.0:9979 - # Collect Prometheus Metrics - exporters: - otlp: - endpoint: https://ingest.lightstep.com:443 - headers: {"lightstep-service-name":"my-service","lightstep-access-token":"XXXX"} + processors: batch: + + exporters: + logging: + logLevel: + service: pipelines: traces: receivers: [otlp] processors: [batch] - exporters: [otlp] - metrics: - receivers: [otlp] - processors: [batch] - exporters: [otlp] + exporters: [logging] diff --git a/pulumi/python/kubernetes/observability/otel-operator/README.md b/pulumi/python/kubernetes/observability/otel-operator/README.md index ed83b055..e599436e 100644 --- a/pulumi/python/kubernetes/observability/otel-operator/README.md +++ b/pulumi/python/kubernetes/observability/otel-operator/README.md @@ -1,12 +1,16 @@ # Directory + `/pulumi/python/kubernetes/observability/otel-operator` ## Purpose + Deploys the OpenTelemetry Operator via a YAML manifest. ## Key Files + -- [`opentelemetry-operator.yaml`](./opentelemetry-operator.yaml) This file is used by the Pulumi code in the -directory above to deploy the OTEL operator. + +* [`opentelemetry-operator.yaml`](./opentelemetry-operator.yaml) This file is + used by the Pulumi code in the directory above to deploy the OTEL operator. 
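The simplified `otel-collector.yaml` above now logs traces instead of shipping them to lightstep. For reference, the same `simplest` collector could also be created from Pulumi in this repository's style; a sketch that assumes the OTEL operator and its CRDs are already installed:

```python
import pulumi_kubernetes as k8s

# Inline copy of the basic pipeline: OTLP in, batch, log out.
collector_config = """
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:9978
      http:
        endpoint: 0.0.0.0:9979
processors:
  batch:
exporters:
  logging:
service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [logging]
"""

collector = k8s.apiextensions.CustomResource(
    'simplest',
    api_version='opentelemetry.io/v1alpha1',
    kind='OpenTelemetryCollector',
    spec={'config': collector_config})
```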
## Notes -The OTEL operator had dependencies on [cert-manager](../../certmgr + +The OTEL operator has a dependency on [cert-manager](../../certmgr). diff --git a/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml b/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml index b48e156e..7459e064 100644 --- a/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml +++ b/pulumi/python/kubernetes/observability/otel-operator/opentelemetry-operator.yaml @@ -2,6 +2,7 @@ apiVersion: v1 kind: Namespace metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-system --- @@ -9,8 +10,10 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.6.0-beta.0 + controller-gen.kubebuilder.io/version: v0.8.0 creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator name: instrumentations.opentelemetry.io spec: group: opentelemetry.io @@ -28,6 +31,15 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .spec.exporter.endpoint + name: Endpoint + type: string + - jsonPath: .spec.sampler.type + name: Sampler + type: string + - jsonPath: .spec.sampler.argument + name: Sampler Arg + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -49,6 +61,115 @@ spec: description: InstrumentationSpec defines the desired state of OpenTelemetry SDK and instrumentation. properties: + env: + description: 'Env defines common env vars. There are four layers for + env vars'' definitions and the precedence order is: `original container + env vars` > `language specific env vars` > `common env vars` > `instrument + spec configs'' vars`. If the former var had been defined, then the + other vars would be ignored.' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previously defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + Double $$ are reduced to a single $, which allows for escaping + the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the + string literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.'
+ properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array exporter: description: Exporter defines exporter configuration. properties: @@ -59,6 +180,120 @@ spec: java: description: Java defines configuration for java auto-instrumentation. properties: + env: + description: 'Env defines java specific env vars. There are four + layers for env vars'' definitions and the precedence order is: + `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with javaagent auto-instrumentation JAR. @@ -67,6 +302,120 @@ spec: nodejs: description: NodeJS defines configuration for nodejs auto-instrumentation. properties: + env: + description: 'Env defines nodejs specific env vars. There are + four layers for env vars'' definitions and the precedence order + is: `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. 
+ properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with NodeJS SDK and auto-instrumentation. type: string @@ -90,6 +439,120 @@ spec: python: description: Python defines configuration for python auto-instrumentation. properties: + env: + description: 'Env defines python specific env vars. There are + four layers for env vars'' definitions and the precedence order + is: `original container env vars` > `language specific env vars` + > `common env vars` > `instrument spec configs'' vars`. If the + former var had been defined, then the other vars would be ignored.' + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. 
Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array image: description: Image is a container image with Python SDK and auto-instrumentation. 
type: string @@ -152,7 +615,9 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert - controller-gen.kubebuilder.io/version: v0.6.0-beta.0 + controller-gen.kubebuilder.io/version: v0.8.0 + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetrycollectors.opentelemetry.io spec: group: opentelemetry.io @@ -364,6 +829,16 @@ spec: description: ImagePullPolicy indicates the pull policy to be used for retrieving the container image (Always, Never, IfNotPresent) type: string + maxReplicas: + description: MaxReplicas sets an upper bound to the autoscaling feature. + If MaxReplicas is set autoscaling is enabled. + format: int32 + type: integer + minReplicas: + description: MinReplicas sets a lower bound to the autoscaling feature. Set + this if you are using autoscaling. It must be at least 1 + format: int32 + type: integer mode: description: Mode represents how the collector should be deployed (deployment, daemonset, statefulset or sidecar) @@ -373,6 +848,13 @@ spec: - sidecar - statefulset type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to schedule OpenTelemetry Collector pods. + This is only relevant to daemonset, statefulset, and deployment + mode + type: object podAnnotations: additionalProperties: type: string @@ -393,7 +875,8 @@ spec: set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of - any volume." + any volume. Note that this field cannot be set when spec.os.name + is windows." format: int64 type: integer fsGroupChangePolicy: @@ -403,13 +886,15 @@ spec: support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". - If not specified, "Always" is used.' + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' type: string runAsGroup: description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -426,7 +911,8 @@ spec: Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: @@ -435,6 +921,7 @@ spec: SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that applies to @@ -455,7 +942,8 @@ spec: type: object seccompProfile: description: The seccomp options to use by the containers in this - pod. + pod. Note that this field cannot be set when spec.os.name is + windows.
properties: localhostProfile: description: localhostProfile indicates a profile defined @@ -477,7 +965,8 @@ spec: supplementalGroups: description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If - unspecified, no groups will be added to any container. + unspecified, no groups will be added to any container. Note + that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -485,7 +974,8 @@ spec: sysctls: description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) - might fail to launch. + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. items: description: Sysctl defines a kernel parameter to be set properties: @@ -504,7 +994,8 @@ spec: description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA admission @@ -549,7 +1040,7 @@ spec: description: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 - and http://www.iana.org/assignments/service-names). Non-standard + and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. type: string name: @@ -600,7 +1091,7 @@ spec: x-kubernetes-list-type: atomic replicas: description: Replicas is the number of pod instances for the underlying - OpenTelemetry Collector + OpenTelemetry Collector. Set this if you are not using autoscaling format: int32 type: integer resources: @@ -638,12 +1129,14 @@ spec: can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when - the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set when spec.os.name + is windows. properties: add: description: Added capabilities @@ -661,23 +1154,27 @@ spec: privileged: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults - to false. + to false. Note that this field cannot be set when spec.os.name + is windows. type: boolean procMount: description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only root filesystem. - Default is false. + Default is false.
Note that this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -694,7 +1191,8 @@ spec: Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence. + takes precedence. Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -702,7 +1200,8 @@ spec: If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value - specified in SecurityContext takes precedence. + specified in SecurityContext takes precedence. Note that this + field cannot be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that applies to @@ -724,7 +1223,8 @@ spec: seccompProfile: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, - the container options override the pod options. + the container options override the pod options. Note that this + field cannot be set when spec.os.name is windows. properties: localhostProfile: description: localhostProfile indicates a profile defined @@ -747,7 +1247,8 @@ spec: description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext takes precedence. + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA admission @@ -795,10 +1296,27 @@ spec: description: Image indicates the container image to use for the OpenTelemetry TargetAllocator. type: string + prometheusCR: + description: PrometheusCR defines the configuration for the retrieval + of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 + and podmonitor.monitoring.coreos.com/v1 ). All CR + instances which the ServiceAccount has access to will be retrieved. + This includes other namespaces. + properties: + enabled: + description: Enabled indicates whether to use PrometheusOperator + custom resources as targets or not. + type: boolean + type: object + serviceAccount: + description: ServiceAccount indicates the name of an existing + service account to use with this instance. + type: string + type: object tolerations: description: Toleration to schedule OpenTelemetry Collector pods.
- This is only relevant to daemonsets, statefulsets and deployments + This is only relevant to daemonset, statefulset, and deployment + mode items: description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching @@ -884,17 +1402,17 @@ spec: type: string type: object spec: - description: 'Spec defines the desired characteristics of a + description: 'spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: accessModes: - description: 'AccessModes contains the desired access modes + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array dataSource: - description: 'This field can be used to specify either: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data @@ -920,26 +1438,27 @@ spec: - name type: object dataSourceRef: - description: 'Specifies the object from which to populate - the volume with data, if a non-empty volume is desired. - This may be any local object from a non-empty API group - (non core object) or a PersistentVolumeClaim object. When - this field is specified, volume binding will only succeed - if the type of the specified object matches some installed - volume populator or dynamic provisioner. This field will - replace the functionality of the DataSource field and - as such if both fields are non-empty, they must have the - same value. For backwards compatibility, both fields (DataSource - and DataSourceRef) will be set to the same value automatically - if one of them is empty and the other is non-empty. There - are two important differences between DataSource and DataSourceRef: - * While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as well as - PersistentVolumeClaim objects. * While DataSource ignores - disallowed values (dropping them), DataSourceRef preserves - all values, and generates an error if a disallowed value - is specified. (Alpha) Using this field requires the - AnyVolumeDataSource feature gate to be enabled.' + description: 'dataSourceRef specifies the object from which + to populate the volume with data, if a non-empty volume + is desired. This may be any local object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding will + only succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they must + have the same value. For backwards compatibility, both + fields (DataSource and DataSourceRef) will be set to the + same value automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef + allows any non-core object, as well as PersistentVolumeClaim + objects. 
* While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' properties: apiGroup: description: APIGroup is the group for the resource @@ -958,8 +1477,12 @@ spec: - name type: object resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but must + still be higher than capacity recorded in the status field + of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -986,8 +1509,8 @@ spec: type: object type: object selector: - description: A label query over volumes to consider for - binding. + description: selector is a label query over volumes to consider + for binding. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1032,8 +1555,8 @@ spec: type: object type: object storageClassName: - description: 'Name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: description: volumeMode defines what type of volume is required @@ -1041,20 +1564,40 @@ spec: included in claim spec. type: string volumeName: - description: VolumeName is the binding reference to the + description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object status: - description: 'Status represents the current information/status + description: 'status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: accessModes: - description: 'AccessModes contains the actual access modes + description: 'accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: allocatedResources is the storage resource + within AllocatedResources tracks the capacity allocated + to a PVC. It may be larger than the actual capacity when + a volume expansion operation is requested. For storage + quota, the larger value from allocatedResources and PVC.spec.resources + is used. If allocatedResources is not set, PVC.spec.resources + alone is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower than + the requested capacity. 
This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: object capacity: additionalProperties: anyOf: @@ -1062,36 +1605,37 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: Represents the actual resources of the underlying - volume. + description: capacity represents the actual resources of + the underlying volume. type: object conditions: - description: Current Condition of persistent volume claim. - If underlying persistent volume is being resized then - the Condition will be set to 'ResizeStarted'. + description: conditions is the current Condition of persistent + volume claim. If underlying persistent volume is being + resized then the Condition will be set to 'ResizeStarted'. items: description: PersistentVolumeClaimCondition contains details about state of pvc properties: lastProbeTime: - description: Last time we probed the condition. + description: lastProbeTime is the time we probed the + condition. format: date-time type: string lastTransitionTime: - description: Last time the condition transitioned - from one status to another. + description: lastTransitionTime is the time the condition + transitioned from one status to another. format: date-time type: string message: - description: Human-readable message indicating details - about last transition. + description: message is the human-readable message + indicating details about last transition. type: string reason: - description: Unique, this should be a short, machine - understandable string that gives the reason for - condition's last transition. If it reports "ResizeStarted" - that means the underlying persistent volume is being - resized. + description: reason is unique; this should be a + short, machine understandable string that gives + the reason for condition's last transition. If it + reports "ResizeStarted" that means the underlying + persistent volume is being resized. type: string status: type: string @@ -1105,7 +1649,14 @@ spec: type: object type: array phase: - description: Phase represents the current phase of PersistentVolumeClaim. + description: phase represents the current phase of PersistentVolumeClaim. + type: string + resizeStatus: + description: resizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. type: string type: object type: object @@ -1160,117 +1711,121 @@ spec: be accessed by any container in the pod. properties: awsElasticBlockStore: - description: 'AWSElasticBlockStore represents an AWS Disk resource + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + description: 'fsType is the filesystem type of the volume + that you want to mount.
Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' type: string partition: - description: 'The partition in the volume that you want - to mount. If omitted, the default is to mount by volume - name. Examples: For volume /dev/sda1, you specify the - partition as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property empty).' + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' format: int32 type: integer readOnly: - description: 'Specify "true" to force and set the ReadOnly - property in VolumeMounts to "true". If omitted, the default - is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: boolean volumeID: - description: 'Unique ID of the persistent disk resource - in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' type: string required: - volumeID type: object azureDisk: - description: AzureDisk represents an Azure Data Disk mount on + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. properties: cachingMode: - description: 'Host Caching mode: None, Read Only, Read Write.' + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' type: string diskName: - description: The Name of the data disk in the blob storage + description: diskName is the Name of the data disk in the + blob storage type: string diskURI: - description: The URI the data disk in the blob storage + description: diskURI is the URI of data disk in the blob + storage type: string fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string kind: - description: 'Expected values Shared: multiple blob disks - per storage account Dedicated: single blob disk per storage - account Managed: azure managed data disk (only in managed - availability set). defaults to shared' + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean required: - diskName - diskURI type: object azureFile: - description: AzureFile represents an Azure File Service mount + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. properties: readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretName: - description: the name of secret that contains Azure Storage - Account Name and Key + description: secretName is the name of secret that contains + Azure Storage Account Name and Key type: string shareName: - description: Share Name + description: shareName is the azure share Name type: string required: - secretName - shareName type: object cephfs: - description: CephFS represents a Ceph FS mount on the host that + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime properties: monitors: - description: 'Required: Monitors is a collection of Ceph - monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' items: type: string type: array path: - description: 'Optional: Used as the mounted root, rather - than the full Ceph tree, default is /' + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' type: string readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: boolean secretFile: - description: 'Optional: SecretFile is the path to key ring - for User, default is /etc/ceph/user.secret More info: - https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string secretRef: - description: 'Optional: SecretRef is reference to the authentication - secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1278,30 +1833,30 @@ spec: type: string type: object user: - description: 'Optional: User is the rados user name, default - is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' type: string required: - monitors type: object cinder: - description: 'Cinder represents a cinder volume attached and + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' properties: fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Examples: - "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" - if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string readOnly: - description: 'Optional: Defaults to false (read/write). - ReadOnly here will force the ReadOnly setting in VolumeMounts. + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: boolean secretRef: - description: 'Optional: points to a secret object containing - parameters used to connect to OpenStack.' + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1309,31 +1864,31 @@ spec: type: string type: object volumeID: - description: 'volume id used to identify the volume in cinder. + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' type: string required: - volumeID type: object configMap: - description: ConfigMap represents a configMap that should populate + description: configMap represents a configMap that should populate this volume properties: defaultMode: - description: 'Optional: mode bits used to set permissions - on created files by default. Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Defaults to 0644. Directories within - the path are not affected by this setting. This might - be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits - set.' + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer items: - description: If unspecified, each key-value pair in the - Data field of the referenced ConfigMap will be projected + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be @@ -1345,25 +1900,25 @@ spec: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to set permissions - on this file. 
Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. type: string required: - key @@ -1375,28 +1930,28 @@ spec: TODO: Add other useful fields. apiVersion, kind, uid?' type: string optional: - description: Specify whether the ConfigMap or its keys must - be defined + description: optional specify whether the ConfigMap or its + keys must be defined type: boolean type: object csi: - description: CSI (Container Storage Interface) represents ephemeral + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). properties: driver: - description: Driver is the name of the CSI driver that handles + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. type: string fsType: - description: Filesystem type to mount. Ex. "ext4", "xfs", - "ntfs". If not provided, the empty value is passed to - the associated CSI driver which will determine the default - filesystem to apply. + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. type: string nodePublishSecretRef: - description: NodePublishSecretRef is a reference to the + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, @@ -1410,13 +1965,13 @@ spec: type: string type: object readOnly: - description: Specifies a read-only configuration for the - volume. Defaults to false (read/write). + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). type: boolean volumeAttributes: additionalProperties: type: string - description: VolumeAttributes stores driver-specific properties + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. 
type: object @@ -1424,7 +1979,7 @@ spec: - driver type: object downwardAPI: - description: DownwardAPI represents downward API about the pod + description: downwardAPI represents downward API about the pod that should populate this volume properties: defaultMode: @@ -1511,50 +2066,47 @@ spec: type: array type: object emptyDir: - description: 'EmptyDir represents a temporary directory that + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' properties: medium: - description: 'What type of storage medium should back this - directory. The default is "" which means to use the node''s - default medium. Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' type: string sizeLimit: anyOf: - type: integer - type: string - description: 'Total amount of local storage required for - this EmptyDir volume. The size limit is also applicable - for memory medium. The maximum usage on memory medium - EmptyDir would be the minimum value between the SizeLimit - specified here and the sum of memory limits of all containers - in a pod. The default is nil which means that the limit - is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object ephemeral: - description: "Ephemeral represents a volume that is handled + description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity - \ tracking are needed, c) the storage driver is specified - through a storage class, and d) the storage driver supports - dynamic volume provisioning through a PersistentVolumeClaim - (see EphemeralVolumeSource for more information on the - connection between this volume type and PersistentVolumeClaim). - \n Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. \n Use CSI for light-weight local ephemeral - volumes if the CSI driver is meant to be used that way - see - the documentation of the driver for more information. \n A - pod can use both types of ephemeral volumes and persistent - volumes at the same time. \n This is a beta feature and only - available when the GenericEphemeralVolume feature gate is - enabled." 
+ tracking are needed, c) the storage driver is specified through + a storage class, and d) the storage driver supports dynamic + volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource + for more information on the connection between this volume + type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that persist + for longer than the lifecycle of an individual pod. \n Use + CSI for light-weight local ephemeral volumes if the CSI driver + is meant to be used that way - see the documentation of the + driver for more information. \n A pod can use both types of + ephemeral volumes and persistent volumes at the same time." properties: volumeClaimTemplate: description: "Will be used to create a stand-alone PVC to @@ -1606,13 +2158,13 @@ spec: as in a PersistentVolumeClaim are also valid here. properties: accessModes: - description: 'AccessModes contains the desired access + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' items: type: string type: array dataSource: - description: 'This field can be used to specify + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support @@ -1642,14 +2194,14 @@ spec: - name type: object dataSourceRef: - description: 'Specifies the object from which to - populate the volume with data, if a non-empty - volume is desired. This may be any local object - from a non-empty API group (non core object) or - a PersistentVolumeClaim object. When this field - is specified, volume binding will only succeed - if the type of the specified object matches some - installed volume populator or dynamic provisioner. + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + local object from a non-empty API group (non core + object) or a PersistentVolumeClaim object. When + this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For @@ -1659,13 +2211,13 @@ spec: other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, - DataSourceRef allows any non-core object, as - well as PersistentVolumeClaim objects. * While - DataSource ignores disallowed values (dropping - them), DataSourceRef preserves all values, and - generates an error if a disallowed value is specified. - (Alpha) Using this field requires the AnyVolumeDataSource - feature gate to be enabled.' + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef + preserves all values, and generates an error if + a disallowed value is specified. (Beta) Using + this field requires the AnyVolumeDataSource feature + gate to be enabled.' 
properties: apiGroup: description: APIGroup is the group for the resource @@ -1687,8 +2239,12 @@ spec: - name type: object resources: - description: 'Resources represents the minimum resources - the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -1716,8 +2272,8 @@ spec: type: object type: object selector: - description: A label query over volumes to consider - for binding. + description: selector is a label query over volumes + to consider for binding. properties: matchExpressions: description: matchExpressions is a list of label @@ -1767,8 +2323,9 @@ spec: type: object type: object storageClassName: - description: 'Name of the StorageClass required - by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' type: string volumeMode: description: volumeMode defines what type of volume @@ -1776,7 +2333,7 @@ spec: is implied when not included in claim spec. type: string volumeName: - description: VolumeName is the binding reference + description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object @@ -1785,32 +2342,33 @@ spec: type: object type: object fc: - description: FC represents a Fibre Channel resource that is + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. properties: fsType: - description: 'Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from - compromising the machine' + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' type: string lun: - description: 'Optional: FC target lun number' + description: 'lun is Optional: FC target lun number' format: int32 type: integer readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean targetWWNs: - description: 'Optional: FC target worldwide names (WWNs)' + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' items: type: string type: array wwids: - description: 'Optional: FC volume world wide identifiers + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' 
items: @@ -1818,34 +2376,36 @@ spec: type: array type: object flexVolume: - description: FlexVolume represents a generic volume resource + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. properties: driver: - description: Driver is the name of the driver to use for + description: driver is the name of the driver to use for this volume. type: string fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". The default filesystem depends on FlexVolume - script. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. type: string options: additionalProperties: type: string - description: 'Optional: Extra command options if any.' + description: 'options is Optional: this field holds extra + command options if any.' type: object readOnly: - description: 'Optional: Defaults to false (read/write). + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' type: boolean secretRef: - description: 'Optional: SecretRef is reference to the secret - object containing sensitive information to pass to the - plugin scripts. This may be empty if no secret object - is specified. If the secret object contains more than - one secret, all secrets are passed to the plugin scripts.' + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -1856,90 +2416,92 @@ spec: - driver type: object flocker: - description: Flocker represents a Flocker volume attached to + description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running properties: datasetName: - description: Name of the dataset stored as metadata -> name - on the dataset for Flocker should be considered as deprecated + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated type: string datasetUUID: - description: UUID of the dataset. This is unique identifier - of a Flocker dataset + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset type: string type: object gcePersistentDisk: - description: 'GCEPersistentDisk represents a GCE Disk resource + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. 
Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' type: string partition: - description: 'The partition in the volume that you want - to mount. If omitted, the default is to mount by volume - name. Examples: For volume /dev/sda1, you specify the - partition as "1". Similarly, the volume partition for - /dev/sda is "0" (or you can leave the property empty). + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' format: int32 type: integer pdName: - description: 'Unique name of the PD resource in GCE. Used - to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: string readOnly: - description: 'ReadOnly here will force the ReadOnly setting + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' type: boolean required: - pdName type: object gitRepo: - description: 'GitRepo represents a git repository at a particular + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' properties: directory: - description: Target directory name. Must not contain or - start with '..'. If '.' is supplied, the volume directory - will be the git repository. Otherwise, if specified, - the volume will contain the git repository in the subdirectory - with the given name. + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. type: string repository: - description: Repository URL + description: repository is the URL type: string revision: - description: Commit hash for the specified revision. + description: revision is the commit hash for the specified + revision. type: string required: - repository type: object glusterfs: - description: 'Glusterfs represents a Glusterfs mount on the + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' properties: endpoints: - description: 'EndpointsName is the endpoint name that details + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string path: - description: 'Path is the Glusterfs volume path. More info: + description: 'path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: string readOnly: - description: 'ReadOnly here will force the Glusterfs volume + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' type: boolean @@ -1948,7 +2510,7 @@ spec: - path type: object hostPath: - description: 'HostPath represents a pre-existing file or directory + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers @@ -1957,68 +2519,70 @@ spec: mounts and who can/can not mount host directories as read/write.' properties: path: - description: 'Path of the directory on the host. If the + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string type: - description: 'Type for HostPath Volume Defaults to "" More + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' type: string required: - path type: object iscsi: - description: 'ISCSI represents an ISCSI Disk resource that is + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' properties: chapAuthDiscovery: - description: whether support iSCSI Discovery CHAP authentication + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication type: boolean chapAuthSession: - description: whether support iSCSI Session CHAP authentication + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication type: boolean fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' type: string initiatorName: - description: Custom iSCSI Initiator Name. If initiatorName - is specified with iscsiInterface simultaneously, new iSCSI - interface : will be created - for the connection. + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. type: string iqn: - description: Target iSCSI Qualified Name. + description: iqn is the target iSCSI Qualified Name. type: string iscsiInterface: - description: iSCSI Interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). 
type: string lun: - description: iSCSI Target Lun number. + description: lun represents iSCSI Target Lun number. format: int32 type: integer portals: - description: iSCSI Target Portal List. The portal is either - an IP or ip_addr:port if the port is other than default - (typically TCP ports 860 and 3260). + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). items: type: string type: array readOnly: - description: ReadOnly here will force the ReadOnly setting + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. type: boolean secretRef: - description: CHAP Secret for iSCSI target and initiator - authentication + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication properties: name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names @@ -2026,9 +2590,9 @@ spec: type: string type: object targetPortal: - description: iSCSI Target Portal. The Portal is either an - IP or ip_addr:port if the port is other than default (typically - TCP ports 860 and 3260). + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -2036,24 +2600,24 @@ spec: - targetPortal type: object name: - description: 'Volume''s name. Must be a DNS_LABEL and unique + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string nfs: - description: 'NFS represents an NFS mount on the host that shares + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' properties: path: - description: 'Path that is exported by the NFS server. More + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string readOnly: - description: 'ReadOnly here will force the NFS export to + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: boolean server: - description: 'Server is the hostname or IP address of the + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' type: string required: @@ -2061,86 +2625,87 @@ spec: - server type: object persistentVolumeClaim: - description: 'PersistentVolumeClaimVolumeSource represents a + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' properties: claimName: - description: 'ClaimName is the name of a PersistentVolumeClaim + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' type: string readOnly: - description: Will force the ReadOnly setting in VolumeMounts. - Default false. + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. 
type: boolean required: - claimName type: object photonPersistentDisk: - description: PhotonPersistentDisk represents a PhotonController + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string pdID: - description: ID that identifies Photon Controller persistent - disk + description: pdID is the ID that identifies Photon Controller + persistent disk type: string required: - pdID type: object portworxVolume: - description: PortworxVolume represents a portworx volume attached + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine properties: fsType: - description: FSType represents the filesystem type to mount + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean volumeID: - description: VolumeID uniquely identifies a Portworx volume + description: volumeID uniquely identifies a Portworx volume type: string required: - volumeID type: object projected: - description: Items for all in one resources secrets, configmaps, - and downward API + description: projected items for all in one resources secrets, + configmaps, and downward API properties: defaultMode: - description: Mode bits used to set permissions on created - files by default. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML accepts - both octal and decimal values, JSON requires decimal values - for mode bits. Directories within the path are not affected - by this setting. This might be in conflict with other - options that affect the file mode, like fsGroup, and the - result can be other mode bits set. + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. 
format: int32 type: integer sources: - description: list of volume projections + description: sources is the list of volume projections items: description: Projection that may be projected along with other supported volume types properties: configMap: - description: information about the configMap data - to project + description: configMap information about the configMap + data to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced ConfigMap + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected @@ -2155,27 +2720,28 @@ spec: a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element - '..'. May not start with the string '..'. + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. type: string required: - key @@ -2189,13 +2755,13 @@ spec: uid?' type: string optional: - description: Specify whether the ConfigMap or - its keys must be defined + description: optional specify whether the ConfigMap + or its keys must be defined type: boolean type: object downwardAPI: - description: information about the downwardAPI data - to project + description: downwardAPI information about the downwardAPI + data to project properties: items: description: Items is a list of DownwardAPIVolume @@ -2276,15 +2842,15 @@ spec: type: array type: object secret: - description: information about the secret data to - project + description: secret information about the secret data + to project properties: items: - description: If unspecified, each key-value pair - in the Data field of the referenced Secret will - be projected into the volume as a file whose - name is the key and content is the value. If - specified, the listed keys will be projected + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup @@ -2296,27 +2862,28 @@ spec: a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to - set permissions on this file. Must be - an octal value between 0000 and 0777 or - a decimal value between 0 and 511. YAML - accepts both octal and decimal values, - JSON requires decimal values for mode - bits. If not specified, the volume defaultMode - will be used. This might be in conflict - with other options that affect the file - mode, like fsGroup, and the result can - be other mode bits set.' + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file - to map the key to. May not be an absolute - path. May not contain the path element - '..'. May not start with the string '..'. + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. type: string required: - key @@ -2330,16 +2897,16 @@ spec: uid?' type: string optional: - description: Specify whether the Secret or its - key must be defined + description: optional field specify whether the + Secret or its key must be defined type: boolean type: object serviceAccountToken: - description: information about the serviceAccountToken - data to project + description: serviceAccountToken is information about + the serviceAccountToken data to project properties: audience: - description: Audience is the intended audience + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the @@ -2347,7 +2914,7 @@ spec: of the apiserver. type: string expirationSeconds: - description: ExpirationSeconds is the requested + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate @@ -2359,7 +2926,7 @@ spec: format: int64 type: integer path: - description: Path is the path relative to the + description: path is the path relative to the mount point of the file to project the token into. type: string @@ -2370,35 +2937,35 @@ spec: type: array type: object quobyte: - description: Quobyte represents a Quobyte mount on the host + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime properties: group: - description: Group to map volume access to Default is no + description: group to map volume access to Default is no group type: string readOnly: - description: ReadOnly here will force the Quobyte volume + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
type: boolean registry: - description: Registry represents a single or multiple Quobyte + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes type: string tenant: - description: Tenant owning the given Quobyte volume in the + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin type: string user: - description: User to map volume access to Defaults to serivceaccount + description: user to map volume access to Defaults to serivceaccount user type: string volume: - description: Volume is a string that references an already + description: volume is a string that references an already created Quobyte volume by name. type: string required: @@ -2406,41 +2973,42 @@ spec: - volume type: object rbd: - description: 'RBD represents a Rados Block Device mount on the + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' properties: fsType: - description: 'Filesystem type of the volume that you want - to mount. Tip: Ensure that the filesystem type is supported - by the host operating system. Examples: "ext4", "xfs", - "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' type: string image: - description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string keyring: - description: 'Keyring is the path to key ring for RBDUser. + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string monitors: - description: 'A collection of Ceph monitors. More info: - https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' items: type: string type: array pool: - description: 'The rados pool name. Default is rbd. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'pool is the rados pool name. Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string readOnly: - description: 'ReadOnly here will force the ReadOnly setting + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: boolean secretRef: - description: 'SecretRef is name of the authentication secret + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' properties: @@ -2450,35 +3018,36 @@ spec: type: string type: object user: - description: 'The rados user name. Default is admin. More - info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' type: string required: - image - monitors type: object scaleIO: - description: ScaleIO represents a ScaleIO persistent volume + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Default is "xfs". + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". type: string gateway: - description: The host address of the ScaleIO API Gateway. + description: gateway is the host address of the ScaleIO + API Gateway. type: string protectionDomain: - description: The name of the ScaleIO Protection Domain for - the configured storage. + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: SecretRef references to the secret for ScaleIO + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. properties: @@ -2488,25 +3057,26 @@ spec: type: string type: object sslEnabled: - description: Flag to enable/disable SSL communication with - Gateway, default false + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false type: boolean storageMode: - description: Indicates whether the storage for a volume - should be ThickProvisioned or ThinProvisioned. Default - is ThinProvisioned. + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. type: string storagePool: - description: The ScaleIO Storage Pool associated with the - protection domain. + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. type: string system: - description: The name of the storage system as configured - in ScaleIO. + description: system is the name of the storage system as + configured in ScaleIO. type: string volumeName: - description: The name of a volume already created in the - ScaleIO system that is associated with this volume source. + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. type: string required: - gateway @@ -2514,24 +3084,24 @@ spec: - system type: object secret: - description: 'Secret represents a secret that should populate + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' properties: defaultMode: - description: 'Optional: mode bits used to set permissions - on created files by default. 
Must be an octal value between - 0000 and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires decimal - values for mode bits. Defaults to 0644. Directories within - the path are not affected by this setting. This might - be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits - set.' + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer items: - description: If unspecified, each key-value pair in the - Data field of the referenced Secret will be projected + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be @@ -2543,25 +3113,25 @@ spec: description: Maps a string key to a path within a volume. properties: key: - description: The key to project. + description: key is the key to project. type: string mode: - description: 'Optional: mode bits used to set permissions - on this file. Must be an octal value between 0000 - and 0777 or a decimal value between 0 and 511. YAML - accepts both octal and decimal values, JSON requires - decimal values for mode bits. If not specified, - the volume defaultMode will be used. This might - be in conflict with other options that affect the - file mode, like fsGroup, and the result can be other - mode bits set.' + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' format: int32 type: integer path: - description: The relative path of the file to map - the key to. May not be an absolute path. May not - contain the path element '..'. May not start with - the string '..'. + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. type: string required: - key @@ -2569,29 +3139,30 @@ spec: type: object type: array optional: - description: Specify whether the Secret or its keys must - be defined + description: optional field specify whether the Secret or + its keys must be defined type: boolean secretName: - description: 'Name of the secret in the pod''s namespace - to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + description: 'secretName is the name of the secret in the + pod''s namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' type: string type: object storageos: - description: StorageOS represents a StorageOS volume attached + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string readOnly: - description: Defaults to false (read/write). ReadOnly here - will force the ReadOnly setting in VolumeMounts. + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: SecretRef specifies the secret to use for obtaining + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. properties: @@ -2601,12 +3172,12 @@ spec: type: string type: object volumeName: - description: VolumeName is the human-readable name of the + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. type: string volumeNamespace: - description: VolumeNamespace specifies the scope of the + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS @@ -2617,24 +3188,26 @@ spec: type: string type: object vsphereVolume: - description: VsphereVolume represents a vSphere volume attached + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine properties: fsType: - description: Filesystem type to mount. Must be a filesystem - type supported by the host operating system. Ex. "ext4", - "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. type: string storagePolicyID: - description: Storage Policy Based Management (SPBM) profile - ID associated with the StoragePolicyName. + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. type: string storagePolicyName: - description: Storage Policy Based Management (SPBM) profile - name. + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. type: string volumePath: - description: Path that identifies vSphere volume vmdk + description: volumePath is the path that identifies vSphere + volume vmdk type: string required: - volumePath @@ -2650,17 +3223,32 @@ spec: OpenTelemetryCollector. properties: messages: - description: Messages about actions performed by the operator on this - resource. + description: 'Messages about actions performed by the operator on + this resource. Deprecated: use Kubernetes events instead.' items: type: string type: array x-kubernetes-list-type: atomic replicas: - description: Replicas is currently not being set and might be removed - in the next version. 
+ description: 'Replicas is currently not being set and might be removed + in the next version. Deprecated: use "OpenTelemetryCollector.Status.Scale.Replicas" + instead.' format: int32 type: integer + scale: + description: Scale is the OpenTelemetryCollector's scale subresource + status. + properties: + replicas: + description: The total number non-terminated pods targeted by + this OpenTelemetryCollector's deployment or statefulSet. + format: int32 + type: integer + selector: + description: The selector used to match the OpenTelemetryCollector's + deployment or statefulSet pods. + type: string + type: object version: description: Version of the managed OpenTelemetry Collector (operand) type: string @@ -2670,8 +3258,9 @@ spec: storage: true subresources: scale: + labelSelectorPath: .status.scale.selector specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas + statusReplicasPath: .status.scale.replicas status: {} status: acceptedNames: @@ -2683,12 +3272,16 @@ status: apiVersion: v1 kind: ServiceAccount metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-controller-manager namespace: opentelemetry-operator-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-leader-election-role namespace: opentelemetry-operator-system rules: @@ -2724,6 +3317,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-manager-role rules: - apiGroups: @@ -2820,6 +3415,18 @@ rules: - patch - update - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - coordination.k8s.io resources: @@ -2871,6 +3478,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-metrics-reader rules: - nonResourceURLs: @@ -2881,6 +3490,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-proxy-role rules: - apiGroups: @@ -2899,6 +3510,8 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-leader-election-rolebinding namespace: opentelemetry-operator-system roleRef: @@ -2913,6 +3526,8 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -2926,6 +3541,8 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -2940,6 +3557,7 @@ apiVersion: v1 kind: Service metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-controller-manager-metrics-service namespace: opentelemetry-operator-system @@ -2950,11 +3568,14 @@ spec: protocol: TCP targetPort: https selector: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager --- 
apiVersion: v1 kind: Service metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-webhook-service namespace: opentelemetry-operator-system spec: @@ -2963,12 +3584,14 @@ spec: protocol: TCP targetPort: 9443 selector: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager --- apiVersion: apps/v1 kind: Deployment metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager name: opentelemetry-operator-controller-manager namespace: opentelemetry-operator-system @@ -2976,17 +3599,19 @@ spec: replicas: 1 selector: matchLabels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager template: metadata: labels: + app.kubernetes.io/name: opentelemetry-operator control-plane: controller-manager spec: containers: - args: - --metrics-addr=127.0.0.1:8080 - --enable-leader-election - image: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.42.0 + image: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.56.0 livenessProbe: httpGet: path: /healthz @@ -3019,13 +3644,20 @@ spec: - --secure-listen-address=0.0.0.0:8443 - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - - --v=10 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + - --v=0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0 name: kube-rbac-proxy ports: - containerPort: 8443 name: https protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi serviceAccountName: opentelemetry-operator-controller-manager terminationGracePeriodSeconds: 10 volumes: @@ -3037,6 +3669,8 @@ spec: apiVersion: cert-manager.io/v1 kind: Certificate metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-serving-cert namespace: opentelemetry-operator-system spec: @@ -3054,6 +3688,8 @@ spec: apiVersion: cert-manager.io/v1 kind: Issuer metadata: + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-selfsigned-issuer namespace: opentelemetry-operator-system spec: @@ -3064,11 +3700,12 @@ kind: MutatingWebhookConfiguration metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-mutating-webhook-configuration webhooks: - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3089,7 +3726,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3110,7 +3746,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3135,11 +3770,12 @@ kind: ValidatingWebhookConfiguration metadata: annotations: cert-manager.io/inject-ca-from: opentelemetry-operator-system/opentelemetry-operator-serving-cert + labels: + app.kubernetes.io/name: opentelemetry-operator name: opentelemetry-operator-validating-webhook-configuration webhooks: - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3160,7 +3796,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3180,7 +3815,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 
clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3201,7 +3835,6 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 clientConfig: service: name: opentelemetry-operator-webhook-service @@ -3219,3 +3852,4 @@ webhooks: resources: - opentelemetrycollectors sideEffects: None + diff --git a/pulumi/python/kubernetes/prometheus/__main__.py b/pulumi/python/kubernetes/prometheus/__main__.py index d0c42b0b..69969fb7 100644 --- a/pulumi/python/kubernetes/prometheus/__main__.py +++ b/pulumi/python/kubernetes/prometheus/__main__.py @@ -1,8 +1,10 @@ import os - +import base64 +from typing import Mapping import pulumi import pulumi_kubernetes as k8s from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs +from pulumi_kubernetes.core.v1 import Secret from pulumi import Output from pulumi_kubernetes.yaml import ConfigGroup from pulumi import CustomTimeouts @@ -10,27 +12,47 @@ from kic_util import pulumi_config -def project_name_from_project_dir(dirname: str): +def project_name_from_infrastructure_dir(dirname: str): script_dir = os.path.dirname(os.path.abspath(__file__)) project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) return pulumi_config.get_pulumi_project_name(project_path) +def project_name_from_kubernetes_dir(dirname: str): + script_dir = os.path.dirname(os.path.abspath(__file__)) + project_path = os.path.join(script_dir, '..', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + def servicemon_manifests_location(): script_dir = os.path.dirname(os.path.abspath(__file__)) servicemon_manifests_path = os.path.join(script_dir, 'manifests', '*.yaml') return servicemon_manifests_path +def extract_adminpass_from_k8s_secrets(secrets: Mapping[str, str]) -> str: + if 'adminpass' not in secrets: + raise RuntimeError('Secret [adminpass] not found in Kubernetes secret store') + base64_string = secrets['adminpass'] + byte_data = base64.b64decode(base64_string) + password = str(byte_data, 'utf-8') + return password + + stack_name = pulumi.get_stack() project_name = pulumi.get_project() pulumi_user = pulumi_config.get_pulumi_user() -k8_project_name = project_name_from_project_dir('kubeconfig') +k8_project_name = project_name_from_infrastructure_dir('kubeconfig') k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) +secrets_project_name = project_name_from_kubernetes_dir('secrets') +secrets_stack_ref_id = f"{pulumi_user}/{secrets_project_name}/{stack_name}" +secrets_stack_ref = pulumi.StackReference(secrets_stack_ref_id) +pulumi_secrets = secrets_stack_ref.require_output('pulumi_secrets') + k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig) @@ -44,7 +66,7 @@ def servicemon_manifests_location(): chart_name = 'kube-prometheus-stack' chart_version = config.get('chart_version') if not chart_version: - chart_version = '30.0.1' + chart_version = '39.2.1' helm_repo_name = config.get('prometheus_helm_repo_name') if not helm_repo_name: helm_repo_name = 'prometheus-community' @@ -58,12 +80,13 @@ def servicemon_manifests_location(): # helm_timeout = config.get_int('helm_timeout') if not helm_timeout: - helm_timeout = 300 + helm_timeout = 600 -# Require an admin password, but do not encrypt it due to the -# issues we experienced with Anthos; this can be adjusted at the -# same time that we fix the Anthos issues.
-adminpass = config.require('adminpass') +# Use Prometheus administrator password stored in Kubernetes secrets +prometheus_secrets = Secret.get(resource_name='pulumi-secret-prometheus', + id=pulumi_secrets['prometheus'], + opts=pulumi.ResourceOptions(provider=k8s_provider)).data +adminpass = pulumi.Output.unsecret(prometheus_secrets).apply(extract_adminpass_from_k8s_secrets) prometheus_release_args = ReleaseArgs( chart=chart_name, @@ -184,7 +207,7 @@ def servicemon_manifests_location(): statsd_chart_name = 'prometheus-statsd-exporter' statsd_chart_version = config.get('statsd_chart_version') if not statsd_chart_version: - statsd_chart_version = '0.4.2' + statsd_chart_version = '0.5.0' helm_repo_name = config.get('prometheus_helm_repo_name') if not helm_repo_name: helm_repo_name = 'prometheus-community' diff --git a/pulumi/python/kubernetes/prometheus/extras/README.md b/pulumi/python/kubernetes/prometheus/extras/README.md index a485fc80..8fae4c43 100644 --- a/pulumi/python/kubernetes/prometheus/extras/README.md +++ b/pulumi/python/kubernetes/prometheus/extras/README.md @@ -1,13 +1,14 @@ -## Purpose -This directory contains a manifest that can be used to change the metrics bind port -for the kube-proxy from 127.0.0.1 to 0.0.0.0 in order to allow the metrics to be scraped -by the prometheus service. +# Purpose -This is not being automatically applied, since it is changing the bind address that is -being used for the metrics port. That said, this should be secure since it's internal -to the installation and the connection is done via HTTPS. +This directory contains a manifest that can be used to change the metrics +bind port for the kube-proxy from 127.0.0.1 to 0.0.0.0 in order to allow the +metrics to be scraped by the prometheus service. + +This is not being automatically applied, since it is changing the bind address +that is being used for the metrics port. That said, this should be secure +since it is internal to the installation and the connection is done via HTTPS. + +However, please see this -However, please see this [github issue](https://github.com/prometheus-community/helm-charts/issues/977) for the full discussion of why this is required. 
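Kubernetes hands back the `data` map of a Secret with every value base64-encoded, which is why the new `extract_adminpass_from_k8s_secrets` helper above decodes before returning the password. A self-contained sketch of the same round trip, with an illustrative key name and sample value rather than anything from the real deployment:

```python
import base64
from typing import Mapping


def extract_secret_value(secrets: Mapping[str, str], key: str) -> str:
    """Decode one value from the base64-encoded data map of a Kubernetes Secret."""
    if key not in secrets:
        raise KeyError(f'Secret [{key}] not found in Kubernetes secret store')
    return base64.b64decode(secrets[key]).decode('utf-8')


# 'aHVudGVyMg==' is base64 for 'hunter2' -- sample data, not a real credential.
assert extract_secret_value({'adminpass': 'aHVudGVyMg=='}, 'adminpass') == 'hunter2'
```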
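The Helm deployment itself goes through the pulumi_kubernetes Helm v3 `Release` resource using the chart name, version, and timeout resolved in the code above. A minimal sketch of that shape, assuming a placeholder repository URL and namespace (the real values come from Pulumi config and the surrounding program):

```python
from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs

# All values here are illustrative; the real program resolves them from config
# and passes a Kubernetes provider bound to the target cluster.
prometheus_release = Release(
    'prometheus',
    ReleaseArgs(
        chart='kube-prometheus-stack',
        version='39.2.1',
        repository_opts=RepositoryOptsArgs(
            repo='https://prometheus-community.github.io/helm-charts'),
        namespace='prometheus',
        timeout=600,
    ),
)
```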
- diff --git a/pulumi/python/kubernetes/secrets/.gitignore b/pulumi/python/kubernetes/secrets/.gitignore new file mode 100644 index 00000000..31cefeff --- /dev/null +++ b/pulumi/python/kubernetes/secrets/.gitignore @@ -0,0 +1 @@ +Pulumi.*.yaml \ No newline at end of file diff --git a/pulumi/python/kubernetes/secrets/Pulumi.yaml b/pulumi/python/kubernetes/secrets/Pulumi.yaml new file mode 100644 index 00000000..ad441f83 --- /dev/null +++ b/pulumi/python/kubernetes/secrets/Pulumi.yaml @@ -0,0 +1,6 @@ +name: secrets +runtime: + name: python + options: + virtualenv: ../../venv +description: Adds Kubernetes Secrets diff --git a/pulumi/python/kubernetes/secrets/__main__.py b/pulumi/python/kubernetes/secrets/__main__.py new file mode 100644 index 00000000..440e9c3b --- /dev/null +++ b/pulumi/python/kubernetes/secrets/__main__.py @@ -0,0 +1,47 @@ +import os + +import pulumi +import pulumi_kubernetes as k8s +from pulumi_kubernetes.core.v1 import Secret, SecretInitArgs + +from kic_util import pulumi_config + +script_dir = os.path.dirname(os.path.abspath(__file__)) + + +def project_name_from_project_dir(dirname: str): + global script_dir + project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname) + return pulumi_config.get_pulumi_project_name(project_path) + + +stack_name = pulumi.get_stack() +project_name = pulumi.get_project() +pulumi_user = pulumi_config.get_pulumi_user() + +k8_project_name = project_name_from_project_dir('kubeconfig') +k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}" +k8_stack_ref = pulumi.StackReference(k8_stack_ref_id) +kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c)) + +k8s_provider = k8s.Provider(resource_name='kubernetes', kubeconfig=kubeconfig) +keys = pulumi.runtime.get_config_secret_keys_env() + +config_secrets = {} +for key in keys: + bag_name, config_key = key.split(':') + config_bag = pulumi.config.Config(bag_name) + if bag_name not in config_secrets.keys(): + config_secrets[bag_name] = {} + + config_secrets[bag_name][config_key] = pulumi.Output.unsecret(config_bag.require_secret(config_key)) + +secrets_output = {} +for k, v in config_secrets.items(): + resource_name = f'pulumi-secret-{k}' + secret = Secret(resource_name=resource_name, + args=SecretInitArgs(string_data=v), + opts=pulumi.ResourceOptions(provider=k8s_provider)) + secrets_output[k] = secret.id + +pulumi.export('pulumi_secrets', secrets_output) diff --git a/pulumi/python/requirements.txt b/pulumi/python/requirements.txt new file mode 100644 index 00000000..0ad7699f --- /dev/null +++ b/pulumi/python/requirements.txt @@ -0,0 +1,21 @@ +awscli~=1.25.35 +grpcio==1.43.0 +fart~=0.1.5 +lolcat~=1.4 +nodeenv~=1.6.0 +passlib~=1.7.4 +pulumi-aws>=4.39.0 +pulumi-docker==3.1.0 +pulumi-eks>=0.41.2 +pulumi-kubernetes==3.20.1 +pycryptodome~=3.14.0 +PyYAML~=5.4.1 +requests~=2.27.1 +setuptools==62.1.0 +setuptools-git-versioning==1.9.2 +wheel==0.37.1 +yamlreader==3.0.4 +pulumi-digitalocean==4.12.0 +pulumi-linode==3.7.1 +linode-cli~=5.17.2 +pulumi~=3.36.0 \ No newline at end of file diff --git a/pulumi/python/runner b/pulumi/python/runner new file mode 100755 index 00000000..0c96c7e5 --- /dev/null +++ b/pulumi/python/runner @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -o errexit # abort on nonzero exit status +set -o pipefail # don't hide errors within pipes + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +PYENV_ROOT="${script_dir}/.pyenv" + +if [ -d "${PYENV_ROOT}" ]; then + PATH="${PATH}:${PYENV_ROOT}/bin" + eval 
"$(pyenv init --path)" + eval "$(pyenv init -)" +fi + +if [ -d "${script_dir}/venv" ]; then + source "${script_dir}/venv/bin/activate" +else + >&2 echo "Python virtual environment not found at path: ${script_dir}/venv" + >&2 echo "Have you run setup_venv.sh to initialize the environment?" +fi + +exec "$script_dir/automation/main.py" $@ \ No newline at end of file diff --git a/pulumi/python/tools/README.md b/pulumi/python/tools/README.md index ba27c045..61e9b065 100644 --- a/pulumi/python/tools/README.md +++ b/pulumi/python/tools/README.md @@ -1,35 +1,40 @@ -## Directory +# Directory `/pulumi/python/tools` -## Deprecation Notice -These tools are no longer supported by the MARA team and will be removed in a future release. They *should* work -correctly, but this is not guaranteed. Any use is at your own risk. +## _Deprecation Notice_ + +These tools are no longer supported by the MARA team and will be removed in a +future release. They *should* work correctly, but this is not guaranteed. Any +use is at your own risk. ## Purpose -This directory holds common tools that *may* be required by kubernetes installations that do not meet the minimum -requirements of MARA as checked by the [testcap.sh](../../../bin/testcap.sh) script. +This directory holds common tools that *may* be required by kubernetes +installations that do not meet the minimum requirements of MARA. These tools address two main areas: -- Ability to create persistent volumes. -- Ability to obtain an external egress IP. +* Ability to create persistent volumes. +* Ability to obtain an external egress IP. -Note that these tools are not specifically endorsed by the creators of MARA, and you should do your own determination of -the best way to provide these capabilities. Many kubernetes distributions have recommended approaches to solving these -problems. +Note that these tools are not specifically endorsed by the creators of MARA, and +you should do your own determination of the best way to provide these +capabilities. Many kubernetes distributions have recommended approaches to +solving these problems. -To use these tools you will need to run the [kubernetes-extras.sh](../../../bin/kubernetes-extras.sh) script from the -main `bin` directory. This will walk you through the process of setting up these tools. +To use these tools you will need to run the +[kubernetes-extras.sh](../../../bin/kubernetes-extras.sh) script from the +main `bin` directory. This will walk you through the process of setting up +these tools. ## Key Files -- [`common`](./common) Common directory to hold the pulumi configuration file. -- [`kubevip`](./kubevip) Install directory for the `kubevip` package. Currently WIP. -- [`metallb`](./metallb) Install directory for the `metallb` package. -- [`nfsvolumes`](./nfsvolumes) Install directory for the `nfsvolumes` package. +* [`common`](./common) Common directory to hold the pulumi configuration file. +* [`metallb`](./metallb) Install directory for the `metallb` package. +* [`nfsvolumes`](./nfsvolumes) Install directory for the `nfsvolumes` package. ## Notes -Please read the comments inside the installation script, as there are some important caveats. +Please read the comments inside the installation script, as there are some +important caveats. 
diff --git a/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py b/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py index 983f6d9b..8609e4af 100644 --- a/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py +++ b/pulumi/python/utility/kic-image-build/ingress_controller_image_base_provider.py @@ -1,5 +1,5 @@ import os -from typing import Optional, Dict, List, Any +from typing import Optional, Dict, List, Any, Callable import pulumi from pulumi import Resource @@ -21,8 +21,8 @@ def __init__(self, if debug_logger_func: self.debug_logger = debug_logger_func - else: - self.debug_logger = self.__debug_logger_func + elif self._debug_logger_func: + self.debug_logger = self._debug_logger_func super().__init__() @@ -32,7 +32,7 @@ def delete(self, _id: str, _props: Any) -> None: pulumi.log.info(f'deleting image {image_id}') self._docker_delete_image(image_id) - def __debug_logger_func(self, msg): + def _debug_logger_func(self, msg): pulumi.log.debug(msg, self.resource) def _run_docker(self, cmd: str, suppress_error: bool = False) -> (str, str): diff --git a/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py b/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py index 89b7f939..288b15e2 100644 --- a/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py +++ b/pulumi/python/utility/kic-image-build/test_ingress_controller_image_builder_provider.py @@ -1,6 +1,6 @@ import os import unittest -import ingress_controller_image_builder_provider as image_builder +from ingress_controller_image_builder_provider import IngressControllerImageBuilderProvider from kic_util.docker_image_name import DockerImageName @@ -8,7 +8,7 @@ class TestIngressControllerImageBuilderProvider(unittest.TestCase): def setUp(self) -> None: super().setUp() - self.provider = image_builder.IngressControllerImageBuilderProvider() + self.provider = IngressControllerImageBuilderProvider() def assertStrEqual(self, first, second, msg=None): self.assertEqual(first=str(first), second=str(second), msg=msg) diff --git a/pulumi/python/utility/kic-image-push/__main__.py b/pulumi/python/utility/kic-image-push/__main__.py index dd569351..f377bdbe 100644 --- a/pulumi/python/utility/kic-image-push/__main__.py +++ b/pulumi/python/utility/kic-image-push/__main__.py @@ -1,18 +1,11 @@ -import base64 +import importlib import os - import pulumi -from pulumi_aws import ecr - +from pulumi import Output from kic_util import pulumi_config -from repository_push import RepositoryPush, RepositoryPushArgs, RepositoryCredentialsArgs - +from registries.base_registry import ContainerRegistry -# Leaving to use EKS since this is tied to AWS.... 
-def aws_project_name_from_project_dir(dirname: str):
-    script_dir = os.path.dirname(os.path.abspath(__file__))
-    project_path = os.path.join(script_dir, '..', '..', 'infrastructure', 'aws', dirname)
-    return pulumi_config.get_pulumi_project_name(project_path)
+from repository_push import RepositoryPush, RepositoryPushArgs
 
 
 def project_name_from_project_dir(dirname: str):
@@ -21,36 +14,6 @@ def project_name_from_project_dir(dirname: str):
     return pulumi_config.get_pulumi_project_name(project_path)
 
 
-# Get login credentials for ECR, so that we can use it to store Docker images
-def get_ecr_credentials(registry_id: str):
-    credentials = ecr.get_credentials(registry_id)
-    token = credentials.authorization_token
-    decoded = str(base64.b64decode(token), 'utf-8')
-    parts = decoded.split(':', 2)
-    if len(parts) != 2:
-        raise ValueError("Unexpected format for decoded ECR authorization token")
-    username = pulumi.Output.secret(parts[0])
-    password = pulumi.Output.secret(parts[1])
-    return RepositoryCredentialsArgs(username=username, password=password)
-
-
-stack_name = pulumi.get_stack()
-project_name = pulumi.get_project()
-pulumi_user = pulumi_config.get_pulumi_user()
-
-ecr_project_name = aws_project_name_from_project_dir('ecr')
-ecr_stack_ref_id = f"{pulumi_user}/{ecr_project_name}/{stack_name}"
-ecr_stack_ref = pulumi.StackReference(ecr_stack_ref_id)
-ecr_repository_url = ecr_stack_ref.require_output('repository_url')
-ecr_registry_id = ecr_stack_ref.require_output('registry_id')
-ecr_credentials = ecr_registry_id.apply(get_ecr_credentials)
-
-kic_image_build_project_name = project_name_from_project_dir('kic-image-build')
-kic_image_build_stack_ref_id = f"{pulumi_user}/{kic_image_build_project_name}/{stack_name}"
-kick_image_build_stack_ref = pulumi.StackReference(kic_image_build_stack_ref_id)
-ingress_image = kick_image_build_stack_ref.require_output('ingress_image')
-
-
 def select_image_name(image):
     if 'image_name_alias' in image:
         return image['image_name_alias']
@@ -77,6 +40,16 @@ def select_image_tag(image):
         return image['image_tag']
 
 
+stack_name = pulumi.get_stack()
+project_name = pulumi.get_project()
+pulumi_user = pulumi_config.get_pulumi_user()
+k8s_config = pulumi.Config('kubernetes')
+
+kic_image_build_project_name = project_name_from_project_dir('kic-image-build')
+kic_image_build_stack_ref_id = f"{pulumi_user}/{kic_image_build_project_name}/{stack_name}"
+kic_image_build_stack_ref = pulumi.StackReference(kic_image_build_stack_ref_id)
+ingress_image = kic_image_build_stack_ref.require_output('ingress_image')
+
 # We default to using the image name alias because it is a more precise definition
 # of the image type when we build from source.
 image_name = ingress_image.apply(select_image_name)
@@ -84,15 +57,34 @@ def select_image_tag(image):
 image_id = ingress_image.apply(select_image_id)
 image_tag = ingress_image.apply(select_image_tag)
 
-repo_args = RepositoryPushArgs(repository_url=ecr_repository_url,
-                               credentials=ecr_credentials,
-                               image_id=image_id,
-                               image_name=image_name,
-                               image_tag=image_tag,
-                               image_tag_alias=image_tag_alias)
 
-# Push the images to the ECR repo
-ecr_repo_push = RepositoryPush(name='ingress-controller-repository-push',
-                               repository_args=repo_args)
+def push_to_container_registry(container_registry: ContainerRegistry) -> RepositoryPush:
+    if container_registry.login_to_registry():
+        repo_args = RepositoryPushArgs(repository_url=container_registry.registry_url,
+                                       image_id=image_id,
+                                       image_name=image_name,
+                                       image_tag=image_tag,
+                                       image_tag_alias=image_tag_alias)
+
+        # Push the images to the container registry
+        _repo_push = RepositoryPush(name='ingress-controller-registry-push',
+                                    repository_args=repo_args,
+                                    check_if_id_matches_tag_func=container_registry.check_if_id_matches_tag)
+
+        pulumi.log.info('Pushing NGINX Ingress Controller container image to '
+                        f'{container_registry.registry_implementation_name()}')
+
+        return _repo_push
+    else:
+        raise RuntimeError('Unable to log into container registry')
+
+
+# Dynamically determine the infrastructure provider, instantiate the
+# corresponding registry class, then apply the pulumi async closures.
+infra_type = k8s_config.require('infra_type').lower()
+module = importlib.import_module(name=f'registries.{infra_type}')
+container_registry_class = module.CLASS
+repo_push: Output[RepositoryPush] = container_registry_class.instance(stack_name, pulumi_user)\
+    .apply(push_to_container_registry)
 
-pulumi.export('ecr_repository', ecr_repo_push)
+pulumi.export('container_repo_push', Output.unsecret(repo_push))
diff --git a/pulumi/python/utility/kic-image-push/registries/aws.py b/pulumi/python/utility/kic-image-push/registries/aws.py
new file mode 100644
index 00000000..ea32aa33
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/aws.py
@@ -0,0 +1,70 @@
+import os
+
+import requests
+from typing import List, Any
+
+from pulumi import Output, StackReference, log
+from pulumi_aws import ecr
+from kic_util import pulumi_config
+from registries.base_registry import ContainerRegistry, RegistryCredentials
+
+
+class ElasticContainerRegistry(ContainerRegistry):
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]:
+        super().instance(stack_name, pulumi_user)
+        ecr_project_name = ElasticContainerRegistry.aws_project_name_from_project_dir('ecr')
+        ecr_stack_ref_id = f"{pulumi_user}/{ecr_project_name}/{stack_name}"
+        stack_ref = StackReference(ecr_stack_ref_id)
+        # Async query for credentials from stack reference
+        ecr_registry_id = stack_ref.require_output('registry_id')
+        credentials_output = ecr_registry_id.apply(ElasticContainerRegistry.get_ecr_credentials)
+        # Async query for repository url from stack reference
+        # Note that AWS ECR refers to itself as a repository and not a registry; we aim to keep
+        # that naming consistent when referring directly to ECR nouns
+        repository_url_output = stack_ref.require_output('repository_url')
+
+        def _make_instance(params: List[Any]) -> ElasticContainerRegistry:
+            return cls(stack_name=stack_name, pulumi_user=pulumi_user, registry_url=params[0], credentials=params[1])
+
+        return Output.all(repository_url_output, credentials_output).apply(_make_instance)
+
+    @staticmethod
+    def 
aws_project_name_from_project_dir(dirname: str):
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'aws', dirname)
+        return pulumi_config.get_pulumi_project_name(project_path)
+
+    @staticmethod
+    def get_ecr_credentials(registry_id: str) -> RegistryCredentials:
+        credentials = ecr.get_credentials(registry_id)
+        token = credentials.authorization_token
+        return ContainerRegistry.decode_credentials(token)
+
+    def registry_implementation_name(self) -> str:
+        return 'AWS Elastic Container Registry (ECR)'
+
+    def _ecr_docker_api_url(self) -> str:
+        registry_url_parts = self.registry_url.split('/')
+        ecr_host = registry_url_parts[0]
+        ecr_path = registry_url_parts[1]
+        return f'https://{ecr_host}/v2/{ecr_path}'
+
+    def check_if_id_matches_tag(self, image_tag: str, new_image_id: str) -> bool:
+        docker_api_url = self._ecr_docker_api_url()
+        auth_tuple = (self.credentials.username, self.credentials.password)
+
+        log.debug(f'Querying for latest image id: {docker_api_url}/manifests/{image_tag}')
+        with requests.get(f'{docker_api_url}/manifests/{image_tag}', auth=auth_tuple) as response:
+            if response.status_code != 200:
+                log.warn('Unable to query ECR directly for image id')
+                return False
+            json_response = response.json()
+            if 'config' in json_response and 'digest' in json_response['config']:
+                remote_image_id = json_response['config']['digest']
+                return remote_image_id != new_image_id
+            else:
+                return True
+
+
+CLASS = ElasticContainerRegistry
diff --git a/pulumi/python/utility/kic-image-push/registries/base_registry.py b/pulumi/python/utility/kic-image-push/registries/base_registry.py
new file mode 100644
index 00000000..7171cd5a
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/base_registry.py
@@ -0,0 +1,82 @@
+import base64
+import urllib.parse
+from typing import Optional
+
+import pulumi.log
+from pulumi import Input
+import pulumi_docker as docker
+
+from kic_util import external_process
+
+
+class RegistryCredentials:
+    username: Input[str]
+    password: Input[str]
+
+    def __init__(self,
+                 username: Input[str],
+                 password: Input[str]):
+        self.username = username
+        self.password = password
+
+
+class ContainerRegistry:
+    stack_name: str
+    pulumi_user: str
+    credentials: Optional[RegistryCredentials]
+    registry_url: str
+
+    def __init__(self,
+                 stack_name: str,
+                 pulumi_user: str,
+                 registry_url: str,
+                 credentials: Optional[RegistryCredentials]) -> None:
+        super().__init__()
+        self.stack_name = stack_name
+        self.pulumi_user = pulumi_user
+        self.registry_url = registry_url
+        self.credentials = credentials
+
+    def format_registry_url_for_docker_login(self):
+        # We assume that the scheme is https because that's what is used most everywhere
+        registry_host_url = urllib.parse.urlparse(f'https://{self.registry_url}')
+        # We strip out the path from the URL because it isn't used when logging into a repository
+        return f'{registry_host_url.scheme}://{registry_host_url.hostname}'
+
+    def login_to_registry(self) -> Optional[docker.LoginResult]:
+        registry = docker.Registry(registry=self.format_registry_url_for_docker_login(),
+                                   username=self.credentials.username,
+                                   password=self.credentials.password)
+
+        docker.login_to_registry(registry=registry, log_resource=None)
+        pulumi.log.info(f'Logged into container registry: {registry.registry}')
+
+        if not docker.login_results:
+            return None
+        if docker.login_results[0]:
+            return docker.login_results[0]
+
+    def logout_of_registry(self):
+        docker_cmd = f'docker logout {self.format_registry_url_for_docker_login()}'
+        res, _ = external_process.run(cmd=docker_cmd)
+        pulumi.log.info(res)
+
+    def check_if_id_matches_tag(self, image_tag: str, new_image_id: str) -> bool:
+        return False
+
+    def registry_implementation_name(self) -> str:
+        raise NotImplementedError
+
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str):
+        pass
+
+    @staticmethod
+    def decode_credentials(encoded_token: str) -> RegistryCredentials:
+        decoded = str(base64.b64decode(encoded_token), 'ascii')
+        parts = decoded.split(':', 1)
+        if len(parts) != 2:
+            raise ValueError("Unexpected format for decoded registry authorization token")
+        username = parts[0]
+        password = parts[1]
+        return RegistryCredentials(username=username, password=password)
diff --git a/pulumi/python/utility/kic-image-push/registries/do.py b/pulumi/python/utility/kic-image-push/registries/do.py
new file mode 100644
index 00000000..6659fb9b
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/do.py
@@ -0,0 +1,62 @@
+import json
+import os
+from typing import List, Any
+from pulumi import Output, StackReference, ResourceOptions
+from pulumi_digitalocean import ContainerRegistryDockerCredentials
+
+from kic_util import pulumi_config
+from registries.base_registry import ContainerRegistry, RegistryCredentials
+
+
+class DigitalOceanContainerRegistry(ContainerRegistry):
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]:
+        super().instance(stack_name, pulumi_user)
+        # Pull properties from the Pulumi project that defines the Digital Ocean repository
+        container_registry_project_name = DigitalOceanContainerRegistry.project_name_from_do_dir(
+            'container-registry')
+        container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name}/{stack_name}"
+        stack_ref = StackReference(container_registry_stack_ref_id)
+        container_registry_output = stack_ref.require_output('container_registry')
+        registry_name_output = stack_ref.require_output('container_registry_name')
+
+        def _docker_credentials() -> Output[str]:
+            four_hours = 3_600 * 4
+            registry_credentials = ContainerRegistryDockerCredentials(resource_name='do_docker_credentials',
+                                                                      registry_name=registry_name_output,
+                                                                      expiry_seconds=four_hours,
+                                                                      write=True,
+                                                                      opts=ResourceOptions(delete_before_replace=True))
+            return registry_credentials.docker_credentials
+
+        def _make_instance(params: List[Any]) -> DigitalOceanContainerRegistry:
+            container_registry = params[0]
+            do_docker_creds = params[1]
+            server_url = container_registry['server_url']
+            endpoint = container_registry['endpoint']
+            registry_url = f'{endpoint}/nginx-ingress'
+            _credentials = DigitalOceanContainerRegistry._decode_docker_credentials(server_url, do_docker_creds)
+
+            return cls(stack_name=stack_name, pulumi_user=pulumi_user,
+                       registry_url=registry_url, credentials=_credentials)
+
+        return Output.all(container_registry_output, _docker_credentials()).apply(_make_instance)
+
+    def registry_implementation_name(self) -> str:
+        return 'Digital Ocean Container Registry'
+
+    @staticmethod
+    def project_name_from_do_dir(dirname: str):
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'digitalocean', dirname)
+        return pulumi_config.get_pulumi_project_name(project_path)
+
+    @staticmethod
+    def _decode_docker_credentials(server_url: str,
+                                   docker_credentials_json: str) -> RegistryCredentials:
+        credential_json = 
json.loads(docker_credentials_json)
+        auths_json = credential_json['auths']
+        return ContainerRegistry.decode_credentials(auths_json[server_url]['auth'])
+
+
+CLASS = DigitalOceanContainerRegistry
diff --git a/pulumi/python/utility/kic-image-push/registries/lke.py b/pulumi/python/utility/kic-image-push/registries/lke.py
new file mode 100644
index 00000000..c0fa2fa4
--- /dev/null
+++ b/pulumi/python/utility/kic-image-push/registries/lke.py
@@ -0,0 +1,43 @@
+import os
+from typing import List, Any
+from pulumi import Output, StackReference
+
+from kic_util import pulumi_config
+from registries.base_registry import ContainerRegistry, RegistryCredentials
+
+
+class LinodeHarborRegistry(ContainerRegistry):
+    @classmethod
+    def instance(cls, stack_name: str, pulumi_user: str) -> Output[ContainerRegistry]:
+        super().instance(stack_name, pulumi_user)
+        # Pull properties from the Pulumi project that defines the Linode Harbor repository
+        container_registry_project_name = LinodeHarborRegistry.project_name_from_linode_dir('harbor')
+        container_registry_stack_ref_id = f"{pulumi_user}/{container_registry_project_name}/{stack_name}"
+        stack_ref = StackReference(container_registry_stack_ref_id)
+        harbor_hostname_output = stack_ref.require_output('harbor_hostname')
+        harbor_user_output = stack_ref.require_output('harbor_user')
+        harbor_password_output = stack_ref.require_output('harbor_password')
+
+        def _make_instance(params: List[Any]) -> LinodeHarborRegistry:
+            hostname = params[0]
+            username = params[1]
+            password = params[2]
+
+            registry_url = f'{hostname}/library/ingress-controller'
+            credentials = RegistryCredentials(username=username, password=password)
+
+            return cls(stack_name=stack_name, pulumi_user=pulumi_user, registry_url=registry_url, credentials=credentials)
+
+        return Output.all(harbor_hostname_output, harbor_user_output, harbor_password_output).apply(_make_instance)
+
+    @staticmethod
+    def project_name_from_linode_dir(dirname: str):
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', 'linode', dirname)
+        return pulumi_config.get_pulumi_project_name(project_path)
+
+    def registry_implementation_name(self) -> str:
+        return 'Harbor'
+
+
+CLASS = LinodeHarborRegistry
diff --git a/pulumi/python/utility/kic-image-push/repository_push.py b/pulumi/python/utility/kic-image-push/repository_push.py
index a0b39e6a..eaf58a90 100644
--- a/pulumi/python/utility/kic-image-push/repository_push.py
+++ b/pulumi/python/utility/kic-image-push/repository_push.py
@@ -1,8 +1,6 @@
 import uuid
-from typing import Any, List, Optional
-import urllib.parse
+from typing import Any, List, Optional, Callable
 
-import requests
 from pulumi.dynamic import ResourceProvider, Resource, CreateResult, CheckResult, ReadResult, CheckFailure, DiffResult, \
     UpdateResult
 import pulumi
@@ -11,31 +9,20 @@
 from kic_util.docker_image_name import DockerImageName
 
 __all__ = [
-    'RepositoryCredentialsArgs',
     'RepositoryPush',
     'RepositoryPushArgs'
 ]
 
 
-class RepositoryCredentialsArgs:
-    def __init__(self,
-                 username: pulumi.Input[str],
-                 password: pulumi.Input[str]):
-        self.username = username
-        self.password = password
-
-
 @pulumi.input_type
 class RepositoryPushArgs(dict):
     def __init__(self,
                  repository_url: pulumi.Input[str],
-                 credentials: pulumi.Input[pulumi.InputType['RepositoryCredentialsArgs']],
                  image_id: pulumi.Input[str],
                  image_name: pulumi.Input[str],
                  image_tag: pulumi.Input[str],
                  image_tag_alias: 
Optional[pulumi.Input[str]] = None): self.repository_url = repository_url - self.credentials = credentials self.image_id = image_id self.image_name = image_name self.image_tag = image_tag @@ -43,8 +30,6 @@ def __init__(self, dict_init = { 'repository_url': self.repository_url, - 'repository_username': self.credentials.username, - 'repository_password': self.credentials.password, 'image_id': self.image_id, 'image_name': self.image_name, 'image_tag': self.image_tag, @@ -58,30 +43,24 @@ def __init__(self, class RepositoryPushProvider(ResourceProvider): resource: Resource + check_if_id_matches_tag_func: Callable[[str, str], bool] REQUIRED_PROPS: List[str] = [ 'repository_url', 'image_id', 'image_name', 'image_tag', - 'repository_username', - 'repository_password' ] - def __init__(self, resource: pulumi.Resource) -> None: + def __init__(self, + resource: pulumi.Resource, + check_if_id_matches_tag_func: Optional[Callable[[str, str], bool]] = None) -> None: self.resource = resource - super().__init__() - - def login_to_ecr_repo(self, repository_url: str, username: str, password: str) -> docker.Registry: - # We assume that the scheme is https because that's what is used most everywhere - repo_host_url = urllib.parse.urlparse(f'https://{repository_url}') - # We strip out the path from the URL because it isn't used when logging into a repository - repo_host = f'{repo_host_url.scheme}://{repo_host_url.hostname}' - registry = docker.Registry(registry=repo_host, - username=username, - password=password) - docker.login_to_registry(registry=registry, log_resource=self.resource) - return registry + if check_if_id_matches_tag_func: + self.check_if_id_matches_tag_func = check_if_id_matches_tag_func + else: + self.check_if_id_matches_tag_func = lambda image_tag, new_image_id: False + super().__init__() def push_image_to_repo(self, repository_url: str, @@ -136,8 +115,6 @@ def check_for_param(param: str): def create(self, props: Any) -> CreateResult: repository_url = props['repository_url'] - repository_username = props['repository_username'] - repository_password = props['repository_password'] image_name = props['image_name'] image_tag = props['image_tag'] @@ -146,28 +123,24 @@ def create(self, props: Any) -> CreateResult: else: image_tag_alias = None - self.login_to_ecr_repo(repository_url=repository_url, - username=repository_username, - password=repository_password) - # Push the KIC tag and tag_alias, so that the KIC image can be easily identified on the repository - ecr_image_name = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=image_name, - image_tag=image_tag) - pulumi.log.info(msg=f'Tagged and pushed image [{image_name}] to [{ecr_image_name}]', + repo_image_name = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=image_name, + image_tag=image_tag) + pulumi.log.info(msg=f'Tagged and pushed image [{image_name}] to [{repo_image_name}]', resource=self.resource) - outputs = {'ecr_image_name': str(ecr_image_name), - 'ecr_image_id': props['image_id']} + outputs = {'repo_image_name': str(repo_image_name), + 'repo_image_id': props['image_id']} if image_tag_alias: - ecr_image_name_alias = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=image_name, - image_tag=image_tag_alias) - outputs['ecr_image_name_alias'] = str(ecr_image_name_alias) - pulumi.log.info(msg=f'Tagged and pushed image alias [{image_name}] to [{ecr_image_name_alias}]', + repo_image_name_alias = 
self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=image_name, + image_tag=image_tag_alias) + outputs['repo_image_name_alias'] = str(repo_image_name_alias) + pulumi.log.info(msg=f'Tagged and pushed image alias [{image_name}] to [{repo_image_name_alias}]', resource=self.resource) id_ = str(uuid.uuid4()) @@ -175,28 +148,12 @@ def create(self, props: Any) -> CreateResult: def update(self, _id: str, _olds: Any, _news: Any) -> UpdateResult: repository_url: str = _news['repository_url'] - repository_url_parts = repository_url.split('/') - ecr_host = repository_url_parts[0] - ecr_path = repository_url_parts[1] - ecr_docker_api_url = f'https://{ecr_host}/v2/{ecr_path}' - - def check_if_id_matches_tag_in_ecr(image_tag: str) -> bool: - pulumi.log.debug(f'Querying for latest image id: {ecr_docker_api_url}/manifests/{image_tag}') - with requests.get(f'{ecr_docker_api_url}/manifests/{image_tag}', - auth=(_news['repository_username'], _news['repository_password'])) as response: - json_response = response.json() - if 'config' in json_response and 'digest' in json_response['config']: - remote_image_id = json_response['config']['digest'] - return remote_image_id != _news['image_id'] - else: - return True - - image_tag_outdated = check_if_id_matches_tag_in_ecr(_news['image_tag']) + image_tag_outdated = self.check_if_id_matches_tag_func(_news['image_tag'], _news['image_id']) has_tag_alias = 'image_tag_alias' in _news and _news['image_tag_alias'] if has_tag_alias: - image_tag_alias_outdated = check_if_id_matches_tag_in_ecr(_news['image_tag_alias']) + image_tag_alias_outdated = self.check_if_id_matches_tag_func(_news['image_tag_alias'], _news['image_id']) else: image_tag_alias_outdated = False @@ -205,37 +162,33 @@ def check_if_id_matches_tag_in_ecr(image_tag: str) -> bool: pulumi.log.info(msg=f"Tags [{_news['image_tag']}] and [{_news['image_tag_alias']}] " f"are up to date", resource=self.resource) else: - pulumi.log.info(msg=f"Tag [{_news['image_tag']}] is up to date", resource=self.resource) + pulumi.log.info(msg=f"Tag [{_news['image_tag']}] on remote registry is up to date", resource=self.resource) return UpdateResult() outputs = { - 'ecr_image_id': _news['image_id'] + 'repo_image_id': _news['image_id'] } - self.login_to_ecr_repo(repository_url=repository_url, - username=_news['repository_username'], - password=_news['repository_password']) - if image_tag_outdated: - ecr_image_name = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=_news['image_name'], - image_tag=_news['image_tag']) - pulumi.log.info(msg=f"Tagged and pushed image [{_news['image_name']}] to [{ecr_image_name}]", + repo_image_name = self.push_image_to_repo(repository_url=repository_url, + # source image ref + image_name=_news['image_name'], + image_tag=_news['image_tag']) + pulumi.log.info(msg=f"Tagged and pushed image [{_news['image_name']}] to [{repo_image_name}]", resource=self.resource) - outputs['ecr_image_name'] = str(ecr_image_name) + outputs['repo_image_name'] = str(repo_image_name) else: pulumi.log.info(msg=f"Tag [{_news['image_tag']}] is up to date", resource=self.resource) if has_tag_alias and image_tag_alias_outdated: - ecr_image_name_alias = self.push_image_to_repo(repository_url=repository_url, - # source image ref - image_name=_news['image_name'], - image_tag=_news['image_tag_alias']) - pulumi.log.info(msg=f"Tagged and pushed image alias [{_news['image_name']}] to [{ecr_image_name_alias}]", + repo_image_name_alias = 
self.push_image_to_repo(repository_url=repository_url,
+                                                            # source image ref
+                                                            image_name=_news['image_name'],
+                                                            image_tag=_news['image_tag_alias'])
+            pulumi.log.info(msg=f"Tagged and pushed image alias [{_news['image_name']}] to [{repo_image_name_alias}]",
                             resource=self.resource)
-            outputs['ecr_image_name_alias'] = str(ecr_image_name_alias)
+            outputs['repo_image_name_alias'] = str(repo_image_name_alias)
         elif has_tag_alias:
             pulumi.log.info(msg=f"Tag alias [{_news['image_tag_alias']}] is up to date", resource=self.resource)
@@ -246,11 +199,12 @@ class RepositoryPush(Resource):
     def __init__(self,
                  name: str,
                  repository_args: pulumi.InputType['RepositoryPushArgs'],
+                 check_if_id_matches_tag_func: Optional[Callable[[str, str], bool]] = None,
                  opts: Optional[pulumi.ResourceOptions] = None) -> None:
         props = dict()
         props.update(repository_args)
 
-        def build_ecr_image_alias(args):
+        def build_repo_image_alias(args):
             repository_url = args[0]
             image_tag = args[1]
 
@@ -259,18 +213,21 @@ def build_ecr_image_alias(args):
         else:
             return f'{repository_url}:{image_tag}'
 
-        if 'ecr_image_name' not in props:
-            props['ecr_image_name'] = pulumi.Output.concat(repository_args.repository_url,
-                                                           ':',
-                                                           repository_args.image_tag)
-        if 'ecr_image_name_alias' not in props and repository_args.image_tag_alias:
-            ecr_image_alias_args = pulumi.Output.all(repository_args.repository_url,
-                                                     repository_args.image_tag_alias)
-            props['ecr_image_name_alias'] = ecr_image_alias_args.apply(build_ecr_image_alias)
-        if 'ecr_image_id' not in props:
-            props['ecr_image_id'] = repository_args.image_id
+        if 'repo_image_name' not in props:
+            props['repo_image_name'] = pulumi.Output.concat(repository_args.repository_url,
+                                                            ':',
+                                                            repository_args.image_tag)
+        if 'repo_image_name_alias' not in props and repository_args.image_tag_alias:
+            repo_image_alias_args = pulumi.Output.all(repository_args.repository_url,
+                                                      repository_args.image_tag_alias)
+            props['repo_image_name_alias'] = repo_image_alias_args.apply(build_repo_image_alias)
+        if 'repo_image_id' not in props:
+            props['repo_image_id'] = repository_args.image_id
 
         if not opts:
            opts = pulumi.ResourceOptions()
 
-        super().__init__(name=name, opts=opts, props=props, provider=RepositoryPushProvider(resource=self))
+        provider = RepositoryPushProvider(resource=self,
+                                          check_if_id_matches_tag_func=check_if_id_matches_tag_func)
+
+        super().__init__(name=name, opts=opts, props=props, provider=provider)
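The refactored `kic-image-push` project above hinges on a small plug-in convention: each module in `registries/` (`aws.py`, `do.py`, `lke.py`) subclasses `ContainerRegistry` and exports a module-level `CLASS`, which `__main__.py` resolves from the `kubernetes:infra_type` config value. Here is a minimal sketch of that dispatch, assuming the `registries` package is on the import path:

```python
# Sketch of the CLASS plug-in convention used by kic-image-push/__main__.py.
# Each registries/<infra_type>.py ends with `CLASS = <registry class>`, so
# resolving a provider is a dynamic import plus one attribute lookup.
import importlib


def resolve_registry_class(infra_type: str):
    """Map an infra_type config value ('aws', 'do', 'lke') to its registry class."""
    module = importlib.import_module(f'registries.{infra_type.lower()}')
    return module.CLASS


# resolve_registry_class('aws') -> ElasticContainerRegistry, whose .instance()
# returns an Output[ContainerRegistry] that push_to_container_registry consumes.
```

Supporting a new registry therefore only requires dropping one module into `registries/` that subclasses `ContainerRegistry` and assigns `CLASS`; neither `__main__.py` nor the now registry-agnostic `repository_push.py` needs to change.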