
[QTI-308] Add Enos integration tests (#16760)

Add our initial Enos integration tests to Vault. The Enos scenario
workflow will automatically be run on branches that are created from the
`hashicorp/vault` repository. See the README.md in ./enos for a full description
of how to compose and execute scenarios locally.

* Simplify the metadata build workflow jobs
* Automatically determine the Go version from go.mod
* Add formatting check for Enos integration scenarios
* Add Enos smoke and upgrade integration scenarios
  * Add Consul backend matrix support
  * Add Ubuntu and RHEL distro support
  * Add Vault edition support
  * Add Vault architecture support
  * Add Vault builder support
  * Add Vault Shamir and awskms auto-unseal support
  * Add Raft storage support
  * Add Raft auto-join voter verification
  * Add Vault version verification
  * Add Vault seal verification
  * Add in-place upgrade support for all variants
* Add four scenario variants to CI. These test a maximal distribution of
  the aforementioned variants with the `linux/amd64` Vault install
  bundle.
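
For reference, CI invokes each matrix entry as a single Enos command, roughly:

    enos scenario run --timeout 60m0s --chdir ./enos smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms arch:amd64 builder:crt edition:oss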

Signed-off-by: Ryan Cragun <me@ryan.ec>
Co-authored-by: Rebecca Willett <rwillett@hashicorp.com>
Co-authored-by: Jaymala <jaymalasinha@gmail.com>
Ryan Cragun 2022-08-23 13:53:41 -06:00 committed by GitHub
parent bab1063593
commit 78e0656b55
35 changed files with 2157 additions and 48 deletions

.github/workflows/build.yml

@ -12,35 +12,31 @@ env:
GO_TAGS: "ui"
jobs:
get-product-version:
product-metadata:
runs-on: ubuntu-latest
outputs:
product-version: ${{ steps.get-product-version.outputs.product-version }}
product-base-version: ${{ steps.get-product-version.outputs.product-base-version }}
build-date: ${{ steps.get-build-date.outputs.build-date }}
steps:
- uses: actions/checkout@v2
- name: get product version
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- name: Get product version
id: get-product-version
run: |
make version
IFS="-" read BASE_VERSION _other <<< "$(make version)"
echo "::set-output name=product-version::$(make version)"
echo "::set-output name=product-base-version::${BASE_VERSION}"
get-build-date:
runs-on: ubuntu-latest
outputs:
build-date: ${{ steps.get-build-date.outputs.build-date }}
steps:
- uses: actions/checkout@v2
- name: get build date
- name: Get build date
id: get-build-date
run: |
make build-date
echo "::set-output name=build-date::$(make build-date)"
generate-metadata-file:
needs: get-product-version
needs: product-metadata
runs-on: ubuntu-latest
outputs:
filepath: ${{ steps.generate-metadata-file.outputs.filepath }}
@ -51,22 +47,20 @@ jobs:
id: generate-metadata-file
uses: hashicorp/actions-generate-metadata@v1
with:
version: ${{ needs.get-product-version.outputs.product-version }}
version: ${{ needs.product-metadata.outputs.product-version }}
product: ${{ env.PKG_NAME }}
- uses: actions/upload-artifact@v2
with:
name: metadata.json
path: ${{ steps.generate-metadata-file.outputs.filepath }}
build-other:
needs: [ get-product-version, get-build-date ]
needs: [ product-metadata ]
runs-on: ubuntu-latest
strategy:
matrix:
goos: [ freebsd, windows, netbsd, openbsd, solaris ]
goarch: [ "386", "amd64", "arm" ]
go: [ "1.18.4" ]
exclude:
- goos: solaris
goarch: 386
@ -76,14 +70,14 @@ jobs:
goarch: arm
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
go-version-file: go.mod
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
@ -104,32 +98,31 @@ jobs:
CGO_ENABLED: 0
run: |
mkdir dist out
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
build-linux:
needs: [ get-product-version, get-build-date ]
needs: [ product-metadata ]
runs-on: ubuntu-latest
strategy:
matrix:
goos: [linux]
goarch: ["arm", "arm64", "386", "amd64"]
go: ["1.18.4"]
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
go-version-file: go.mod
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
@ -150,12 +143,12 @@ jobs:
CGO_ENABLED: 0
run: |
mkdir dist out
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
- name: Package
uses: hashicorp/actions-packaging-linux@v1
@ -163,7 +156,7 @@ jobs:
name: ${{ github.event.repository.name }}
description: "Vault is a tool for secrets management, encryption as a service, and privileged access management."
arch: ${{ matrix.goarch }}
version: ${{ needs.get-product-version.outputs.product-version }}
version: ${{ needs.product-metadata.outputs.product-version }}
maintainer: "HashiCorp"
homepage: "https://github.com/hashicorp/vault"
license: "MPL-2.0"
@ -189,22 +182,21 @@ jobs:
path: out/${{ env.DEB_PACKAGE }}
build-darwin:
needs: [ get-product-version, get-build-date ]
needs: [ product-metadata ]
runs-on: macos-latest
strategy:
matrix:
goos: [ darwin ]
goarch: [ "amd64", "arm64" ]
go: [ "1.18.4" ]
fail-fast: true
name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
name: Go ${{ matrix.goos }} ${{ matrix.goarch }} build
steps:
- uses: actions/checkout@v2
- name: Setup go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
go-version-file: go.mod
- name: Setup node and yarn
uses: actions/setup-node@v2
with:
@ -226,17 +218,17 @@ jobs:
CGO_ENABLED: 0
run: |
mkdir dist out
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.get-product-version.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
GO_TAGS="${{ env.GO_TAGS }}" VAULT_VERSION=${{ needs.product-metadata.outputs.product-base-version }} VAULT_REVISION="$(git rev-parse HEAD)" VAULT_BUILD_DATE="${{ needs.get-build-date.outputs.build-date }}" make build
zip -r -j out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip dist/
- uses: actions/upload-artifact@v2
with:
name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
path: out/${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_${{ matrix.goos }}_${{ matrix.goarch }}.zip
build-docker:
name: Docker ${{ matrix.arch }} build
needs:
- get-product-version
- product-metadata
- build-linux
runs-on: ubuntu-latest
strategy:
@ -244,7 +236,7 @@ jobs:
arch: ["arm", "arm64", "386", "amd64"]
env:
repo: ${{github.event.repository.name}}
version: ${{needs.get-product-version.outputs.product-version}}
version: ${{needs.product-metadata.outputs.product-version}}
steps:
- uses: actions/checkout@v2
- name: Docker Build (Action)
@ -253,7 +245,7 @@ jobs:
version: ${{env.version}}
target: default
arch: ${{matrix.arch}}
zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_linux_${{ matrix.arch }}.zip
tags: |
docker.io/hashicorp/${{env.repo}}:${{env.version}}
public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}
@ -261,7 +253,7 @@ jobs:
build-ubi:
name: Red Hat UBI ${{ matrix.arch }} build
needs:
- get-product-version
- product-metadata
- build-linux
runs-on: ubuntu-latest
strategy:
@ -269,7 +261,7 @@ jobs:
arch: ["amd64"]
env:
repo: ${{github.event.repository.name}}
version: ${{needs.get-product-version.outputs.product-version}}
version: ${{needs.product-metadata.outputs.product-version}}
steps:
- uses: actions/checkout@v2
- name: Docker Build (Action)
@ -278,5 +270,22 @@ jobs:
version: ${{env.version}}
target: ubi
arch: ${{matrix.arch}}
zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.product-version }}_linux_${{ matrix.arch }}.zip
redhat_tag: scan.connect.redhat.com/ospid-f0a92725-d8c6-4023-9a87-ba785b94c3fd/${{env.repo}}:${{env.version}}-ubi
enos:
name: Enos
# Only run the Enos workflow against branches that are created from the
# hashicorp/vault repository. This has the effect of limiting execution of
# Enos scenarios to branches that originate from authors who have write
# access to the hashicorp/vault repository. This is required because GitHub
# Actions will not populate the required secrets for branches created by
# outside contributors, in order to protect the integrity of the secrets.
if: "! github.event.pull_request.head.repo.fork"
needs:
- product-metadata
- build-linux
uses: ./.github/workflows/enos-run.yml
with:
artifact-name: "vault_${{ needs.product-metadata.outputs.product-version }}_linux_amd64.zip"
secrets: inherit

.github/workflows/enos-fmt.yml

@ -0,0 +1,27 @@
---
name: enos_fmt
on:
pull_request:
paths:
- enos/**
jobs:
fmt_check:
# Only run this workflow on pull requests from hashicorp/vault branches
# as we need secrets to install enos.
if: "! github.event.pull_request.head.repo.fork"
runs-on: ubuntu-latest
env:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v3
- uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- uses: hashicorp/action-setup-enos@v1
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: "check formatting"
working-directory: ./enos
run: make check-fmt

.github/workflows/enos-run.yml

@ -0,0 +1,114 @@
---
name: enos
on:
# Only trigger this workflow using workflow_call. It assumes that secrets are
# being inherited from the caller.
workflow_call:
inputs:
artifact-name:
required: true
type: string
env:
PKG_NAME: vault
jobs:
enos:
name: Integration
strategy:
matrix:
# Run four scenarios to get a maximal distribution of variants in as
# few jobs as possible.
include:
- scenario: "smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms"
aws_region: "us-west-1"
- scenario: "smoke backend:raft consul_version:1.12.3 distro:ubuntu seal:shamir"
aws_region: "us-west-2"
- scenario: "upgrade backend:raft consul_version:1.11.7 distro:rhel seal:shamir"
aws_region: "us-west-1"
- scenario: "upgrade backend:consul consul_version:1.11.7 distro:rhel seal:awskms"
aws_region: "us-west-2"
runs-on: ubuntu-latest
env:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up Terraform
uses: hashicorp/setup-terraform@v2
with:
# the Terraform wrapper will break Terraform execution in Enos because
# it changes the output to text when we expect it to be JSON.
terraform_wrapper: false
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ matrix.aws_region }}
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
role-skip-session-tagging: true
role-duration-seconds: 3600
- name: Set up Enos
uses: hashicorp/action-setup-enos@v1
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Set up AWS SSH private key
run: |
mkdir -p ./enos/support
echo "${{ secrets.ENOS_CI_SSH_KEY }}" > ./enos/support/private_key.pem
chmod 600 ./enos/support/private_key.pem
- name: Download Linux AMD64 Vault bundle
id: download
uses: actions/download-artifact@v3
with:
name: ${{ inputs.artifact-name }}
path: ./enos/support/downloads
- name: Prepare for scenario execution
run: |
unzip ${{steps.download.outputs.download-path}}/*.zip -d enos/support
mv ${{steps.download.outputs.download-path}}/*.zip enos/support/vault.zip
mkdir -p enos/support/terraform-plugin-cache
- name: Run Enos scenario
id: run
# Continue once and retry to handle occasional blips when creating
# infrastructure.
continue-on-error: true
env:
ENOS_VAR_aws_region: ${{ matrix.aws_region }}
ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key
ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache
ENOS_VAR_vault_bundle_path: ./support/vault.zip
run: |
enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} arch:amd64 builder:crt edition:oss
- name: Retry Enos scenario
id: run_retry
if: steps.run.outcome == 'failure'
env:
ENOS_VAR_aws_region: ${{ matrix.aws_region }}
ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key
ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache
ENOS_VAR_vault_bundle_path: ./support/vault.zip
run: |
enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} arch:amd64 builder:crt edition:oss
- name: Destroy Enos scenario
if: ${{ always() }}
env:
ENOS_VAR_aws_region: ${{ matrix.aws_region }}
ENOS_VAR_aws_ssh_keypair_name: enos-ci-ssh-key
ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache
ENOS_VAR_vault_bundle_path: ./support/vault.zip
run: |
enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} arch:amd64 builder:crt edition:oss
- name: Output debug information on failure
if: ${{ failure() }}
run: |
env
find ./enos -name "scenario.tf" -exec cat {} \;

.gitignore

@ -56,7 +56,11 @@ Vagrantfile
!.release/linux/package/etc/vault.d/vault.hcl
!command/agent/config/test-fixtures/*.hcl
!command/server/test-fixtures/**/*.hcl
!enos/*.hcl
# Enos
enos/.enos
enos/support
.DS_Store
.idea

enos/Makefile

@ -0,0 +1,24 @@
.PHONY: default
default: check-fmt
.PHONY: check-fmt
check-fmt: check-fmt-enos check-fmt-modules
.PHONY: fmt
fmt: fmt-enos fmt-modules
.PHONY: check-fmt-enos
check-fmt-enos:
enos fmt --check --diff .
.PHONY: fmt-enos
fmt-enos:
enos fmt .
.PHONY: check-fmt-modules
check-fmt-modules:
terraform fmt -check -diff -recursive ./modules
.PHONY: fmt-modules
fmt-modules:
terraform fmt -diff -recursive ./modules

enos/README.md

@ -0,0 +1,142 @@
# Enos
Enos is a quality testing framework that allows composing and executing quality
requirement scenarios as code. For Vault, it is currently used to perform
infrastructure integration testing using the artifacts that are created as part
of the `build` workflow. While intended to be executed via Github Actions using
the results of the `build` workflow, scenarios are also executable from a developer
machine that has the requisite dependencies and configuration.
Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs)
for further information regarding installation, execution or composing Enos scenarios.
## When to use Enos
Determining whether to use `vault.NewTestCluster()` or Enos for testing a feature
or scenario is ultimately up to the author. Sometimes one, the other, or both
might be appropriate depending on the requirements. Generally, `vault.NewTestCluster()`
is going to give you faster feedback and execution time, whereas Enos is going
to give you a real-world execution and validation of the requirement. Consider
the following cases as examples of when one might opt for an Enos scenario:
* The feature requires third-party integrations, whether that be networked
dependencies like a real Consul backend, a real KMS key to test awskms
auto-unseal, auto-join discovery using AWS tags, or cloud hardware KMSs.
* The feature might behave differently under multiple configuration variants
and therefore should be tested under each combination, e.g. auto-unseal and
manual Shamir unseal, or replication in HA mode with integrated storage or
Consul storage.
* The scenario requires coordination between multiple targets. For example,
consider the complex lifecycle event of migrating the seal type or storage,
or manually triggering a raft disaster scenario by partitioning the network
between the leader and follower nodes. Or perhaps an auto-pilot upgrade between
a stable version of Vault and our candidate version.
* The scenario has specific deployment strategy requirements. For example,
if we want to add a regression test for an issue that only arises when the
software is deployed in a certain manner.
* The scenario needs to use actual build artifacts that will be promoted
through the pipeline.
## Requirements
* AWS access. HashiCorp Vault developers should use Doormat.
* Terraform >= 1.2
* Enos >= v0.0.10. You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md).
* Access to the QTI org in Terraform Cloud. HashiCorp Vault developers can
access a shared token in 1Password or request their own in #team-quality on
Slack.
* An SSH keypair in the AWS region in which you wish to run the scenario. You can use
Doormat to log in to the AWS console to create or upload an existing keypair.
* A Vault install bundle downloaded from releases.hashicorp.com or Artifactory
when using the `builder:crt` variants. When using the `builder:local` variants
Enos will build a Vault bundle from the current branch for you.
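For the `builder:crt` variants, a pre-built bundle can be fetched from
releases.hashicorp.com, for example (substitute the desired version):
```bash
curl -fO https://releases.hashicorp.com/vault/1.11.2/vault_1.11.2_linux_amd64.zip
```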
## Scenario Variables
In CI, each scenario is executed via Github Actions and has been configured using
environment variable inputs that follow the `ENOS_VAR_varname` pattern.
For local execution you can specify all the required variables using environment
variables, or you can update `enos.vars.hcl` with values and uncomment the lines.
Variables that are required:
* `aws_ssh_keypair_name`
* `aws_ssh_private_key_path`
* `tfc_api_token`
* `vault_bundle_path`
* `vault_license_path` (only required for non-OSS editions)
See [enos.vars.hcl](./enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl)
for further descriptions of the variables.
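For example, a minimal local configuration using environment variables might look
like this (the values are illustrative placeholders):
```bash
export ENOS_VAR_aws_ssh_keypair_name="enos-ci-ssh-key"
export ENOS_VAR_aws_ssh_private_key_path="./support/private_key.pem"
export ENOS_VAR_tfc_api_token="XXXXX.atlasv1.XXXXX"
export ENOS_VAR_vault_bundle_path="./dist/vault.zip"
# Only required for non-OSS editions
# export ENOS_VAR_vault_license_path="./support/vault.hclic"
```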
## Executing Scenarios
From the `enos` directory:
```bash
# List all available scenarios
enos scenario list
# Run the smoke or upgrade scenario with an artifact that is built locally. Make sure
# the local machine has been configured as detailed in the requirements
# section. This will execute the scenario and clean up any resources if successful.
enos scenario run smoke builder:local
enos scenario run upgrade builder:local
# To run the same scenario variants that are run in CI, refer to the scenarios listed
# in .github/workflows/enos-run.yml under `jobs.enos.strategy.matrix.include`,
# adding `builder:local` to run locally.
enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms builder:local arch:amd64 edition:oss
# Launch an individual scenario but leave infrastructure up after execution
enos scenario launch smoke builder:local
# Check an individual scenario for validity. This is useful during scenario
# authoring and debugging.
enos scenario validate smoke builder:local
# If you've run the tests and desire to see the outputs, such as the URL or
# credentials, you can run the output command to see them. Please note that
# after "run" or destroy there will be no "outputs" as the infrastructure
# will have been destroyed and state cleared.
enos scenario output smoke builder:local
# Explicitly destroy all existing infrastructure
enos scenario destroy smoke builder:local
```
Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs)
for further information regarding installation, execution or composing scenarios.
# Scenarios
There are currently two scenarios: `smoke` and `upgrade`. Both begin by building Vault
as specified by the selected `builder` variant (see Variants section below for more
information).
## Smoke
The [`smoke` scenario](./enos-scenario-smoke.hcl) creates a Vault cluster using
the version from the current branch (either in CI or locally), with the backend
specified by the `backend` variant (`raft` or `consul`). Next, it unseals with the
appropriate method (`awskms` or `shamir`) and performs different verifications
depending on the backend and seal type.
## Upgrade
The [`upgrade` scenario](./enos-scenario-upgrade.hcl) creates a Vault cluster using
the version specified in `vault_upgrade_initial_release`, with the backend specified
by the `backend` variant (`raft` or `consul`). Next, it upgrades the Vault binary
to the one determined by the `builder` variant. After the upgrade, it verifies that
the cluster is at the desired version and performs additional verifications.
## Autopilot
The [`autopilot` scenario](./enos-scenario-autopilot.hcl) creates a Vault cluster using
the version specified in `vault_upgrade_initial_release`. Next, it creates additional
nodes with the candidate version of Vault as determined by the `builder` variant.
The module uses AWS auto-join to handle discovery and unseals with auto-unseal
or Shamir depending on the `seal` variant. After the new nodes have joined and been
unsealed, it waits for Autopilot to upgrade the new nodes and demote the old nodes.
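Because the `autopilot` matrix only includes the `ent` edition, running it locally
also requires an Enterprise license. A sketch, assuming the license file has been
copied to `./support/vault.hclic` (the path the scenario reads):
```bash
enos scenario run autopilot builder:local arch:amd64 distro:ubuntu seal:awskms edition:ent
```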
# Variants
Both scenarios support a matrix of variants. In order to achieve broad coverage while
keeping test run time reasonable, the variants executed by the `enos-run` GitHub
Actions workflow are tailored to maximize variant distribution per scenario.
## `builder:crt`
This variant is designed for use in Github Actions. The `enos-run.yml` workflow
downloads the artifact built by the `build.yml` workflow, unzips it, and sets the
`vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary.
## `builder:local`
This variant is for running the Enos scenario locally. It builds the Vault bundle
from the current branch, placing the bundle at the `vault_bundle_path` and the
unzipped Vault binary at the `vault_local_binary_path`.

enos/enos-modules.hcl

@ -0,0 +1,96 @@
module "autopilot_upgrade_storageconfig" {
source = "./modules/autopilot_upgrade_storageconfig"
}
module "az_finder" {
source = "./modules/az_finder"
}
module "backend_consul" {
source = "app.terraform.io/hashicorp-qti/aws-consul/enos"
project_name = var.project_name
environment = "ci"
common_tags = var.tags
ssh_aws_keypair = var.aws_ssh_keypair_name
# Set this to a real license path if using an Enterprise edition of Consul
consul_license = var.backend_license_path == null ? "none" : file(abspath(var.backend_license_path))
}
module "backend_raft" {
source = "./modules/backend_raft"
}
module "build_crt" {
source = "./modules/build_crt"
}
module "build_local" {
source = "./modules/build_local"
}
module "create_vpc" {
source = "app.terraform.io/hashicorp-qti/aws-infra/enos"
project_name = var.project_name
environment = "ci"
common_tags = var.tags
ami_architectures = ["amd64", "arm64"]
}
module "get_local_version_from_make" {
source = "./modules/get_local_version_from_make"
}
module "read_license" {
source = "./modules/read_license"
}
module "vault_cluster" {
source = "app.terraform.io/hashicorp-qti/aws-vault/enos"
# source = "../../terraform-enos-aws-vault"
common_tags = var.tags
environment = "ci"
instance_count = var.vault_instance_count
project_name = var.project_name
ssh_aws_keypair = var.aws_ssh_keypair_name
vault_install_dir = var.vault_install_dir
}
module "vault_upgrade" {
source = "./modules/vault_upgrade"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_verify_autopilot" {
source = "./modules/vault_verify_autopilot"
vault_autopilot_upgrade_status = "await-server-removal"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_verify_raft_auto_join_voter" {
source = "./modules/vault_verify_raft_auto_join_voter"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_verify_unsealed" {
source = "./modules/vault_verify_unsealed"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_verify_version" {
source = "./modules/vault_verify_version"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}

enos/enos-providers.hcl

@ -0,0 +1,21 @@
provider "aws" "default" {
region = var.aws_region
}
provider "enos" "rhel" {
transport = {
ssh = {
user = "ec2-user"
private_key_path = abspath(var.aws_ssh_private_key_path)
}
}
}
provider "enos" "ubuntu" {
transport = {
ssh = {
user = "ubuntu"
private_key_path = abspath(var.aws_ssh_private_key_path)
}
}
}

enos/enos-scenario-autopilot.hcl

@ -0,0 +1,252 @@
scenario "autopilot" {
matrix {
arch = ["amd64", "arm64"]
builder = ["local", "crt"]
distro = ["ubuntu", "rhel"]
edition = ["ent"]
seal = ["awskms", "shamir"]
}
terraform_cli = terraform_cli.default
terraform = terraform.default
providers = [
provider.aws.default,
provider.enos.ubuntu,
provider.enos.rhel
]
locals {
build_tags = {
"ent" = ["enterprise", "ent"]
}
bundle_path = abspath(var.vault_bundle_path)
dependencies_to_install = ["jq"]
enos_provider = {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
tags = merge({
"Project Name" : var.project_name
"Project" : "Enos",
"Environment" : "ci"
}, var.tags)
vault_instance_types = {
amd64 = "t3a.small"
arm64 = "t4g.small"
}
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
}
step "build_vault" {
module = matrix.builder == "crt" ? module.build_crt : module.build_local
variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition]
bundle_path = local.bundle_path
goarch = matrix.arch
goos = "linux"
}
}
step "find_azs" {
module = module.az_finder
variables {
instance_type = [
local.vault_instance_type
]
}
}
step "create_vpc" {
module = module.create_vpc
variables {
ami_architectures = [matrix.arch]
}
}
step "read_license" {
module = module.read_license
variables {
file_name = abspath(joinpath(path.root, "./support/vault.hclic"))
}
}
step "create_vault_cluster" {
module = module.vault_cluster
depends_on = [
step.create_vpc,
step.build_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
common_tags = local.tags
dependencies_to_install = local.dependencies_to_install
instance_type = local.vault_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
storage_backend = "raft"
storage_backend_addl_config = {
autopilot_upgrade_version = var.vault_autopilot_initial_release.version
}
unseal_method = matrix.seal
vault_release = var.vault_autopilot_initial_release
vault_license = step.read_license.license
vpc_id = step.create_vpc.vpc_id
}
}
step "get_local_version" {
module = module.get_local_version_from_make
}
step "create_autopilot_upgrade_storageconfig" {
module = module.autopilot_upgrade_storageconfig
depends_on = [step.get_local_version]
variables {
vault_product_version = step.get_local_version.version
}
}
step "upgrade_vault_cluster_with_autopilot" {
module = module.vault_cluster
depends_on = [
step.create_vault_cluster,
step.create_autopilot_upgrade_storageconfig,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
common_tags = local.tags
dependencies_to_install = local.dependencies_to_install
instance_type = local.vault_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
storage_backend = "raft"
storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config
unseal_method = matrix.seal
vault_cluster_tag = step.create_vault_cluster.vault_cluster_tag
vault_init = false
vault_license = step.read_license.license
vault_local_artifact_path = local.bundle_path
vault_node_prefix = "upgrade_node"
vault_root_token = step.create_vault_cluster.vault_root_token
vault_unseal_when_no_init = matrix.seal == "shamir"
vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null
vpc_id = step.create_vpc.vpc_id
}
}
step "verify_autopilot_upgraded_vault_cluster" {
module = module.vault_verify_autopilot
depends_on = [step.upgrade_vault_cluster_with_autopilot]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_autopilot_upgrade_version = step.get_local_version.version
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [
step.create_vault_cluster,
step.upgrade_vault_cluster_with_autopilot,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
step "verify_raft_auto_join_voter" {
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_vault_cluster,
step.upgrade_vault_cluster_with_autopilot,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
output "vault_cluster_instance_ids" {
description = "The Vault cluster instance IDs"
value = step.create_vault_cluster.instance_ids
}
output "vault_cluster_pub_ips" {
description = "The Vault cluster public IPs"
value = step.create_vault_cluster.instance_public_ips
}
output "vault_cluster_priv_ips" {
description = "The Vault cluster private IPs"
value = step.create_vault_cluster.instance_private_ips
}
output "vault_cluster_key_id" {
description = "The Vault cluster Key ID"
value = step.create_vault_cluster.key_id
}
output "vault_cluster_root_token" {
description = "The Vault cluster root token"
value = step.create_vault_cluster.vault_root_token
}
output "vault_cluster_unseal_keys_b64" {
description = "The Vault cluster unseal keys"
value = step.create_vault_cluster.vault_unseal_keys_b64
}
output "vault_cluster_unseal_keys_hex" {
description = "The Vault cluster unseal keys hex"
value = step.create_vault_cluster.vault_unseal_keys_hex
}
output "vault_cluster_tag" {
description = "The Vault cluster tag"
value = step.create_vault_cluster.vault_cluster_tag
}
output "upgraded_vault_cluster_instance_ids" {
description = "The Vault cluster instance IDs"
value = step.upgrade_vault_cluster_with_autopilot.instance_ids
}
output "upgraded_vault_cluster_pub_ips" {
description = "The Vault cluster public IPs"
value = step.upgrade_vault_cluster_with_autopilot.instance_public_ips
}
output "upgraded_vault_cluster_priv_ips" {
description = "The Vault cluster private IPs"
value = step.upgrade_vault_cluster_with_autopilot.instance_private_ips
}
}

enos/enos-scenario-smoke.hcl

@ -0,0 +1,206 @@
scenario "smoke" {
matrix {
arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
builder = ["local", "crt"]
consul_version = ["1.12.3", "1.11.7", "1.10.12"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent"]
seal = ["awskms", "shamir"]
}
terraform_cli = terraform_cli.default
terraform = terraform.default
providers = [
provider.aws.default,
provider.enos.ubuntu,
provider.enos.rhel
]
locals {
build_tags = {
"oss" = ["ui"]
"ent" = ["enterprise", "ent"]
}
bundle_path = abspath(var.vault_bundle_path)
dependencies_to_install = ["jq"]
enos_provider = {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
tags = merge({
"Project Name" : var.project_name
"Project" : "Enos",
"Environment" : "ci"
}, var.tags)
vault_instance_types = {
amd64 = "t3a.small"
arm64 = "t4g.small"
}
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
}
step "build_vault" {
module = matrix.builder == "crt" ? module.build_crt : module.build_local
variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition]
bundle_path = local.bundle_path
goarch = matrix.arch
goos = "linux"
}
}
step "find_azs" {
module = module.az_finder
variables {
instance_type = [
var.backend_instance_type,
local.vault_instance_type
]
}
}
step "create_vpc" {
module = module.create_vpc
variables {
ami_architectures = [matrix.arch]
availability_zones = step.find_azs.availability_zones
common_tags = local.tags
}
}
step "read_license" {
skip_step = matrix.edition == "oss"
module = module.read_license
variables {
file_name = abspath(joinpath(path.root, "./support/vault.hclic"))
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vpc,
step.build_vault,
]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch]
common_tags = local.tags
consul_release = {
edition = var.backend_edition
version = matrix.consul_version
}
instance_type = var.backend_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
vpc_id = step.create_vpc.vpc_id
}
}
step "create_vault_cluster" {
module = module.vault_cluster
depends_on = [
step.create_vpc,
step.create_backend_cluster,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
common_tags = local.tags
consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag
dependencies_to_install = local.dependencies_to_install
instance_type = local.vault_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
storage_backend = matrix.backend
unseal_method = matrix.seal
vault_local_artifact_path = local.bundle_path
vault_license = matrix.edition != "oss" ? step.read_license.license : null
vpc_id = step.create_vpc.vpc_id
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [
step.create_vault_cluster,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_vault_cluster,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
output "vault_cluster_instance_ids" {
description = "The Vault cluster instance IDs"
value = step.create_vault_cluster.instance_ids
}
output "vault_cluster_pub_ips" {
description = "The Vault cluster public IPs"
value = step.create_vault_cluster.instance_public_ips
}
output "vault_cluster_priv_ips" {
description = "The Vault cluster private IPs"
value = step.create_vault_cluster.instance_private_ips
}
output "vault_cluster_key_id" {
description = "The Vault cluster Key ID"
value = step.create_vault_cluster.key_id
}
output "vault_cluster_root_token" {
description = "The Vault cluster root token"
value = step.create_vault_cluster.vault_root_token
}
output "vault_cluster_unseal_keys_b64" {
description = "The Vault cluster unseal keys"
value = step.create_vault_cluster.vault_unseal_keys_b64
}
output "vault_cluster_unseal_keys_hex" {
description = "The Vault cluster unseal keys hex"
value = step.create_vault_cluster.vault_unseal_keys_hex
}
output "vault_cluster_tag" {
description = "The Vault cluster tag"
value = step.create_vault_cluster.vault_cluster_tag
}
}

enos/enos-scenario-upgrade.hcl

@ -0,0 +1,243 @@
scenario "upgrade" {
matrix {
arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
builder = ["local", "crt"]
consul_version = ["1.12.3", "1.11.7", "1.10.12"]
distro = ["ubuntu", "rhel"]
edition = ["oss", "ent"]
seal = ["awskms", "shamir"]
}
terraform_cli = terraform_cli.default
terraform = terraform.default
providers = [
provider.aws.default,
provider.enos.ubuntu,
provider.enos.rhel
]
locals {
build_tags = {
"oss" = ["ui"]
"ent" = ["enterprise", "ent"]
}
bundle_path = abspath(var.vault_bundle_path)
dependencies_to_install = ["jq"]
enos_provider = {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
tags = merge({
"Project Name" : var.project_name
"Project" : "Enos",
"Environment" : "ci"
}, var.tags)
vault_instance_types = {
amd64 = "t3a.small"
arm64 = "t4g.small"
}
vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch])
}
step "build_vault" {
module = matrix.builder == "crt" ? module.build_crt : module.build_local
variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition]
bundle_path = local.bundle_path
goarch = matrix.arch
goos = "linux"
}
}
step "find_azs" {
module = module.az_finder
variables {
instance_type = [
var.backend_instance_type,
local.vault_instance_type,
]
}
}
step "create_vpc" {
module = module.create_vpc
variables {
ami_architectures = [matrix.arch]
availability_zones = step.find_azs.availability_zones
common_tags = local.tags
}
}
step "read_license" {
skip_step = matrix.edition == "oss"
module = module.read_license
variables {
file_name = abspath(joinpath(path.root, "./support/vault.hclic"))
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vpc,
step.build_vault,
]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch]
common_tags = local.tags
consul_release = {
edition = var.backend_edition
version = matrix.consul_version
}
instance_type = var.backend_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
vpc_id = step.create_vpc.vpc_id
}
}
step "create_vault_cluster" {
module = module.vault_cluster
depends_on = [
step.create_vpc,
step.create_backend_cluster,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch]
common_tags = local.tags
consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag
dependencies_to_install = local.dependencies_to_install
instance_type = local.vault_instance_type
kms_key_arn = step.create_vpc.kms_key_arn
storage_backend = matrix.backend
unseal_method = matrix.seal
vault_release = var.vault_upgrade_initial_release
vault_license = matrix.edition != "oss" ? step.read_license.license : null
vpc_id = step.create_vpc.vpc_id
}
}
step "upgrade_vault" {
module = module.vault_upgrade
depends_on = [
step.create_vault_cluster,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_api_addr = "http://localhost:8200"
vault_instances = step.create_vault_cluster.vault_instances
vault_local_bundle_path = local.bundle_path
vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null
vault_seal_type = matrix.seal
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_backend_cluster,
step.upgrade_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [
step.create_vault_cluster,
step.upgrade_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_backend_cluster,
step.upgrade_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster.vault_instances
vault_root_token = step.create_vault_cluster.vault_root_token
}
}
output "vault_cluster_instance_ids" {
description = "The Vault cluster instance IDs"
value = step.create_vault_cluster.instance_ids
}
output "vault_cluster_pub_ips" {
description = "The Vault cluster public IPs"
value = step.create_vault_cluster.instance_public_ips
}
output "vault_cluster_priv_ips" {
description = "The Vault cluster private IPs"
value = step.create_vault_cluster.instance_private_ips
}
output "vault_cluster_key_id" {
description = "The Vault cluster Key ID"
value = step.create_vault_cluster.key_id
}
output "vault_cluster_root_token" {
description = "The Vault cluster root token"
value = step.create_vault_cluster.vault_root_token
}
output "vault_cluster_unseal_keys_b64" {
description = "The Vault cluster unseal keys"
value = step.create_vault_cluster.vault_unseal_keys_b64
}
output "vault_cluster_unseal_keys_hex" {
description = "The Vault cluster unseal keys hex"
value = step.create_vault_cluster.vault_unseal_keys_hex
}
output "vault_cluster_tag" {
description = "The Vault cluster tag"
value = step.create_vault_cluster.vault_cluster_tag
}
}

enos/enos-terraform.hcl

@ -0,0 +1,30 @@
terraform_cli "default" {
plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null
credentials "app.terraform.io" {
token = var.tfc_api_token
}
/*
provider_installation {
dev_overrides = {
"app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider")
}
direct {}
}
*/
}
terraform "default" {
required_version = ">= 1.2.0"
required_providers {
aws = {
source = "hashicorp/aws"
}
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}

enos/enos-variables.hcl

@ -0,0 +1,111 @@
variable "aws_region" {
description = "The AWS region where we'll create infrastructure"
type = string
default = "us-west-1"
}
variable "aws_ssh_keypair_name" {
description = "The AWS keypair to use for SSH"
type = string
default = "enos-ci-ssh-key"
}
variable "aws_ssh_private_key_path" {
description = "The path to the AWS keypair private key"
type = string
default = "./support/private_key.pem"
}
variable "backend_edition" {
description = "The backend release edition if applicable"
type = string
default = "oss"
}
variable "backend_instance_type" {
description = "The instance type to use for the Vault backend"
type = string
default = "t3.small"
}
variable "backend_license_path" {
description = "The license for the backend if applicable (Consul Enterprise)"
type = string
default = null
}
variable "project_name" {
description = "The description of the project"
type = string
default = "vault-enos-integration"
}
variable "tags" {
description = "Tags that will be applied to infrastructure resources that support tagging"
type = map(string)
default = null
}
variable "terraform_plugin_cache_dir" {
description = "The directory to cache Terraform modules and providers"
type = string
default = null
}
variable "tfc_api_token" {
description = "The Terraform Cloud QTI Organization API token."
type = string
}
variable "vault_autopilot_initial_release" {
description = "The Vault release to deploy before upgrading with autopilot"
default = {
edition = "ent"
version = "1.11.0"
}
}
variable "vault_bundle_path" {
description = "Path to CRT generated or local vault.zip bundle"
type = string
default = "/tmp/vault.zip"
}
variable "vault_install_dir" {
type = string
description = "The directory where the vault binary will be installed"
default = "/opt/vault/bin"
}
variable "vault_instance_type" {
description = "The instance type to use for the Vault backend"
type = string
default = null
}
variable "vault_instance_count" {
description = "How many instances to create for the Vault cluster"
type = number
default = 3
}
variable "vault_license_path" {
description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions"
type = string
default = null
}
variable "vault_local_build_tags" {
description = "The build tags to pass to the Go compiler for builder:local variants"
type = list(string)
default = null
}
variable "vault_upgrade_initial_release" {
description = "The Vault release to deploy before upgrading"
default = {
edition = "oss"
// vault 1.10.5 has a known issue with retry_join.
version = "1.10.4"
}
}

enos/enos.vars.hcl

@ -0,0 +1,48 @@
# aws_region is the AWS region where we'll create infrastructure
# for the smoke scenario
# aws_region = "us-west-1"
# aws_ssh_keypair_name is the AWS keypair to use for SSH
# aws_ssh_keypair_name = "enos-ci-ssh-key"
# aws_ssh_private_key_path is the path to the AWS keypair private key
# aws_ssh_private_key_path = "./support/private_key.pem"
# backend_instance_type is the instance type to use for the Vault backend
# backend_instance_type = "t3.small"
# tags are a map of tags that will be applied to infrastructure resources that
# support tagging.
# tags = { "Project Name" : "Vault", "Something Cool" : "Value" }
# terraform_plugin_cache_dir is the directory to cache Terraform modules and providers.
# It must exist.
# terraform_plugin_cache_dir = "/Users/<user>/.terraform/plugin-cache-dir
# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this
# to download the enos Terraform provider and the enos Terraform modules.
# tfc_api_token = "XXXXX.atlasv1.XXXXX..."
# vault_bundle_path is the path to CRT generated or local vault.zip bundle. When
# using the "builder:local" variant a bundle will be built from the current branch.
# In CI it will use the output of the build workflow.
# vault_bundle_path = "./dist/vault.zip"
# vault_install_dir is the directory where the vault binary will be installed on
# the remote machines.
# vault_install_dir = "/opt/vault/bin"
# vault_local_binary_path is the path of the local binary that we're upgrading to.
# vault_local_binary_path = "./support/vault"
# vault_instance_type is the instance type to use for the Vault cluster
# vault_instance_type = "t3.small"
# vault_instance_count is how many instances to create for the Vault cluster.
# vault_instance_count = 3
# vault_license_path is the path to a valid Vault enterprise edition license.
# This is only required for non-oss editions.
# vault_license_path = "./support/vault.hclic"
# vault_upgrade_initial_release is the Vault release to deploy before upgrading.

enos/modules/autopilot_upgrade_storageconfig/

@ -0,0 +1,7 @@
variable "vault_product_version" {}
output "storage_addl_config" {
value = {
autopilot_upgrade_version = var.vault_product_version
}
}

enos/modules/az_finder/

@ -0,0 +1,25 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
}
}
variable "instance_type" {
default = ["t3.small"]
type = list(string)
}
data "aws_ec2_instance_type_offerings" "infra" {
filter {
name = "instance-type"
values = var.instance_type
}
location_type = "availability-zone"
}
output "availability_zones" {
value = data.aws_ec2_instance_type_offerings.infra.locations
}

enos/modules/backend_raft/

@ -0,0 +1,46 @@
// Shim module to handle the fact that Vault doesn't actually need a backend module
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "ami_id" {
default = null
}
variable "common_tags" {
default = null
}
variable "consul_license" {
default = null
}
variable "consul_release" {
default = null
}
variable "environment" {
default = null
}
variable "instance_type" {
default = null
}
variable "kms_key_arn" {
default = null
}
variable "project_name" {
default = null
}
variable "ssh_aws_keypair" {
default = null
}
variable "vpc_id" {
default = null
}
output "consul_cluster_tag" {
value = null
}

enos/modules/build_crt/

@ -0,0 +1,20 @@
# Shim module since CRT provided things will use the crt_bundle_path variable
variable "bundle_path" {
default = "/tmp/vault.zip"
}
variable "build_tags" {
default = ["ui"]
}
variable "goarch" {
type = string
description = "The Go architecture target"
default = "amd64"
}
variable "goos" {
type = string
description = "The Go OS target"
default = "linux"
}

enos/modules/build_local/

@ -0,0 +1,38 @@
terraform {
required_providers {
enos = {
source = "hashicorp.com/qti/enos"
}
}
}
variable "bundle_path" {
type = string
default = "/tmp/vault.zip"
}
variable "build_tags" {
type = list(string)
description = "The build tags to pass to the Go compiler"
}
variable "goarch" {
type = string
description = "The Go architecture target"
default = "amd64"
}
variable "goos" {
type = string
description = "The Go OS target"
default = "linux"
}
resource "enos_local_exec" "build" {
content = templatefile("${path.module}/templates/build.sh", {
bundle_path = var.bundle_path,
build_tags = join(" ", var.build_tags)
goarch = var.goarch
goos = var.goos
})
}

enos/modules/build_local/templates/build.sh

@ -0,0 +1,39 @@
#!/bin/bash
set -eux -o pipefail
# Requirements
npm install --global yarn || true
# Set up the environment for building Vault.
root_dir="$(git rev-parse --show-toplevel)"
pushd "$root_dir" > /dev/null
export GO_TAGS=${build_tags}
export CGO_ENABLED=0
IFS="-" read -r BASE_VERSION _other <<< "$(make version)"
export VAULT_VERSION=$BASE_VERSION
build_date="$(make build-date)"
export VAULT_BUILD_DATE=$build_date
revision="$(git rev-parse HEAD)"
export VAULT_REVISION=$revision
popd > /dev/null
# Go to the UI directory of the Vault repo and build the UI
pushd "$root_dir/ui" > /dev/null
yarn install --ignore-optional
npm rebuild node-sass
yarn --verbose run build
popd > /dev/null
# Build for linux/amd64 and create a bundle since we're deploying it to linux/amd64
pushd "$root_dir" > /dev/null
export GOARCH=${goarch}
export GOOS=${goos}
make build
zip -r -j ${bundle_path} dist/
popd > /dev/null

enos/modules/get_local_version_from_make/

@ -0,0 +1,15 @@
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
resource "enos_local_exec" "get_version" {
scripts = ["${path.module}/scripts/version.sh"]
}
output "version" {
value = trimspace(enos_local_exec.get_version.stdout)
}

enos/modules/get_local_version_from_make/scripts/version.sh

@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -eu -o pipefail
# Set up the environment for building Vault.
root_dir="$(git rev-parse --show-toplevel)"
pushd "$root_dir" > /dev/null
IFS="-" read -r VAULT_VERSION _other <<< "$(make version)"
echo $VAULT_VERSION

enos/modules/read_license/

@ -0,0 +1,5 @@
variable "file_name" {}
output "license" {
value = file(var.file_name)
}

enos/modules/vault_upgrade/

@ -0,0 +1,163 @@
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
}
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_api_addr" {
type = string
description = "The API address of the Vault cluster"
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
variable "vault_local_bundle_path" {
type = string
description = "The path to the local Vault (vault.zip) bundle"
}
variable "vault_seal_type" {
type = string
description = "The Vault seal type"
}
variable "vault_unseal_keys" {
type = list(string)
description = "The keys to use to unseal Vault when not using auto-unseal"
default = null
}
locals {
instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
follower_ips = compact(split(" ", enos_remote_exec.get_follower_public_ips.stdout))
vault_bin_path = "${var.vault_install_dir}/vault"
}
resource "enos_bundle_install" "upgrade_vault_binary" {
for_each = local.instances
destination = var.vault_install_dir
path = var.vault_local_bundle_path
transport = {
ssh = {
host = each.value.public_ip
}
}
}
resource "enos_remote_exec" "get_leader_public_ip" {
depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-leader-public-ip.sh", {
vault_install_dir = var.vault_install_dir,
vault_instances = jsonencode(local.instances)
})
transport = {
ssh = {
host = local.instances[0].public_ip
}
}
}
resource "enos_remote_exec" "get_follower_public_ips" {
depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-follower-public-ips.sh", {
vault_install_dir = var.vault_install_dir,
vault_instances = jsonencode(local.instances)
})
transport = {
ssh = {
host = local.instances[0].public_ip
}
}
}
resource "enos_remote_exec" "restart_followers" {
for_each = local.followers
depends_on = [enos_remote_exec.get_follower_public_ips]
content = file("${path.module}/templates/restart-vault.sh")
transport = {
ssh = {
host = trimspace(local.follower_ips[tonumber(each.key)])
}
}
}
resource "enos_vault_unseal" "followers" {
depends_on = [enos_remote_exec.restart_followers]
for_each = {
for idx, follower in local.followers : idx => follower
if var.vault_seal_type == "shamir"
}
bin_path = local.vault_bin_path
vault_addr = var.vault_api_addr
seal_type = var.vault_seal_type
unseal_keys = var.vault_unseal_keys
transport = {
ssh = {
host = trimspace(local.follower_ips[each.key])
}
}
}
resource "enos_remote_exec" "restart_leader" {
depends_on = [enos_vault_unseal.followers]
content = file("${path.module}/templates/restart-vault.sh")
transport = {
ssh = {
host = trimspace(enos_remote_exec.get_leader_public_ip.stdout)
}
}
}
resource "enos_vault_unseal" "leader" {
count = var.vault_seal_type == "shamir" ? 1 : 0
depends_on = [enos_remote_exec.restart_leader]
bin_path = local.vault_bin_path
vault_addr = var.vault_api_addr
seal_type = var.vault_seal_type
unseal_keys = var.vault_unseal_keys
transport = {
ssh = {
host = trimspace(enos_remote_exec.get_leader_public_ip.stdout)
}
}
}

enos/modules/vault_upgrade/templates/get-follower-public-ips.sh

@ -0,0 +1,16 @@
#!/bin/bash
set -e
binpath=${vault_install_dir}/vault
export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}'
# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")')
# Get the public ip addresses of the followers
follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances")
echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' '

enos/modules/vault_upgrade/templates/get-leader-public-ip.sh

@ -0,0 +1,15 @@
#!/bin/bash
set -e
binpath=${vault_install_dir}/vault
export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}'
# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")')
# Get the public ip address of the leader
leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances")
echo "$leader_public" | sed 's/\"//g'

View File

@ -0,0 +1,5 @@
#!/bin/bash
set -eux
sudo systemctl restart vault

View File

@ -0,0 +1,68 @@
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
variable "vault_root_token" {
type = string
description = "The vault root token"
}
variable "vault_autopilot_upgrade_version" {
type = string
description = "The Vault version that autopilot is expected to upgrade the cluster to"
default = null
}
variable "vault_autopilot_upgrade_status" {
type = string
description = "The expected autopilot upgrade status"
default = null
}
locals {
public_ips = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
}
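# Run the autopilot verification script on every node to confirm the upgrade
# status and target version reported by sys/storage/raft/autopilot/state.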
resource "enos_remote_exec" "smoke-verify-autopilot" {
for_each = local.public_ips
content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", {
vault_install_dir = var.vault_install_dir
vault_token = var.vault_root_token
vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status,
vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version,
})
transport = {
ssh = {
host = each.value.public_ip
}
}
}

View File

@ -0,0 +1,34 @@
#!/bin/bash
token="${vault_token}"
autopilot_version="${vault_autopilot_upgrade_version}"
autopilot_status="${vault_autopilot_upgrade_status}"
export VAULT_ADDR="http://localhost:8200"
export VAULT_TOKEN="$token"
function fail() {
echo "$1" 1>&2
exit 1
}
count=0
retries=7
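# Poll the autopilot state until the upgrade status and target version match
# the expected values, backing off exponentially between attempts.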
while :; do
state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state)
status="$(jq -r '.data.upgrade_info.status' <<< "$state")"
target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")"
if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then
exit 0
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "$state"
sleep "$wait"
else
fail "Autopilot did not get into the correct status"
fi
done

View File

@ -0,0 +1,62 @@
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
default = "8201"
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
variable "vault_root_token" {
type = string
description = "The vault root token"
}
locals {
instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
}
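# Verify that each node has joined the Raft cluster as a voter.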
resource "enos_remote_exec" "verify_raft_auto_join_voter" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", {
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
vault_token = var.vault_root_token
})
transport = {
ssh = {
host = each.value.public_ip
}
}
}

View File

@ -0,0 +1,47 @@
#!/usr/bin/env bash
set -e
binpath=${vault_install_dir}/vault
fail() {
echo "$1" 2>&1
return 1
}
retry() {
local retries=$1
shift
local count=0
until "$@"; do
exit=$?
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
echo "retry $count"
else
return "$exit"
fi
done
return 0
}
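# Confirm that this node's cluster address is listed as a voter in the Raft
# configuration.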
check_voter_status() {
voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected')
if [[ "$voter_status" != 'true' ]]; then
fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')"
fi
}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
# Retry a few times because it can take some time for things to settle after
# all the nodes are unsealed
retry 5 check_voter_status

View File

@ -0,0 +1,62 @@
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
default = "8201"
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
variable "vault_root_token" {
type = string
description = "The vault root token"
}
locals {
instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
}
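# Verify that each node in the cluster reports an unsealed status.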
resource "enos_remote_exec" "verify_raft_auto_join_voter" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", {
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
vault_token = var.vault_root_token
})
transport = {
ssh = {
host = each.value.public_ip
}
}
}

View File

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e
binpath=${vault_install_dir}/vault
fail() {
echo "$1" 1>&2
return 1
}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}'
unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected')
if [[ "$unseal_status" != 'true' ]]; then
fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status"
fi

View File

@ -0,0 +1,48 @@
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were created"
}
locals {
instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
}
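# Verify that each node's status reports the same version and build date as
# the installed binary.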
resource "enos_remote_exec" "verify_all_nodes_have_updated_version" {
for_each = local.instances
content = templatefile("${path.module}/templates/verify-cluster-version.sh", {
vault_install_dir = var.vault_install_dir,
})
transport = {
ssh = {
host = each.value.public_ip
}
}
}

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Smoke test to verify that the installed Vault binary's version and build date match what the cluster reports
set -e
binpath=${vault_install_dir}/vault
fail() {
echo "$1" 1>&2
exit 1
}
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
binary_version_full=$($binpath version)
# Get the Vault build tag
binary_version=$(cut -d ' ' -f2 <<< "$binary_version_full")
# Strip the leading v
semantic=$${binary_version:1}
# Get the build timestamp
build_date=$(cut -d ' ' -f5 <<< "$binary_version_full")
export VAULT_ADDR='http://127.0.0.1:8200'
# Ensure that the cluster version and build time match the binary installed
vault_status=$("$binpath" status -format json)
result=$(jq -Mr \
--arg version "$semantic" \
--arg build_date "$build_date" \
'select(.version == $version) | .build_date == $build_date' \
<<< "$vault_status"
)
if [[ "$result" != "true" ]]; then
fail "expected version $binary_version with build_date $build_date, got status $vault_status"
fi