# Determine this makefile's path.
# Be sure to place this BEFORE `include` directives, if any.
THIS_FILE := $(lastword $(MAKEFILE_LIST))
MAIN_PACKAGES=$$($(GO_CMD) list ./... | grep -v vendor/ )
SDK_PACKAGES=$$(cd $(CURDIR)/sdk && $(GO_CMD) list ./... | grep -v vendor/ )
API_PACKAGES=$$(cd $(CURDIR)/api && $(GO_CMD) list ./... | grep -v vendor/ )
ALL_PACKAGES=$(MAIN_PACKAGES) $(SDK_PACKAGES) $(API_PACKAGES)
TEST=$$(echo $(ALL_PACKAGES) | grep -v integ/ )
TEST_TIMEOUT?=45m
EXTENDED_TEST_TIMEOUT=60m
INTEG_TEST_TIMEOUT=120m
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor)
SED?=$(shell command -v gsed || command -v sed)
GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version)
PROTOC_VERSION_MIN=3.21.12
GO_CMD?=go
CGO_ENABLED?=0
ifneq ($(FDB_ENABLED), )
CGO_ENABLED=1
BUILD_TAGS+=foundationdb
endif
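# Example (illustrative): setting FDB_ENABLED to any non-empty value forces
# CGO_ENABLED=1 and adds the foundationdb build tag, e.g.:
#   $ FDB_ENABLED=1 make dev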
default: dev
# bin generates the releasable binaries for Vault
bin: prep
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing Vault locally. These are put
# into ./bin/ as well as $GOPATH/bin
dev: BUILD_TAGS+=testonly
dev: prep
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
dev-ui: BUILD_TAGS+=testonly
dev-ui: assetcheck prep
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
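# dev-dynamic is like dev, but forces CGO_ENABLED=1 to produce a dynamically
# linked binary.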
dev-dynamic: BUILD_TAGS+=testonly
dev-dynamic: prep
@CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
# *-mem variants will enable memory profiling which will write snapshots of heap usage
# to $TMP/vaultprof every 5 minutes. These can be analyzed using `$ go tool pprof <profile_file>`.
# Note that any build can have profiling added via: `$ BUILD_TAGS=memprofiler make ...`
dev-mem: BUILD_TAGS+=memprofiler
dev-mem: dev
dev-ui-mem: BUILD_TAGS+=memprofiler
dev-ui-mem: assetcheck dev-ui
dev-dynamic-mem: BUILD_TAGS+=memprofiler
dev-dynamic-mem: dev-dynamic
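# Example (illustrative; the snapshot file name under $TMP/vaultprof is elided):
#   $ make dev-mem
#   $ go tool pprof $TMP/vaultprof/<profile_file>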
# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.
# The resulting image is tagged "vault:dev".
docker-dev: BUILD_TAGS+=testonly
docker-dev: prep
docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev .
docker-dev-ui: BUILD_TAGS+=testonly
docker-dev-ui: prep
docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui .
# test runs the unit tests and vets the code
test: BUILD_TAGS+=testonly
test: prep
@CGO_ENABLED=$(CGO_ENABLED) \
VAULT_ADDR= \
VAULT_TOKEN= \
VAULT_DEV_ROOT_TOKEN_ID= \
VAULT_ACC= \
$(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20
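# Example (illustrative; package path and test name are hypothetical): TEST and
# TESTARGS can be overridden to run a subset of the unit tests, e.g.:
#   $ make test TEST=./vault TESTARGS='-run TestSomething'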
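# testcompile compiles the test binary for each package (go test -c) without
# running the tests.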
testcompile: BUILD_TAGS+=testonly
testcompile: prep
@for pkg in $(TEST) ; do \
$(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
done
# testacc runs acceptance tests
testacc: BUILD_TAGS+=testonly
testacc: prep
@if [ "$(TEST)" = "./..." ]; then \
echo "ERROR: Set TEST to a specific package"; \
exit 1; \
fi
VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT)
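# Example (illustrative package path): acceptance tests must target a specific
# package, e.g.:
#   $ make testacc TEST=./builtin/credential/cert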
# testrace runs the race checker
testrace: BUILD_TAGS+=testonly
testrace: prep
@CGO_ENABLED=1 \
VAULT_ADDR= \
VAULT_TOKEN= \
VAULT_DEV_ROOT_TOKEN_ID= \
VAULT_ACC= \
$(GO_CMD) test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) -parallel=20
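# cover generates an HTML test coverage report via scripts/coverage.sh.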
cover:
./scripts/coverage.sh --html
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
| grep -v '.*github.com/hashicorp/vault$$' \
| xargs $(GO_CMD) vet ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for reviewal."; \
fi
# deprecations runs the staticcheck tool to look for deprecations. It checks the entire
# codebase for deprecated functions, variables, constants, or fields.
deprecations: bootstrap prep
@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh ""
# ci-deprecations runs the staticcheck tool to look for deprecations. All output gets piped
# to revgrep, which will only return an error if a change that is not on main introduces a
# deprecated function, variable, constant, or field.
ci-deprecations: prep check-tools-external
@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh main
# vet-codechecker runs our custom linters on the test functions. All output gets
# piped to revgrep, which will only return an error if a new piece of code violates
# the check.
vet-codechecker: check-tools-internal
@echo "==> Running go vet with ./tools/codechecker..."
@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep
# ci-vet-codechecker runs our custom linters on the test functions. All output gets
# piped to revgrep, which will only return an error if a new piece of code that is
# not on main violates the check.
ci-vet-codechecker: tools-internal check-tools-external
@echo "==> Running go vet with ./tools/codechecker..."
@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep origin/main
# lint runs vet plus a number of other checkers; it is more comprehensive, but louder.
lint: check-tools-external
@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
| xargs golangci-lint run --timeout 10m; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Lint found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for reviewal."; \
fi
# for ci jobs, runs lint against the changed packages in the commit
ci-lint: check-tools-external
@golangci-lint run --timeout 10m --new-from-rev=HEAD~
# prep runs `go generate` to build the dynamically generated
# source files.
#
# n.b.: prep used to depend on fmtcheck, but since fmtcheck is
# now run as a pre-commit hook (and there's little value in
# making every build run the formatter), we've removed that
# dependency.
prep: check-go-version clean
@echo "==> Running go generate..."
@GOARCH= GOOS= $(GO_CMD) generate $(MAIN_PACKAGES)
@GOARCH= GOOS= cd api && $(GO_CMD) generate $(API_PACKAGES)
@GOARCH= GOOS= cd sdk && $(GO_CMD) generate $(SDK_PACKAGES)
# Git doesn't allow us to store shared hooks in .git. Instead, we make sure they're up-to-date
# whenever a make target is invoked.
.PHONY: hooks
hooks:
@if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi
-include hooks # Make sure they're always up-to-date
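# (The -include above works because make attempts to re-make any included file
# that is missing; since no file named "hooks" exists, the phony hooks target
# runs on every invocation.)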
# bootstrap the build by generating any necessary code and downloading additional tools that may
# be used by devs.
bootstrap: prep tools
# Note: if you have plugins in GOPATH you can update all of them via something like:
# for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done
update-plugins:
grep vault-plugin- go.mod | cut -d ' ' -f 1 | while read -r P; do echo "Updating $P..."; go get -v "$P"; done
static-assets-dir:
@mkdir -p ./http/web_ui
install-ui-dependencies:
@echo "==> Installing JavaScript assets"
@cd ui && yarn
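# test-ember runs the UI's JavaScript test suite via the test:oss yarn script.
# Example (assumes a local Node.js/yarn toolchain):
#   $ make test-ember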
test-ember: install-ui-dependencies
@echo "==> Running ember tests"
@cd ui && yarn run test:oss
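# test-ember-enos runs the same suite against a live Vault backend, which is
# presumably provisioned separately (e.g. by an Enos scenario) before invoking:
#   $ make test-ember-enos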
test-ember-enos: install-ui-dependencies
@echo "==> Running ember tests with a real backend"
@cd ui && yarn run test:enos
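# ember-dist produces the production UI build (the assets that `ui`-tagged
# binaries embed):
#   $ make ember-dist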
ember-dist: install-ui-dependencies
@cd ui && npm rebuild node-sass
@echo "==> Building Ember application"
@cd ui && yarn run build
@rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache
ember-dist-dev: install-ui-dependencies
@cd ui && npm rebuild node-sass
@echo "==> Building Ember application"
@cd ui && yarn run build:dev
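# static-dist and static-dist-dev are aliases for the corresponding ember-dist
# targets; use them when only the compiled UI assets are needed:
#   $ make static-dist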
static-dist: ember-dist
static-dist-dev: ember-dist-dev
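# proto regenerates all Go code from the protobuf definitions. Example run,
# assuming protoc (version checked against PROTOC_VERSION_MIN by
# scripts/protocversioncheck.sh), protoc-gen-go, protoc-gen-go-grpc, and
# protoc-go-inject-tag are all on PATH:
#   $ make proto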
proto: check-tools-external
@echo "==> Generating Go code from protobufs..."
@sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'"
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/activity_log.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/generation/generate_data.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/forwarding/types.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/logical/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative physical/raft/types.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/mfa/types.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/types.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/v5/proto/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto
protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/proto/*/*.proto
# No additional sed expressions should be added to this list. Going forward
# we should just use the variable names chosen by protobuf. These are left
# here for backwards compatibility, namely for SDK compilation.
$(SED) -i -e 's/Id/ID/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' vault/request_forwarding_service.pb.go
$(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go
# This will inject the sentinel struct tags as decorated in the proto files.
protoc-go-inject-tag -input=./helper/identity/types.pb.go
protoc-go-inject-tag -input=./helper/identity/mfa/types.pb.go
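# fmt rewrites all non-generated, non-vendored Go files with gofumpt. Example
# (assumes gofumpt is installed, e.g. via `$ make tools`):
#   $ make fmt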
fmt:
find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w
fmtcheck: check-go-fmt
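# The go-mod-* helpers delegate to scripts/go-helper.sh, which is expected to
# apply the action to each Go module in the repository:
#   $ make go-mod-tidy go-mod-download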
.PHONY: go-mod-download
go-mod-download:
@$(CURDIR)/scripts/go-helper.sh mod-download
.PHONY: go-mod-tidy
go-mod-tidy:
@$(CURDIR)/scripts/go-helper.sh mod-tidy
semgrep:
semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep .
assetcheck:
@echo "==> Checking compiled UI assets..."
@sh -c "'$(CURDIR)/scripts/assetcheck.sh'"
spellcheck:
@echo "==> Spell checking website..."
@misspell -error -source=text website/source
.PHONY: check-go-fmt
check-go-fmt:
@$(CURDIR)/scripts/go-helper.sh check-fmt
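# check-go-version verifies the local Go toolchain against $(GO_VERSION_MIN):
#   $ make check-go-version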
.PHONY: check-go-version
check-go-version:
@$(CURDIR)/scripts/go-helper.sh check-version $(GO_VERSION_MIN)
.PHONY: check-semgrep
check-semgrep: check-tools-external
@echo "==> Checking semgrep..."
@semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
.PHONY: check-tools
check-tools:
@$(CURDIR)/tools/tools.sh check
.PHONY: check-tools-external
check-tools-external:
@$(CURDIR)/tools/tools.sh check-external
.PHONY: check-tools-internal
check-tools-internal:
@$(CURDIR)/tools/tools.sh check-internal
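# check-vault-in-path fails fast when no executable vault binary is on PATH:
#   $ make check-vault-in-path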
check-vault-in-path:
@VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \
[ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \
printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)"
.PHONY: tools
tools:
@$(CURDIR)/tools/tools.sh install
.PHONY: tools-external
tools-external:
@$(CURDIR)/tools/tools.sh install-external
.PHONY: tools-internal
tools-internal:
@$(CURDIR)/tools/tools.sh install-internal
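# Each *-database-plugin target builds a standalone plugin binary into ./bin,
# which can then be registered with a Vault server's plugin catalog, e.g.:
#   $ make postgresql-database-plugin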
mysql-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
mysql-legacy-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin
cassandra-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin
influxdb-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/influxdb-database-plugin ./plugins/database/influxdb/influxdb-database-plugin
postgresql-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin
# These ci targets are used for building and testing in GitHub Actions
# workflows and for Enos scenarios. Most of them delegate to subcommands
# of scripts/ci-helper.sh.
.PHONY: ci-build
ci-build:
@$(CURDIR)/scripts/ci-helper.sh build
.PHONY: ci-build-ui
ci-build-ui:
@$(CURDIR)/scripts/ci-helper.sh build-ui
.PHONY: ci-bundle
ci-bundle:
@$(CURDIR)/scripts/ci-helper.sh bundle
.PHONY: ci-get-artifact-basename
ci-get-artifact-basename:
@$(CURDIR)/scripts/ci-helper.sh artifact-basename
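
# A sketch of how a (hypothetical) GitHub Actions step might consume this
# target's output:
#   - run: echo "artifact-basename=$(make ci-get-artifact-basename)" >> "$GITHUB_OUTPUT"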
.PHONY: ci-get-date
ci-get-date:
@$(CURDIR)/scripts/ci-helper.sh date
.PHONY: ci-get-revision
ci-get-revision:
@$(CURDIR)/scripts/ci-helper.sh revision
.PHONY: ci-get-version-package
ci-get-version-package:
@$(CURDIR)/scripts/ci-helper.sh version-package
.PHONY: ci-install-external-tools
ci-install-external-tools:
@$(CURDIR)/scripts/ci-helper.sh install-external-tools
.PHONY: ci-prepare-legal
ci-prepare-legal:
@$(CURDIR)/scripts/ci-helper.sh prepare-legal
.PHONY: ci-update-external-tool-modules
ci-update-external-tool-modules:
@$(CURDIR)/scripts/ci-helper.sh update-external-tool-modules
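
# ci-copywriteheaders audits license headers with the copywrite tool; the
# --plan flag is assumed here to run a dry-run pass that reports files whose
# headers would change. The assumption behind the special case is that Vault
# core carries BUSL headers while /api, /sdk, and /shamir remain MPL-2.0,
# hence the per-directory exceptions script below.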
.PHONY: ci-copywriteheaders
ci-copywriteheaders:
copywrite headers --plan
# Special case for MPL headers in /api, /sdk, and /shamir
cd api && $(CURDIR)/scripts/copywrite-exceptions.sh
cd sdk && $(CURDIR)/scripts/copywrite-exceptions.sh
cd shamir && $(CURDIR)/scripts/copywrite-exceptions.sh
.PHONY: all bin default prep test vet bootstrap fmt fmtcheck \
    mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin \
    influxdb-database-plugin postgresql-database-plugin \
    ember-dist ember-dist-dev static-dist static-dist-dev assetcheck \
    check-vault-in-path packages build build-ci semgrep semgrep-ci \
    vet-codechecker ci-vet-codechecker clean dev
.NOTPARALLEL: ember-dist ember-dist-dev
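
# The .NOTPARALLEL declaration above is presumably there because ember-dist
# and ember-dist-dev share UI build outputs and must not run at the same time.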
.PHONY: all-packages
all-packages:
@echo $(ALL_PACKAGES) | tr ' ' '\n'
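
# all-packages prints one Go package per line, which makes the output easy
# to pipe, e.g. (hypothetical): make all-packages | xargs -n1 go vet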
.PHONY: clean
clean:
@echo "==> Cleaning..."