// vault-redux/command/server_util.go

package command

import (
"context"
"crypto/rand"
"fmt"
"io"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
	vaultseal "github.com/hashicorp/vault/vault/seal"
"github.com/pkg/errors"
)
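
// These are variables rather than constants, presumably so that other builds
// (e.g. Vault Enterprise) can override them: onEnterprise gates
// enterprise-only restrictions below, and createSecureRandomReaderFunc allows
// an alternate entropy source to be swapped in.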
var (
	onEnterprise                 = false
	createSecureRandomReaderFunc = createSecureRandomReader
)
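
// createSecureRandomReader returns the reader used as a source of secure
// entropy. The open-source implementation ignores both arguments and always
// returns crypto/rand's Reader.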
func createSecureRandomReader(config *server.Config, seal *vault.Seal) (io.Reader, error) {
	return rand.Reader, nil
}
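
// adjustCoreForSealMigration compares the seal configuration persisted in
// storage against the seals the server was configured with and, if they
// differ, prepares core for a seal migration by installing a migration seal,
// a new seal, and (optionally) an unwrap seal via core.SetSealsForMigration.
// Per the seal-migration work in #8103, the migration itself is performed
// later, as part of post-unseal. A hypothetical call site during server
// startup might look like:
//
//	if err := adjustCoreForSealMigration(logger, core, barrierSeal, unwrapSeal); err != nil {
//		return err
//	}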
func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error {
	existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
	if err != nil {
		return fmt.Errorf("Error checking for existing seal: %s", err)
	}

	// If we don't have an existing config or if it's the deprecated auto seal
	// which needs an upgrade, skip out
	if existBarrierSealConfig == nil || existBarrierSealConfig.Type == wrapping.HSMAutoDeprecated {
		return nil
	}

	if unwrapSeal == nil {
		// We have the same barrier type and the unwrap seal is nil so we're not
		// migrating from same to same, IOW we assume it's not a migration
		if existBarrierSealConfig.Type == barrierSeal.BarrierType() {
			return nil
		}

		// If we're not coming from Shamir, and the existing type doesn't match
		// the barrier type, we need both the migration seal and the new seal
		if existBarrierSealConfig.Type != wrapping.Shamir && barrierSeal.BarrierType() != wrapping.Shamir {
			return errors.New(`Trying to migrate from auto-seal to auto-seal but no "disabled" seal stanza found`)
		}
	} else {
		if unwrapSeal.BarrierType() == wrapping.Shamir {
			return errors.New("Shamir seals cannot be set disabled (they should simply not be set)")
		}
	}

	if existBarrierSealConfig.Type != wrapping.Shamir && existRecoverySealConfig == nil {
		return errors.New(`Recovery seal configuration not found for existing seal`)
	}

	if onEnterprise && barrierSeal.BarrierType() == wrapping.Shamir {
		return errors.New("Migrating from autoseal to Shamir seal is not currently supported on Vault Enterprise")
	}

	var migrationSeal vault.Seal
	var newSeal vault.Seal

	// Determine the migrationSeal. This is either going to be an instance of
	// shamir or the unwrapSeal.
	switch existBarrierSealConfig.Type {
	case wrapping.Shamir:
		// The value reflected in config is what we're going to use.
		migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
			Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
				Logger: logger.Named("shamir"),
			}),
		})
	default:
		// If we're not coming from Shamir we expect the previous seal to be
		// in the config and disabled.
		migrationSeal = unwrapSeal
	}

	// newSeal will be the barrierSeal
	newSeal = barrierSeal

	if migrationSeal != nil && newSeal != nil && migrationSeal.BarrierType() == newSeal.BarrierType() {
		return errors.New("Migrating between same seal types is currently not supported")
	}

	if unwrapSeal != nil && existBarrierSealConfig.Type == barrierSeal.BarrierType() {
		// In this case our migration seal is set so we are using it
		// (potentially) for unwrapping. Set it on core for that purpose then
		// exit.
		core.SetSealsForMigration(nil, nil, unwrapSeal)
		return nil
	}
	// Set the appropriate barrier and recovery configs.
	switch {
	case migrationSeal.RecoveryKeySupported() && newSeal.RecoveryKeySupported():
		// Migrating from auto->auto, copy the configs over
		newSeal.SetCachedBarrierConfig(existBarrierSealConfig)
		newSeal.SetCachedRecoveryConfig(existRecoverySealConfig)
	case migrationSeal.RecoveryKeySupported():
		// Migrating from auto->shamir, clone auto's recovery config and set
		// stored keys to 1.
		newSealConfig := existRecoverySealConfig.Clone()
		newSealConfig.StoredShares = 1
		newSeal.SetCachedBarrierConfig(newSealConfig)
	case newSeal.RecoveryKeySupported():
		// Migrating from shamir->auto, set a new barrier config and set
		// recovery config to a clone of shamir's barrier config with stored
		// keys set to 0.
		newBarrierSealConfig := &vault.SealConfig{
			Type:            newSeal.BarrierType(),
			SecretShares:    1,
			SecretThreshold: 1,
			StoredShares:    1,
		}
		newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
		newRecoveryConfig := existBarrierSealConfig.Clone()
		newRecoveryConfig.StoredShares = 0
		newSeal.SetCachedRecoveryConfig(newRecoveryConfig)
	}
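
	// Register all three seals on the core; no data is migrated here, the
	// migration itself happens later during unseal.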
	core.SetSealsForMigration(migrationSeal, newSeal, unwrapSeal)

	return nil
}