
Compare commits

...

2 Commits

SHA1        Message               Date
747df1e9e8  remove oracle/oci     2024-07-01 15:13:00 +03:00
e0e8caaa58  remove foundationdb   2024-07-01 14:35:09 +03:00
22 changed files with 0 additions and 3146 deletions

View File

@ -18,10 +18,6 @@ GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version)
PROTOC_VERSION_MIN=3.21.12
GO_CMD?=go
CGO_ENABLED?=0
ifneq ($(FDB_ENABLED), )
CGO_ENABLED=1
BUILD_TAGS+=foundationdb
endif
default: dev

View File

@ -1,231 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package agent
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
hclog "github.com/hashicorp/go-hclog"
vaultoci "github.com/hashicorp/vault-plugin-auth-oci"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agentproxyshared/auth"
agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci"
"github.com/hashicorp/vault/command/agentproxyshared/sink"
"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
"github.com/hashicorp/vault/helper/testhelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
const (
envVarOCITestTenancyOCID = "OCI_TEST_TENANCY_OCID"
envVarOCITestUserOCID = "OCI_TEST_USER_OCID"
envVarOCITestFingerprint = "OCI_TEST_FINGERPRINT"
envVarOCITestPrivateKeyPath = "OCI_TEST_PRIVATE_KEY_PATH"
envVAROCITestOCIDList = "OCI_TEST_OCID_LIST"
// The OCI SDK doesn't export its standard env vars so they're captured here.
// These are used for the duration of the test to make sure the agent is able to
// pick up creds from the env.
//
// To run this test, do not set these. Only the above ones need to be set.
envVarOCITenancyOCID = "OCI_tenancy_ocid"
envVarOCIUserOCID = "OCI_user_ocid"
envVarOCIFingerprint = "OCI_fingerprint"
envVarOCIPrivateKeyPath = "OCI_private_key_path"
)
func TestOCIEndToEnd(t *testing.T) {
if !runAcceptanceTests {
t.SkipNow()
}
// Ensure each cred is populated.
credNames := []string{
envVarOCITestTenancyOCID,
envVarOCITestUserOCID,
envVarOCITestFingerprint,
envVarOCITestPrivateKeyPath,
envVAROCITestOCIDList,
}
testhelpers.SkipUnlessEnvVarsSet(t, credNames)
logger := logging.NewVaultLogger(hclog.Trace)
coreConfig := &vault.CoreConfig{
Logger: logger,
CredentialBackends: map[string]logical.Factory{
"oci": vaultoci.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
vault.TestWaitActive(t, cluster.Cores[0].Core)
client := cluster.Cores[0].Client
// Setup Vault
if err := client.Sys().EnableAuthWithOptions("oci", &api.EnableAuthOptions{
Type: "oci",
}); err != nil {
t.Fatal(err)
}
if _, err := client.Logical().Write("auth/oci/config", map[string]interface{}{
"home_tenancy_id": os.Getenv(envVarOCITestTenancyOCID),
}); err != nil {
t.Fatal(err)
}
if _, err := client.Logical().Write("auth/oci/role/test", map[string]interface{}{
"ocid_list": os.Getenv(envVAROCITestOCIDList),
}); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
// We're going to feed oci auth creds via env variables.
if err := setOCIEnvCreds(); err != nil {
t.Fatal(err)
}
defer func() {
if err := unsetOCIEnvCreds(); err != nil {
t.Fatal(err)
}
}()
vaultAddr := "http://" + cluster.Cores[0].Listeners[0].Addr().String()
am, err := agentoci.NewOCIAuthMethod(&auth.AuthConfig{
Logger: logger.Named("auth.oci"),
MountPath: "auth/oci",
Config: map[string]interface{}{
"type": "apikey",
"role": "test",
},
}, vaultAddr)
if err != nil {
t.Fatal(err)
}
ahConfig := &auth.AuthHandlerConfig{
Logger: logger.Named("auth.handler"),
Client: client,
}
ah := auth.NewAuthHandler(ahConfig)
errCh := make(chan error)
go func() {
errCh <- ah.Run(ctx, am)
}()
defer func() {
select {
case <-ctx.Done():
case err := <-errCh:
if err != nil {
t.Fatal(err)
}
}
}()
tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.")
if err != nil {
t.Fatal(err)
}
tokenSinkFileName := tmpFile.Name()
tmpFile.Close()
os.Remove(tokenSinkFileName)
t.Logf("output: %s", tokenSinkFileName)
config := &sink.SinkConfig{
Logger: logger.Named("sink.file"),
Config: map[string]interface{}{
"path": tokenSinkFileName,
},
WrapTTL: 10 * time.Second,
}
fs, err := file.NewFileSink(config)
if err != nil {
t.Fatal(err)
}
config.Sink = fs
ss := sink.NewSinkServer(&sink.SinkServerConfig{
Logger: logger.Named("sink.server"),
Client: client,
})
go func() {
errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
}()
defer func() {
select {
case <-ctx.Done():
case err := <-errCh:
if err != nil {
t.Fatal(err)
}
}
}()
// This has to be after the other defers so it happens first. It allows
// successful test runs to immediately cancel all of the runner goroutines
// and unblock any of the blocking defer calls by the runner's DoneCh that
// come before this, preventing successful tests from taking the entire
// timeout duration.
defer cancel()
if stat, err := os.Lstat(tokenSinkFileName); err == nil {
t.Fatalf("expected err but got %s", stat)
} else if !os.IsNotExist(err) {
t.Fatal("expected notexist err")
}
// Wait 2 seconds for the env variables to be detected and an auth to be generated.
time.Sleep(time.Second * 2)
token, err := readToken(tokenSinkFileName)
if err != nil {
t.Fatal(err)
}
if token.Token == "" {
t.Fatal("expected token but didn't receive it")
}
}
func setOCIEnvCreds() error {
if err := os.Setenv(envVarOCITenancyOCID, os.Getenv(envVarOCITestTenancyOCID)); err != nil {
return err
}
if err := os.Setenv(envVarOCIUserOCID, os.Getenv(envVarOCITestUserOCID)); err != nil {
return err
}
if err := os.Setenv(envVarOCIFingerprint, os.Getenv(envVarOCITestFingerprint)); err != nil {
return err
}
return os.Setenv(envVarOCIPrivateKeyPath, os.Getenv(envVarOCITestPrivateKeyPath))
}
func unsetOCIEnvCreds() error {
if err := os.Unsetenv(envVarOCITenancyOCID); err != nil {
return err
}
if err := os.Unsetenv(envVarOCIUserOCID); err != nil {
return err
}
if err := os.Unsetenv(envVarOCIFingerprint); err != nil {
return err
}
return os.Unsetenv(envVarOCIPrivateKeyPath)
}

View File

@ -1,264 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package oci
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"os/user"
"path"
"sync"
"time"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/agentproxyshared/auth"
"github.com/oracle/oci-go-sdk/common"
ociAuth "github.com/oracle/oci-go-sdk/common/auth"
)
const (
typeAPIKey = "apikey"
typeInstance = "instance"
/*
IAM creds can be inferred from instance metadata or the container
identity service, and those creds expire at varying intervals with
new creds becoming available at likewise varying intervals. Let's
default to polling once a minute so all changes can be picked up
rather quickly. This is configurable, however.
*/
defaultCredCheckFreqSeconds = 60 * time.Second
defaultConfigFileName = "config"
defaultConfigDirName = ".oci"
configFilePathEnvVarName = "OCI_CONFIG_FILE"
secondaryConfigDirName = ".oraclebmc"
)
func NewOCIAuthMethod(conf *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) {
if conf == nil {
return nil, errors.New("empty config")
}
if conf.Config == nil {
return nil, errors.New("empty config data")
}
a := &ociMethod{
logger: conf.Logger,
vaultAddress: vaultAddress,
mountPath: conf.MountPath,
credsFound: make(chan struct{}),
stopCh: make(chan struct{}),
}
typeRaw, ok := conf.Config["type"]
if !ok {
return nil, errors.New("missing 'type' value")
}
authType, ok := typeRaw.(string)
if !ok {
return nil, errors.New("could not convert 'type' config value to string")
}
roleRaw, ok := conf.Config["role"]
if !ok {
return nil, errors.New("missing 'role' value")
}
a.role, ok = roleRaw.(string)
if !ok {
return nil, errors.New("could not convert 'role' config value to string")
}
// Check for an optional custom frequency at which we should poll for creds.
credCheckFreqSec := defaultCredCheckFreqSeconds
if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok {
checkFreq, err := parseutil.ParseDurationSecond(checkFreqRaw)
if err != nil {
return nil, fmt.Errorf("could not parse credential_poll_interval: %v", err)
}
credCheckFreqSec = checkFreq
}
switch {
case a.role == "":
return nil, errors.New("'role' value is empty")
case authType == "":
return nil, errors.New("'type' value is empty")
case authType != typeAPIKey && authType != typeInstance:
return nil, errors.New("'type' value is invalid")
case authType == typeAPIKey:
defaultConfigFile := getDefaultConfigFilePath()
homeFolder := getHomeFolder()
secondaryConfigFile := path.Join(homeFolder, secondaryConfigDirName, defaultConfigFileName)
environmentProvider := common.ConfigurationProviderEnvironmentVariables("OCI", "")
defaultFileProvider, _ := common.ConfigurationProviderFromFile(defaultConfigFile, "")
secondaryFileProvider, _ := common.ConfigurationProviderFromFile(secondaryConfigFile, "")
provider, _ := common.ComposingConfigurationProvider([]common.ConfigurationProvider{environmentProvider, defaultFileProvider, secondaryFileProvider})
a.configurationProvider = provider
case authType == typeInstance:
configurationProvider, err := ociAuth.InstancePrincipalConfigurationProvider()
if err != nil {
return nil, fmt.Errorf("failed to create instance principal configuration provider: %v", err)
}
a.configurationProvider = configurationProvider
}
// Do an initial population of the creds because we want to err right away if we can't
// even get a first set.
creds, err := a.configurationProvider.KeyID()
if err != nil {
return nil, err
}
a.lastCreds = creds
go a.pollForCreds(credCheckFreqSec)
return a, nil
}
type ociMethod struct {
logger hclog.Logger
vaultAddress string
mountPath string
configurationProvider common.ConfigurationProvider
role string
// These are used to share the latest creds safely across goroutines.
credLock sync.Mutex
lastCreds string
// Notifies the outer environment that it should call Authenticate again.
credsFound chan struct{}
// Detects that the outer environment is closing.
stopCh chan struct{}
}
func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) {
a.credLock.Lock()
defer a.credLock.Unlock()
a.logger.Trace("beginning authentication")
requestPath := fmt.Sprintf("/v1/%s/login/%s", a.mountPath, a.role)
requestURL := fmt.Sprintf("%s%s", a.vaultAddress, requestPath)
request, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
return "", nil, nil, fmt.Errorf("error creating authentication request: %w", err)
}
request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
signer := common.DefaultRequestSigner(a.configurationProvider)
err = signer.Sign(request)
if err != nil {
return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err)
}
parsedVaultAddress, err := url.Parse(a.vaultAddress)
if err != nil {
return "", nil, nil, fmt.Errorf("unable to parse vault address: %w", err)
}
request.Header.Set("Host", parsedVaultAddress.Host)
request.Header.Set("(request-target)", fmt.Sprintf("%s %s", "get", requestPath))
data := map[string]interface{}{
"request_headers": request.Header,
}
return fmt.Sprintf("%s/login/%s", a.mountPath, a.role), nil, data, nil
}
func (a *ociMethod) NewCreds() chan struct{} {
return a.credsFound
}
func (a *ociMethod) CredSuccess() {}
func (a *ociMethod) Shutdown() {
close(a.credsFound)
close(a.stopCh)
}
func (a *ociMethod) pollForCreds(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for {
select {
case <-a.stopCh:
a.logger.Trace("shutdown triggered, stopping OCI auth handler")
return
case <-ticker.C:
if err := a.checkCreds(); err != nil {
a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err)
}
}
}
}
func (a *ociMethod) checkCreds() error {
a.credLock.Lock()
defer a.credLock.Unlock()
a.logger.Trace("checking for new credentials")
currentCreds, err := a.configurationProvider.KeyID()
if err != nil {
return err
}
// KeyID returns a plain string, so a simple equality check is enough to
// detect whether the credentials have changed.
if currentCreds == a.lastCreds {
a.logger.Trace("credentials are unchanged")
return nil
}
a.lastCreds = currentCreds
a.logger.Trace("new credentials detected, triggering Authenticate")
a.credsFound <- struct{}{}
return nil
}
func getHomeFolder() string {
current, e := user.Current()
if e != nil {
// Give up and try to return something sensible
home, err := os.UserHomeDir()
if err != nil {
return ""
}
return home
}
return current.HomeDir
}
func getDefaultConfigFilePath() string {
homeFolder := getHomeFolder()
defaultConfigFile := path.Join(homeFolder, defaultConfigDirName, defaultConfigFileName)
if _, err := os.Stat(defaultConfigFile); err == nil {
return defaultConfigFile
}
// Read configuration file path from OCI_CONFIG_FILE env var
fallbackConfigFile, existed := os.LookupEnv(configFilePathEnvVarName)
if !existed {
return defaultConfigFile
}
if _, err := os.Stat(fallbackConfigFile); os.IsNotExist(err) {
return defaultConfigFile
}
return fallbackConfigFile
}

View File

@ -19,7 +19,6 @@ import (
"github.com/hashicorp/vault/command/agentproxyshared/auth/jwt"
"github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos"
"github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes"
"github.com/hashicorp/vault/command/agentproxyshared/auth/oci"
token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file"
"github.com/hashicorp/vault/command/agentproxyshared/cache"
"github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
@ -44,8 +43,6 @@ func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.Aut
return kubernetes.NewKubernetesAuthMethod(authConfig)
case "approle":
return approle.NewApproleAuthMethod(authConfig)
case "oci":
return oci.NewOCIAuthMethod(authConfig, vaultAddress)
case "token_file":
return token_file.NewTokenFileAuthMethod(authConfig)
case "pcf": // Deprecated.

View File

@ -30,7 +30,6 @@ import (
credCF "github.com/hashicorp/vault-plugin-auth-cf"
credOIDC "github.com/hashicorp/vault-plugin-auth-jwt"
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
credCert "github.com/hashicorp/vault/builtin/credential/cert"
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
@ -42,8 +41,6 @@ import (
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
physConsul "github.com/hashicorp/vault/physical/consul"
physFoundationDB "github.com/hashicorp/vault/physical/foundationdb"
physOCI "github.com/hashicorp/vault/physical/oci"
physRaft "github.com/hashicorp/vault/physical/raft"
physFile "github.com/hashicorp/vault/sdk/physical/file"
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
@ -168,12 +165,10 @@ var (
"consul": physConsul.NewConsulBackend,
"file_transactional": physFile.NewTransactionalFileBackend,
"file": physFile.NewFileBackend,
"foundationdb": physFoundationDB.NewFDBBackend,
"inmem_ha": physInmem.NewInmemHA,
"inmem_transactional_ha": physInmem.NewTransactionalInmemHA,
"inmem_transactional": physInmem.NewTransactionalInmem,
"inmem": physInmem.NewInmem,
"oci": physOCI.NewBackend,
"raft": physRaft.NewRaftBackend,
}
@ -193,7 +188,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co
"github": &credGitHub.CLIHandler{},
"kerberos": &credKerb.CLIHandler{},
"ldap": &credLdap.CLIHandler{},
"oci": &credOCI.CLIHandler{},
"oidc": &credOIDC.CLIHandler{},
"okta": &credOkta.CLIHandler{},
"pcf": &credCF.CLIHandler{}, // Deprecated.

go.mod (7 changed lines)
View File

@ -26,7 +26,6 @@ replace github.com/hashicorp/vault/sdk => ./sdk
require (
github.com/ProtonMail/go-crypto v0.0.0-20230626094100-7e9e0395ebec
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2
github.com/armon/go-metrics v0.4.1
github.com/armon/go-radix v1.0.0
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
@ -64,7 +63,6 @@ require (
github.com/hashicorp/go-kms-wrapping/v2 v2.0.10
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7
github.com/hashicorp/go-memdb v1.3.4
github.com/hashicorp/go-msgpack v1.1.5
@ -104,7 +102,6 @@ require (
github.com/hashicorp/vault-plugin-auth-jwt v0.16.1
github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0
github.com/hashicorp/vault-plugin-auth-oci v0.14.0
github.com/hashicorp/vault-plugin-mock v0.16.1
github.com/hashicorp/vault-plugin-secrets-ad v0.16.0
github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0
@ -140,7 +137,6 @@ require (
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc
github.com/oklog/run v1.1.0
github.com/okta/okta-sdk-golang/v2 v2.12.1
github.com/oracle/oci-go-sdk v24.3.0+incompatible
github.com/ory/dockertest v3.3.5+incompatible
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pires/go-proxyproto v0.6.1
@ -173,7 +169,6 @@ require (
golang.org/x/tools v0.18.0
google.golang.org/grpc v1.61.1
google.golang.org/protobuf v1.34.1
gopkg.in/ory-am/dockertest.v3 v3.3.4
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
layeh.com/radius v0.0.0-20231213012653-1006025d24f8
nhooyr.io/websocket v1.8.7
@ -326,7 +321,6 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/opencontainers/runc v1.2.0-rc.1 // indirect
github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
@ -341,7 +335,6 @@ require (
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect

go.sum (14 changed lines)
View File

@ -966,8 +966,6 @@ github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/P
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k=
github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@ -1898,8 +1896,6 @@ github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc=
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo=
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM=
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM=
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc=
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo=
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7/go.mod h1:hxNA5oTfAvwPacWVg1axtF/lvTafwlAa6a6K4uzWHhw=
github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
@ -2035,8 +2031,6 @@ github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 h1:YH2x9kIV0jKXk22tVkpyd
github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0/go.mod h1:I6ulXug4oxx77DFYjqI1kVl+72TgXEo3Oju4tTOVfU4=
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 h1:vuXNJvtMyoqQ01Sfwf2TNcJNkGcxP1vD3C7gpvuVkCU=
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0/go.mod h1:onx9W/rDwENQkN+1yEnJvS51PVkkGAPOBXasne7lnnk=
github.com/hashicorp/vault-plugin-auth-oci v0.14.0 h1:B7uyigqgUAO3gebvi8mMmsq7l4QAG0bLEP6rAKyDVuw=
github.com/hashicorp/vault-plugin-auth-oci v0.14.0/go.mod h1:SYdTtQhzMxqOCbdC0E0UOrkc4eGXXcJmXXbe1MHVPtE=
github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 h1:6RCpd2PbBvmi5xmxXhggE0Xv+/Gag896/NNZeMKH+8A=
@ -2527,10 +2521,6 @@ github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuh
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU=
github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM=
github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w=
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
@ -2726,8 +2716,6 @@ github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQv
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0=
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
@ -3997,8 +3985,6 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw=
gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek=
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=

View File

@ -11,7 +11,6 @@ import (
credJWT "github.com/hashicorp/vault-plugin-auth-jwt"
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
@ -92,7 +91,6 @@ func newRegistry() *registry {
"kerberos": {Factory: credKerb.Factory},
"kubernetes": {Factory: credKube.Factory},
"ldap": {Factory: credLdap.Factory},
"oci": {Factory: credOCI.Factory},
"oidc": {Factory: credJWT.Factory},
"okta": {Factory: credOkta.Factory},
"pcf": {

View File

@ -14,7 +14,6 @@ import (
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2"
"github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-secure-stdlib/parseutil"
@ -171,11 +170,6 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin
case wrapping.WrapperTypeAead:
wrapper, kmsInfo, err = GetAEADKMSFunc(configKMS, opts...)
case wrapping.WrapperTypeOciKms:
if keyId, ok := configKMS.Config["key_id"]; ok {
opts = append(opts, wrapping.WithKeyId(keyId))
}
wrapper, kmsInfo, err = GetOCIKMSKMSFunc(configKMS, opts...)
case wrapping.WrapperTypeTransit:
wrapper, kmsInfo, err = GetTransitKMSFunc(configKMS, opts...)
@ -217,22 +211,6 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st
return wrapper, info, nil
}
func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
wrapper := ocikms.NewWrapper()
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)
if err != nil {
return nil, nil, err
}
info := make(map[string]string)
if wrapperInfo != nil {
info["OCI KMS KeyID"] = wrapperInfo.Metadata[ocikms.KmsConfigKeyId]
info["OCI KMS Crypto Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigCryptoEndpoint]
info["OCI KMS Management Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigManagementEndpoint]
info["OCI KMS Principal Type"] = wrapperInfo.Metadata["principal_type"]
}
return wrapper, info, nil
}
var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
wrapper := transit.NewWrapper()
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)

View File

@ -1,47 +0,0 @@
# FoundationDB storage backend
Extra steps are required to produce a Vault build containing the FoundationDB
backend; attempts to use the backend on a build produced without following
this procedure will fail with a descriptive error message at runtime.
## Installing the Go bindings
### Picking a version
The version of the Go bindings and the FoundationDB client library used to
build them must match.
This version determines which API versions can be used: it should be no
higher than the version of FoundationDB used in your cluster, and must
also satisfy the requirements of the backend code.
The minimum required API version for the FoundationDB backend is 520.
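To make the version constraint concrete, here is a minimal sketch of the startup check, mirroring the deleted `physical/foundationdb` code shown later in this diff; it assumes the Go bindings are installed, and the literal `"520"` merely stands in for the `api_version` value from the storage configuration.
```
// Minimal sketch of the API version check; the "520" literal is illustrative.
package main

import (
	"log"
	"strconv"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

const minAPIVersion = 520 // minimum API version required by the backend

func main() {
	apiVersion, err := strconv.Atoi("520") // stand-in for conf["api_version"]
	if err != nil {
		log.Fatalf("failed to parse api_version: %v", err)
	}
	if apiVersion < minAPIVersion {
		log.Fatalf("API version %d is lower than the required minimum %d", apiVersion, minAPIVersion)
	}
	// The selected API version must also be supported by the installed
	// client library and the cluster.
	if err := fdb.APIVersion(apiVersion); err != nil {
		log.Fatalf("failed to set FDB API version: %v", err)
	}
	log.Printf("using FoundationDB API version %d", apiVersion)
}
```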
### Installation
Make sure you have Mono installed (core is enough), then install the
Go bindings using the `fdb-go-install.sh` script:
```
$ physical/foundationdb/fdb-go-install.sh install --fdbver x.y.z
```
By default, if `--fdbver x.y.z` is not specified, version 5.2.4 will be used.
## Building Vault
To build Vault with the FoundationDB backend, add FDB_ENABLED=1 when invoking
`make`, e.g.
```
$ make dev FDB_ENABLED=1
```
## Running tests
Similarly, add FDB_ENABLED=1 to your `make` invocation when running tests,
e.g.
```
$ make test TEST=./physical/foundationdb FDB_ENABLED=1
```

View File

@ -1,333 +0,0 @@
#!/bin/bash -eu
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
#
# fdb-go-install.sh
#
# Installs the FoundationDB Go bindings for a client. This will download
# the repository from the remote repo, either into the go path or a custom
# destination, at the appropriate semantic version. It will then build a few
# generated files that need to be present for the go build to work.
# At the end, it has some advice for flags to modify within your
# go environment so that other packages may successfully use this
# library.
#
DESTDIR="${DESTDIR:-}"
FDBVER="${FDBVER:-5.2.4}"
REMOTE="${REMOTE:-github.com}"
FDBREPO="${FDBREPO:-apple/foundationdb}"
status=0
platform=$(uname)
if [[ "${platform}" == "Darwin" ]] ; then
FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}"
libfdbc="libfdb_c.dylib"
elif [[ "${platform}" == "Linux" ]] ; then
libfdbc="libfdb_c.so"
custom_libdir="${FDBLIBDIR:-}"
FDBLIBDIR=""
if [[ -z "${custom_libdir}" ]]; then
search_libdirs=( '/usr/lib' '/usr/lib64' )
else
search_libdirs=( "${custom_libdir}" )
fi
for libdir in "${search_libdirs[@]}" ; do
if [[ -e "${libdir}/${libfdbc}" ]]; then
FDBLIBDIR="${libdir}"
break
fi
done
if [[ -z "${FDBLIBDIR}" ]]; then
echo "The FoundationDB C library could not be found in any of:"
for libdir in "${search_libdirs[@]}" ; do
echo " ${libdir}"
done
echo "Your installation may be incomplete, or you need to set a custom FDBLIBDIR."
let status="${status} + 1"
fi
else
echo "Unsupported platform ${platform}".
echo "At the moment, only macOS and Linux are supported by this script."
let status="${status} + 1"
fi
filedir=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
destdir=""
function printUsage() {
echo "Usage: fdb-go-install.sh <cmd>"
echo
echo "cmd: One of the commands to run. The options are:"
echo " install Download the FDB go bindings and install them"
echo " localinstall Install a into the go path a local copy of the repo"
echo " download Download but do not prepare the FoundationDB bindings"
echo " help Print this help message and then quit"
echo
echo "Command Line Options:"
echo " --fdbver <version> FoundationDB semantic version (default is ${FDBVER})"
echo " -d/--dest-dir <dest> Local location for the repo (default is to place in go path)"
echo
echo "Environment Variable Options:"
echo " REMOTE Remote repository to download from (currently ${REMOTE})"
echo " FDBREPO Repository of FoundationDB library to download (currently ${FDBREPO})"
echo " FDBLIBDIR Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})"
}
function parseArgs() {
local status=0
if [[ "${#}" -lt 0 ]] ; then
printUsage
let status="${status} + 1"
else
operation="${1}"
shift
if [[ "${operation}" != "install" ]] && [[ "${operation}" != "localinstall" ]] && [[ "${operation}" != "download" ]] && [[ "${operation}" != "help" ]] ; then
echo "Unknown command: ${operation}"
printUsage
let status="${status} + 1"
fi
fi
while [[ "${#}" -gt 0 ]] && [[ "${status}" -eq 0 ]] ; do
local key="${1}"
case "${key}" in
--fdbver)
if [[ "${#}" -lt 2 ]] ; then
echo "No version specified with --fdbver flag"
printUsage
let status="${status} + 1"
else
FDBVER="${2}"
fi
shift
;;
-d|--dest-dir)
if [[ "${#}" -lt 2 ]] ; then
echo "No destination specified with ${key} flag"
printUsage
let status="${status} + 1"
else
destdir="${2}"
fi
shift
;;
*)
echo "Unrecognized argument ${key}"
printUsage
let status="${status} + 1"
esac
shift
done
return "${status}"
}
function checkBin() {
if [[ "${#}" -lt 1 ]] ; then
echo "Usage: checkBin <binary>"
return 1
else
if [[ -n $(which "${1}") ]] ; then
return 0
else
return 1
fi
fi
}
if [[ "${status}" -gt 0 ]] ; then
# We have already failed.
:
elif [[ "${#}" -lt 1 ]] ; then
printUsage
else
required_bins=( 'go' 'git' 'make' 'mono' )
missing_bins=()
for bin in "${required_bins[@]}" ; do
if ! checkBin "${bin}" ; then
missing_bins+=("${bin}")
let status="${status} + 1"
fi
done
if [[ "${status}" -gt 0 ]] ; then
echo "Missing binaries: ${missing_bins[*]}"
elif ! parseArgs ${@} ; then
let status="${status} + 1"
elif [[ "${operation}" == "help" ]] ; then
printUsage
else
# Add go-specific environment variables.
eval $(go env)
golibdir=$(dirname "${GOPATH}/src/${REMOTE}/${FDBREPO}")
if [[ -z "${destdir}" ]] ; then
if [[ "${operation}" == "localinstall" ]] ; then
# Assume it's the local directory.
destdir=$(cd "${filedir}/../../.." && pwd)
else
destdir="${golibdir}"
fi
fi
if [[ ! -d "${destdir}" ]] ; then
cmd=( 'mkdir' '-p' "${destdir}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create destination directory ${destdir}."
fi
fi
# Step 1: Make sure repository is present.
if [[ "${status}" -eq 0 ]] ; then
destdir=$( cd "${destdir}" && pwd ) # Get absolute path of destination dir.
fdbdir="${destdir}/foundationdb"
if [[ ! -d "${destdir}" ]] ; then
cmd=("mkdir" "-p" "${destdir}")
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
echo "Could not create destination directory ${destdir}."
let status="${status} + 1"
fi
fi
fi
if [[ "${operation}" == "localinstall" ]] ; then
# No download occurs in this case.
:
else
if [[ -d "${fdbdir}" ]] ; then
echo "Directory ${fdbdir} already exists ; checking out appropriate tag"
cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' )
cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "${FDBVER}" )
if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then
let status="${status} + 1"
echo "Could not pull latest changes from origin"
elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then
let status="${status} + 1"
echo "Could not checkout tag ${FDBVER}."
fi
else
echo "Downloading foundation repository into ${destdir}:"
cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "${FDBVER}" "https://${REMOTE}/${FDBREPO}.git" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not download repository."
fi
fi
fi
# Step 2: Build generated things.
if [[ "${operation}" == "download" ]] ; then
# The generated files are not created under a strict download.
:
elif [[ "${status}" -eq 0 ]] ; then
echo "Building generated files."
# FoundationDB starting with 6.0 can figure that out on its own
if [ -e '/usr/bin/mcs' ]; then
MCS_BIN=/usr/bin/mcs
else
MCS_BIN=/usr/bin/dmcs
fi
cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' "MCS=$MCS_BIN" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not generate required c header"
else
infile="${fdbdir}/fdbclient/vexillographer/fdb.options"
outfile="${fdbdir}/bindings/go/src/fdb/generated.go"
cmd=( 'go' 'run' "${fdbdir}/bindings/go/src/_util/translate_fdb_options.go" )
echo "${cmd[*]} < ${infile} > ${outfile}"
if ! "${cmd[@]}" < "${infile}" > "${outfile}" ; then
let status="${status} + 1"
echo "Could not generate generated go file."
fi
fi
fi
# Step 3: Add to go path.
if [[ "${operation}" == "download" ]] ; then
# The files are not moved under a strict download.
:
elif [[ "${status}" -eq 0 ]] ; then
linkpath="${GOPATH}/src/${REMOTE}/${FDBREPO}"
if [[ "${linkpath}" == "${fdbdir}" ]] ; then
# Downloaded directly into go path. Skip making the link.
:
elif [[ -e "${linkpath}" ]] ; then
echo "Warning: link path (${linkpath}) already exists. Leaving in place."
else
dirpath=$(dirname "${linkpath}")
if [[ ! -d "${dirpath}" ]] ; then
cmd=( 'mkdir' '-p' "${dirpath}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create directory for link."
fi
fi
if [[ "${status}" -eq 0 ]] ; then
cmd=( 'ln' '-s' "${fdbdir}" "${linkpath}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create link within go path."
fi
fi
fi
fi
# Step 4: Build the binaries.
if [[ "${operation}" == "download" ]] ; then
# Do not install if only downloading
:
elif [[ "${status}" -eq 0 ]] ; then
cgo_cppflags="-I${linkpath}/bindings/c"
cgo_cflags="-g -O2"
cgo_ldflags="-L${FDBLIBDIR}"
fdb_go_path="${REMOTE}/${FDBREPO}/bindings/go/src"
if ! CGO_CPPFLAGS="${cgo_cppflags}" CGO_CFLAGS="${cgo_cflags}" CGO_LDFLAGS="${cgo_ldflags}" go install "${fdb_go_path}/fdb" "${fdb_go_path}/fdb/tuple" "${fdb_go_path}/fdb/subspace" "${fdb_go_path}/fdb/directory" ; then
let status="${status} + 1"
echo "Could not build FoundationDB go libraries."
fi
fi
# Step 5: Explain CGO flags.
if [[ "${status}" -eq 0 && ("${operation}" == "localinstall" || "${operation}" == "install" ) ]] ; then
echo
echo "The FoundationDB go bindings were successfully installed."
echo "To build packages which use the go bindings, you will need to"
echo "set the following environment variables:"
echo " CGO_CPPFLAGS=\"${cgo_cppflags}\""
echo " CGO_CFLAGS=\"${cgo_cflags}\""
echo " CGO_LDFLAGS=\"${cgo_ldflags}\""
fi
fi
fi
exit "${status}"

View File

@ -1,886 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build foundationdb
package foundationdb
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"strconv"
"strings"
"sync"
"time"
log "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
metrics "github.com/armon/go-metrics"
"github.com/hashicorp/vault/sdk/physical"
)
const (
// The minimum acceptable API version
minAPIVersion = 520
// The namespace under our top directory containing keys only for list operations
metaKeysNamespace = "_meta-keys"
// The namespace under our top directory containing the actual data
dataNamespace = "_data"
// The namespace under our top directory containing locks
lockNamespace = "_lock"
// Path hierarchy markers
// - an entry in a directory (included in list)
dirEntryMarker = "/\x01"
// - a path component (excluded from list)
dirPathMarker = "/\x02"
)
var (
// 64bit 1 and -1 for FDB atomic Add()
atomicArgOne = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
atomicArgMinusOne = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
// Verify FDBBackend satisfies the correct interfaces
var (
_ physical.Backend = (*FDBBackend)(nil)
_ physical.Transactional = (*FDBBackend)(nil)
_ physical.HABackend = (*FDBBackend)(nil)
_ physical.Lock = (*FDBBackendLock)(nil)
)
// FDBBackend is a physical backend that stores data at a specific
// prefix within FoundationDB.
type FDBBackend struct {
logger log.Logger
haEnabled bool
db fdb.Database
metaKeysSpace subspace.Subspace
dataSpace subspace.Subspace
lockSpace subspace.Subspace
instanceUUID string
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r
}
func decoratePrefix(prefix string) ([]byte, error) {
pathElements := strings.Split(prefix, "/")
decoratedPrefix := strings.Join(pathElements[:len(pathElements)-1], dirPathMarker)
return []byte(decoratedPrefix + dirEntryMarker), nil
}
// Turn a path string into a decorated byte array to be used as (part of) a key
// foo /\x01foo
// foo/ /\x01foo/
// foo/bar /\x02foo/\x01bar
// foo/bar/ /\x02foo/\x01bar/
// foo/bar/baz /\x02foo/\x02bar/\x01baz
// foo/bar/baz/ /\x02foo/\x02bar/\x01baz/
// foo/bar/baz/quux /\x02foo/\x02bar/\x02baz/\x01quux
// This allows for range queries to retrieve the "directory" listing. The
// decoratePrefix() function builds the path leading up to the leaf.
func decoratePath(path string) ([]byte, error) {
if path == "" {
return nil, fmt.Errorf("Invalid empty path")
}
path = "/" + path
isDir := strings.HasSuffix(path, "/")
path = strings.TrimRight(path, "/")
lastSlash := strings.LastIndexByte(path, '/')
decoratedPrefix, err := decoratePrefix(path[:lastSlash+1])
if err != nil {
return nil, err
}
leaf := path[lastSlash+1:]
if isDir {
leaf += "/"
}
return concat(decoratedPrefix, []byte(leaf)...), nil
}
// Turn a decorated byte array back into a path string
func undecoratePath(decoratedPath []byte) string {
ret := strings.ReplaceAll(string(decoratedPath), dirPathMarker, "/")
ret = strings.ReplaceAll(ret, dirEntryMarker, "/")
return strings.TrimLeft(ret, "/")
}
// NewFDBBackend constructs a FoundationDB backend storing keys in the
// top-level directory designated by path
func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the top-level directory name
path, ok := conf["path"]
if !ok {
path = "vault"
}
logger.Debug("config path set", "path", path)
dirPath := strings.Split(strings.Trim(path, "/"), "/")
// TLS support
tlsCertFile, hasCertFile := conf["tls_cert_file"]
tlsKeyFile, hasKeyFile := conf["tls_key_file"]
tlsCAFile, hasCAFile := conf["tls_ca_file"]
tlsEnabled := hasCertFile && hasKeyFile && hasCAFile
if (hasCertFile || hasKeyFile || hasCAFile) && !tlsEnabled {
return nil, fmt.Errorf("FoundationDB TLS requires all 3 of tls_cert_file, tls_key_file, and tls_ca_file")
}
tlsVerifyPeers, ok := conf["tls_verify_peers"]
if !ok && tlsEnabled {
return nil, fmt.Errorf("Required option tls_verify_peers not set in configuration")
}
// FoundationDB API version
fdbApiVersionStr, ok := conf["api_version"]
if !ok {
return nil, fmt.Errorf("FoundationDB API version not specified")
}
fdbApiVersionInt, err := strconv.Atoi(fdbApiVersionStr)
if err != nil {
return nil, fmt.Errorf("failed to parse fdb_api_version parameter: %w", err)
}
// Check requested FDB API version against minimum required API version
if fdbApiVersionInt < minAPIVersion {
return nil, fmt.Errorf("Configured FoundationDB API version lower than minimum required version: %d < %d", fdbApiVersionInt, minAPIVersion)
}
logger.Debug("FoundationDB API version set", "fdb_api_version", fdbApiVersionInt)
// FoundationDB cluster file
fdbClusterFile, ok := conf["cluster_file"]
if !ok {
return nil, fmt.Errorf("FoundationDB cluster file not specified")
}
haEnabled := false
haEnabledStr, ok := conf["ha_enabled"]
if ok {
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
return nil, fmt.Errorf("failed to parse ha_enabled parameter: %w", err)
}
}
instanceUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, fmt.Errorf("could not generate instance UUID: %w", err)
}
logger.Debug("Instance UUID", "uuid", instanceUUID)
if err := fdb.APIVersion(fdbApiVersionInt); err != nil {
return nil, fmt.Errorf("failed to set FDB API version: %w", err)
}
if tlsEnabled {
opts := fdb.Options()
tlsPassword, ok := conf["tls_password"]
if ok {
err := opts.SetTLSPassword(tlsPassword)
if err != nil {
return nil, fmt.Errorf("failed to set TLS password: %w", err)
}
}
err := opts.SetTLSCaPath(tlsCAFile)
if err != nil {
return nil, fmt.Errorf("failed to set TLS CA bundle path: %w", err)
}
err = opts.SetTLSCertPath(tlsCertFile)
if err != nil {
return nil, fmt.Errorf("failed to set TLS certificate path: %w", err)
}
err = opts.SetTLSKeyPath(tlsKeyFile)
if err != nil {
return nil, fmt.Errorf("failed to set TLS key path: %w", err)
}
err = opts.SetTLSVerifyPeers([]byte(tlsVerifyPeers))
if err != nil {
return nil, fmt.Errorf("failed to set TLS peer verification criteria: %w", err)
}
}
db, err := fdb.Open(fdbClusterFile, []byte("DB"))
if err != nil {
return nil, fmt.Errorf("failed to open database with cluster file %q: %w", fdbClusterFile, err)
}
topDir, err := directory.CreateOrOpen(db, dirPath, nil)
if err != nil {
return nil, fmt.Errorf("failed to create/open top-level directory %q: %w", path, err)
}
// Setup the backend
f := &FDBBackend{
logger: logger,
haEnabled: haEnabled,
db: db,
metaKeysSpace: topDir.Sub(metaKeysNamespace),
dataSpace: topDir.Sub(dataNamespace),
lockSpace: topDir.Sub(lockNamespace),
instanceUUID: instanceUUID,
}
return f, nil
}
// Increase refcount on directories in the path, from the bottom -> up
func (f *FDBBackend) incDirsRefcount(tr fdb.Transaction, path string) error {
pathElements := strings.Split(strings.TrimRight(path, "/"), "/")
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
return fmt.Errorf("error incrementing directories refcount: %w", err)
}
// Atomic +1
tr.Add(fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...)), atomicArgOne)
tr.Add(fdb.Key(concat(f.dataSpace.Bytes(), dPath...)), atomicArgOne)
}
return nil
}
type DirsDecTodo struct {
fkey fdb.Key
future fdb.FutureByteSlice
}
// Decrease refcount on directories in the path, from the bottom -> up, and remove empty ones
func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error {
pathElements := strings.Split(strings.TrimRight(path, "/"), "/")
dirsTodo := make([]DirsDecTodo, 0, len(pathElements)*2)
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
return fmt.Errorf("error decrementing directories refcount: %w", err)
}
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...))
dirsTodo = append(dirsTodo, DirsDecTodo{
fkey: metaFKey,
future: tr.Get(metaFKey),
})
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), dPath...))
dirsTodo = append(dirsTodo, DirsDecTodo{
fkey: dataFKey,
future: tr.Get(dataFKey),
})
}
for _, todo := range dirsTodo {
value, err := todo.future.Get()
if err != nil {
return fmt.Errorf("error getting directory refcount while decrementing: %w", err)
}
// The directory entry does not exist; this is not expected
if value == nil {
return fmt.Errorf("non-existent directory while decrementing directory refcount")
}
var count int64
err = binary.Read(bytes.NewReader(value), binary.LittleEndian, &count)
if err != nil {
return fmt.Errorf("error reading directory refcount while decrementing: %w", err)
}
if count > 1 {
// Atomic -1
tr.Add(todo.fkey, atomicArgMinusOne)
} else {
// Directory is empty, remove it
tr.Clear(todo.fkey)
}
}
return nil
}
func (f *FDBBackend) internalPut(tr fdb.Transaction, decoratedPath []byte, path string, value []byte) error {
// Check that the meta key exists before blindly increasing the refcounts
// in the directory hierarchy; this protects against commit_unknown_result
// and other similar cases where a previous transaction may have gone
// through without us knowing for sure.
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...))
metaFuture := tr.Get(metaFKey)
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
tr.Set(dataFKey, value)
value, err := metaFuture.Get()
if err != nil {
return fmt.Errorf("Put error while getting meta key: %w", err)
}
if value == nil {
tr.Set(metaFKey, []byte{})
return f.incDirsRefcount(tr, path)
}
return nil
}
func (f *FDBBackend) internalClear(tr fdb.Transaction, decoratedPath []byte, path string) error {
// Same as above - check existence of the meta key before taking any
// action, to protect against a possible previous commit_unknown_result
// error.
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...))
value, err := tr.Get(metaFKey).Get()
if err != nil {
return fmt.Errorf("Delete error while getting meta key: %w", err)
}
if value != nil {
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
tr.Clear(dataFKey)
tr.Clear(metaFKey)
return f.decDirsRefcount(tr, path)
}
return nil
}
type TxnTodo struct {
decoratedPath []byte
op *physical.TxnEntry
}
// Used to run multiple entries via a transaction
func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
if len(txns) == 0 {
return nil
}
todo := make([]*TxnTodo, len(txns))
for i, op := range txns {
if op.Operation != physical.DeleteOperation && op.Operation != physical.PutOperation {
return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
}
decoratedPath, err := decoratePath(op.Entry.Key)
if err != nil {
return fmt.Errorf("could not build decorated path for transaction item %s: %w", op.Entry.Key, err)
}
todo[i] = &TxnTodo{
decoratedPath: decoratedPath,
op: op,
}
}
_, err := f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
for _, txnTodo := range todo {
var err error
switch txnTodo.op.Operation {
case physical.DeleteOperation:
err = f.internalClear(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key)
case physical.PutOperation:
err = f.internalPut(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key, txnTodo.op.Entry.Value)
}
if err != nil {
return nil, fmt.Errorf("operation %s failed for transaction item %s: %w", txnTodo.op.Operation, txnTodo.op.Entry.Key, err)
}
}
return nil, nil
})
if err != nil {
return fmt.Errorf("transaction failed: %w", err)
}
return nil
}
// Put is used to insert or update an entry
func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"foundationdb", "put"}, time.Now())
decoratedPath, err := decoratePath(entry.Key)
if err != nil {
return fmt.Errorf("could not build decorated path to put item %s: %w", entry.Key, err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
err := f.internalPut(tr, decoratedPath, entry.Key, entry.Value)
if err != nil {
return nil, err
}
return nil, nil
})
if err != nil {
return fmt.Errorf("put failed for item %s: %w", entry.Key, err)
}
return nil
}
// Get is used to fetch an entry
// Return nil for non-existent keys
func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"foundationdb", "get"}, time.Now())
decoratedPath, err := decoratePath(key)
if err != nil {
return nil, fmt.Errorf("could not build decorated path to get item %s: %w", key, err)
}
fkey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
value, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
value, err := rtr.Get(fkey).Get()
if err != nil {
return nil, err
}
return value, nil
})
if err != nil {
return nil, fmt.Errorf("get failed for item %s: %w", key, err)
}
if value.([]byte) == nil {
return nil, nil
}
return &physical.Entry{
Key: key,
Value: value.([]byte),
}, nil
}
// Delete is used to permanently delete an entry
func (f *FDBBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"foundationdb", "delete"}, time.Now())
decoratedPath, err := decoratePath(key)
if err != nil {
return fmt.Errorf("could not build decorated path to delete item %s: %w", key, err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
err := f.internalClear(tr, decoratedPath, key)
if err != nil {
return nil, err
}
return nil, nil
})
if err != nil {
return fmt.Errorf("delete failed for item %s: %w", key, err)
}
return nil
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
// Return empty string slice for non-existent directories
func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"foundationdb", "list"}, time.Now())
prefix = strings.TrimRight("/"+prefix, "/") + "/"
decoratedPrefix, err := decoratePrefix(prefix)
if err != nil {
return nil, fmt.Errorf("could not build decorated path to list prefix %s: %w", prefix, err)
}
// The beginning of the range is /\x02foo/\x02bar/\x01 (the decorated prefix) to list foo/bar/
rangeBegin := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPrefix...))
rangeEnd := fdb.Key(concat(rangeBegin, 0xff))
pathRange := fdb.KeyRange{rangeBegin, rangeEnd}
keyPrefixLen := len(rangeBegin)
content, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
dirList := make([]string, 0, 0)
ri := rtr.GetRange(pathRange, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll}).Iterator()
for ri.Advance() {
kv := ri.MustGet()
// Strip length of the rangeBegin key off the FDB key, yielding
// the part of the key we're interested in, which does not need
// to be undecorated, by construction.
dirList = append(dirList, string(kv.Key[keyPrefixLen:]))
}
return dirList, nil
})
if err != nil {
return nil, fmt.Errorf("could not list prefix %s: %w", prefix, err)
}
return content.([]string), nil
}
type FDBBackendLock struct {
f *FDBBackend
key string
value string
fkey fdb.Key
lock sync.Mutex
}
// LockWith is used for mutual exclusion based on the given key.
func (f *FDBBackend) LockWith(key, value string) (physical.Lock, error) {
return &FDBBackendLock{
f: f,
key: key,
value: value,
fkey: f.lockSpace.Pack(tuple.Tuple{key}),
}, nil
}
func (f *FDBBackend) HAEnabled() bool {
return f.haEnabled
}
const (
// Position of elements in the lock content tuple
lockContentValueIdx = 0
lockContentOwnerIdx = 1
lockContentExpiresIdx = 2
// Number of elements in the lock content tuple
lockTupleContentElts = 3
lockTTL = 15 * time.Second
lockRenewInterval = 5 * time.Second
lockAcquireRetryInterval = 5 * time.Second
)
type FDBBackendLockContent struct {
value string
ownerUUID string
expires time.Time
}
func packLock(content *FDBBackendLockContent) []byte {
t := tuple.Tuple{content.value, content.ownerUUID, content.expires.UnixNano()}
return t.Pack()
}
func unpackLock(tupleContent []byte) (*FDBBackendLockContent, error) {
t, err := tuple.Unpack(tupleContent)
if err != nil {
return nil, err
}
if len(t) != lockTupleContentElts {
return nil, fmt.Errorf("unexpected lock content, len %d != %d", len(t), lockTupleContentElts)
}
return &FDBBackendLockContent{
value: t[lockContentValueIdx].(string),
ownerUUID: t[lockContentOwnerIdx].(string),
expires: time.Unix(0, t[lockContentExpiresIdx].(int64)),
}, nil
}
func (fl *FDBBackendLock) getLockContent(tr fdb.Transaction) (*FDBBackendLockContent, error) {
tupleContent, err := tr.Get(fl.fkey).Get()
if err != nil {
return nil, err
}
// Lock doesn't exist
if tupleContent == nil {
return nil, fmt.Errorf("non-existent lock %s", fl.key)
}
content, err := unpackLock(tupleContent)
if err != nil {
return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err)
}
return content, nil
}
func (fl *FDBBackendLock) setLockContent(tr fdb.Transaction, content *FDBBackendLockContent) {
tr.Set(fl.fkey, packLock(content))
}
func (fl *FDBBackendLock) isOwned(content *FDBBackendLockContent) bool {
return content.ownerUUID == fl.f.instanceUUID
}
func (fl *FDBBackendLock) isExpired(content *FDBBackendLockContent) bool {
return time.Now().After(content.expires)
}
func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan error) (bool, error) {
wonTheRace, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tupleContent, err := tr.Get(fl.fkey).Get()
if err != nil {
return nil, fmt.Errorf("could not read lock: %w", err)
}
// Lock exists
if tupleContent != nil {
content, err := unpackLock(tupleContent)
if err != nil {
return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err)
}
if fl.isOwned(content) {
return nil, fmt.Errorf("lock %s already held", fl.key)
}
// The lock already exists, is not owned by us, and is not expired
if !fl.isExpired(content) {
return false, nil
}
}
// Lock doesn't exist, or exists but is expired, we can go ahead
content := &FDBBackendLockContent{
value: fl.value,
ownerUUID: fl.f.instanceUUID,
expires: time.Now().Add(lockTTL),
}
fl.setLockContent(tr, content)
return true, nil
})
if err != nil {
errors <- err
return false, err
}
if wonTheRace.(bool) {
close(acquired)
}
return wonTheRace.(bool), nil
}
func (fl *FDBBackendLock) acquireLock(abandon chan struct{}, acquired chan struct{}, errors chan error) {
ticker := time.NewTicker(lockAcquireRetryInterval)
defer ticker.Stop()
lockAcquired, err := fl.acquireTryLock(acquired, errors)
if lockAcquired || err != nil {
return
}
for {
select {
case <-abandon:
return
case <-ticker.C:
lockAcquired, err := fl.acquireTryLock(acquired, errors)
if lockAcquired || err != nil {
return
}
}
}
}
func (fl *FDBBackendLock) maintainLock(lost <-chan struct{}) {
ticker := time.NewTicker(lockRenewInterval)
for {
select {
case <-ticker.C:
_, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, err
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, fmt.Errorf("lost lock %s", fl.key)
}
// The lock is expired
if fl.isExpired(content) {
return nil, fmt.Errorf("lock %s expired", fl.key)
}
content.expires = time.Now().Add(lockTTL)
fl.setLockContent(tr, content)
return nil, nil
})
if err != nil {
fl.f.logger.Error("lock maintain", "error", err)
}
// Failure to renew the lock will cause another node to take over
// and the watch to fire. DB errors will also be caught by the watch.
case <-lost:
ticker.Stop()
return
}
}
}
func (fl *FDBBackendLock) watchLock(lost chan struct{}) {
for {
watch, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, err
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, fmt.Errorf("lost lock %s", fl.key)
}
// The lock is expired
if fl.isExpired(content) {
return nil, fmt.Errorf("lock %s expired", fl.key)
}
// Set FDB watch on the lock
future := tr.Watch(fl.fkey)
return future, nil
})
if err != nil {
fl.f.logger.Error("lock watch", "error", err)
break
}
// Wait for the watch to fire, and go again
watch.(fdb.FutureNil).Get()
}
close(lost)
}
func (fl *FDBBackendLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
fl.lock.Lock()
defer fl.lock.Unlock()
var (
// Inform the lock owner that we lost the lock
lost = make(chan struct{})
// Tell our watch and renewal routines the lock has been abandoned
abandon = make(chan struct{})
// Feedback from lock acquisition routine
acquired = make(chan struct{})
errors = make(chan error)
)
// try to acquire the lock asynchronously
go fl.acquireLock(abandon, acquired, errors)
select {
case <-acquired:
// Maintain the lock after initial acquisition
go fl.maintainLock(lost)
// Watch the lock for changes
go fl.watchLock(lost)
case err := <-errors:
// Initial acquisition failed
close(abandon)
return nil, err
case <-stopCh:
// Prospective lock owner cancelling lock acquisition
close(abandon)
return nil, nil
}
return lost, nil
}
func (fl *FDBBackendLock) Unlock() error {
fl.lock.Lock()
defer fl.lock.Unlock()
_, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, fmt.Errorf("could not get lock content: %w", err)
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, nil
}
tr.Clear(fl.fkey)
return nil, nil
})
if err != nil {
return fmt.Errorf("unlock failed: %w", err)
}
return nil
}
func (fl *FDBBackendLock) Value() (bool, string, error) {
tupleContent, err := fl.f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
tupleContent, err := rtr.Get(fl.fkey).Get()
if err != nil {
return nil, fmt.Errorf("could not read lock: %w", err)
}
return tupleContent, nil
})
if err != nil {
return false, "", fmt.Errorf("get lock value failed for lock %s: %w", fl.key, err)
}
if tupleContent.([]byte) == nil {
return false, "", nil
}
content, err := unpackLock(tupleContent.([]byte))
if err != nil {
return false, "", fmt.Errorf("get lock value failed to unpack lock %s: %w", fl.key, err)
}
return true, content.value, nil
}
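For orientation, the acquire/maintain/watch goroutines above implement Vault's physical.Lock contract. Below is a minimal caller-side sketch, not part of this diff, showing how such a lock is typically driven; the package name, backend value, key, and value strings are placeholders.
package foundationdb_example // hypothetical package, for illustration only
import (
    "fmt"
    "github.com/hashicorp/vault/sdk/physical"
)
// holdLeadership blocks in Lock() until the lock is acquired or stopCh fires,
// then treats the returned channel as a "leadership lost" signal.
func holdLeadership(b physical.HABackend, stopCh <-chan struct{}) error {
    lock, err := b.LockWith("core/leader", "node-a") // placeholder key/value
    if err != nil {
        return fmt.Errorf("lock with: %w", err)
    }
    lostCh, err := lock.Lock(stopCh)
    if err != nil {
        return err
    }
    if lostCh == nil {
        // Acquisition was abandoned because stopCh fired.
        return nil
    }
    <-lostCh             // leadership lost: lock expired or was taken over
    return lock.Unlock() // best effort; a no-op if we no longer own the lock
}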

View File

@ -1,199 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build foundationdb
package foundationdb
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
dockertest "gopkg.in/ory-am/dockertest.v3"
)
func connectToFoundationDB(clusterFile string) (*fdb.Database, error) {
if err := fdb.APIVersion(520); err != nil {
return nil, fmt.Errorf("failed to set FDB API version: %w", err)
}
db, err := fdb.Open(clusterFile, []byte("DB"))
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
return &db, nil
}
func cleanupTopDir(clusterFile, topDir string) error {
db, err := connectToFoundationDB(clusterFile)
if err != nil {
return fmt.Errorf("could not connect to FDB for cleanup: %w", err)
}
if _, err := directory.Root().Remove(db, []string{topDir}); err != nil {
return fmt.Errorf("could not remove directory: %w", err)
}
return nil
}
func TestFoundationDBPathDecoration(t *testing.T) {
cases := map[string][]byte{
"foo": []byte("/\x01foo"),
"foo/": []byte("/\x01foo/"),
"foo/bar": []byte("/\x02foo/\x01bar"),
"foo/bar/": []byte("/\x02foo/\x01bar/"),
"foo/bar/baz": []byte("/\x02foo/\x02bar/\x01baz"),
"foo/bar/baz/": []byte("/\x02foo/\x02bar/\x01baz/"),
"foo/bar/baz/quux": []byte("/\x02foo/\x02bar/\x02baz/\x01quux"),
}
for path, expected := range cases {
decorated, err := decoratePath(path)
if err != nil {
t.Fatalf("path %s error: %s", path, err)
}
if !bytes.Equal(expected, decorated) {
t.Fatalf("path %s expected %v got %v", path, expected, decorated)
}
undecorated := undecoratePath(decorated)
if undecorated != path {
t.Fatalf("expected %s got %s", path, undecorated)
}
}
}
func TestFoundationDBBackend(t *testing.T) {
if testing.Short() {
t.Skipf("skipping in short mode")
}
testUUID, err := uuid.GenerateUUID()
if err != nil {
t.Fatalf("foundationdb: could not generate UUID to top-level directory: %s", err)
}
topDir := fmt.Sprintf("vault-test-%s", testUUID)
clusterFile := os.Getenv("FOUNDATIONDB_CLUSTER_FILE")
if clusterFile == "" {
var cleanup func()
cleanup, clusterFile = prepareFoundationDBTestDirectory(t, topDir)
defer cleanup()
}
// Remove the test data once done
defer func() {
if err := cleanupTopDir(clusterFile, topDir); err != nil {
t.Fatalf("foundationdb: could not cleanup test data at end of test: %s", err)
}
}()
// Remove any leftover test data before starting
if err := cleanupTopDir(clusterFile, topDir); err != nil {
t.Fatalf("foundationdb: could not cleanup test data before starting test: %s", err)
}
// Run vault tests
logger := logging.NewVaultLogger(log.Debug)
config := map[string]string{
"path": topDir,
"api_version": "520",
"cluster_file": clusterFile,
}
b, err := NewFDBBackend(config, logger)
if err != nil {
t.Fatalf("foundationdb: failed to create new backend: %s", err)
}
b2, err := NewFDBBackend(config, logger)
if err != nil {
t.Fatalf("foundationdb: failed to create new backend: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
physical.ExerciseTransactionalBackend(t, b)
physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
}
func prepareFoundationDBTestDirectory(t *testing.T, topDir string) (func(), string) {
pool, err := dockertest.NewPool("")
if err != nil {
t.Fatalf("foundationdb: failed to connect to docker: %s", err)
}
resource, err := pool.Run("foundationdb", "5.1.7", nil)
if err != nil {
t.Fatalf("foundationdb: could not start container: %s", err)
}
tmpFile, err := ioutil.TempFile("", topDir)
if err != nil {
t.Fatalf("foundationdb: could not create temporary file for cluster file: %s", err)
}
clusterFile := tmpFile.Name()
cleanup := func() {
var err error
for i := 0; i < 10; i++ {
err = pool.Purge(resource)
if err == nil {
break
}
time.Sleep(1 * time.Second)
}
os.Remove(clusterFile)
if err != nil {
t.Fatalf("Failed to cleanup local container: %s", err)
}
}
setup := func() error {
connectString := fmt.Sprintf("foundationdb:foundationdb@127.0.0.1:%s", resource.GetPort("4500/tcp"))
if err := tmpFile.Truncate(0); err != nil {
return fmt.Errorf("could not truncate cluster file: %w", err)
}
_, err := tmpFile.WriteAt([]byte(connectString), 0)
if err != nil {
return fmt.Errorf("could not write cluster file: %w", err)
}
if _, err := connectToFoundationDB(clusterFile); err != nil {
return fmt.Errorf("could not connect to FoundationDB after starting container: %s", err)
}
return nil
}
if err := pool.Retry(setup); err != nil {
cleanup()
t.Fatalf("foundationdb: could not setup container: %s", err)
}
tmpFile.Close()
return cleanup, clusterFile
}

View File

@ -1,18 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build !foundationdb
package foundationdb
import (
"fmt"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
)
func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
return nil, fmt.Errorf("FoundationDB backend not available in this Vault build")
}

View File

@ -1,384 +0,0 @@
// Copyright © 2019, Oracle and/or its affiliates.
package oci
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/oracle/oci-go-sdk/common"
"github.com/oracle/oci-go-sdk/common/auth"
"github.com/oracle/oci-go-sdk/objectstorage"
"golang.org/x/net/context"
)
// Verify Backend satisfies the correct interfaces
var _ physical.Backend = (*Backend)(nil)
const (
// Limits maximum outstanding requests
MaxNumberOfPermits = 256
)
var (
metricDelete = []string{"oci", "delete"}
metricGet = []string{"oci", "get"}
metricList = []string{"oci", "list"}
metricPut = []string{"oci", "put"}
metricDeleteFull = []string{"oci", "deleteFull"}
metricGetFull = []string{"oci", "getFull"}
metricListFull = []string{"oci", "listFull"}
metricPutFull = []string{"oci", "putFull"}
metricDeleteHa = []string{"oci", "deleteHa"}
metricGetHa = []string{"oci", "getHa"}
metricPutHa = []string{"oci", "putHa"}
metricDeleteAcquirePool = []string{"oci", "deleteAcquirePool"}
metricGetAcquirePool = []string{"oci", "getAcquirePool"}
metricListAcquirePool = []string{"oci", "listAcquirePool"}
metricPutAcquirePool = []string{"oci", "putAcquirePool"}
metricDeleteFailed = []string{"oci", "deleteFailed"}
metricGetFailed = []string{"oci", "getFailed"}
metricListFailed = []string{"oci", "listFailed"}
metricPutFailed = []string{"oci", "putFailed"}
metricHaWatchLockRetriable = []string{"oci", "haWatchLockRetriable"}
metricPermitsUsed = []string{"oci", "permitsUsed"}
metric5xx = []string{"oci", "5xx"}
)
type Backend struct {
client *objectstorage.ObjectStorageClient
bucketName string
logger log.Logger
permitPool *physical.PermitPool
namespaceName string
haEnabled bool
lockBucketName string
}
func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
bucketName := conf["bucket_name"]
if bucketName == "" {
return nil, errors.New("missing bucket name")
}
namespaceName := conf["namespace_name"]
if bucketName == "" {
return nil, errors.New("missing namespace name")
}
lockBucketName := ""
haEnabled := false
var err error
haEnabledStr := conf["ha_enabled"]
if haEnabledStr != "" {
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
}
if haEnabled {
lockBucketName = conf["lock_bucket_name"]
if lockBucketName == "" {
return nil, errors.New("missing lock bucket name")
}
}
}
authTypeAPIKeyBool := false
authTypeAPIKeyStr := conf["auth_type_api_key"]
if authTypeAPIKeyStr != "" {
authTypeAPIKeyBool, err = strconv.ParseBool(authTypeAPIKeyStr)
if err != nil {
return nil, fmt.Errorf("failed parsing auth_type_api_key parameter: %w", err)
}
}
var cp common.ConfigurationProvider
if authTypeAPIKeyBool {
cp = common.DefaultConfigProvider()
} else {
cp, err = auth.InstancePrincipalConfigurationProvider()
if err != nil {
return nil, fmt.Errorf("failed creating InstancePrincipalConfigurationProvider: %w", err)
}
}
objectStorageClient, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(cp)
if err != nil {
return nil, fmt.Errorf("failed creating NewObjectStorageClientWithConfigurationProvider: %w", err)
}
region := conf["region"]
if region != "" {
objectStorageClient.SetRegion(region)
}
logger.Debug("configuration",
"bucket_name", bucketName,
"region", region,
"namespace_name", namespaceName,
"ha_enabled", haEnabled,
"lock_bucket_name", lockBucketName,
"auth_type_api_key", authTypeAPIKeyBool,
)
return &Backend{
client: &objectStorageClient,
bucketName: bucketName,
logger: logger,
permitPool: physical.NewPermitPool(MaxNumberOfPermits),
namespaceName: namespaceName,
haEnabled: haEnabled,
lockBucketName: lockBucketName,
}, nil
}
func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error {
o.logger.Debug("PUT started")
defer metrics.MeasureSince(metricPutFull, time.Now())
startAcquirePool := time.Now()
metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
o.permitPool.Acquire()
defer o.permitPool.Release()
metrics.MeasureSince(metricPutAcquirePool, startAcquirePool)
defer metrics.MeasureSince(metricPut, time.Now())
size := int64(len(entry.Value))
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
metrics.IncrCounter(metricPutFailed, 1)
o.logger.Error("failed to generate UUID")
return fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("PUT", "opc-client-request-id", opcClientRequestId)
request := objectstorage.PutObjectRequest{
NamespaceName: &o.namespaceName,
BucketName: &o.bucketName,
ObjectName: &entry.Key,
ContentLength: &size,
PutObjectBody: ioutil.NopCloser(bytes.NewReader(entry.Value)),
OpcMeta: nil,
OpcClientRequestId: &opcClientRequestId,
}
resp, err := o.client.PutObject(ctx, request)
if resp.RawResponse != nil && resp.RawResponse.Body != nil {
defer resp.RawResponse.Body.Close()
}
if err != nil {
metrics.IncrCounter(metricPutFailed, 1)
return fmt.Errorf("failed to put data: %w", err)
}
o.logRequest("PUT", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
o.logger.Debug("PUT completed")
return nil
}
func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) {
o.logger.Debug("GET started")
defer metrics.MeasureSince(metricGetFull, time.Now())
metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
startAcquirePool := time.Now()
o.permitPool.Acquire()
defer o.permitPool.Release()
metrics.MeasureSince(metricGetAcquirePool, startAcquirePool)
defer metrics.MeasureSince(metricGet, time.Now())
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("failed to generate UUID")
return nil, fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("GET", "opc-client-request-id", opcClientRequestId)
request := objectstorage.GetObjectRequest{
NamespaceName: &o.namespaceName,
BucketName: &o.bucketName,
ObjectName: &key,
OpcClientRequestId: &opcClientRequestId,
}
resp, err := o.client.GetObject(ctx, request)
if resp.RawResponse != nil && resp.RawResponse.Body != nil {
defer resp.RawResponse.Body.Close()
}
o.logRequest("GET", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
if err != nil {
if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound {
return nil, nil
}
metrics.IncrCounter(metricGetFailed, 1)
return nil, fmt.Errorf("failed to read Value: %w", err)
}
body, err := ioutil.ReadAll(resp.Content)
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
return nil, fmt.Errorf("failed to decode Value into bytes: %w", err)
}
o.logger.Debug("GET completed")
return &physical.Entry{
Key: key,
Value: body,
}, nil
}
func (o *Backend) Delete(ctx context.Context, key string) error {
o.logger.Debug("DELETE started")
defer metrics.MeasureSince(metricDeleteFull, time.Now())
metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
startAcquirePool := time.Now()
o.permitPool.Acquire()
defer o.permitPool.Release()
metrics.MeasureSince(metricDeleteAcquirePool, startAcquirePool)
defer metrics.MeasureSince(metricDelete, time.Now())
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("Delete: error generating UUID")
return fmt.Errorf("failed to generate UUID: %w", err)
}
o.logger.Debug("Delete", "opc-client-request-id", opcClientRequestId)
request := objectstorage.DeleteObjectRequest{
NamespaceName: &o.namespaceName,
BucketName: &o.bucketName,
ObjectName: &key,
OpcClientRequestId: &opcClientRequestId,
}
resp, err := o.client.DeleteObject(ctx, request)
if resp.RawResponse != nil && resp.RawResponse.Body != nil {
defer resp.RawResponse.Body.Close()
}
o.logRequest("DELETE", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
if err != nil {
if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound {
return nil
}
metrics.IncrCounter(metricDeleteFailed, 1)
return fmt.Errorf("failed to delete Key: %w", err)
}
o.logger.Debug("DELETE completed")
return nil
}
func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) {
o.logger.Debug("LIST started")
defer metrics.MeasureSince(metricListFull, time.Now())
metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
startAcquirePool := time.Now()
o.permitPool.Acquire()
defer o.permitPool.Release()
metrics.MeasureSince(metricListAcquirePool, startAcquirePool)
defer metrics.MeasureSince(metricList, time.Now())
var keys []string
delimiter := "/"
var start *string
for {
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
o.logger.Error("List: error generating UUID")
return nil, fmt.Errorf("failed to generate UUID %w", err)
}
o.logger.Debug("LIST", "opc-client-request-id", opcClientRequestId)
request := objectstorage.ListObjectsRequest{
NamespaceName: &o.namespaceName,
BucketName: &o.bucketName,
Prefix: &prefix,
Delimiter: &delimiter,
Start: start,
OpcClientRequestId: &opcClientRequestId,
}
resp, err := o.client.ListObjects(ctx, request)
o.logRequest("LIST", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
if err != nil {
metrics.IncrCounter(metricListFailed, 1)
return nil, fmt.Errorf("failed to list using prefix: %w", err)
}
for _, commonPrefix := range resp.Prefixes {
commonPrefix := strings.TrimPrefix(commonPrefix, prefix)
keys = append(keys, commonPrefix)
}
for _, object := range resp.Objects {
key := strings.TrimPrefix(*object.Name, prefix)
keys = append(keys, key)
}
// Duplicate keys are not expected
keys = strutil.RemoveDuplicates(keys, false)
if resp.NextStartWith == nil {
resp.RawResponse.Body.Close()
break
}
start = resp.NextStartWith
resp.RawResponse.Body.Close()
}
sort.Strings(keys)
o.logger.Debug("LIST completed")
return keys, nil
}
func (o *Backend) logRequest(operation string, response *http.Response, clientOpcRequestIdPtr *string, opcRequestIdPtr *string, err error) {
statusCode := 0
clientOpcRequestId := " "
opcRequestId := " "
if response != nil {
statusCode = response.StatusCode
if statusCode/100 == 5 {
metrics.IncrCounter(metric5xx, 1)
}
}
if clientOpcRequestIdPtr != nil {
clientOpcRequestId = *clientOpcRequestIdPtr
}
if opcRequestIdPtr != nil {
opcRequestId = *opcRequestIdPtr
}
statusCodeStr := "No response"
if statusCode != 0 {
statusCodeStr = strconv.Itoa(statusCode)
}
logLine := fmt.Sprintf("%s client:opc-request-id %s opc-request-id: %s status-code: %s",
operation, clientOpcRequestId, opcRequestId, statusCodeStr)
if err != nil && statusCode/100 == 5 {
o.logger.Error(logLine, "error", err)
}
}
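For reference, here is a hedged, standalone sketch of wiring this backend up and exercising it through the physical.Backend interface. The import path for the oci package and every configuration value are assumptions for illustration; auth_type_api_key "true" selects the local OCI configuration file rather than instance principals.
package main
import (
    "context"
    "log"
    hclog "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/vault/physical/oci" // import path assumed for this sketch
    "github.com/hashicorp/vault/sdk/helper/logging"
    "github.com/hashicorp/vault/sdk/physical"
)
func main() {
    // All configuration values are placeholders.
    b, err := oci.NewBackend(map[string]string{
        "bucket_name":       "vault-data",
        "namespace_name":    "my-namespace",
        "region":            "us-ashburn-1",
        "auth_type_api_key": "true", // use the local OCI config file instead of instance principals
        "ha_enabled":        "true",
        "lock_bucket_name":  "vault-ha-lock",
    }, logging.NewVaultLogger(hclog.Trace))
    if err != nil {
        log.Fatal(err)
    }
    ctx := context.Background()
    // Each Key maps to an object of the same name; List uses "/" as the delimiter.
    if err := b.Put(ctx, &physical.Entry{Key: "foo/bar", Value: []byte("baz")}); err != nil {
        log.Fatal(err)
    }
    entry, err := b.Get(ctx, "foo/bar")
    if err != nil || entry == nil {
        log.Fatal("get failed")
    }
    keys, err := b.List(ctx, "foo/")
    if err != nil {
        log.Fatal(err)
    }
    log.Println(keys)
    if err := b.Delete(ctx, "foo/bar"); err != nil {
        log.Fatal(err)
    }
}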

View File

@ -1,551 +0,0 @@
// Copyright © 2019, Oracle and/or its affiliates.
package oci
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
"sync/atomic"
"time"
"github.com/armon/go-metrics"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/oracle/oci-go-sdk/objectstorage"
)
// The lock implementation below prioritizes ensuring that there are never two primaries at any given
// point in time over high availability of the primary instance.
// Verify Backend satisfies the correct interfaces
var (
_ physical.HABackend = (*Backend)(nil)
_ physical.Lock = (*Lock)(nil)
)
const (
// LockRenewInterval is the time to wait between lock renewals.
LockRenewInterval = 3 * time.Second
// LockRetryInterval is the amount of time to wait if the lock fails before trying again.
LockRetryInterval = 5 * time.Second
// LockWatchRetryInterval is the amount of time to wait if a watch fails before trying again.
LockWatchRetryInterval = 2 * time.Second
// LockTTL is the default lock TTL.
LockTTL = 15 * time.Second
// LockWatchRetryMax is the number of times to retry a failed watch before signaling that leadership is lost.
LockWatchRetryMax = 4
// LockCacheMinAcceptableAge is the minimum age the cached lock record must reach before it is
// considered safe for a secondary instance to attempt to acquire the lock.
LockCacheMinAcceptableAge = 45 * time.Second
// LockWriteRetriesOnFailures is the number of retries that are made on write 5xx failures.
LockWriteRetriesOnFailures = 4
ObjectStorageCallsReadTimeout = 3 * time.Second
ObjectStorageCallsWriteTimeout = 3 * time.Second
)
type LockCache struct {
// ETag values are unique identifiers generated by the OCI service that change every time the object is modified.
etag string
lastUpdate time.Time
lockRecord *LockRecord
}
type Lock struct {
// backend is the underlying physical backend.
backend *Backend
// key is the name of the lock Key; value is the Value written for it.
key, value string
// held is a boolean indicating if the lock is currently held.
held bool
// Identity is the internal Identity of this Key (unique to this server instance).
identity string
internalLock sync.Mutex
// stopCh is the channel that stops all operations. It may be closed in the
// event of a leader loss or graceful shutdown. stopped is a boolean
// indicating if we are stopped - it exists to prevent double closing the
// channel. stopLock is a mutex guarding stopCh and stopped.
stopCh chan struct{}
stopped bool
stopLock sync.Mutex
lockRecordCache atomic.Value
// Allow modifying the Lock durations for ease of unit testing.
renewInterval time.Duration
retryInterval time.Duration
ttl time.Duration
watchRetryInterval time.Duration
watchRetryMax int
}
type LockRecord struct {
Key string
Value string
Identity string
}
var (
metricLockUnlock = []string{"oci", "lock", "unlock"}
metricLockLock = []string{"oci", "lock", "lock"}
metricLockValue = []string{"oci", "lock", "Value"}
metricLeaderValue = []string{"oci", "leader", "Value"}
)
func (b *Backend) HAEnabled() bool {
return b.haEnabled
}
// LockWith acquires a mutual exclusion based on the given Key.
func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
identity, err := uuid.GenerateUUID()
if err != nil {
return nil, fmt.Errorf("Lock with: %w", err)
}
return &Lock{
backend: b,
key: key,
value: value,
identity: identity,
stopped: true,
renewInterval: LockRenewInterval,
retryInterval: LockRetryInterval,
ttl: LockTTL,
watchRetryInterval: LockWatchRetryInterval,
watchRetryMax: LockWatchRetryMax,
}, nil
}
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
l.backend.logger.Debug("Lock() called")
defer metrics.MeasureSince(metricLockLock, time.Now().UTC())
l.internalLock.Lock()
defer l.internalLock.Unlock()
if l.held {
return nil, errors.New("lock already held")
}
// Attempt to lock - this function blocks until a lock is acquired or an error
// occurs.
acquired, err := l.attemptLock(stopCh)
if err != nil {
return nil, fmt.Errorf("lock: %w", err)
}
if !acquired {
return nil, nil
}
// We have the lock now
l.held = true
// Build the locks
l.stopLock.Lock()
l.stopCh = make(chan struct{})
l.stopped = false
l.stopLock.Unlock()
// Periodically renew and watch the lock
go l.renewLock()
go l.watchLock()
return l.stopCh, nil
}
// attemptLock attempts to acquire a lock. If the given channel is closed, the
// acquisition attempt stops. This function returns when a lock is acquired or
// an error occurs.
func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
l.backend.logger.Debug("AttemptLock() called")
ticker := time.NewTicker(l.retryInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
acquired, err := l.writeLock()
if err != nil {
return false, fmt.Errorf("attempt lock: %w", err)
}
if !acquired {
continue
}
return true, nil
case <-stopCh:
return false, nil
}
}
}
// renewLock renews the given lock until the channel is closed.
func (l *Lock) renewLock() {
l.backend.logger.Debug("RenewLock() called")
ticker := time.NewTicker(l.renewInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
l.writeLock()
case <-l.stopCh:
return
}
}
}
func loadLockRecordCache(l *Lock) *LockCache {
lockRecordCache := l.lockRecordCache.Load()
if lockRecordCache == nil {
return nil
}
return lockRecordCache.(*LockCache)
}
// watchLock checks whether the lock has changed in the table and closes the
// leader channel accordingly. If an error occurs during the check, watchLock
// will retry the operation and then close the leader channel if it can't
// succeed after retries.
func (l *Lock) watchLock() {
l.backend.logger.Debug("WatchLock() called")
retries := 0
ticker := time.NewTicker(l.watchRetryInterval)
defer ticker.Stop()
OUTER:
for {
// Check if the channel is already closed
select {
case <-l.stopCh:
l.backend.logger.Debug("WatchLock():Stop lock signaled/closed.")
break OUTER
default:
}
// Check if we've exceeded retries
if retries >= l.watchRetryMax-1 {
l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Giving up the lease after max retries")
break OUTER
}
// Wait for the timer
select {
case <-ticker.C:
case <-l.stopCh:
break OUTER
}
lockRecordCache := loadLockRecordCache(l)
if (lockRecordCache == nil) ||
(lockRecordCache.lockRecord == nil) ||
(lockRecordCache.lockRecord.Identity != l.identity) ||
(time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl) {
l.backend.logger.Debug("WatchLock: Lock record cache is nil, stale or does not belong to self.")
break OUTER
}
lockRecord, _, err := l.get(context.Background())
if err != nil {
retries++
l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Retrying..")
metrics.SetGauge(metricHaWatchLockRetriable, 1)
continue
}
if (lockRecord == nil) || (lockRecord.Identity != l.identity) {
l.backend.logger.Debug("WatchLock: Lock record cache is nil or does not belong to self.")
break OUTER
}
// reset retries counter on success
retries = 0
l.backend.logger.Debug("WatchLock() successful")
metrics.SetGauge(metricHaWatchLockRetriable, 0)
}
l.stopLock.Lock()
defer l.stopLock.Unlock()
if !l.stopped {
l.stopped = true
l.backend.logger.Debug("Closing the stop channel to give up leadership.")
close(l.stopCh)
}
}
func (l *Lock) Unlock() error {
l.backend.logger.Debug("Unlock() called")
defer metrics.MeasureSince(metricLockUnlock, time.Now().UTC())
l.internalLock.Lock()
defer l.internalLock.Unlock()
if !l.held {
return nil
}
// Stop any existing locking or renewal attempts
l.stopLock.Lock()
if !l.stopped {
l.stopped = true
close(l.stopCh)
}
l.stopLock.Unlock()
// We are no longer holding the lock
l.held = false
// Get current lock record
currentLockRecord, etag, err := l.get(context.Background())
if err != nil {
return fmt.Errorf("error reading lock record: %w", err)
}
if currentLockRecord != nil && currentLockRecord.Identity == l.identity {
defer metrics.MeasureSince(metricDeleteHa, time.Now())
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Debug("Unlock: error generating UUID")
return fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("Unlock", "opc-client-request-id", opcClientRequestId)
request := objectstorage.DeleteObjectRequest{
NamespaceName: &l.backend.namespaceName,
BucketName: &l.backend.lockBucketName,
ObjectName: &l.key,
IfMatch: &etag,
OpcClientRequestId: &opcClientRequestId,
}
response, err := l.backend.client.DeleteObject(context.Background(), request)
l.backend.logRequest("deleteHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err)
if err != nil {
metrics.IncrCounter(metricDeleteFailed, 1)
return fmt.Errorf("write lock: %w", err)
}
}
return nil
}
func (l *Lock) Value() (bool, string, error) {
l.backend.logger.Debug("Value() called")
defer metrics.MeasureSince(metricLockValue, time.Now().UTC())
lockRecord, _, err := l.get(context.Background())
if err != nil {
return false, "", err
}
if lockRecord == nil {
return false, "", err
}
return true, lockRecord.Value, nil
}
// get retrieves the Value for the lock.
func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
l.backend.logger.Debug("Called getLockRecord()")
// Read lock Key
defer metrics.MeasureSince(metricGetHa, time.Now())
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Error("getHa: error generating UUID")
return nil, "", fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("getHa", "opc-client-request-id", opcClientRequestId)
request := objectstorage.GetObjectRequest{
NamespaceName: &l.backend.namespaceName,
BucketName: &l.backend.lockBucketName,
ObjectName: &l.key,
OpcClientRequestId: &opcClientRequestId,
}
ctx, cancel := context.WithTimeout(ctx, ObjectStorageCallsReadTimeout)
defer cancel()
response, err := l.backend.client.GetObject(ctx, request)
l.backend.logRequest("getHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err)
if err != nil {
if response.RawResponse != nil && response.RawResponse.StatusCode == http.StatusNotFound {
return nil, "", nil
}
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error calling GET", "err", err)
return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
}
defer response.RawResponse.Body.Close()
body, err := ioutil.ReadAll(response.Content)
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error reading content", "err", err)
return nil, "", fmt.Errorf("failed to decode Value into bytes: %w", err)
}
var lockRecord LockRecord
err = json.Unmarshal(body, &lockRecord)
if err != nil {
metrics.IncrCounter(metricGetFailed, 1)
l.backend.logger.Error("Error un-marshalling content", "err", err)
return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
}
return &lockRecord, *response.ETag, nil
}
func (l *Lock) writeLock() (bool, error) {
l.backend.logger.Debug("WriteLock() called")
// Create a cancellable context for the read and the (possible) update below.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// The transaction will be retried, and it could sit in a queue behind, say,
// the delete operation. To stop the transaction, we close the context when
// the associated stopCh is closed.
go func() {
select {
case <-l.stopCh:
cancel()
case <-ctx.Done():
}
}()
lockRecordCache := loadLockRecordCache(l)
if (lockRecordCache == nil) || lockRecordCache.lockRecord == nil ||
lockRecordCache.lockRecord.Identity != l.identity ||
time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl {
// case secondary
currentLockRecord, currentEtag, err := l.get(ctx)
if err != nil {
return false, fmt.Errorf("error reading lock record: %w", err)
}
if (lockRecordCache == nil) || lockRecordCache.etag != currentEtag {
// update cached lock record
l.lockRecordCache.Store(&LockCache{
etag: currentEtag,
lastUpdate: time.Now().UTC(),
lockRecord: currentLockRecord,
})
lockRecordCache = loadLockRecordCache(l)
}
// Current lock record being null implies that there is no leader. In this case we want to try acquiring lock.
if currentLockRecord != nil && time.Now().Sub(lockRecordCache.lastUpdate) < LockCacheMinAcceptableAge {
return false, nil
}
// The cached record is old enough (or there is no current leader); try acquiring the lock as a secondary.
}
newLockRecord := &LockRecord{
Key: l.key,
Value: l.value,
Identity: l.identity,
}
newLockRecordJson, err := json.Marshal(newLockRecord)
if err != nil {
return false, fmt.Errorf("error reading lock record: %w", err)
}
defer metrics.MeasureSince(metricPutHa, time.Now())
opcClientRequestId, err := uuid.GenerateUUID()
if err != nil {
l.backend.logger.Error("putHa: error generating UUID")
return false, fmt.Errorf("failed to generate UUID: %w", err)
}
l.backend.logger.Debug("putHa", "opc-client-request-id", opcClientRequestId)
size := int64(len(newLockRecordJson))
putRequest := objectstorage.PutObjectRequest{
NamespaceName: &l.backend.namespaceName,
BucketName: &l.backend.lockBucketName,
ObjectName: &l.key,
ContentLength: &size,
PutObjectBody: ioutil.NopCloser(bytes.NewReader(newLockRecordJson)),
OpcMeta: nil,
OpcClientRequestId: &opcClientRequestId,
}
if lockRecordCache.etag == "" {
noneMatch := "*"
putRequest.IfNoneMatch = &noneMatch
} else {
putRequest.IfMatch = &lockRecordCache.etag
}
newtEtag := ""
for i := 1; i <= LockWriteRetriesOnFailures; i++ {
writeCtx, writeCancel := context.WithTimeout(ctx, ObjectStorageCallsWriteTimeout)
defer writeCancel()
putObjectResponse, putObjectError := l.backend.client.PutObject(writeCtx, putRequest)
l.backend.logRequest("putHA", putObjectResponse.RawResponse, putObjectResponse.OpcClientRequestId, putObjectResponse.OpcRequestId, putObjectError)
if putObjectError == nil {
newEtag = *putObjectResponse.ETag
putObjectResponse.RawResponse.Body.Close()
break
}
err = putObjectError
if putObjectResponse.RawResponse == nil {
metrics.IncrCounter(metricPutFailed, 1)
l.backend.logger.Error("PUT", "err", err)
break
}
putObjectResponse.RawResponse.Body.Close()
// Retry if the return code is 5xx
if (putObjectResponse.RawResponse.StatusCode / 100) == 5 {
metrics.IncrCounter(metricPutFailed, 1)
l.backend.logger.Warn("PUT. Retrying..", "err", err)
time.Sleep(time.Duration(100*i) * time.Millisecond)
} else {
l.backend.logger.Error("PUT", "err", err)
break
}
}
if err != nil {
return false, fmt.Errorf("write lock: %w", err)
}
l.backend.logger.Debug("Lock written", string(newLockRecordJson))
l.lockRecordCache.Store(&LockCache{
etag:       newEtag,
lastUpdate: time.Now().UTC(),
lockRecord: newLockRecord,
})
metrics.SetGauge(metricLeaderValue, 1)
return true, nil
}
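The safety of the leader election above rests on OCI Object Storage's conditional writes: the initial write of the lock object is guarded with If-None-Match: "*" (create only if absent) and every renewal with If-Match (compare-and-swap against the last ETag read), so two nodes cannot both succeed. A reduced sketch of just that guard, with illustrative names:
package oci_example // hypothetical package, for illustration only
import "github.com/oracle/oci-go-sdk/objectstorage"
// guardLockWrite is illustrative only; req is the PutObjectRequest being
// prepared for the lock object and cachedEtag is the ETag from the most
// recent read ("" when the object was absent).
func guardLockWrite(req *objectstorage.PutObjectRequest, cachedEtag string) {
    if cachedEtag == "" {
        star := "*"
        req.IfNoneMatch = &star // create the lock only if it does not already exist
    } else {
        etag := cachedEtag
        req.IfMatch = &etag // overwrite only the exact version we last observed
    }
}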

View File

@ -1,39 +0,0 @@
// Copyright © 2019, Oracle and/or its affiliates.
package oci
import (
"os"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/physical"
"github.com/oracle/oci-go-sdk/common"
"github.com/oracle/oci-go-sdk/objectstorage"
)
func TestOCIHABackend(t *testing.T) {
// Skip tests if we are not running acceptance tests
if os.Getenv("VAULT_ACC") == "" {
t.SkipNow()
}
if !hasOCICredentials() {
t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.")
}
bucketName, _ := uuid.GenerateUUID()
configProvider := common.DefaultConfigProvider()
objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider)
namespaceName := getNamespaceName(objectStorageClient, t)
createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t)
defer deleteBucket(namespaceName, bucketName, objectStorageClient, t)
backend := createBackend(bucketName, namespaceName, "true", bucketName, t)
ha, ok := backend.(physical.HABackend)
if !ok {
t.Fatalf("does not implement")
}
physical.ExerciseHABackend(t, ha, ha)
}

View File

@ -1,105 +0,0 @@
// Copyright © 2019, Oracle and/or its affiliates.
package oci
import (
"os"
"testing"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
"github.com/oracle/oci-go-sdk/common"
"github.com/oracle/oci-go-sdk/objectstorage"
"golang.org/x/net/context"
)
func TestOCIBackend(t *testing.T) {
// Skip tests if we are not running acceptance tests
if os.Getenv("VAULT_ACC") == "" {
t.SkipNow()
}
if !hasOCICredentials() {
t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.")
}
bucketName, _ := uuid.GenerateUUID()
configProvider := common.DefaultConfigProvider()
objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider)
namespaceName := getNamespaceName(objectStorageClient, t)
createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t)
defer deleteBucket(namespaceName, bucketName, objectStorageClient, t)
backend := createBackend(bucketName, namespaceName, "false", "", t)
physical.ExerciseBackend(t, backend)
physical.ExerciseBackend_ListPrefix(t, backend)
}
func createBucket(bucketName string, tenancyOcid string, namespaceName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) {
createBucketRequest := objectstorage.CreateBucketRequest{
NamespaceName: &namespaceName,
}
createBucketRequest.CompartmentId = &tenancyOcid
createBucketRequest.Name = &bucketName
createBucketRequest.Metadata = make(map[string]string)
createBucketRequest.PublicAccessType = objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess
_, err := objectStorageClient.CreateBucket(context.Background(), createBucketRequest)
if err != nil {
t.Fatalf("Failed to create bucket: %v", err)
}
}
func deleteBucket(nameSpaceName string, bucketName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) {
request := objectstorage.DeleteBucketRequest{
NamespaceName: &nameSpaceName,
BucketName: &bucketName,
}
_, err := objectStorageClient.DeleteBucket(context.Background(), request)
if err != nil {
t.Fatalf("Failed to delete bucket: %v", err)
}
}
func getTenancyOcid(configProvider common.ConfigurationProvider, t *testing.T) (tenancyOcid string) {
tenancyOcid, err := configProvider.TenancyOCID()
if err != nil {
t.Fatalf("Failed to get tenancy ocid: %v", err)
}
return tenancyOcid
}
func createBackend(bucketName string, namespaceName string, haEnabledStr string, lockBucketName string, t *testing.T) physical.Backend {
backend, err := NewBackend(map[string]string{
"auth_type_api_key": "true",
"bucket_name": bucketName,
"namespace_name": namespaceName,
"ha_enabled": haEnabledStr,
"lock_bucket_name": lockBucketName,
}, logging.NewVaultLogger(log.Trace))
if err != nil {
t.Fatalf("Failed to create new backend: %v", err)
}
return backend
}
func getNamespaceName(objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) string {
response, err := objectStorageClient.GetNamespace(context.Background(), objectstorage.GetNamespaceRequest{})
if err != nil {
t.Fatalf("Failed to get namespaceName: %v", err)
}
nameSpaceName := *response.Value
return nameSpaceName
}
func hasOCICredentials() bool {
configProvider := common.DefaultConfigProvider()
_, err := configProvider.KeyID()
if err != nil {
return false
}
return true
}

View File

@ -59,7 +59,6 @@ vault auth enable "jwt"
vault auth enable "kerberos"
vault auth enable "kubernetes"
vault auth enable "ldap"
vault auth enable "oci"
vault auth enable "okta"
vault auth enable "radius"
vault auth enable "userpass"

View File

@ -86,14 +86,6 @@
{{#if (eq @mode "create")}}
{{#if (eq @model.plugin_name "vault-plugin-database-oracle")}}
<AlertBanner @type="warning">
Please ensure that your Oracle plugin has the default name of
<b>vault-plugin-database-oracle</b>. Custom naming is not supported in the UI at this time. If the plugin is already
named vault-plugin-database-oracle, disregard this warning.
</AlertBanner>
{{/if}}
<form {{on "submit" this.handleCreateConnection}} aria-label="create connection form">
{{#each @model.fieldAttrs as |attr|}}
{{#if (not-eq attr.options.readOnly true)}}

View File

@ -84,24 +84,6 @@ export const AVAILABLE_PLUGIN_TYPES = [
{ attr: 'root_rotation_statements', group: 'statements' },
],
},
{
value: 'vault-plugin-database-oracle',
displayName: 'Oracle',
fields: [
{ attr: 'plugin_name' },
{ attr: 'name' },
{ attr: 'verify_connection', show: false },
{ attr: 'password_policy' },
{ attr: 'connection_url', group: 'pluginConfig' },
{ attr: 'username', group: 'pluginConfig', show: false },
{ attr: 'password', group: 'pluginConfig', show: false },
{ attr: 'max_open_connections', group: 'pluginConfig' },
{ attr: 'max_idle_connections', group: 'pluginConfig' },
{ attr: 'max_connection_lifetime', group: 'pluginConfig' },
{ attr: 'username_template', group: 'pluginConfig' },
{ attr: 'root_rotation_statements', group: 'statements' },
],
},
{
value: 'postgresql-database-plugin',
displayName: 'PostgreSQL',
@ -134,7 +116,6 @@ export const STATEMENT_FIELDS = {
'mysql-aurora-database-plugin': [],
'mysql-legacy-database-plugin': [],
'mysql-rds-database-plugin': [],
'vault-plugin-database-oracle': [],
'postgresql-database-plugin': [],
},
dynamic: {
@ -143,7 +124,6 @@ export const STATEMENT_FIELDS = {
'mysql-aurora-database-plugin': ['creation_statements', 'revocation_statements'],
'mysql-legacy-database-plugin': ['creation_statements', 'revocation_statements'],
'mysql-rds-database-plugin': ['creation_statements', 'revocation_statements'],
'vault-plugin-database-oracle': ['creation_statements', 'revocation_statements'],
'postgresql-database-plugin': [
'creation_statements',
'revocation_statements',

View File

@ -140,8 +140,6 @@ export const structureIconMap = {
'logo-microsoft-monochrome': 'microsoft',
'logo-okta-color': 'okta-color',
'logo-okta-monochrome': 'okta',
'logo-oracle-color': 'oracle-color',
'logo-oracle-monochrome': 'oracle',
'logo-slack-color': 'slack-color',
'logo-slack-monochrome': 'slack',
'logo-vmware-color': 'vmware-color',