remove oracle/oci
This commit is contained in:
parent
e0e8caaa58
commit
747df1e9e8
|
@ -1,231 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
vaultoci "github.com/hashicorp/vault-plugin-auth-oci"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth"
|
||||
agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/sink"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
|
||||
"github.com/hashicorp/vault/helper/testhelpers"
|
||||
vaulthttp "github.com/hashicorp/vault/http"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/vault"
|
||||
)
|
||||
|
||||
const (
|
||||
envVarOCITestTenancyOCID = "OCI_TEST_TENANCY_OCID"
|
||||
envVarOCITestUserOCID = "OCI_TEST_USER_OCID"
|
||||
envVarOCITestFingerprint = "OCI_TEST_FINGERPRINT"
|
||||
envVarOCITestPrivateKeyPath = "OCI_TEST_PRIVATE_KEY_PATH"
|
||||
envVAROCITestOCIDList = "OCI_TEST_OCID_LIST"
|
||||
|
||||
// The OCI SDK doesn't export its standard env vars so they're captured here.
|
||||
// These are used for the duration of the test to make sure the agent is able to
|
||||
// pick up creds from the env.
|
||||
//
|
||||
// To run this test, do not set these. Only the above ones need to be set.
|
||||
envVarOCITenancyOCID = "OCI_tenancy_ocid"
|
||||
envVarOCIUserOCID = "OCI_user_ocid"
|
||||
envVarOCIFingerprint = "OCI_fingerprint"
|
||||
envVarOCIPrivateKeyPath = "OCI_private_key_path"
|
||||
)
|
||||
|
||||
func TestOCIEndToEnd(t *testing.T) {
|
||||
if !runAcceptanceTests {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
||||
// Ensure each cred is populated.
|
||||
credNames := []string{
|
||||
envVarOCITestTenancyOCID,
|
||||
envVarOCITestUserOCID,
|
||||
envVarOCITestFingerprint,
|
||||
envVarOCITestPrivateKeyPath,
|
||||
envVAROCITestOCIDList,
|
||||
}
|
||||
testhelpers.SkipUnlessEnvVarsSet(t, credNames)
|
||||
|
||||
logger := logging.NewVaultLogger(hclog.Trace)
|
||||
coreConfig := &vault.CoreConfig{
|
||||
Logger: logger,
|
||||
CredentialBackends: map[string]logical.Factory{
|
||||
"oci": vaultoci.Factory,
|
||||
},
|
||||
}
|
||||
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
|
||||
HandlerFunc: vaulthttp.Handler,
|
||||
})
|
||||
cluster.Start()
|
||||
defer cluster.Cleanup()
|
||||
|
||||
vault.TestWaitActive(t, cluster.Cores[0].Core)
|
||||
client := cluster.Cores[0].Client
|
||||
|
||||
// Setup Vault
|
||||
if err := client.Sys().EnableAuthWithOptions("oci", &api.EnableAuthOptions{
|
||||
Type: "oci",
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := client.Logical().Write("auth/oci/config", map[string]interface{}{
|
||||
"home_tenancy_id": os.Getenv(envVarOCITestTenancyOCID),
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := client.Logical().Write("auth/oci/role/test", map[string]interface{}{
|
||||
"ocid_list": os.Getenv(envVAROCITestOCIDList),
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
|
||||
// We're going to feed oci auth creds via env variables.
|
||||
if err := setOCIEnvCreds(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := unsetOCIEnvCreds(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
vaultAddr := "http://" + cluster.Cores[0].Listeners[0].Addr().String()
|
||||
|
||||
am, err := agentoci.NewOCIAuthMethod(&auth.AuthConfig{
|
||||
Logger: logger.Named("auth.oci"),
|
||||
MountPath: "auth/oci",
|
||||
Config: map[string]interface{}{
|
||||
"type": "apikey",
|
||||
"role": "test",
|
||||
},
|
||||
}, vaultAddr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ahConfig := &auth.AuthHandlerConfig{
|
||||
Logger: logger.Named("auth.handler"),
|
||||
Client: client,
|
||||
}
|
||||
|
||||
ah := auth.NewAuthHandler(ahConfig)
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
errCh <- ah.Run(ctx, am)
|
||||
}()
|
||||
defer func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tokenSinkFileName := tmpFile.Name()
|
||||
tmpFile.Close()
|
||||
os.Remove(tokenSinkFileName)
|
||||
t.Logf("output: %s", tokenSinkFileName)
|
||||
|
||||
config := &sink.SinkConfig{
|
||||
Logger: logger.Named("sink.file"),
|
||||
Config: map[string]interface{}{
|
||||
"path": tokenSinkFileName,
|
||||
},
|
||||
WrapTTL: 10 * time.Second,
|
||||
}
|
||||
|
||||
fs, err := file.NewFileSink(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
config.Sink = fs
|
||||
|
||||
ss := sink.NewSinkServer(&sink.SinkServerConfig{
|
||||
Logger: logger.Named("sink.server"),
|
||||
Client: client,
|
||||
})
|
||||
go func() {
|
||||
errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
|
||||
}()
|
||||
defer func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// This has to be after the other defers so it happens first. It allows
|
||||
// successful test runs to immediately cancel all of the runner goroutines
|
||||
// and unblock any of the blocking defer calls by the runner's DoneCh that
|
||||
// comes before this and avoid successful tests from taking the entire
|
||||
// timeout duration.
|
||||
defer cancel()
|
||||
|
||||
if stat, err := os.Lstat(tokenSinkFileName); err == nil {
|
||||
t.Fatalf("expected err but got %s", stat)
|
||||
} else if !os.IsNotExist(err) {
|
||||
t.Fatal("expected notexist err")
|
||||
}
|
||||
|
||||
// Wait 2 seconds for the env variables to be detected and an auth to be generated.
|
||||
time.Sleep(time.Second * 2)
|
||||
|
||||
token, err := readToken(tokenSinkFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if token.Token == "" {
|
||||
t.Fatal("expected token but didn't receive it")
|
||||
}
|
||||
}
|
||||
|
||||
func setOCIEnvCreds() error {
|
||||
if err := os.Setenv(envVarOCITenancyOCID, os.Getenv(envVarOCITestTenancyOCID)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Setenv(envVarOCIUserOCID, os.Getenv(envVarOCITestUserOCID)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Setenv(envVarOCIFingerprint, os.Getenv(envVarOCITestFingerprint)); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Setenv(envVarOCIPrivateKeyPath, os.Getenv(envVarOCITestPrivateKeyPath))
|
||||
}
|
||||
|
||||
func unsetOCIEnvCreds() error {
|
||||
if err := os.Unsetenv(envVarOCITenancyOCID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Unsetenv(envVarOCIUserOCID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Unsetenv(envVarOCIFingerprint); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Unsetenv(envVarOCIPrivateKeyPath)
|
||||
}
|
|
@ -1,264 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package oci
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/user"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth"
|
||||
"github.com/oracle/oci-go-sdk/common"
|
||||
ociAuth "github.com/oracle/oci-go-sdk/common/auth"
|
||||
)
|
||||
|
||||
const (
|
||||
typeAPIKey = "apikey"
|
||||
typeInstance = "instance"
|
||||
|
||||
/*
|
||||
|
||||
IAM creds can be inferred from instance metadata or the container
|
||||
identity service, and those creds expire at varying intervals with
|
||||
new creds becoming available at likewise varying intervals. Let's
|
||||
default to polling once a minute so all changes can be picked up
|
||||
rather quickly. This is configurable, however.
|
||||
|
||||
*/
|
||||
defaultCredCheckFreqSeconds = 60 * time.Second
|
||||
|
||||
defaultConfigFileName = "config"
|
||||
defaultConfigDirName = ".oci"
|
||||
configFilePathEnvVarName = "OCI_CONFIG_FILE"
|
||||
secondaryConfigDirName = ".oraclebmc"
|
||||
)
|
||||
|
||||
func NewOCIAuthMethod(conf *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) {
|
||||
if conf == nil {
|
||||
return nil, errors.New("empty config")
|
||||
}
|
||||
if conf.Config == nil {
|
||||
return nil, errors.New("empty config data")
|
||||
}
|
||||
|
||||
a := &ociMethod{
|
||||
logger: conf.Logger,
|
||||
vaultAddress: vaultAddress,
|
||||
mountPath: conf.MountPath,
|
||||
credsFound: make(chan struct{}),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
typeRaw, ok := conf.Config["type"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing 'type' value")
|
||||
}
|
||||
authType, ok := typeRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'type' config value to string")
|
||||
}
|
||||
|
||||
roleRaw, ok := conf.Config["role"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing 'role' value")
|
||||
}
|
||||
a.role, ok = roleRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'role' config value to string")
|
||||
}
|
||||
|
||||
// Check for an optional custom frequency at which we should poll for creds.
|
||||
credCheckFreqSec := defaultCredCheckFreqSeconds
|
||||
if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok {
|
||||
checkFreq, err := parseutil.ParseDurationSecond(checkFreqRaw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse credential_poll_interval: %v", err)
|
||||
}
|
||||
credCheckFreqSec = checkFreq
|
||||
}
|
||||
|
||||
switch {
|
||||
case a.role == "":
|
||||
return nil, errors.New("'role' value is empty")
|
||||
case authType == "":
|
||||
return nil, errors.New("'type' value is empty")
|
||||
case authType != typeAPIKey && authType != typeInstance:
|
||||
return nil, errors.New("'type' value is invalid")
|
||||
case authType == typeAPIKey:
|
||||
defaultConfigFile := getDefaultConfigFilePath()
|
||||
homeFolder := getHomeFolder()
|
||||
secondaryConfigFile := path.Join(homeFolder, secondaryConfigDirName, defaultConfigFileName)
|
||||
|
||||
environmentProvider := common.ConfigurationProviderEnvironmentVariables("OCI", "")
|
||||
defaultFileProvider, _ := common.ConfigurationProviderFromFile(defaultConfigFile, "")
|
||||
secondaryFileProvider, _ := common.ConfigurationProviderFromFile(secondaryConfigFile, "")
|
||||
|
||||
provider, _ := common.ComposingConfigurationProvider([]common.ConfigurationProvider{environmentProvider, defaultFileProvider, secondaryFileProvider})
|
||||
a.configurationProvider = provider
|
||||
case authType == typeInstance:
|
||||
configurationProvider, err := ociAuth.InstancePrincipalConfigurationProvider()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create instance principal configuration provider: %v", err)
|
||||
}
|
||||
a.configurationProvider = configurationProvider
|
||||
}
|
||||
|
||||
// Do an initial population of the creds because we want to err right away if we can't
|
||||
// even get a first set.
|
||||
creds, err := a.configurationProvider.KeyID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.lastCreds = creds
|
||||
|
||||
go a.pollForCreds(credCheckFreqSec)
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
type ociMethod struct {
|
||||
logger hclog.Logger
|
||||
vaultAddress string
|
||||
mountPath string
|
||||
|
||||
configurationProvider common.ConfigurationProvider
|
||||
role string
|
||||
|
||||
// These are used to share the latest creds safely across goroutines.
|
||||
credLock sync.Mutex
|
||||
lastCreds string
|
||||
|
||||
// Notifies the outer environment that it should call Authenticate again.
|
||||
credsFound chan struct{}
|
||||
|
||||
// Detects that the outer environment is closing.
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) {
|
||||
a.credLock.Lock()
|
||||
defer a.credLock.Unlock()
|
||||
|
||||
a.logger.Trace("beginning authentication")
|
||||
|
||||
requestPath := fmt.Sprintf("/v1/%s/login/%s", a.mountPath, a.role)
|
||||
requestURL := fmt.Sprintf("%s%s", a.vaultAddress, requestPath)
|
||||
|
||||
request, err := http.NewRequest("GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("error creating authentication request: %w", err)
|
||||
}
|
||||
|
||||
request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
|
||||
|
||||
signer := common.DefaultRequestSigner(a.configurationProvider)
|
||||
|
||||
err = signer.Sign(request)
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err)
|
||||
}
|
||||
|
||||
parsedVaultAddress, err := url.Parse(a.vaultAddress)
|
||||
if err != nil {
|
||||
return "", nil, nil, fmt.Errorf("unable to parse vault address: %w", err)
|
||||
}
|
||||
|
||||
request.Header.Set("Host", parsedVaultAddress.Host)
|
||||
request.Header.Set("(request-target)", fmt.Sprintf("%s %s", "get", requestPath))
|
||||
|
||||
data := map[string]interface{}{
|
||||
"request_headers": request.Header,
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/login/%s", a.mountPath, a.role), nil, data, nil
|
||||
}
|
||||
|
||||
func (a *ociMethod) NewCreds() chan struct{} {
|
||||
return a.credsFound
|
||||
}
|
||||
|
||||
func (a *ociMethod) CredSuccess() {}
|
||||
|
||||
func (a *ociMethod) Shutdown() {
|
||||
close(a.credsFound)
|
||||
close(a.stopCh)
|
||||
}
|
||||
|
||||
func (a *ociMethod) pollForCreds(frequency time.Duration) {
|
||||
ticker := time.NewTicker(frequency)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-a.stopCh:
|
||||
a.logger.Trace("shutdown triggered, stopping OCI auth handler")
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := a.checkCreds(); err != nil {
|
||||
a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ociMethod) checkCreds() error {
|
||||
a.credLock.Lock()
|
||||
defer a.credLock.Unlock()
|
||||
|
||||
a.logger.Trace("checking for new credentials")
|
||||
currentCreds, err := a.configurationProvider.KeyID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// These will always have different pointers regardless of whether their
|
||||
// values are identical, hence the use of DeepEqual.
|
||||
if currentCreds == a.lastCreds {
|
||||
a.logger.Trace("credentials are unchanged")
|
||||
return nil
|
||||
}
|
||||
a.lastCreds = currentCreds
|
||||
a.logger.Trace("new credentials detected, triggering Authenticate")
|
||||
a.credsFound <- struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHomeFolder() string {
|
||||
current, e := user.Current()
|
||||
if e != nil {
|
||||
// Give up and try to return something sensible
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return home
|
||||
}
|
||||
return current.HomeDir
|
||||
}
|
||||
|
||||
func getDefaultConfigFilePath() string {
|
||||
homeFolder := getHomeFolder()
|
||||
defaultConfigFile := path.Join(homeFolder, defaultConfigDirName, defaultConfigFileName)
|
||||
if _, err := os.Stat(defaultConfigFile); err == nil {
|
||||
return defaultConfigFile
|
||||
}
|
||||
|
||||
// Read configuration file path from OCI_CONFIG_FILE env var
|
||||
fallbackConfigFile, existed := os.LookupEnv(configFilePathEnvVarName)
|
||||
if !existed {
|
||||
return defaultConfigFile
|
||||
}
|
||||
if _, err := os.Stat(fallbackConfigFile); os.IsNotExist(err) {
|
||||
return defaultConfigFile
|
||||
}
|
||||
return fallbackConfigFile
|
||||
}
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/hashicorp/vault/command/agentproxyshared/auth/jwt"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/oci"
|
||||
token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/cache"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
|
||||
|
@ -44,8 +43,6 @@ func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.Aut
|
|||
return kubernetes.NewKubernetesAuthMethod(authConfig)
|
||||
case "approle":
|
||||
return approle.NewApproleAuthMethod(authConfig)
|
||||
case "oci":
|
||||
return oci.NewOCIAuthMethod(authConfig, vaultAddress)
|
||||
case "token_file":
|
||||
return token_file.NewTokenFileAuthMethod(authConfig)
|
||||
case "pcf": // Deprecated.
|
||||
|
|
|
@ -30,7 +30,6 @@ import (
|
|||
credCF "github.com/hashicorp/vault-plugin-auth-cf"
|
||||
credOIDC "github.com/hashicorp/vault-plugin-auth-jwt"
|
||||
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
|
||||
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
|
||||
credCert "github.com/hashicorp/vault/builtin/credential/cert"
|
||||
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
|
||||
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
|
||||
|
@ -42,7 +41,6 @@ import (
|
|||
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
|
||||
|
||||
physConsul "github.com/hashicorp/vault/physical/consul"
|
||||
physOCI "github.com/hashicorp/vault/physical/oci"
|
||||
physRaft "github.com/hashicorp/vault/physical/raft"
|
||||
physFile "github.com/hashicorp/vault/sdk/physical/file"
|
||||
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
|
||||
|
@ -171,7 +169,6 @@ var (
|
|||
"inmem_transactional_ha": physInmem.NewTransactionalInmemHA,
|
||||
"inmem_transactional": physInmem.NewTransactionalInmem,
|
||||
"inmem": physInmem.NewInmem,
|
||||
"oci": physOCI.NewBackend,
|
||||
"raft": physRaft.NewRaftBackend,
|
||||
}
|
||||
|
||||
|
@ -191,7 +188,6 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co
|
|||
"github": &credGitHub.CLIHandler{},
|
||||
"kerberos": &credKerb.CLIHandler{},
|
||||
"ldap": &credLdap.CLIHandler{},
|
||||
"oci": &credOCI.CLIHandler{},
|
||||
"oidc": &credOIDC.CLIHandler{},
|
||||
"okta": &credOkta.CLIHandler{},
|
||||
"pcf": &credCF.CLIHandler{}, // Deprecated.
|
||||
|
|
5
go.mod
5
go.mod
|
@ -63,7 +63,6 @@ require (
|
|||
github.com/hashicorp/go-kms-wrapping/v2 v2.0.10
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7
|
||||
github.com/hashicorp/go-memdb v1.3.4
|
||||
github.com/hashicorp/go-msgpack v1.1.5
|
||||
|
@ -103,7 +102,6 @@ require (
|
|||
github.com/hashicorp/vault-plugin-auth-jwt v0.16.1
|
||||
github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0
|
||||
github.com/hashicorp/vault-plugin-auth-oci v0.14.0
|
||||
github.com/hashicorp/vault-plugin-mock v0.16.1
|
||||
github.com/hashicorp/vault-plugin-secrets-ad v0.16.0
|
||||
github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0
|
||||
|
@ -139,7 +137,6 @@ require (
|
|||
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/okta/okta-sdk-golang/v2 v2.12.1
|
||||
github.com/oracle/oci-go-sdk v24.3.0+incompatible
|
||||
github.com/ory/dockertest v3.3.5+incompatible
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pires/go-proxyproto v0.6.1
|
||||
|
@ -324,7 +321,6 @@ require (
|
|||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
|
||||
github.com/opencontainers/runc v1.2.0-rc.1 // indirect
|
||||
github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect
|
||||
github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
|
@ -339,7 +335,6 @@ require (
|
|||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect
|
||||
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
|
||||
github.com/spf13/cast v1.5.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
|
|
10
go.sum
10
go.sum
|
@ -1896,8 +1896,6 @@ github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0
|
|||
github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo=
|
||||
github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7/go.mod h1:hxNA5oTfAvwPacWVg1axtF/lvTafwlAa6a6K4uzWHhw=
|
||||
github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
|
||||
|
@ -2033,8 +2031,6 @@ github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 h1:YH2x9kIV0jKXk22tVkpyd
|
|||
github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0/go.mod h1:I6ulXug4oxx77DFYjqI1kVl+72TgXEo3Oju4tTOVfU4=
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 h1:vuXNJvtMyoqQ01Sfwf2TNcJNkGcxP1vD3C7gpvuVkCU=
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0/go.mod h1:onx9W/rDwENQkN+1yEnJvS51PVkkGAPOBXasne7lnnk=
|
||||
github.com/hashicorp/vault-plugin-auth-oci v0.14.0 h1:B7uyigqgUAO3gebvi8mMmsq7l4QAG0bLEP6rAKyDVuw=
|
||||
github.com/hashicorp/vault-plugin-auth-oci v0.14.0/go.mod h1:SYdTtQhzMxqOCbdC0E0UOrkc4eGXXcJmXXbe1MHVPtE=
|
||||
github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
|
||||
github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
|
||||
github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 h1:6RCpd2PbBvmi5xmxXhggE0Xv+/Gag896/NNZeMKH+8A=
|
||||
|
@ -2525,10 +2521,6 @@ github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuh
|
|||
github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU=
|
||||
github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
|
||||
github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM=
|
||||
github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w=
|
||||
github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
|
||||
github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
|
||||
github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4=
|
||||
|
@ -2724,8 +2716,6 @@ github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQv
|
|||
github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0=
|
||||
github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
credJWT "github.com/hashicorp/vault-plugin-auth-jwt"
|
||||
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
|
||||
credKube "github.com/hashicorp/vault-plugin-auth-kubernetes"
|
||||
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
|
||||
logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
|
||||
logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes"
|
||||
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
|
||||
|
@ -92,7 +91,6 @@ func newRegistry() *registry {
|
|||
"kerberos": {Factory: credKerb.Factory},
|
||||
"kubernetes": {Factory: credKube.Factory},
|
||||
"ldap": {Factory: credLdap.Factory},
|
||||
"oci": {Factory: credOCI.Factory},
|
||||
"oidc": {Factory: credJWT.Factory},
|
||||
"okta": {Factory: credOkta.Factory},
|
||||
"pcf": {
|
||||
|
|
|
@ -14,7 +14,6 @@ import (
|
|||
"github.com/hashicorp/go-hclog"
|
||||
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
|
||||
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
|
@ -171,11 +170,6 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin
|
|||
case wrapping.WrapperTypeAead:
|
||||
wrapper, kmsInfo, err = GetAEADKMSFunc(configKMS, opts...)
|
||||
|
||||
case wrapping.WrapperTypeOciKms:
|
||||
if keyId, ok := configKMS.Config["key_id"]; ok {
|
||||
opts = append(opts, wrapping.WithKeyId(keyId))
|
||||
}
|
||||
wrapper, kmsInfo, err = GetOCIKMSKMSFunc(configKMS, opts...)
|
||||
case wrapping.WrapperTypeTransit:
|
||||
wrapper, kmsInfo, err = GetTransitKMSFunc(configKMS, opts...)
|
||||
|
||||
|
@ -217,22 +211,6 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st
|
|||
return wrapper, info, nil
|
||||
}
|
||||
|
||||
func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
|
||||
wrapper := ocikms.NewWrapper()
|
||||
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
info := make(map[string]string)
|
||||
if wrapperInfo != nil {
|
||||
info["OCI KMS KeyID"] = wrapperInfo.Metadata[ocikms.KmsConfigKeyId]
|
||||
info["OCI KMS Crypto Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigCryptoEndpoint]
|
||||
info["OCI KMS Management Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigManagementEndpoint]
|
||||
info["OCI KMS Principal Type"] = wrapperInfo.Metadata["principal_type"]
|
||||
}
|
||||
return wrapper, info, nil
|
||||
}
|
||||
|
||||
var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
|
||||
wrapper := transit.NewWrapper()
|
||||
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)
|
||||
|
|
|
@ -1,384 +0,0 @@
|
|||
// Copyright © 2019, Oracle and/or its affiliates.
|
||||
package oci
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
"github.com/oracle/oci-go-sdk/common"
|
||||
"github.com/oracle/oci-go-sdk/common/auth"
|
||||
"github.com/oracle/oci-go-sdk/objectstorage"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Verify Backend satisfies the correct interfaces
|
||||
var _ physical.Backend = (*Backend)(nil)
|
||||
|
||||
const (
|
||||
// Limits maximum outstanding requests
|
||||
MaxNumberOfPermits = 256
|
||||
)
|
||||
|
||||
var (
|
||||
metricDelete = []string{"oci", "delete"}
|
||||
metricGet = []string{"oci", "get"}
|
||||
metricList = []string{"oci", "list"}
|
||||
metricPut = []string{"oci", "put"}
|
||||
metricDeleteFull = []string{"oci", "deleteFull"}
|
||||
metricGetFull = []string{"oci", "getFull"}
|
||||
metricListFull = []string{"oci", "listFull"}
|
||||
metricPutFull = []string{"oci", "putFull"}
|
||||
|
||||
metricDeleteHa = []string{"oci", "deleteHa"}
|
||||
metricGetHa = []string{"oci", "getHa"}
|
||||
metricPutHa = []string{"oci", "putHa"}
|
||||
|
||||
metricDeleteAcquirePool = []string{"oci", "deleteAcquirePool"}
|
||||
metricGetAcquirePool = []string{"oci", "getAcquirePool"}
|
||||
metricListAcquirePool = []string{"oci", "listAcquirePool"}
|
||||
metricPutAcquirePool = []string{"oci", "putAcquirePool"}
|
||||
|
||||
metricDeleteFailed = []string{"oci", "deleteFailed"}
|
||||
metricGetFailed = []string{"oci", "getFailed"}
|
||||
metricListFailed = []string{"oci", "listFailed"}
|
||||
metricPutFailed = []string{"oci", "putFailed"}
|
||||
metricHaWatchLockRetriable = []string{"oci", "haWatchLockRetriable"}
|
||||
metricPermitsUsed = []string{"oci", "permitsUsed"}
|
||||
|
||||
metric5xx = []string{"oci", "5xx"}
|
||||
)
|
||||
|
||||
type Backend struct {
|
||||
client *objectstorage.ObjectStorageClient
|
||||
bucketName string
|
||||
logger log.Logger
|
||||
permitPool *physical.PermitPool
|
||||
namespaceName string
|
||||
haEnabled bool
|
||||
lockBucketName string
|
||||
}
|
||||
|
||||
func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
bucketName := conf["bucket_name"]
|
||||
if bucketName == "" {
|
||||
return nil, errors.New("missing bucket name")
|
||||
}
|
||||
|
||||
namespaceName := conf["namespace_name"]
|
||||
if bucketName == "" {
|
||||
return nil, errors.New("missing namespace name")
|
||||
}
|
||||
|
||||
lockBucketName := ""
|
||||
haEnabled := false
|
||||
var err error
|
||||
haEnabledStr := conf["ha_enabled"]
|
||||
if haEnabledStr != "" {
|
||||
haEnabled, err = strconv.ParseBool(haEnabledStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
|
||||
}
|
||||
|
||||
if haEnabled {
|
||||
lockBucketName = conf["lock_bucket_name"]
|
||||
if lockBucketName == "" {
|
||||
return nil, errors.New("missing lock bucket name")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
authTypeAPIKeyBool := false
|
||||
authTypeAPIKeyStr := conf["auth_type_api_key"]
|
||||
if authTypeAPIKeyStr != "" {
|
||||
authTypeAPIKeyBool, err = strconv.ParseBool(authTypeAPIKeyStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed parsing auth_type_api_key parameter: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var cp common.ConfigurationProvider
|
||||
if authTypeAPIKeyBool {
|
||||
cp = common.DefaultConfigProvider()
|
||||
} else {
|
||||
cp, err = auth.InstancePrincipalConfigurationProvider()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed creating InstancePrincipalConfigurationProvider: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
objectStorageClient, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(cp)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed creating NewObjectStorageClientWithConfigurationProvider: %w", err)
|
||||
}
|
||||
|
||||
region := conf["region"]
|
||||
if region != "" {
|
||||
objectStorageClient.SetRegion(region)
|
||||
}
|
||||
|
||||
logger.Debug("configuration",
|
||||
"bucket_name", bucketName,
|
||||
"region", region,
|
||||
"namespace_name", namespaceName,
|
||||
"ha_enabled", haEnabled,
|
||||
"lock_bucket_name", lockBucketName,
|
||||
"auth_type_api_key", authTypeAPIKeyBool,
|
||||
)
|
||||
|
||||
return &Backend{
|
||||
client: &objectStorageClient,
|
||||
bucketName: bucketName,
|
||||
logger: logger,
|
||||
permitPool: physical.NewPermitPool(MaxNumberOfPermits),
|
||||
namespaceName: namespaceName,
|
||||
haEnabled: haEnabled,
|
||||
lockBucketName: lockBucketName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put writes an entry to the data bucket. Concurrency is bounded by the
// permit pool, and timing/failure metrics are emitted around the OCI call.
func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error {
	o.logger.Debug("PUT started")
	defer metrics.MeasureSince(metricPutFull, time.Now())
	startAcquirePool := time.Now()
	metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
	o.permitPool.Acquire()
	defer o.permitPool.Release()
	metrics.MeasureSince(metricPutAcquirePool, startAcquirePool)

	defer metrics.MeasureSince(metricPut, time.Now())
	size := int64(len(entry.Value))
	// Client-generated ID so the request can be correlated with OCI
	// service-side logs.
	opcClientRequestId, err := uuid.GenerateUUID()
	if err != nil {
		metrics.IncrCounter(metricPutFailed, 1)
		o.logger.Error("failed to generate UUID")
		return fmt.Errorf("failed to generate UUID: %w", err)
	}

	o.logger.Debug("PUT", "opc-client-request-id", opcClientRequestId)
	request := objectstorage.PutObjectRequest{
		NamespaceName:      &o.namespaceName,
		BucketName:         &o.bucketName,
		ObjectName:         &entry.Key,
		ContentLength:      &size,
		PutObjectBody:      ioutil.NopCloser(bytes.NewReader(entry.Value)),
		OpcMeta:            nil,
		OpcClientRequestId: &opcClientRequestId,
	}

	resp, err := o.client.PutObject(ctx, request)
	// Close the raw body (when present) so the HTTP connection can be reused.
	if resp.RawResponse != nil && resp.RawResponse.Body != nil {
		defer resp.RawResponse.Body.Close()
	}

	if err != nil {
		metrics.IncrCounter(metricPutFailed, 1)
		return fmt.Errorf("failed to put data: %w", err)
	}

	o.logRequest("PUT", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)
	o.logger.Debug("PUT completed")

	return nil
}
|
||||
|
||||
// Get reads the entry stored under key from the data bucket. It returns
// (nil, nil) when the object does not exist (HTTP 404), per the physical
// backend contract.
func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) {
	o.logger.Debug("GET started")
	defer metrics.MeasureSince(metricGetFull, time.Now())
	metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
	startAcquirePool := time.Now()
	o.permitPool.Acquire()
	defer o.permitPool.Release()
	metrics.MeasureSince(metricGetAcquirePool, startAcquirePool)

	defer metrics.MeasureSince(metricGet, time.Now())
	// Client-generated ID correlating this call with OCI service-side logs.
	opcClientRequestId, err := uuid.GenerateUUID()
	if err != nil {
		o.logger.Error("failed to generate UUID")
		return nil, fmt.Errorf("failed to generate UUID: %w", err)
	}
	o.logger.Debug("GET", "opc-client-request-id", opcClientRequestId)
	request := objectstorage.GetObjectRequest{
		NamespaceName:      &o.namespaceName,
		BucketName:         &o.bucketName,
		ObjectName:         &key,
		OpcClientRequestId: &opcClientRequestId,
	}

	resp, err := o.client.GetObject(ctx, request)
	// Close the raw body (when present) so the HTTP connection can be reused.
	if resp.RawResponse != nil && resp.RawResponse.Body != nil {
		defer resp.RawResponse.Body.Close()
	}
	o.logRequest("GET", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)

	if err != nil {
		// A 404 means the key simply doesn't exist; not an error for Get.
		if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound {
			return nil, nil
		}
		metrics.IncrCounter(metricGetFailed, 1)
		return nil, fmt.Errorf("failed to read Value: %w", err)
	}

	body, err := ioutil.ReadAll(resp.Content)
	if err != nil {
		metrics.IncrCounter(metricGetFailed, 1)
		return nil, fmt.Errorf("failed to decode Value into bytes: %w", err)
	}

	o.logger.Debug("GET completed")

	return &physical.Entry{
		Key:   key,
		Value: body,
	}, nil
}
|
||||
|
||||
// Delete removes the entry stored under key from the data bucket. Deleting
// a key that does not exist (HTTP 404) is treated as success, since the
// desired end state (no object) already holds.
func (o *Backend) Delete(ctx context.Context, key string) error {
	o.logger.Debug("DELETE started")
	defer metrics.MeasureSince(metricDeleteFull, time.Now())
	metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
	startAcquirePool := time.Now()
	o.permitPool.Acquire()
	defer o.permitPool.Release()
	metrics.MeasureSince(metricDeleteAcquirePool, startAcquirePool)

	defer metrics.MeasureSince(metricDelete, time.Now())
	// Client-generated ID correlating this call with OCI service-side logs.
	opcClientRequestId, err := uuid.GenerateUUID()
	if err != nil {
		o.logger.Error("Delete: error generating UUID")
		return fmt.Errorf("failed to generate UUID: %w", err)
	}
	o.logger.Debug("Delete", "opc-client-request-id", opcClientRequestId)
	request := objectstorage.DeleteObjectRequest{
		NamespaceName:      &o.namespaceName,
		BucketName:         &o.bucketName,
		ObjectName:         &key,
		OpcClientRequestId: &opcClientRequestId,
	}

	resp, err := o.client.DeleteObject(ctx, request)
	// Close the raw body (when present) so the HTTP connection can be reused.
	if resp.RawResponse != nil && resp.RawResponse.Body != nil {
		defer resp.RawResponse.Body.Close()
	}

	o.logRequest("DELETE", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)

	if err != nil {
		if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound {
			return nil
		}
		metrics.IncrCounter(metricDeleteFailed, 1)
		return fmt.Errorf("failed to delete Key: %w", err)
	}
	o.logger.Debug("DELETE completed")

	return nil
}
|
||||
|
||||
// List enumerates the keys directly under prefix. Sub-"directories" are
// surfaced as common prefixes via the "/" delimiter, results are paginated
// with NextStartWith until exhausted, and the final key list is
// de-duplicated and sorted.
func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) {
	o.logger.Debug("LIST started")
	defer metrics.MeasureSince(metricListFull, time.Now())
	metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits()))
	startAcquirePool := time.Now()
	o.permitPool.Acquire()
	defer o.permitPool.Release()

	metrics.MeasureSince(metricListAcquirePool, startAcquirePool)
	defer metrics.MeasureSince(metricList, time.Now())
	var keys []string
	delimiter := "/"
	var start *string // pagination cursor; nil on the first request

	for {
		// Fresh client request ID per page for OCI-side log correlation.
		opcClientRequestId, err := uuid.GenerateUUID()
		if err != nil {
			o.logger.Error("List: error generating UUID")
			return nil, fmt.Errorf("failed to generate UUID %w", err)
		}
		o.logger.Debug("LIST", "opc-client-request-id", opcClientRequestId)
		request := objectstorage.ListObjectsRequest{
			NamespaceName:      &o.namespaceName,
			BucketName:         &o.bucketName,
			Prefix:             &prefix,
			Delimiter:          &delimiter,
			Start:              start,
			OpcClientRequestId: &opcClientRequestId,
		}

		resp, err := o.client.ListObjects(ctx, request)
		o.logRequest("LIST", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err)

		if err != nil {
			metrics.IncrCounter(metricListFailed, 1)
			return nil, fmt.Errorf("failed to list using prefix: %w", err)
		}

		// "Directory" entries under the prefix (due to the delimiter).
		for _, commonPrefix := range resp.Prefixes {
			commonPrefix := strings.TrimPrefix(commonPrefix, prefix)
			keys = append(keys, commonPrefix)
		}

		// Objects directly under the prefix.
		for _, object := range resp.Objects {
			key := strings.TrimPrefix(*object.Name, prefix)
			keys = append(keys, key)
		}

		// Duplicate keys are not expected
		keys = strutil.RemoveDuplicates(keys, false)

		if resp.NextStartWith == nil {
			// Last page; close the body before leaving the loop.
			resp.RawResponse.Body.Close()
			break
		}

		start = resp.NextStartWith
		resp.RawResponse.Body.Close()
	}

	sort.Strings(keys)
	o.logger.Debug("LIST completed")
	return keys, nil
}
|
||||
|
||||
func (o *Backend) logRequest(operation string, response *http.Response, clientOpcRequestIdPtr *string, opcRequestIdPtr *string, err error) {
|
||||
statusCode := 0
|
||||
clientOpcRequestId := " "
|
||||
opcRequestId := " "
|
||||
|
||||
if response != nil {
|
||||
statusCode = response.StatusCode
|
||||
if statusCode/100 == 5 {
|
||||
metrics.IncrCounter(metric5xx, 1)
|
||||
}
|
||||
}
|
||||
|
||||
if clientOpcRequestIdPtr != nil {
|
||||
clientOpcRequestId = *clientOpcRequestIdPtr
|
||||
}
|
||||
|
||||
if opcRequestIdPtr != nil {
|
||||
opcRequestId = *opcRequestIdPtr
|
||||
}
|
||||
|
||||
statusCodeStr := "No response"
|
||||
if statusCode != 0 {
|
||||
statusCodeStr = strconv.Itoa(statusCode)
|
||||
}
|
||||
|
||||
logLine := fmt.Sprintf("%s client:opc-request-id %s opc-request-id: %s status-code: %s",
|
||||
operation, clientOpcRequestId, opcRequestId, statusCodeStr)
|
||||
if err != nil && statusCode/100 == 5 {
|
||||
o.logger.Error(logLine, "error", err)
|
||||
}
|
||||
}
|
|
@ -1,551 +0,0 @@
|
|||
// Copyright © 2019, Oracle and/or its affiliates.
|
||||
package oci
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
"github.com/oracle/oci-go-sdk/objectstorage"
|
||||
)
|
||||
|
||||
// The lock implementation below prioritizes ensuring that there are not 2 primary at any given point in time
|
||||
// over high availability of the primary instance
|
||||
|
||||
// Verify Backend satisfies the correct interfaces at compile time.
var (
	_ physical.HABackend = (*Backend)(nil)
	_ physical.Lock      = (*Lock)(nil)
)
|
||||
|
||||
const (
	// LockRenewInterval is the time to wait between lock renewals.
	LockRenewInterval = 3 * time.Second

	// LockRetryInterval is the amount of time to wait if the lock fails before trying again.
	LockRetryInterval = 5 * time.Second

	// LockWatchRetryInterval is the amount of time to wait if a watch fails before trying again.
	LockWatchRetryInterval = 2 * time.Second

	// LockTTL is the default lock TTL.
	LockTTL = 15 * time.Second

	// LockWatchRetryMax is the number of times to retry a failed watch before signaling that leadership is lost.
	LockWatchRetryMax = 4

	// LockCacheMinAcceptableAge is minimum cache age in seconds to determine that its safe for a secondary instance
	// to acquire lock.
	LockCacheMinAcceptableAge = 45 * time.Second

	// LockWriteRetriesOnFailures is the number of retries that are made on write 5xx failures.
	LockWriteRetriesOnFailures = 4

	// ObjectStorageCallsReadTimeout bounds each Object Storage read (GET)
	// made by the lock machinery.
	ObjectStorageCallsReadTimeout = 3 * time.Second

	// ObjectStorageCallsWriteTimeout bounds each Object Storage write (PUT)
	// made by the lock machinery.
	ObjectStorageCallsWriteTimeout = 3 * time.Second
)
|
||||
|
||||
// LockCache is the locally cached view of the lock record most recently read
// from, or written to, object storage.
type LockCache struct {
	// ETag values are unique identifiers generated by the OCI service and changed every time the object is modified.
	etag string
	// lastUpdate records when this cache entry was stored (UTC).
	lastUpdate time.Time
	// lockRecord is the cached record; nil when no record existed.
	lockRecord *LockRecord
}
|
||||
|
||||
// Lock implements physical.Lock on top of a lock record object stored in the
// backend's lock bucket.
type Lock struct {
	// backend is the underlying physical backend.
	backend *Backend

	// Key is the name of the Key. Value is the Value of the Key.
	key, value string

	// held is a boolean indicating if the lock is currently held.
	held bool

	// Identity is the internal Identity of this Key (unique to this server instance).
	identity string

	// internalLock serializes Lock/Unlock calls on this instance.
	internalLock sync.Mutex

	// stopCh is the channel that stops all operations. It may be closed in the
	// event of a leader loss or graceful shutdown. stopped is a boolean
	// indicating if we are stopped - it exists to prevent double closing the
	// channel. stopLock is a mutex around the locks.
	stopCh   chan struct{}
	stopped  bool
	stopLock sync.Mutex

	// lockRecordCache holds a *LockCache with the last observed record and
	// its ETag; read through loadLockRecordCache.
	lockRecordCache atomic.Value

	// Allow modifying the Lock durations for ease of unit testing.
	renewInterval      time.Duration
	retryInterval      time.Duration
	ttl                time.Duration
	watchRetryInterval time.Duration
	watchRetryMax      int
}
|
||||
|
||||
// LockRecord is the JSON document stored in the lock bucket. The instance
// whose Identity matches the stored record holds the lock.
type LockRecord struct {
	Key      string
	Value    string
	Identity string
}
|
||||
|
||||
// Metric keys emitted by the HA lock implementation.
var (
	metricLockUnlock  = []string{"oci", "lock", "unlock"}
	metricLockLock    = []string{"oci", "lock", "lock"}
	metricLockValue   = []string{"oci", "lock", "Value"}
	metricLeaderValue = []string{"oci", "leader", "Value"}
)
|
||||
|
||||
// HAEnabled reports whether the HA lock machinery is active for this backend,
// as configured via the "ha_enabled" option.
func (b *Backend) HAEnabled() bool {
	return b.haEnabled
}
|
||||
|
||||
// LockWith acquires a mutual exclusion based on the given Key.
|
||||
func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
|
||||
identity, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Lock with: %w", err)
|
||||
}
|
||||
return &Lock{
|
||||
backend: b,
|
||||
key: key,
|
||||
value: value,
|
||||
identity: identity,
|
||||
stopped: true,
|
||||
|
||||
renewInterval: LockRenewInterval,
|
||||
retryInterval: LockRetryInterval,
|
||||
ttl: LockTTL,
|
||||
watchRetryInterval: LockWatchRetryInterval,
|
||||
watchRetryMax: LockWatchRetryMax,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Lock blocks until the distributed lock is acquired, an error occurs, or
// stopCh is closed. On success it returns a leadership channel that is
// closed when leadership is lost or Unlock is called; (nil, nil) means the
// attempt was aborted via stopCh.
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
	l.backend.logger.Debug("Lock() called")
	defer metrics.MeasureSince(metricLockLock, time.Now().UTC())
	l.internalLock.Lock()
	defer l.internalLock.Unlock()
	if l.held {
		return nil, errors.New("lock already held")
	}

	// Attempt to lock - this function blocks until a lock is acquired or an error
	// occurs.
	acquired, err := l.attemptLock(stopCh)
	if err != nil {
		return nil, fmt.Errorf("lock: %w", err)
	}
	if !acquired {
		return nil, nil
	}

	// We have the lock now
	l.held = true

	// Build the locks
	l.stopLock.Lock()
	l.stopCh = make(chan struct{})
	l.stopped = false
	l.stopLock.Unlock()

	// Periodically renew and watch the lock
	go l.renewLock()
	go l.watchLock()

	return l.stopCh, nil
}
|
||||
|
||||
// attemptLock attempts to acquire a lock. If the given channel is closed, the
// acquisition attempt stops. This function returns when a lock is acquired or
// an error occurs. Note that attempts are ticker-driven, so the first try
// only happens after retryInterval has elapsed.
func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
	l.backend.logger.Debug("AttemptLock() called")
	ticker := time.NewTicker(l.retryInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			acquired, err := l.writeLock()
			if err != nil {
				return false, fmt.Errorf("attempt lock: %w", err)
			}
			if !acquired {
				// Someone else holds the lock; try again on the next tick.
				continue
			}

			return true, nil
		case <-stopCh:
			// Caller aborted the acquisition; not an error.
			return false, nil
		}
	}
}
|
||||
|
||||
// renewLock renews the given lock until the channel is closed.
// Renewal failures from writeLock are ignored here (best effort); watchLock
// is the goroutine that detects a lost lock and closes stopCh.
func (l *Lock) renewLock() {
	l.backend.logger.Debug("RenewLock() called")
	ticker := time.NewTicker(l.renewInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			l.writeLock()
		case <-l.stopCh:
			return
		}
	}
}
|
||||
|
||||
func loadLockRecordCache(l *Lock) *LockCache {
|
||||
lockRecordCache := l.lockRecordCache.Load()
|
||||
if lockRecordCache == nil {
|
||||
return nil
|
||||
}
|
||||
return lockRecordCache.(*LockCache)
|
||||
}
|
||||
|
||||
// watchLock checks whether the lock has changed in the table and closes the
|
||||
// leader channel accordingly. If an error occurs during the check, watchLock
|
||||
// will retry the operation and then close the leader channel if it can't
|
||||
// succeed after retries.
|
||||
func (l *Lock) watchLock() {
|
||||
l.backend.logger.Debug("WatchLock() called")
|
||||
retries := 0
|
||||
ticker := time.NewTicker(l.watchRetryInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
OUTER:
|
||||
for {
|
||||
// Check if the channel is already closed
|
||||
select {
|
||||
case <-l.stopCh:
|
||||
l.backend.logger.Debug("WatchLock():Stop lock signaled/closed.")
|
||||
break OUTER
|
||||
default:
|
||||
}
|
||||
|
||||
// Check if we've exceeded retries
|
||||
if retries >= l.watchRetryMax-1 {
|
||||
l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Giving up the lease after max retries")
|
||||
break OUTER
|
||||
}
|
||||
|
||||
// Wait for the timer
|
||||
select {
|
||||
case <-ticker.C:
|
||||
case <-l.stopCh:
|
||||
break OUTER
|
||||
}
|
||||
|
||||
lockRecordCache := loadLockRecordCache(l)
|
||||
if (lockRecordCache == nil) ||
|
||||
(lockRecordCache.lockRecord == nil) ||
|
||||
(lockRecordCache.lockRecord.Identity != l.identity) ||
|
||||
(time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl) {
|
||||
l.backend.logger.Debug("WatchLock: Lock record cache is nil, stale or does not belong to self.")
|
||||
break OUTER
|
||||
}
|
||||
|
||||
lockRecord, _, err := l.get(context.Background())
|
||||
if err != nil {
|
||||
retries++
|
||||
l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Retrying..")
|
||||
metrics.SetGauge(metricHaWatchLockRetriable, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
if (lockRecord == nil) || (lockRecord.Identity != l.identity) {
|
||||
l.backend.logger.Debug("WatchLock: Lock record cache is nil or does not belong to self.")
|
||||
break OUTER
|
||||
}
|
||||
|
||||
// reset retries counter on success
|
||||
retries = 0
|
||||
l.backend.logger.Debug("WatchLock() successful")
|
||||
metrics.SetGauge(metricHaWatchLockRetriable, 0)
|
||||
}
|
||||
|
||||
l.stopLock.Lock()
|
||||
defer l.stopLock.Unlock()
|
||||
if !l.stopped {
|
||||
l.stopped = true
|
||||
l.backend.logger.Debug("Closing the stop channel to give up leadership.")
|
||||
close(l.stopCh)
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock releases the lock: it stops the renew/watch goroutines, marks the
// lock as no longer held, and deletes the lock record from the lock bucket
// when this instance still owns it (guarded by the record's ETag).
func (l *Lock) Unlock() error {
	l.backend.logger.Debug("Unlock() called")
	defer metrics.MeasureSince(metricLockUnlock, time.Now().UTC())

	l.internalLock.Lock()
	defer l.internalLock.Unlock()
	if !l.held {
		return nil
	}

	// Stop any existing locking or renewal attempts
	l.stopLock.Lock()
	if !l.stopped {
		l.stopped = true
		close(l.stopCh)
	}
	l.stopLock.Unlock()

	// We are no longer holding the lock
	l.held = false

	// Get current lock record
	currentLockRecord, etag, err := l.get(context.Background())
	if err != nil {
		return fmt.Errorf("error reading lock record: %w", err)
	}

	// Only delete the record if this instance still owns it; IfMatch on the
	// ETag guards against deleting a record another instance has rewritten.
	if currentLockRecord != nil && currentLockRecord.Identity == l.identity {

		defer metrics.MeasureSince(metricDeleteHa, time.Now())
		opcClientRequestId, err := uuid.GenerateUUID()
		if err != nil {
			l.backend.logger.Debug("Unlock: error generating UUID")
			return fmt.Errorf("failed to generate UUID: %w", err)
		}
		l.backend.logger.Debug("Unlock", "opc-client-request-id", opcClientRequestId)
		request := objectstorage.DeleteObjectRequest{
			NamespaceName:      &l.backend.namespaceName,
			BucketName:         &l.backend.lockBucketName,
			ObjectName:         &l.key,
			IfMatch:            &etag,
			OpcClientRequestId: &opcClientRequestId,
		}

		response, err := l.backend.client.DeleteObject(context.Background(), request)
		l.backend.logRequest("deleteHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err)

		if err != nil {
			metrics.IncrCounter(metricDeleteFailed, 1)
			return fmt.Errorf("write lock: %w", err)
		}
	}

	return nil
}
|
||||
|
||||
func (l *Lock) Value() (bool, string, error) {
|
||||
l.backend.logger.Debug("Value() called")
|
||||
defer metrics.MeasureSince(metricLockValue, time.Now().UTC())
|
||||
|
||||
lockRecord, _, err := l.get(context.Background())
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
if lockRecord == nil {
|
||||
return false, "", err
|
||||
}
|
||||
return true, lockRecord.Value, nil
|
||||
}
|
||||
|
||||
// get retrieves the Value for the lock.
// It reads the lock record object from the lock bucket and returns the
// decoded record together with its ETag. A 404 yields (nil, "", nil),
// meaning no lock record currently exists.
func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) {
	l.backend.logger.Debug("Called getLockRecord()")

	// Read lock Key

	defer metrics.MeasureSince(metricGetHa, time.Now())
	opcClientRequestId, err := uuid.GenerateUUID()
	if err != nil {
		l.backend.logger.Error("getHa: error generating UUID")
		return nil, "", fmt.Errorf("failed to generate UUID: %w", err)
	}
	l.backend.logger.Debug("getHa", "opc-client-request-id", opcClientRequestId)

	request := objectstorage.GetObjectRequest{
		NamespaceName:      &l.backend.namespaceName,
		BucketName:         &l.backend.lockBucketName,
		ObjectName:         &l.key,
		OpcClientRequestId: &opcClientRequestId,
	}

	// Bound the read so a stalled call cannot block lock maintenance.
	ctx, cancel := context.WithTimeout(ctx, ObjectStorageCallsReadTimeout)
	defer cancel()

	response, err := l.backend.client.GetObject(ctx, request)
	l.backend.logRequest("getHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err)

	if err != nil {
		if response.RawResponse != nil && response.RawResponse.StatusCode == http.StatusNotFound {
			// No lock record exists yet.
			return nil, "", nil
		}

		metrics.IncrCounter(metricGetFailed, 1)
		l.backend.logger.Error("Error calling GET", "err", err)
		return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
	}

	defer response.RawResponse.Body.Close()

	body, err := ioutil.ReadAll(response.Content)
	if err != nil {
		metrics.IncrCounter(metricGetFailed, 1)
		l.backend.logger.Error("Error reading content", "err", err)
		return nil, "", fmt.Errorf("failed to decode Value into bytes: %w", err)
	}

	var lockRecord LockRecord
	err = json.Unmarshal(body, &lockRecord)
	if err != nil {
		metrics.IncrCounter(metricGetFailed, 1)
		l.backend.logger.Error("Error un-marshalling content", "err", err)
		return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err)
	}

	return &lockRecord, *response.ETag, nil
}
|
||||
|
||||
func (l *Lock) writeLock() (bool, error) {
|
||||
l.backend.logger.Debug("WriteLock() called")
|
||||
|
||||
// Create a transaction to read and the update (maybe)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// The transaction will be retried, and it could sit in a queue behind, say,
|
||||
// the delete operation. To stop the transaction, we close the context when
|
||||
// the associated stopCh is received.
|
||||
go func() {
|
||||
select {
|
||||
case <-l.stopCh:
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
|
||||
lockRecordCache := loadLockRecordCache(l)
|
||||
if (lockRecordCache == nil) || lockRecordCache.lockRecord == nil ||
|
||||
lockRecordCache.lockRecord.Identity != l.identity ||
|
||||
time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl {
|
||||
// case secondary
|
||||
currentLockRecord, currentEtag, err := l.get(ctx)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading lock record: %w", err)
|
||||
}
|
||||
|
||||
if (lockRecordCache == nil) || lockRecordCache.etag != currentEtag {
|
||||
// update cached lock record
|
||||
l.lockRecordCache.Store(&LockCache{
|
||||
etag: currentEtag,
|
||||
lastUpdate: time.Now().UTC(),
|
||||
lockRecord: currentLockRecord,
|
||||
})
|
||||
|
||||
lockRecordCache = loadLockRecordCache(l)
|
||||
}
|
||||
|
||||
// Current lock record being null implies that there is no leader. In this case we want to try acquiring lock.
|
||||
if currentLockRecord != nil && time.Now().Sub(lockRecordCache.lastUpdate) < LockCacheMinAcceptableAge {
|
||||
return false, nil
|
||||
}
|
||||
// cache is old enough and current, try acquiring lock as secondary
|
||||
}
|
||||
|
||||
newLockRecord := &LockRecord{
|
||||
Key: l.key,
|
||||
Value: l.value,
|
||||
Identity: l.identity,
|
||||
}
|
||||
|
||||
newLockRecordJson, err := json.Marshal(newLockRecord)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading lock record: %w", err)
|
||||
}
|
||||
|
||||
defer metrics.MeasureSince(metricPutHa, time.Now())
|
||||
|
||||
opcClientRequestId, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
l.backend.logger.Error("putHa: error generating UUID")
|
||||
return false, fmt.Errorf("failed to generate UUID: %w", err)
|
||||
}
|
||||
l.backend.logger.Debug("putHa", "opc-client-request-id", opcClientRequestId)
|
||||
size := int64(len(newLockRecordJson))
|
||||
putRequest := objectstorage.PutObjectRequest{
|
||||
NamespaceName: &l.backend.namespaceName,
|
||||
BucketName: &l.backend.lockBucketName,
|
||||
ObjectName: &l.key,
|
||||
ContentLength: &size,
|
||||
PutObjectBody: ioutil.NopCloser(bytes.NewReader(newLockRecordJson)),
|
||||
OpcMeta: nil,
|
||||
OpcClientRequestId: &opcClientRequestId,
|
||||
}
|
||||
|
||||
if lockRecordCache.etag == "" {
|
||||
noneMatch := "*"
|
||||
putRequest.IfNoneMatch = &noneMatch
|
||||
} else {
|
||||
putRequest.IfMatch = &lockRecordCache.etag
|
||||
}
|
||||
|
||||
newtEtag := ""
|
||||
for i := 1; i <= LockWriteRetriesOnFailures; i++ {
|
||||
writeCtx, writeCancel := context.WithTimeout(ctx, ObjectStorageCallsWriteTimeout)
|
||||
defer writeCancel()
|
||||
|
||||
putObjectResponse, putObjectError := l.backend.client.PutObject(writeCtx, putRequest)
|
||||
l.backend.logRequest("putHA", putObjectResponse.RawResponse, putObjectResponse.OpcClientRequestId, putObjectResponse.OpcRequestId, putObjectError)
|
||||
|
||||
if putObjectError == nil {
|
||||
newtEtag = *putObjectResponse.ETag
|
||||
putObjectResponse.RawResponse.Body.Close()
|
||||
break
|
||||
}
|
||||
|
||||
err = putObjectError
|
||||
|
||||
if putObjectResponse.RawResponse == nil {
|
||||
metrics.IncrCounter(metricPutFailed, 1)
|
||||
l.backend.logger.Error("PUT", "err", err)
|
||||
break
|
||||
}
|
||||
|
||||
putObjectResponse.RawResponse.Body.Close()
|
||||
|
||||
// Retry if the return code is 5xx
|
||||
if (putObjectResponse.RawResponse.StatusCode / 100) == 5 {
|
||||
metrics.IncrCounter(metricPutFailed, 1)
|
||||
l.backend.logger.Warn("PUT. Retrying..", "err", err)
|
||||
time.Sleep(time.Duration(100*i) * time.Millisecond)
|
||||
} else {
|
||||
l.backend.logger.Error("PUT", "err", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("write lock: %w", err)
|
||||
}
|
||||
|
||||
l.backend.logger.Debug("Lock written", string(newLockRecordJson))
|
||||
|
||||
l.lockRecordCache.Store(&LockCache{
|
||||
etag: newtEtag,
|
||||
lastUpdate: time.Now().UTC(),
|
||||
lockRecord: newLockRecord,
|
||||
})
|
||||
|
||||
metrics.SetGauge(metricLeaderValue, 1)
|
||||
return true, nil
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
// Copyright © 2019, Oracle and/or its affiliates.
|
||||
package oci
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
"github.com/oracle/oci-go-sdk/common"
|
||||
"github.com/oracle/oci-go-sdk/objectstorage"
|
||||
)
|
||||
|
||||
// TestOCIHABackend exercises the HA (lock) behavior of the OCI backend
// against a real tenancy. It requires VAULT_ACC and resolvable OCI
// credentials, creates a temporary bucket (used for both data and locks),
// and deletes it afterwards.
func TestOCIHABackend(t *testing.T) {
	// Skip tests if we are not running acceptance tests
	if os.Getenv("VAULT_ACC") == "" {
		t.SkipNow()
	}

	if !hasOCICredentials() {
		t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.")
	}

	// Random bucket name to isolate this run from other tests.
	bucketName, _ := uuid.GenerateUUID()
	configProvider := common.DefaultConfigProvider()
	objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider)
	namespaceName := getNamespaceName(objectStorageClient, t)

	createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t)
	defer deleteBucket(namespaceName, bucketName, objectStorageClient, t)

	backend := createBackend(bucketName, namespaceName, "true", bucketName, t)
	ha, ok := backend.(physical.HABackend)
	if !ok {
		t.Fatalf("does not implement")
	}

	physical.ExerciseHABackend(t, ha, ha)
}
|
|
@ -1,105 +0,0 @@
|
|||
// Copyright © 2019, Oracle and/or its affiliates.
|
||||
package oci
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
"github.com/oracle/oci-go-sdk/common"
|
||||
"github.com/oracle/oci-go-sdk/objectstorage"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// TestOCIBackend exercises the basic Put/Get/Delete/List behavior of the OCI
// backend against a real tenancy. It requires VAULT_ACC and resolvable OCI
// credentials, creates a temporary bucket, and deletes it afterwards.
func TestOCIBackend(t *testing.T) {
	// Skip tests if we are not running acceptance tests
	if os.Getenv("VAULT_ACC") == "" {
		t.SkipNow()
	}

	if !hasOCICredentials() {
		t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.")
	}

	// Random bucket name to isolate this run from other tests.
	bucketName, _ := uuid.GenerateUUID()
	configProvider := common.DefaultConfigProvider()
	objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider)
	namespaceName := getNamespaceName(objectStorageClient, t)

	createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t)
	defer deleteBucket(namespaceName, bucketName, objectStorageClient, t)

	backend := createBackend(bucketName, namespaceName, "false", "", t)
	physical.ExerciseBackend(t, backend)
	physical.ExerciseBackend_ListPrefix(t, backend)
}
|
||||
|
||||
func createBucket(bucketName string, tenancyOcid string, namespaceName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) {
|
||||
createBucketRequest := objectstorage.CreateBucketRequest{
|
||||
NamespaceName: &namespaceName,
|
||||
}
|
||||
createBucketRequest.CompartmentId = &tenancyOcid
|
||||
createBucketRequest.Name = &bucketName
|
||||
createBucketRequest.Metadata = make(map[string]string)
|
||||
createBucketRequest.PublicAccessType = objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess
|
||||
_, err := objectStorageClient.CreateBucket(context.Background(), createBucketRequest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create bucket: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteBucket(nameSpaceName string, bucketName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) {
|
||||
request := objectstorage.DeleteBucketRequest{
|
||||
NamespaceName: &nameSpaceName,
|
||||
BucketName: &bucketName,
|
||||
}
|
||||
_, err := objectStorageClient.DeleteBucket(context.Background(), request)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to delete bucket: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func getTenancyOcid(configProvider common.ConfigurationProvider, t *testing.T) (tenancyOcid string) {
|
||||
tenancyOcid, err := configProvider.TenancyOCID()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get tenancy ocid: %v", err)
|
||||
}
|
||||
return tenancyOcid
|
||||
}
|
||||
|
||||
func createBackend(bucketName string, namespaceName string, haEnabledStr string, lockBucketName string, t *testing.T) physical.Backend {
|
||||
backend, err := NewBackend(map[string]string{
|
||||
"auth_type_api_key": "true",
|
||||
"bucket_name": bucketName,
|
||||
"namespace_name": namespaceName,
|
||||
"ha_enabled": haEnabledStr,
|
||||
"lock_bucket_name": lockBucketName,
|
||||
}, logging.NewVaultLogger(log.Trace))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create new backend: %v", err)
|
||||
}
|
||||
return backend
|
||||
}
|
||||
|
||||
func getNamespaceName(objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) string {
|
||||
response, err := objectStorageClient.GetNamespace(context.Background(), objectstorage.GetNamespaceRequest{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get namespaceName: %v", err)
|
||||
}
|
||||
nameSpaceName := *response.Value
|
||||
return nameSpaceName
|
||||
}
|
||||
|
||||
func hasOCICredentials() bool {
|
||||
configProvider := common.DefaultConfigProvider()
|
||||
|
||||
_, err := configProvider.KeyID()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
|
@ -59,7 +59,6 @@ vault auth enable "jwt"
|
|||
vault auth enable "kerberos"
|
||||
vault auth enable "kubernetes"
|
||||
vault auth enable "ldap"
|
||||
vault auth enable "oci"
|
||||
vault auth enable "okta"
|
||||
vault auth enable "radius"
|
||||
vault auth enable "userpass"
|
||||
|
|
|
@ -86,14 +86,6 @@
|
|||
|
||||
{{#if (eq @mode "create")}}
|
||||
|
||||
{{#if (eq @model.plugin_name "vault-plugin-database-oracle")}}
|
||||
<AlertBanner @type="warning">
|
||||
Please ensure that your Oracle plugin has the default name of
|
||||
<b>vault-plugin-database-oracle</b>. Custom naming is not supported in the UI at this time. If the plugin is already
|
||||
named vault-plugin-database-oracle, disregard this warning.
|
||||
</AlertBanner>
|
||||
{{/if}}
|
||||
|
||||
<form {{on "submit" this.handleCreateConnection}} aria-label="create connection form">
|
||||
{{#each @model.fieldAttrs as |attr|}}
|
||||
{{#if (not-eq attr.options.readOnly true)}}
|
||||
|
|
|
@ -84,24 +84,6 @@ export const AVAILABLE_PLUGIN_TYPES = [
|
|||
{ attr: 'root_rotation_statements', group: 'statements' },
|
||||
],
|
||||
},
|
||||
{
|
||||
value: 'vault-plugin-database-oracle',
|
||||
displayName: 'Oracle',
|
||||
fields: [
|
||||
{ attr: 'plugin_name' },
|
||||
{ attr: 'name' },
|
||||
{ attr: 'verify_connection', show: false },
|
||||
{ attr: 'password_policy' },
|
||||
{ attr: 'connection_url', group: 'pluginConfig' },
|
||||
{ attr: 'username', group: 'pluginConfig', show: false },
|
||||
{ attr: 'password', group: 'pluginConfig', show: false },
|
||||
{ attr: 'max_open_connections', group: 'pluginConfig' },
|
||||
{ attr: 'max_idle_connections', group: 'pluginConfig' },
|
||||
{ attr: 'max_connection_lifetime', group: 'pluginConfig' },
|
||||
{ attr: 'username_template', group: 'pluginConfig' },
|
||||
{ attr: 'root_rotation_statements', group: 'statements' },
|
||||
],
|
||||
},
|
||||
{
|
||||
value: 'postgresql-database-plugin',
|
||||
displayName: 'PostgreSQL',
|
||||
|
@ -134,7 +116,6 @@ export const STATEMENT_FIELDS = {
|
|||
'mysql-aurora-database-plugin': [],
|
||||
'mysql-legacy-database-plugin': [],
|
||||
'mysql-rds-database-plugin': [],
|
||||
'vault-plugin-database-oracle': [],
|
||||
'postgresql-database-plugin': [],
|
||||
},
|
||||
dynamic: {
|
||||
|
@ -143,7 +124,6 @@ export const STATEMENT_FIELDS = {
|
|||
'mysql-aurora-database-plugin': ['creation_statements', 'revocation_statements'],
|
||||
'mysql-legacy-database-plugin': ['creation_statements', 'revocation_statements'],
|
||||
'mysql-rds-database-plugin': ['creation_statements', 'revocation_statements'],
|
||||
'vault-plugin-database-oracle': ['creation_statements', 'revocation_statements'],
|
||||
'postgresql-database-plugin': [
|
||||
'creation_statements',
|
||||
'revocation_statements',
|
||||
|
|
|
@ -140,8 +140,6 @@ export const structureIconMap = {
|
|||
'logo-microsoft-monochrome': 'microsoft',
|
||||
'logo-okta-color': 'okta-color',
|
||||
'logo-okta-monochrome': 'okta',
|
||||
'logo-oracle-color': 'oracle-color',
|
||||
'logo-oracle-monochrome': 'oracle',
|
||||
'logo-slack-color': 'slack-color',
|
||||
'logo-slack-monochrome': 'slack',
|
||||
'logo-vmware-color': 'vmware-color',
|
||||
|
|
Loading…
Reference in New Issue
Block a user