remove aws/s3/dynamodb
This commit is contained in:
parent
7fbaa0f061
commit
6afd5d71a4
@ -8,7 +8,6 @@ project {
|
||||
# Supports doublestar glob patterns for more flexibility in defining which
|
||||
# files or folders should be ignored
|
||||
header_ignore = [
|
||||
"builtin/credential/aws/pkcs7/**",
|
||||
"ui/node_modules/**",
|
||||
"enos/modules/k8s_deploy_vault/raft-config.hcl",
|
||||
"plugins/database/postgresql/scram/**",
|
||||
|
@ -5,13 +5,11 @@
|
||||
# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
|
||||
|
||||
# Select Auth engines are owned by Ecosystem
|
||||
/builtin/credential/aws/ @hashicorp/vault-ecosystem
|
||||
/builtin/credential/github/ @hashicorp/vault-ecosystem
|
||||
/builtin/credential/ldap/ @hashicorp/vault-ecosystem
|
||||
/builtin/credential/okta/ @hashicorp/vault-ecosystem
|
||||
|
||||
# Secrets engines (pki, ssh, totp and transit omitted)
|
||||
/builtin/logical/aws/ @hashicorp/vault-ecosystem
|
||||
/builtin/logical/cassandra/ @hashicorp/vault-ecosystem
|
||||
/builtin/logical/consul/ @hashicorp/vault-ecosystem
|
||||
/builtin/logical/database/ @hashicorp/vault-ecosystem
|
||||
|
@ -1,287 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: MPL-2.0
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/api"
|
||||
)
|
||||
|
||||
type AWSAuth struct {
|
||||
// If not provided with the WithRole login option, the Vault server will look for a role
|
||||
// with the friendly name of the IAM principal if using the IAM auth type,
|
||||
// or the name of the EC2 instance's AMI ID if using the EC2 auth type.
|
||||
// If no matching role is found, login will fail.
|
||||
roleName string
|
||||
mountPath string
|
||||
// Can be "iam" or "ec2". Defaults to "iam".
|
||||
authType string
|
||||
// Can be "pkcs7", "identity", or "rsa2048". Defaults to "pkcs7".
|
||||
signatureType string
|
||||
region string
|
||||
iamServerIDHeaderValue string
|
||||
creds *credentials.Credentials
|
||||
nonce string
|
||||
}
|
||||
|
||||
var _ api.AuthMethod = (*AWSAuth)(nil)
|
||||
|
||||
type LoginOption func(a *AWSAuth) error
|
||||
|
||||
const (
|
||||
iamType = "iam"
|
||||
ec2Type = "ec2"
|
||||
pkcs7Type = "pkcs7"
|
||||
identityType = "identity"
|
||||
rsa2048Type = "rsa2048"
|
||||
defaultMountPath = "aws"
|
||||
defaultAuthType = iamType
|
||||
defaultRegion = "us-east-1"
|
||||
defaultSignatureType = pkcs7Type
|
||||
)
|
||||
|
||||
// NewAWSAuth initializes a new AWS auth method interface to be
|
||||
// passed as a parameter to the client.Auth().Login method.
|
||||
//
|
||||
// Supported options: WithRole, WithMountPath, WithIAMAuth, WithEC2Auth,
|
||||
// WithPKCS7Signature, WithIdentitySignature, WithIAMServerIDHeader, WithNonce, WithRegion
|
||||
func NewAWSAuth(opts ...LoginOption) (*AWSAuth, error) {
|
||||
a := &AWSAuth{
|
||||
mountPath: defaultMountPath,
|
||||
authType: defaultAuthType,
|
||||
region: defaultRegion,
|
||||
signatureType: defaultSignatureType,
|
||||
}
|
||||
|
||||
// Loop through each option
|
||||
for _, opt := range opts {
|
||||
// Call the option giving the instantiated
|
||||
// *AWSAuth as the argument
|
||||
err := opt(a)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error with login option: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// return the modified auth struct instance
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Login sets up the required request body for the AWS auth method's /login
|
||||
// endpoint, and performs a write to it. This method defaults to the "iam"
|
||||
// auth type unless NewAWSAuth is called with WithEC2Auth().
|
||||
//
|
||||
// The Vault client will set its credentials to the values of the
|
||||
// AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_SESSION environment
|
||||
// variables. To specify a path to a credentials file on disk instead, set
|
||||
// the environment variable AWS_SHARED_CREDENTIALS_FILE.
|
||||
func (a *AWSAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
loginData := make(map[string]interface{})
|
||||
switch a.authType {
|
||||
case ec2Type:
|
||||
sess, err := session.NewSession()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating session to probe EC2 metadata: %w", err)
|
||||
}
|
||||
metadataSvc := ec2metadata.New(sess)
|
||||
if !metadataSvc.Available() {
|
||||
return nil, fmt.Errorf("metadata service not available")
|
||||
}
|
||||
|
||||
if a.signatureType == pkcs7Type {
|
||||
// fetch PKCS #7 signature
|
||||
resp, err := metadataSvc.GetDynamicData("/instance-identity/pkcs7")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get PKCS 7 data from metadata service: %w", err)
|
||||
}
|
||||
pkcs7 := strings.TrimSpace(resp)
|
||||
loginData["pkcs7"] = pkcs7
|
||||
} else if a.signatureType == identityType {
|
||||
// fetch signature from identity document
|
||||
doc, err := metadataSvc.GetDynamicData("/instance-identity/document")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error requesting instance identity doc: %w", err)
|
||||
}
|
||||
loginData["identity"] = base64.StdEncoding.EncodeToString([]byte(doc))
|
||||
|
||||
signature, err := metadataSvc.GetDynamicData("/instance-identity/signature")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error requesting signature: %w", err)
|
||||
}
|
||||
loginData["signature"] = signature
|
||||
} else if a.signatureType == rsa2048Type {
|
||||
// fetch RSA 2048 signature, which is also a PKCS#7 signature
|
||||
resp, err := metadataSvc.GetDynamicData("/instance-identity/rsa2048")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get PKCS 7 data from metadata service: %w", err)
|
||||
}
|
||||
pkcs7 := strings.TrimSpace(resp)
|
||||
loginData["pkcs7"] = pkcs7
|
||||
} else {
|
||||
return nil, fmt.Errorf("unknown signature type: %s", a.signatureType)
|
||||
}
|
||||
|
||||
// Add the reauthentication value, if we have one
|
||||
if a.nonce == "" {
|
||||
uid, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error generating uuid for reauthentication value: %w", err)
|
||||
}
|
||||
a.nonce = uid
|
||||
}
|
||||
loginData["nonce"] = a.nonce
|
||||
case iamType:
|
||||
logger := hclog.Default()
|
||||
if a.creds == nil {
|
||||
credsConfig := awsutil.CredentialsConfig{
|
||||
AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
|
||||
SecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
|
||||
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
Logger: logger,
|
||||
}
|
||||
|
||||
// the env vars above will take precedence if they are set, as
|
||||
// they will be added to the ChainProvider stack first
|
||||
var hasCredsFile bool
|
||||
credsFilePath := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
|
||||
if credsFilePath != "" {
|
||||
hasCredsFile = true
|
||||
credsConfig.Filename = credsFilePath
|
||||
}
|
||||
|
||||
creds, err := credsConfig.GenerateCredentialChain(awsutil.WithSharedCredentials(hasCredsFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if creds == nil {
|
||||
return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
|
||||
}
|
||||
|
||||
_, err = creds.Get()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve credentials from credential chain: %w", err)
|
||||
}
|
||||
|
||||
a.creds = creds
|
||||
}
|
||||
|
||||
data, err := awsutil.GenerateLoginData(a.creds, a.iamServerIDHeaderValue, a.region, logger)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to generate login data for AWS auth endpoint: %w", err)
|
||||
}
|
||||
loginData = data
|
||||
}
|
||||
|
||||
// Add role if we have one. If not, Vault will infer the role name based
|
||||
// on the IAM friendly name (iam auth type) or EC2 instance's
|
||||
// AMI ID (ec2 auth type).
|
||||
if a.roleName != "" {
|
||||
loginData["role"] = a.roleName
|
||||
}
|
||||
|
||||
if a.iamServerIDHeaderValue != "" {
|
||||
client.AddHeader("iam_server_id_header_value", a.iamServerIDHeaderValue)
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("auth/%s/login", a.mountPath)
|
||||
resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to log in with AWS auth: %w", err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func WithRole(roleName string) LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.roleName = roleName
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithMountPath(mountPath string) LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.mountPath = mountPath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithEC2Auth() LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.authType = ec2Type
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithIAMAuth() LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.authType = iamType
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithIdentitySignature will have the client send the cryptographic identity
|
||||
// document signature to verify EC2 auth logins. Only used by EC2 auth type.
|
||||
// If this option is not provided, will default to using the PKCS #7 signature.
|
||||
// The signature type used should match the type of the public AWS cert Vault
|
||||
// has been configured with to verify EC2 instance identity.
|
||||
// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration
|
||||
func WithIdentitySignature() LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.signatureType = identityType
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPKCS7Signature will explicitly tell the client to send the PKCS #7
|
||||
// signature to verify EC2 auth logins. Only used by EC2 auth type.
|
||||
// PKCS #7 is the default, but this method is provided for additional clarity.
|
||||
// The signature type used should match the type of the public AWS cert Vault
|
||||
// has been configured with to verify EC2 instance identity.
|
||||
// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration
|
||||
func WithPKCS7Signature() LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.signatureType = pkcs7Type
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithIAMServerIDHeader(headerValue string) LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.iamServerIDHeaderValue = headerValue
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNonce can be used to specify a named nonce for the ec2 auth login
|
||||
// method. If not provided, an automatically-generated uuid will be used
|
||||
// instead.
|
||||
func WithNonce(nonce string) LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.nonce = nonce
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithRegion(region string) LoginOption {
|
||||
return func(a *AWSAuth) error {
|
||||
a.region = region
|
||||
return nil
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
module github.com/hashicorp/vault/api/auth/aws
|
||||
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go v1.49.22
|
||||
github.com/hashicorp/go-hclog v0.16.2
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6
|
||||
github.com/hashicorp/go-uuid v1.0.2
|
||||
github.com/hashicorp/vault/api v1.9.2
|
||||
)
|
@ -1,159 +0,0 @@
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU=
|
||||
github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
|
||||
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
|
||||
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
|
||||
github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA=
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as=
|
||||
github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8=
|
||||
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
|
||||
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
@ -1,437 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
cache "github.com/patrickmn/go-cache"
|
||||
)
|
||||
|
||||
const (
|
||||
amzHeaderPrefix = "X-Amz-"
|
||||
operationPrefixAWS = "aws"
|
||||
)
|
||||
|
||||
var defaultAllowedSTSRequestHeaders = []string{
|
||||
"X-Amz-Algorithm",
|
||||
"X-Amz-Content-Sha256",
|
||||
"X-Amz-Credential",
|
||||
"X-Amz-Date",
|
||||
"X-Amz-Security-Token",
|
||||
"X-Amz-Signature",
|
||||
"X-Amz-SignedHeaders",
|
||||
}
|
||||
|
||||
func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
|
||||
b, err := Backend(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := b.Setup(ctx, conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
type backend struct {
|
||||
*framework.Backend
|
||||
|
||||
// Lock to make changes to any of the backend's configuration endpoints.
|
||||
configMutex sync.RWMutex
|
||||
|
||||
// Lock to make changes to role entries
|
||||
roleMutex sync.Mutex
|
||||
|
||||
// Lock to make changes to the deny list entries
|
||||
denyListMutex sync.RWMutex
|
||||
|
||||
// Guards the deny list/access list tidy functions
|
||||
tidyDenyListCASGuard *uint32
|
||||
tidyAccessListCASGuard *uint32
|
||||
|
||||
// Duration after which the periodic function of the backend needs to
|
||||
// tidy the deny list and access list entries.
|
||||
tidyCooldownPeriod time.Duration
|
||||
|
||||
// nextTidyTime holds the time at which the periodic func should initiate
|
||||
// the tidy operations. This is set by the periodicFunc based on the value
|
||||
// of tidyCooldownPeriod.
|
||||
nextTidyTime time.Time
|
||||
|
||||
// Map to hold the EC2 client objects indexed by region and STS role.
|
||||
// This avoids the overhead of creating a client object for every login request.
|
||||
// When the credentials are modified or deleted, all the cached client objects
|
||||
// will be flushed. The empty STS role signifies the master account
|
||||
EC2ClientsMap map[string]map[string]*ec2.EC2
|
||||
|
||||
// Map to hold the IAM client objects indexed by region and STS role.
|
||||
// This avoids the overhead of creating a client object for every login request.
|
||||
// When the credentials are modified or deleted, all the cached client objects
|
||||
// will be flushed. The empty STS role signifies the master account
|
||||
IAMClientsMap map[string]map[string]*iam.IAM
|
||||
|
||||
// Map to associate a partition to a random region in that partition. Users of
|
||||
// this don't care what region in the partition they use, but there is some client
|
||||
// cache efficiency gain if we keep the mapping stable, hence caching a single copy.
|
||||
partitionToRegionMap map[string]*endpoints.Region
|
||||
|
||||
// Map of AWS unique IDs to the full ARN corresponding to that unique ID
|
||||
// This avoids the overhead of an AWS API hit for every login request
|
||||
// using the IAM auth method when bound_iam_principal_arn contains a wildcard
|
||||
iamUserIdToArnCache *cache.Cache
|
||||
|
||||
// AWS Account ID of the "default" AWS credentials
|
||||
// This cache avoids the need to call GetCallerIdentity repeatedly to learn it
|
||||
// We can't store this because, in certain pathological cases, it could change
|
||||
// out from under us, such as a standby and active Vault server in different AWS
|
||||
// accounts using their IAM instance profile to get their credentials.
|
||||
defaultAWSAccountID string
|
||||
|
||||
// roleCache caches role entries to avoid locking headaches
|
||||
roleCache *cache.Cache
|
||||
|
||||
resolveArnToUniqueIDFunc func(context.Context, logical.Storage, string) (string, error)
|
||||
|
||||
// upgradeCancelFunc is used to cancel the context used in the upgrade
|
||||
// function
|
||||
upgradeCancelFunc context.CancelFunc
|
||||
|
||||
// deprecatedTerms is used to downgrade preferred terminology (e.g. accesslist)
|
||||
// to the legacy term. This allows for consolidated aliasing of the affected
|
||||
// endpoints until the legacy terms are removed.
|
||||
deprecatedTerms *strings.Replacer
|
||||
}
|
||||
|
||||
// Backend constructs and configures the aws auth backend: it initializes the
// client/ARN caches, wires up every API path (including deprecated-term
// aliases), and registers the periodic, invalidation, initialization, and
// cleanup hooks. The *logical.BackendConfig argument is unused, and the
// returned error is always nil in the visible code path.
func Backend(_ *logical.BackendConfig) (*backend, error) {
	b := &backend{
		// Setting the periodic func to be run once in an hour.
		// If there is a real need, this can be made configurable.
		tidyCooldownPeriod: time.Hour,
		EC2ClientsMap:      make(map[string]map[string]*ec2.EC2),
		IAMClientsMap:      make(map[string]map[string]*iam.IAM),
		// Unique-ID -> ARN entries live for 7 days; expired entries are
		// purged every 24 hours.
		iamUserIdToArnCache:    cache.New(7*24*time.Hour, 24*time.Hour),
		tidyDenyListCASGuard:   new(uint32),
		tidyAccessListCASGuard: new(uint32),
		roleCache:              cache.New(cache.NoExpiration, cache.NoExpiration),

		// deprecatedTerms rewrites preferred terminology back to the legacy
		// terms; used by genDeprecatedPath to build the alias endpoints.
		deprecatedTerms: strings.NewReplacer(
			"accesslist", "whitelist",
			"access-list", "whitelist",
			"denylist", "blacklist",
			"deny-list", "blacklist",
		),
	}

	// Indirection so tests can inject a fake ARN resolver.
	b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId

	b.Backend = &framework.Backend{
		PeriodicFunc: b.periodicFunc,
		AuthRenew:    b.pathLoginRenew,
		Help:         backendHelp,
		PathsSpecial: &logical.Paths{
			// "login" must be reachable without a token.
			Unauthenticated: []string{
				"login",
			},
			// The identity access list is node-local and not replicated.
			LocalStorage: []string{
				identityAccessListStorage,
			},
			// Client credentials are seal-wrapped at rest.
			SealWrapStorage: []string{
				"config/client",
			},
		},
		Paths: []*framework.Path{
			b.pathLogin(),
			b.pathListRole(),
			b.pathListRoles(),
			b.pathRole(),
			b.pathRoleTag(),
			b.pathConfigClient(),
			b.pathConfigCertificate(),
			b.pathConfigIdentity(),
			b.pathConfigRotateRoot(),
			b.pathConfigSts(),
			b.pathListSts(),
			b.pathListCertificates(),

			// The following pairs of functions are path aliases. The first is the
			// primary endpoint, and the second is version using deprecated language,
			// for backwards compatibility. The functionality is identical between the two.
			b.pathConfigTidyRoletagDenyList(),
			b.genDeprecatedPath(b.pathConfigTidyRoletagDenyList()),

			b.pathConfigTidyIdentityAccessList(),
			b.genDeprecatedPath(b.pathConfigTidyIdentityAccessList()),

			b.pathListRoletagDenyList(),
			b.genDeprecatedPath(b.pathListRoletagDenyList()),

			b.pathRoletagDenyList(),
			b.genDeprecatedPath(b.pathRoletagDenyList()),

			b.pathTidyRoletagDenyList(),
			b.genDeprecatedPath(b.pathTidyRoletagDenyList()),

			b.pathListIdentityAccessList(),
			b.genDeprecatedPath(b.pathListIdentityAccessList()),

			b.pathIdentityAccessList(),
			b.genDeprecatedPath(b.pathIdentityAccessList()),

			b.pathTidyIdentityAccessList(),
			b.genDeprecatedPath(b.pathTidyIdentityAccessList()),
		},
		Invalidate:     b.invalidate,
		InitializeFunc: b.initialize,
		BackendType:    logical.TypeCredential,
		Clean:          b.cleanup,
	}

	b.partitionToRegionMap = generatePartitionToRegionMap()

	return b, nil
}
|
||||
|
||||
// periodicFunc performs the tasks that the backend wishes to do periodically.
// Currently this will be triggered once in a minute by the RollbackManager.
//
// The tasks being done currently by this function are to cleanup the expired
// entries of both deny list role tags and access list identities. Tidying is done
// not once in a minute, but once in an hour, controlled by 'tidyCooldownPeriod'.
// Tidying of deny list and access list are by default enabled. This can be
// changed using `config/tidy/roletags` and `config/tidy/identities` endpoints.
func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error {
	// Run the tidy operations for the first time. Then run it when current
	// time matches the nextTidyTime.
	if b.nextTidyTime.IsZero() || !time.Now().Before(b.nextTidyTime) {
		// Deny-list tidying is gated on replication state: it only runs on a
		// local mount or on a node that is not a performance secondary/standby.
		if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
			// safetyBuffer defaults to 180 days for roletag deny list
			safetyBuffer := 15552000
			tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
			if err != nil {
				return err
			}
			skipBlacklistTidy := false
			// check if tidying of role tags was configured
			if tidyBlacklistConfigEntry != nil {
				// check if periodic tidying of role tags was disabled
				if tidyBlacklistConfigEntry.DisablePeriodicTidy {
					skipBlacklistTidy = true
				}
				// overwrite the default safetyBuffer with the configured value
				safetyBuffer = tidyBlacklistConfigEntry.SafetyBuffer
			}
			// tidy role tags if explicitly not disabled
			if !skipBlacklistTidy {
				// NOTE(review): the return value of the tidy call is discarded,
				// presumably because tidying is best-effort — confirm.
				b.tidyDenyListRoleTag(ctx, req, safetyBuffer)
			}
		}

		// We don't check for replication state for access list identities as
		// these are locally stored

		// safety_buffer defaults to 72 hours (259200s) for the identity access list.
		safety_buffer := 259200
		tidyWhitelistConfigEntry, err := b.lockedConfigTidyIdentities(ctx, req.Storage)
		if err != nil {
			return err
		}
		skipWhitelistTidy := false
		// check if tidying of identities was configured
		if tidyWhitelistConfigEntry != nil {
			// check if periodic tidying of identities was disabled
			if tidyWhitelistConfigEntry.DisablePeriodicTidy {
				skipWhitelistTidy = true
			}
			// overwrite the default safety_buffer with the configured value
			safety_buffer = tidyWhitelistConfigEntry.SafetyBuffer
		}
		// tidy identities if explicitly not disabled
		if !skipWhitelistTidy {
			b.tidyAccessListIdentity(ctx, req, safety_buffer)
		}

		// Update the time at which to run the tidy functions again.
		b.nextTidyTime = time.Now().Add(b.tidyCooldownPeriod)
	}
	return nil
}
|
||||
|
||||
func (b *backend) cleanup(ctx context.Context) {
|
||||
if b.upgradeCancelFunc != nil {
|
||||
b.upgradeCancelFunc()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backend) invalidate(ctx context.Context, key string) {
|
||||
switch {
|
||||
case key == "config/client":
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
b.flushCachedEC2Clients()
|
||||
b.flushCachedIAMClients()
|
||||
b.defaultAWSAccountID = ""
|
||||
case strings.HasPrefix(key, "role"):
|
||||
// TODO: We could make this better
|
||||
b.roleCache.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Putting this here so we can inject a fake resolver into the backend for unit testing
|
||||
// purposes
|
||||
func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storage, arn string) (string, error) {
|
||||
entity, err := parseIamArn(arn)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// This odd-looking code is here because IAM is an inherently global service. IAM and STS ARNs
|
||||
// don't have regions in them, and there is only a single global endpoint for IAM; see
|
||||
// http://docs.aws.amazon.com/general/latest/gr/rande.html#iam_region
|
||||
// However, the ARNs do have a partition in them, because the GovCloud and China partitions DO
|
||||
// have their own separate endpoints, and the partition is encoded in the ARN. If Amazon's Go SDK
|
||||
// would allow us to pass a partition back to the IAM client, it would be much simpler. But it
|
||||
// doesn't appear that's possible, so in order to properly support GovCloud and China, we do a
|
||||
// circular dance of extracting the partition from the ARN, finding any arbitrary region in the
|
||||
// partition, and passing that region back back to the SDK, so that the SDK can figure out the
|
||||
// proper partition from the arbitrary region we passed in to look up the endpoint.
|
||||
// Sigh
|
||||
region := b.partitionToRegionMap[entity.Partition]
|
||||
if region == nil {
|
||||
return "", fmt.Errorf("unable to resolve partition %q to a region", entity.Partition)
|
||||
}
|
||||
iamClient, err := b.clientIAM(ctx, s, region.ID(), entity.AccountNumber)
|
||||
if err != nil {
|
||||
return "", awsutil.AppendAWSError(err)
|
||||
}
|
||||
|
||||
switch entity.Type {
|
||||
case "user":
|
||||
userInfo, err := iamClient.GetUserWithContext(ctx, &iam.GetUserInput{UserName: &entity.FriendlyName})
|
||||
if err != nil {
|
||||
return "", awsutil.AppendAWSError(err)
|
||||
}
|
||||
if userInfo == nil {
|
||||
return "", fmt.Errorf("got nil result from GetUser")
|
||||
}
|
||||
return *userInfo.User.UserId, nil
|
||||
case "role":
|
||||
roleInfo, err := iamClient.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: &entity.FriendlyName})
|
||||
if err != nil {
|
||||
return "", awsutil.AppendAWSError(err)
|
||||
}
|
||||
if roleInfo == nil {
|
||||
return "", fmt.Errorf("got nil result from GetRole")
|
||||
}
|
||||
return *roleInfo.Role.RoleId, nil
|
||||
case "instance-profile":
|
||||
profileInfo, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName})
|
||||
if err != nil {
|
||||
return "", awsutil.AppendAWSError(err)
|
||||
}
|
||||
if profileInfo == nil {
|
||||
return "", fmt.Errorf("got nil result from GetInstanceProfile")
|
||||
}
|
||||
return *profileInfo.InstanceProfile.InstanceProfileId, nil
|
||||
default:
|
||||
return "", fmt.Errorf("unrecognized error type %#v", entity.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// genDeprecatedPath will return a deprecated version of a framework.Path. The
// path pattern and display attributes (if any) will contain deprecated terms,
// and the path will be marked as deprecated.
func (b *backend) genDeprecatedPath(path *framework.Path) *framework.Path {
	// Shallow copy of the path; pattern and display fields are rewritten on
	// the copy below.
	pathDeprecated := *path
	pathDeprecated.Pattern = b.deprecatedTerms.Replace(path.Pattern)
	pathDeprecated.Deprecated = true

	if path.DisplayAttrs != nil {
		// Copy the display attrs so the original path's attrs are untouched.
		deprecatedDisplayAttrs := *path.DisplayAttrs
		deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationPrefix)
		deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationVerb)
		deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationSuffix)
		pathDeprecated.DisplayAttrs = &deprecatedDisplayAttrs
	}

	for i, op := range path.Operations {
		if op.Properties().DisplayAttrs != nil {
			deprecatedDisplayAttrs := *op.Properties().DisplayAttrs
			deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationPrefix)
			deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationVerb)
			deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationSuffix)
			// NOTE(review): the shallow copy above shares the Operations
			// collection with the original path, so assigning DisplayAttrs
			// through it appears to mutate the source path's operation too —
			// confirm this aliasing is intended.
			deprecatedProperties := pathDeprecated.Operations[i].(*framework.PathOperation)
			deprecatedProperties.DisplayAttrs = &deprecatedDisplayAttrs
		}
	}

	return &pathDeprecated
}
|
||||
|
||||
// Adapted from https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/
|
||||
// the "Enumerating Regions and Endpoint Metadata" section
|
||||
func generatePartitionToRegionMap() map[string]*endpoints.Region {
|
||||
partitionToRegion := make(map[string]*endpoints.Region)
|
||||
|
||||
resolver := endpoints.DefaultResolver()
|
||||
partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
|
||||
for _, p := range partitions {
|
||||
// For most partitions, it's fine to choose a single region randomly.
|
||||
// However, there are a few exceptions:
|
||||
//
|
||||
// For "aws", choose "us-east-1" because it is always enabled (and
|
||||
// enabled for STS) by default.
|
||||
//
|
||||
// For "aws-us-gov", choose "us-gov-west-1" because it is the only
|
||||
// valid region for IAM operations.
|
||||
// ref: https://github.com/aws/aws-sdk-go/blob/v1.34.25/aws/endpoints/defaults.go#L8176-L8194
|
||||
for _, r := range p.Regions() {
|
||||
if p.ID() == "aws" && r.ID() != "us-east-1" {
|
||||
continue
|
||||
}
|
||||
if p.ID() == "aws-us-gov" && r.ID() != "us-gov-west-1" {
|
||||
continue
|
||||
}
|
||||
partitionToRegion[p.ID()] = &r
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return partitionToRegion
|
||||
}
|
||||
|
||||
// backendHelp is the static long-form help text returned for the aws auth
// method (wired into framework.Backend.Help in Backend()).
const backendHelp = `
The aws auth method uses either AWS IAM credentials or AWS-signed EC2 metadata
to authenticate clients, which are IAM principals or EC2 instances.

Authentication is backed by a preconfigured role in the backend. The role
represents the authorization of resources by containing Vault's policies.
Role can be created using 'role/<role>' endpoint.

Authentication of IAM principals, either IAM users or roles, is done using a
specifically signed AWS API request using clients' AWS IAM credentials. IAM
principals can then be assigned to roles within Vault. This is known as the
"iam" auth method.

Authentication of EC2 instances is done using either a signed PKCS#7 document
or a detached RSA signature of an AWS EC2 instance's identity document along
with a client-created nonce. This is known as the "ec2" auth method.

If there is need to further restrict the capabilities of the role on the instance
that is using the role, 'role_tag' option can be enabled on the role, and a tag
can be generated using 'role/<role>/tag' endpoint. This tag represents the
subset of capabilities set on the role. When the 'role_tag' option is enabled on
the role, the login operation requires that a respective role tag is attached to
the EC2 instance which performs the login.
`
|
@ -1,134 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
vaulthttp "github.com/hashicorp/vault/http"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/vault"
|
||||
)
|
||||
|
||||
// TestBackend_E2E_Initialize exercises the backend's InitializeFunc end to
// end: it verifies that mounting the backend writes the 'config/version'
// upgrade marker, that deleting the marker and performing unrelated writes
// does not recreate it, and that a seal/unseal cycle re-runs Initialize()
// and restores it.
func TestBackend_E2E_Initialize(t *testing.T) {
	ctx := context.Background()

	// Set up the cluster. This will trigger an Initialize(); we sleep briefly
	// awaiting its completion.
	cluster := setupAwsTestCluster(t, ctx)
	defer cluster.Cleanup()
	time.Sleep(time.Second)
	core := cluster.Cores[0]

	// Fetch the aws auth's path in storage. This is a uuid that is different
	// every time we run the test
	authUuids, err := core.UnderlyingStorage.List(ctx, "auth/")
	if err != nil {
		t.Fatal(err)
	}
	if len(authUuids) != 1 {
		t.Fatalf("expected exactly one auth path")
	}
	awsPath := "auth/" + authUuids[0]

	// Make sure that the upgrade happened, by fishing the 'config/version'
	// entry out of storage. We can't use core.Client.Logical().Read() to do
	// this, because 'config/version' hasn't been exposed as a path.
	version, err := core.UnderlyingStorage.Get(ctx, awsPath+"config/version")
	if err != nil {
		t.Fatal(err)
	}
	if version == nil {
		t.Fatalf("no config found")
	}

	// Nuke the version, so we can pretend that Initialize() has never been run
	if err := core.UnderlyingStorage.Delete(ctx, awsPath+"config/version"); err != nil {
		t.Fatal(err)
	}
	version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version")
	if err != nil {
		t.Fatal(err)
	}
	if version != nil {
		t.Fatalf("version found")
	}

	// Create a role
	data := map[string]interface{}{
		"auth_type":       "ec2",
		"policies":        "default",
		"bound_subnet_id": "subnet-abcdef",
	}
	if _, err := core.Client.Logical().Write("auth/aws/role/test-role", data); err != nil {
		t.Fatal(err)
	}
	role, err := core.Client.Logical().Read("auth/aws/role/test-role")
	if err != nil {
		t.Fatal(err)
	}
	if role == nil {
		t.Fatalf("no role found")
	}

	// There should _still_ be no config version
	version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version")
	if err != nil {
		t.Fatal(err)
	}
	if version != nil {
		t.Fatalf("version found")
	}

	// Seal, and then Unseal. This will once again trigger an Initialize(),
	// only this time there will be a role present during the upgrade.
	core.Seal(t)
	cluster.UnsealCores(t)
	time.Sleep(time.Second)

	// Now the config version should be there again
	version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version")
	if err != nil {
		t.Fatal(err)
	}
	if version == nil {
		t.Fatalf("no version found")
	}
}
|
||||
|
||||
// setupAwsTestCluster builds a single-core in-memory Vault test cluster with
// the aws credential backend registered, starts it, waits for the core to
// become active, and enables the "aws" auth method. The caller is
// responsible for calling Cleanup() on the returned cluster. The context
// argument is unused.
func setupAwsTestCluster(t *testing.T, _ context.Context) *vault.TestCluster {
	// create a cluster with the aws auth backend built-in
	logger := logging.NewVaultLogger(hclog.Trace)
	coreConfig := &vault.CoreConfig{
		Logger: logger,
		CredentialBackends: map[string]logical.Factory{
			"aws": Factory,
		},
	}
	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
	})

	cluster.Start()
	if len(cluster.Cores) != 1 {
		t.Fatalf("expected exactly one core")
	}
	core := cluster.Cores[0]
	vault.TestWaitActive(t, core.Core)

	// load the auth plugin
	if err := core.Client.Sys().EnableAuthWithOptions("aws", &api.EnableAuthOptions{
		Type: "aws",
	}); err != nil {
		t.Fatal(err)
	}

	return cluster
}
|
File diff suppressed because it is too large
Load Diff
@ -1,886 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// defaultCertificates holds the parsed AWS public certificates assembled
// from the hard-coded PEM constants in this file; populated by init().
var defaultCertificates []*x509.Certificate = nil
|
||||
|
||||
func init() {
|
||||
allCerts := pkcs7RawCerts + signatureRawCerts + rsa2048RawCerts
|
||||
splitCerts := strings.Split(allCerts, "-----BEGIN CERTIFICATE-----")
|
||||
// parse all hard-coded certs
|
||||
for _, cert := range splitCerts {
|
||||
if len(strings.TrimSpace(cert)) == 0 {
|
||||
continue
|
||||
}
|
||||
cert = "-----BEGIN CERTIFICATE-----\n" + cert
|
||||
decodedCert, err := decodePEMAndParseCertificate(cert)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defaultCertificates = append(defaultCertificates, decodedCert)
|
||||
}
|
||||
}
|
||||
|
||||
// These certificates are for verifying PKCS#7 DSA signatures.
|
||||
// Copied from:
|
||||
//
|
||||
// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-pkcs7.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----'
|
||||
//
|
||||
// Last updated: 2022-05-31
|
||||
const pkcs7RawCerts = `-----BEGIN CERTIFICATE-----
|
||||
MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
|
||||
ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
|
||||
ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
|
||||
VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
|
||||
hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
|
||||
k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
|
||||
hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
|
||||
lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
|
||||
MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
|
||||
MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
|
||||
vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
|
||||
7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7zCCAq4CCQCO7MJe5Y3VLjAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTAyMDMwMjIxMjFaFw00
|
||||
NTAyMDMwMjIxMjFaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbgwggEsBgcqhkjOOAQBMIIBHwKBgQDvQ9RzVvf4MAwGbqfX
|
||||
blCvCoVb9957OkLGn/04CowHXJ+vTBR7eyIa6AoXltsQXBOmrJswToFKKxT4gbuw
|
||||
jK7s9QQX4CmTRWcEgO2RXtZSVjOhsUQMh+yf7Ht4OVL97LWnNfGsX2cwjcRWHYgI
|
||||
7lvnuBNBzLQHdSEwMNq0Bk76PwIVAMan6XIEEPnwr4e6u/RNnWBGKd9FAoGBAOCG
|
||||
eSNmxpW4QFu4pIlAykm6EnTZKKHT87gdXkAkfoC5fAfOxxhnE2HezZHp9Ap2tMV5
|
||||
8bWNvoPHvoKCQqwfm+OUBlAxC/3vqoVkKL2mG1KgUH9+hrtpMTkwO3RREnKe7I5O
|
||||
x9qDimJpOihrL4I0dYvy9xUOoz+DzFAW8+ylWVYpA4GFAAKBgQDbnBAKSxWr9QHY
|
||||
6Dt+EFdGz6lAZLedeBKpaP53Z1DTO34J0C55YbJTwBTFGqPtOLxnUVDlGiD6GbmC
|
||||
80f3jvogPR1mSmGsydbNbZnbUEVWrRhe+y5zJ3g9qs/DWmDW0deEFvkhWVnLJkFJ
|
||||
9pdOu/ibRPH1lE2nz6pK7GbOQtLyHTAJBgcqhkjOOAQDAzAAMC0CFQCoJlwGtJQC
|
||||
cLoM4p/jtVFOj26xbgIUUS4pDKyHaG/eaygLTtFpFJqzWHc=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7jCCAq4CCQCVWIgSmP8RhTAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTAyMDUxMzA2MjFaFw00
|
||||
NTAyMDUxMzA2MjFaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbgwggEsBgcqhkjOOAQBMIIBHwKBgQDcwojQfgWdV1QliO0B
|
||||
8n6cLZ38VE7ZmrjZ9OQV//Gst6S1h7euhC23YppKXi1zovefSDwFU54zi3/oJ++q
|
||||
PHlP1WGL8IZ34BUgRTtG4TVolvp0smjkMvyRu5hIdKtzjV93Ccx15gVgyk+o1IEG
|
||||
fZ2Kbw/Dd8JfoPS7KaSCmJKxXQIVAIZbIaDFRGa2qcMkW2HWASyNDl7bAoGBANtz
|
||||
IdhfMq+l2I5iofY2oj3HI21Kj3LtZrWEg3W+/4rVhL3lTm0Nne1rl9yGujrjQwy5
|
||||
Zp9V4A/w9w2O10Lx4K6hj34Eefy/aQnZwNdNhv/FQP7Az0fju+Yl6L13OOHQrL0z
|
||||
Q+9cF7zEosekEnBQx3v6psNknKgD3Shgx+GO/LpCA4GFAAKBgQCVS7m77nuNAlZ8
|
||||
wvUqcooxXMPkxJFl54NxAsAul9KP9KN4svm0O3Zrb7t2FOtXRM8zU3TqMpryq1o5
|
||||
mpMPsZDg6RXo9BF7Hn0DoZ6PJTamkFA6md+NyTJWJKvXC7iJ8fGDBJqTciUHuCKr
|
||||
12AztQ8bFWsrTgTzPE3p6U5ckcgV1TAJBgcqhkjOOAQDAy8AMCwCFB2NZGWm5EDl
|
||||
86ayV3c1PEDukgQIAhQow38rQkN/VwHVeSW9DqEshXHjuQ==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7DCCAqwCCQCncbCtQbjuyzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTA2MDQxMjQ4MDVaFw00
|
||||
NTA2MDQxMjQ4MDVaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQC12Nr1gMrHcFSZ7S/A
|
||||
pQBSCMHWmn2qeoQTMVWqe50fnTd0zGFxDdIjKxUK58/8zjWG5uR4TXRzmZpGpmXB
|
||||
bSufAR6BGqud2LnT/HIWGJAsnX2uOtSyNfCoJigqwhea5w+CqZ6I7iBDdnB4TtTw
|
||||
qO6TlnExHFVj8LMkylZgiaE1CQIVAIhdobse4K0QnbAhCL6R2euQzloXAoGAV/21
|
||||
WUuMz/79Ga0JvQcz1FNy1sT0pU9rU4TenqLQIt5iccn/7EIfNtvVO5TZKulIKq7J
|
||||
gXZr0x/KIT8zsNweetLOaGehPIYRMPX0vunMMR7hN7qA7W17WZv/76adywIsnDKq
|
||||
ekfe15jinaX8MsKUdyDK7Y+ifCG4PVhoM4+W2XwDgYQAAoGAIxOKbVgwLxbn6Pi2
|
||||
6hBOihFv16jKxAQI0hHzXJLV0Vyv9QwnqjJJRfOCy3dB0zicLXiIxeIdYfvqJr+u
|
||||
hlN8rGxEZYYJjEUKMGvsc0DW85jonXz0bNfcP0aaKH0lKKVjL+OZi5n2kn9wgdo5
|
||||
F3CVnMl8BUra8A1Tr2yrrE6TVZ4wCQYHKoZIzjgEAwMvADAsAhQfa7MCJZ+/TEY5
|
||||
AUr0J4wm8VzjoAIUSYZVu2NdRJ/ERPmDfhW5EsjHlCA=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7TCCAqwCCQCMElHPdwG37jAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTA0MjkyMDM1MjJaFw00
|
||||
NTA0MjkyMDM1MjJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQDAkoL4YfdMI/MrQ0oL
|
||||
NPfeEk94eiCQA5xNOnU7+2eVQtEqjFbDADFENh1p3sh9Q9OoheLFH8qpSfNDWn/0
|
||||
ktCS909ApTY6Esx1ExjGSeQq/U+SC2JSuuTT4WFMKJ63a/czMtFkEPPnVIjJJJmT
|
||||
HJSKSsVUgpdDIRvJXuyB0zdB+wIVALQ3OLaVGdlPMNfS1nD/Yyn+32wnAoGAPBQ3
|
||||
7XHg5NLOS4326eFRUT+4ornQFjJjP6dp3pOBEzpImNmZTtkCNNUKE4Go9hv5T4lh
|
||||
R0pODvWv0CBupMAZVBP9ObplXPCyEIZtuDqVa7ukPOUpQNgQhLLAqkigTyXVOSmt
|
||||
ECBj9tu5WNP/x3iTZTHJ+g0rhIqpgh012UwJpKADgYQAAoGAV1OEQPYQUg5/M3xf
|
||||
6vE7jKTxxyFWEyjKfJK7PZCzOIGrE/swgACy4PYQW+AwcUweSlK/Hx2OaZVUKzWo
|
||||
wDUbeu65DcRdw2rSwCbBTU342sitFo/iGCV/Gjf+BaiAJtxniZze7J1ob8vOBeLv
|
||||
uaMQmgOYeZ5e0fl04GtqPl+lhcQwCQYHKoZIzjgEAwMwADAtAhQdoeWLrkm0K49+
|
||||
AeBK+j6m2h9SKQIVAIBNhS2a8cQVABDCQXVXrc0tOmO8
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC8DCCArCgAwIBAgIGAXbVDEikMAkGByqGSM44BAMwXDELMAkGA1UEBhMCVVMx
|
||||
GTAXBgNVBAgMEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAe
|
||||
BgNVBAoMF0FtYXpvbiBXZWIgU2VydmljZXMgTExDMB4XDTIxMDEwNjAwMTUyMFoX
|
||||
DTQ3MDEwNjAwMTUyMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgMEFdhc2hpbmd0
|
||||
b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAeBgNVBAoMF0FtYXpvbiBXZWIg
|
||||
U2VydmljZXMgTExDMIIBuDCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9K
|
||||
nC7s5Of2EbdSPO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00
|
||||
b/JmYLdrmVClpJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNa
|
||||
FpEy9nXzrith1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA
|
||||
9+GghdabPd7LvKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJ
|
||||
FnEj6EwoFhO3zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7
|
||||
zKTxvqhRkImog9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoDgYUAAoGBAPjuiEx05N3J
|
||||
Q6cVwntJie67D8OuNo4jGRn+crEtL7YO0jSVB9zGE1ga+UgRPIaYETL293S8rTJT
|
||||
VgXAqdpBwfaHC6NUzre8U8iJ8FMNnlP9Gw1oUIlgQBjORyynVJexoB31TDZM+/52
|
||||
g9O/bpq1QqNyKbeIgyBBlc1dAtr1QLnsMAkGByqGSM44BAMDLwAwLAIUK8E6RDIR
|
||||
twK+9qnaTOBhvO/njuQCFFocyT1OxK+UDR888oNsdgtif2Sf
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDNjCCAh4CCQD3yZ1w1AVkTzANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJV
|
||||
UzEZMBcGA1UECBMQV2FzaGluZ3RvbiBTdGF0ZTEQMA4GA1UEBxMHU2VhdHRsZTEg
|
||||
MB4GA1UEChMXQW1hem9uIFdlYiBTZXJ2aWNlcyBMTEMwIBcNMTUwNTEzMDk1OTE1
|
||||
WhgPMjE5NDEwMTYwOTU5MTVaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNo
|
||||
aW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24g
|
||||
V2ViIFNlcnZpY2VzIExMQzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
|
||||
AMWk9vyppSmDU3AxZ2Cy2bvKeK3F1UqNpMuyeriizi+NTsZ8tQqtNloaQcqhto/l
|
||||
gsw9+QSnEJeYWnmivJWOBdn9CyDpN7cpHVmeGgNJL2fvImWyWe2f2Kq/BL9l7N7C
|
||||
P2ZT52/sH9orlck1n2zO8xPi7MItgPHQwu3OxsGQsAdWucdxjHGtdchulpo1uJ31
|
||||
jsTAPKZ3p1/sxPXBBAgBMatPHhRBqhwHO/Twm4J3GmTLWN7oVDds4W3bPKQfnw3r
|
||||
vtBj/SM4/IgQ3xJslFcl90TZbQbgxIi88R/gWTbs7GsyT2PzstU30yLdJhKfdZKz
|
||||
/aIzraHvoDTWFaOdy0+OOaECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAdSzN2+0E
|
||||
V1BfR3DPWJHWRf1b7zl+1X/ZseW2hYE5r6YxrLv+1VPf/L5I6kB7GEtqhZUqteY7
|
||||
zAceoLrVu/7OynRyfQetJVGichaaxLNM3lcr6kcxOowb+WQQ84cwrB3keykH4gRX
|
||||
KHB2rlWSxta+2panSEO1JX2q5jhcFP90rDOtZjlpYv57N/Z9iQ+dvQPJnChdq3BK
|
||||
5pZlnIDnVVxqRike7BFy8tKyPj7HzoPEF5mh9Kfnn1YoSVu+61lMVv/qRjnyKfS9
|
||||
c96nE98sYFj0ZVBzXw8Sq4Gh8FiVmFHbQp1peGC19idOUqxPxWsasWxQXO0azYsP
|
||||
9RyWLHKxH1dMuA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
|
||||
ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
|
||||
ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
|
||||
VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
|
||||
hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
|
||||
k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
|
||||
hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
|
||||
lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
|
||||
MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
|
||||
MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
|
||||
vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
|
||||
7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
|
||||
// These certificates are for verifying PKCS#7 DSA signatures.
|
||||
// Copied from:
|
||||
// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-signature.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----'
|
||||
// Last updated: 2022-05-31
|
||||
const signatureRawCerts = `-----BEGIN CERTIFICATE-----
|
||||
MIIDIjCCAougAwIBAgIJAKnL4UEDMN/FMA0GCSqGSIb3DQEBBQUAMGoxCzAJBgNV
|
||||
BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgw
|
||||
FgYDVQQKEw9BbWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3Mu
|
||||
Y29tMB4XDTE0MDYwNTE0MjgwMloXDTI0MDYwNTE0MjgwMlowajELMAkGA1UEBhMC
|
||||
VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1NlYXR0bGUxGDAWBgNV
|
||||
BAoTD0FtYXpvbi5jb20gSW5jLjEaMBgGA1UEAxMRZWMyLmFtYXpvbmF3cy5jb20w
|
||||
gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAIe9GN//SRK2knbjySG0ho3yqQM3
|
||||
e2TDhWO8D2e8+XZqck754gFSo99AbT2RmXClambI7xsYHZFapbELC4H91ycihvrD
|
||||
jbST1ZjkLQgga0NE1q43eS68ZeTDccScXQSNivSlzJZS8HJZjgqzBlXjZftjtdJL
|
||||
XeE4hwvo0sD4f3j9AgMBAAGjgc8wgcwwHQYDVR0OBBYEFCXWzAgVyrbwnFncFFIs
|
||||
77VBdlE4MIGcBgNVHSMEgZQwgZGAFCXWzAgVyrbwnFncFFIs77VBdlE4oW6kbDBq
|
||||
MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2Vh
|
||||
dHRsZTEYMBYGA1UEChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1h
|
||||
em9uYXdzLmNvbYIJAKnL4UEDMN/FMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
|
||||
BQADgYEAFYcz1OgEhQBXIwIdsgCOS8vEtiJYF+j9uO6jz7VOmJqO+pRlAbRlvY8T
|
||||
C1haGgSI/A1uZUKs/Zfnph0oEI0/hu1IIJ/SKBDtN5lvmZ/IzbOPIJWirlsllQIQ
|
||||
7zvWbGd9c9+Rm3p04oTvhup99la7kZqevJK0QRdD/6NpCKsqP/0=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICSzCCAbQCCQDtQvkVxRvK9TANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJV
|
||||
UzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTEYMBYGA1UE
|
||||
ChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1hem9uYXdzLmNvbTAe
|
||||
Fw0xOTAyMDMwMzAwMDZaFw0yOTAyMDIwMzAwMDZaMGoxCzAJBgNVBAYTAlVTMRMw
|
||||
EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgwFgYDVQQKEw9B
|
||||
bWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3MuY29tMIGfMA0G
|
||||
CSqGSIb3DQEBAQUAA4GNADCBiQKBgQC1kkHXYTfc7gY5Q55JJhjTieHAgacaQkiR
|
||||
Pity9QPDE3b+NXDh4UdP1xdIw73JcIIG3sG9RhWiXVCHh6KkuCTqJfPUknIKk8vs
|
||||
M3RXflUpBe8Pf+P92pxqPMCz1Fr2NehS3JhhpkCZVGxxwLC5gaG0Lr4rFORubjYY
|
||||
Rh84dK98VwIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAA6xV9f0HMqXjPHuGILDyaNN
|
||||
dKcvplNFwDTydVg32MNubAGnecoEBtUPtxBsLoVYXCOb+b5/ZMDubPF9tU/vSXuo
|
||||
TpYM5Bq57gJzDRaBOntQbX9bgHiUxw6XZWaTS/6xjRJDT5p3S1E0mPI3lP/eJv4o
|
||||
Ezk5zb3eIf10/sqt4756
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDPDCCAqWgAwIBAgIJAMl6uIV/zqJFMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNV
|
||||
BAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMSAw
|
||||
HgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwRZWMyLmFt
|
||||
YXpvbmF3cy5jb20wIBcNMTkwNDI2MTQzMjQ3WhgPMjE5ODA5MjkxNDMyNDdaMHIx
|
||||
CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0
|
||||
dGxlMSAwHgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwR
|
||||
ZWMyLmFtYXpvbmF3cy5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALVN
|
||||
CDTZEnIeoX1SEYqq6k1BV0ZlpY5y3KnoOreCAE589TwS4MX5+8Fzd6AmACmugeBP
|
||||
Qk7Hm6b2+g/d4tWycyxLaQlcq81DB1GmXehRkZRgGeRge1ePWd1TUA0I8P/QBT7S
|
||||
gUePm/kANSFU+P7s7u1NNl+vynyi0wUUrw7/wIZTAgMBAAGjgdcwgdQwHQYDVR0O
|
||||
BBYEFILtMd+T4YgH1cgc+hVsVOV+480FMIGkBgNVHSMEgZwwgZmAFILtMd+T4YgH
|
||||
1cgc+hVsVOV+480FoXakdDByMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGlu
|
||||
Z3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEgMB4GA1UECgwXQW1hem9uIFdlYiBTZXJ2
|
||||
aWNlcyBMTEMxGjAYBgNVBAMMEWVjMi5hbWF6b25hd3MuY29tggkAyXq4hX/OokUw
|
||||
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQBhkNTBIFgWFd+ZhC/LhRUY
|
||||
4OjEiykmbEp6hlzQ79T0Tfbn5A4NYDI2icBP0+hmf6qSnIhwJF6typyd1yPK5Fqt
|
||||
NTpxxcXmUKquX+pHmIkK1LKDO8rNE84jqxrxRsfDi6by82fjVYf2pgjJW8R1FAw+
|
||||
mL5WQRFexbfB5aXhcMo0AA==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICNjCCAZ+gAwIBAgIJAKumfZiRrNvHMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTExMjcw
|
||||
NzE0MDVaGA8yMTk5MDUwMjA3MTQwNVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
|
||||
gQDFd571nUzVtke3rPyRkYfvs3jh0C0EMzzG72boyUNjnfw1+m0TeFraTLKb9T6F
|
||||
7TuB/ZEN+vmlYqr2+5Va8U8qLbPF0bRH+FdaKjhgWZdYXxGzQzU3ioy5W5ZM1VyB
|
||||
7iUsxEAlxsybC3ziPYaHI42UiTkQNahmoroNeqVyHNnBpQIDAQABMA0GCSqGSIb3
|
||||
DQEBCwUAA4GBAAJLylWyElEgOpW4B1XPyRVD4pAds8Guw2+krgqkY0HxLCdjosuH
|
||||
RytGDGN+q75aAoXzW5a7SGpxLxk6Hfv0xp3RjDHsoeP0i1d8MD3hAC5ezxS4oukK
|
||||
s5gbPOnokhKTMPXbTdRn5ZifCbWlx+bYN/mTYKvxho7b5SVg2o1La9aK
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICNjCCAZ+gAwIBAgIJAOZ3GEIaDcugMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTEwMjQx
|
||||
NTE5MDlaGA8yMTk5MDMyOTE1MTkwOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
|
||||
gQCjiPgW3vsXRj4JoA16WQDyoPc/eh3QBARaApJEc4nPIGoUolpAXcjFhWplo2O+
|
||||
ivgfCsc4AU9OpYdAPha3spLey/bhHPRi1JZHRNqScKP0hzsCNmKhfnZTIEQCFvsp
|
||||
DRp4zr91/WS06/flJFBYJ6JHhp0KwM81XQG59lV6kkoW7QIDAQABMA0GCSqGSIb3
|
||||
DQEBCwUAA4GBAGLLrY3P+HH6C57dYgtJkuGZGT2+rMkk2n81/abzTJvsqRqGRrWv
|
||||
XRKRXlKdM/dfiuYGokDGxiC0Mg6TYy6wvsR2qRhtXW1OtZkiHWcQCnOttz+8vpew
|
||||
wx8JGMvowtuKB1iMsbwyRqZkFYLcvH+Opfb/Aayi20/ChQLdI6M2R5VU
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICMzCCAZygAwIBAgIGAXbVDG2yMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNVBAYT
|
||||
AlVTMRkwFwYDVQQIDBBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHDAdTZWF0dGxl
|
||||
MSAwHgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0yMTAxMDYwMDE1
|
||||
MzBaGA8yMjAwMDEwNjAwMTUzMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgMEFdh
|
||||
c2hpbmd0b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAeBgNVBAoMF0FtYXpv
|
||||
biBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCn
|
||||
CS/Vbt0gQ1ebWcur2hSO7PnJifE4OPxQ7RgSAlc4/spJp1sDP+ZrS0LO1ZJfKhXf
|
||||
1R9S3AUwLnsc7b+IuVXdY5LK9RKqu64nyXP5dx170zoL8loEyCSuRR2fs+04i2Qs
|
||||
WBVP+KFNAn7P5L1EHRjkgTO8kjNKviwRV+OkP9ab5wIDAQABMA0GCSqGSIb3DQEB
|
||||
BQUAA4GBAI4WUy6+DKh0JDSzQEZNyBgNlSoSuC2owtMxCwGB6nBfzzfcekWvs6eo
|
||||
fLTSGovrReX7MtVgrcJBZjmPIentw5dWUs+87w/g9lNwUnUt0ZHYyh2tuBG6hVJu
|
||||
UEwDJ/z3wDd6wQviLOTF3MITawt9P8siR1hXqLJNxpjRQFZrgHqi
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICSzCCAbQCCQCQu97teKRD4zANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJV
|
||||
UzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTEYMBYGA1UE
|
||||
ChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1hem9uYXdzLmNvbTAe
|
||||
Fw0xMzA4MjExMzIyNDNaFw0yMzA4MjExMzIyNDNaMGoxCzAJBgNVBAYTAlVTMRMw
|
||||
EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgwFgYDVQQKEw9B
|
||||
bWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3MuY29tMIGfMA0G
|
||||
CSqGSIb3DQEBAQUAA4GNADCBiQKBgQC6GFQ2WoBl1xZYH85INUMaTc4D30QXM6f+
|
||||
YmWZyJD9fC7Z0UlaZIKoQATqCO58KNCre+jECELYIX56Uq0lb8LRLP8tijrQ9Sp3
|
||||
qJcXiH66kH0eQ44a5YdewcFOy+CSAYDUIaB6XhTQJ2r7bd4A2vw3ybbxTOWONKdO
|
||||
WtgIe3M3iwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAHzQC5XZVeuD9GTJTsbO5AyH
|
||||
ZQvki/jfARNrD9dgBRYZzLC/NOkWG6M9wlrmks9RtdNxc53nLxKq4I2Dd73gI0yQ
|
||||
wYu9YYwmM/LMgmPlI33Rg2Ohwq4DVgT3hO170PL6Fsgiq3dMvctSImJvjWktBQaT
|
||||
bcAgaZLHGIpXPrWSA2d+
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDCzCCAnSgAwIBAgIJAIe9Hnq82O7UMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0yMTA3MTQx
|
||||
NDI3NTdaFw0yNDA3MTMxNDI3NTdaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
|
||||
qaIcGFFTx/SO1W5G91jHvyQdGP25n1Y91aXCuOOWAUTvSvNGpXrI4AXNrQF+CmIO
|
||||
C4beBASnHCx082jYudWBBl9Wiza0psYc9flrczSzVLMmN8w/c78F/95NfiQdnUQP
|
||||
pvgqcMeJo82cgHkLR7XoFWgMrZJqrcUK0gnsQcb6kakCAwEAAaOB1DCB0TALBgNV
|
||||
HQ8EBAMCB4AwHQYDVR0OBBYEFNWV53gWJz72F5B1ZVY4O/dfFYBPMIGOBgNVHSME
|
||||
gYYwgYOAFNWV53gWJz72F5B1ZVY4O/dfFYBPoWCkXjBcMQswCQYDVQQGEwJVUzEZ
|
||||
MBcGA1UECBMQV2FzaGluZ3RvbiBTdGF0ZTEQMA4GA1UEBxMHU2VhdHRsZTEgMB4G
|
||||
A1UEChMXQW1hem9uIFdlYiBTZXJ2aWNlcyBMTEOCCQCHvR56vNju1DASBgNVHRMB
|
||||
Af8ECDAGAQH/AgEAMA0GCSqGSIb3DQEBCwUAA4GBACrKjWj460GUPZCGm3/z0dIz
|
||||
M2BPuH769wcOsqfFZcMKEysSFK91tVtUb1soFwH4/Lb/T0PqNrvtEwD1Nva5k0h2
|
||||
xZhNNRmDuhOhW1K9wCcnHGRBwY5t4lYL6hNV6hcrqYwGMjTjcAjBG2yMgznSNFle
|
||||
Rwi/S3BFXISixNx9cILu
|
||||
-----END CERTIFICATE-----`
|
||||
|
||||
// These certificates are for verifying RSA 2048 signatures.
|
||||
// Copied from:
|
||||
// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-rsa2048.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----'
|
||||
// Last updated: 2022-05-31
|
||||
const rsa2048RawCerts = `-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJALFpzEAVWaQZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw
|
||||
ODU5MTJaGA8yMTk1MDExNzA4NTkxMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAjS2vqZu9mEOhOq+0bRpAbCUiapbZMFNQqRg7kTlr7Cf+gDqXKpHPjsng
|
||||
SfNz+JHQd8WPI+pmNs+q0Z2aTe23klmf2U52KH9/j1k8RlIbap/yFibFTSedmegX
|
||||
E5r447GbJRsHUmuIIfZTZ/oRlpuIIO5/Vz7SOj22tdkdY2ADp7caZkNxhSP915fk
|
||||
2jJMTBUOzyXUS2rBU/ulNHbTTeePjcEkvzVYPahD30TeQ+/A+uWUu89bHSQOJR8h
|
||||
Um4cFApzZgN3aD5j2LrSMu2pctkQwf9CaWyVznqrsGYjYOY66LuFzSCXwqSnFBfv
|
||||
fFBAFsjCgY24G2DoMyYkF3MyZlu+rwIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQUrynSPp4uqSECwy+PiO4qyJ8TWSkwgY4GA1UdIwSBhjCBg4AUrynS
|
||||
Pp4uqSECwy+PiO4qyJ8TWSmhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJALFpzEAVWaQZMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBADW/s8lXijwdP6NkEoH1m9XLrvK4YTqkNfR6
|
||||
er/uRRgTx2QjFcMNrx+g87gAml11z+D0crAZ5LbEhDMs+JtZYR3ty0HkDk6SJM85
|
||||
haoJNAFF7EQ/zCp1EJRIkLLsC7bcDL/Eriv1swt78/BB4RnC9W9kSp/sxd5svJMg
|
||||
N9a6FAplpNRsWAnbP8JBlAP93oJzblX2LQXgykTghMkQO7NaY5hg/H5o4dMPclTK
|
||||
lYGqlFUCH6A2vdrxmpKDLmTn5//5pujdD2MN0df6sZWtxwZ0osljV4rDjm9Q3VpA
|
||||
NWIsDEcp3GUB4proOR+C7PNkY+VGODitBOw09qBGosCBstwyEqY=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAM07oeX4xevdMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA2MTAx
|
||||
MjU4MThaGA8yMTk1MTExNDEyNTgxOFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA6v6kGMnRmFDLxBEqXzP4npnL65OO0kmQ7w8YXQygSdmNIoScGSU5wfh9
|
||||
mZdcvCxCdxgALFsFqPvH8fqiE9ttI0fEfuZvHOs8wUsIdKr0Zz0MjSx3cik4tKET
|
||||
ch0EKfMnzKOgDBavraCDeX1rUDU0Rg7HFqNAOry3uqDmnqtk00XC9GenS3z/7ebJ
|
||||
fIBEPAam5oYMVFpX6M6St77WdNE8wEU8SuerQughiMVx9kMB07imeVHBiELbMQ0N
|
||||
lwSWRL/61fA02keGSTfSp/0m3u+lesf2VwVFhqIJs+JbsEscPxOkIRlzy8mGd/JV
|
||||
ONb/DQpTedzUKLgXbw7KtO3HTG9iXQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU2CTGYE5fTjx7gQXzdZSGPEWAJY4wgY4GA1UdIwSBhjCBg4AU2CTG
|
||||
YE5fTjx7gQXzdZSGPEWAJY6hYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAM07oeX4xevdMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBANdqkIpVypr2PveqUsAKke1wKCOSuw1UmH9k
|
||||
xX1/VRoHbrI/UznrXtPQOPMmHA2LKSTedwsJuorUn3cFH6qNs8ixBDrl8pZwfKOY
|
||||
IBJcTFBbI1xBEFkZoO3wczzo5+8vPQ60RVqAaYb+iCa1HFJpccC3Ovajfa4GRdNb
|
||||
n6FYnluIcDbmpcQePoVQwX7W3oOYLB1QLN7fE6H1j4TBIsFdO3OuKzmaifQlwLYt
|
||||
DVxVCNDabpOr6Uozd5ASm4ihPPoEoKo7Ilp0fOT6fZ41U2xWA4+HF/89UoygZSo7
|
||||
K+cQ90xGxJ+gmlYbLFR5rbJOLfjrgDAb2ogbFy8LzHo2ZtSe60M=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJALZL3lrQCSTMMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw
|
||||
OTAxMzJaGA8yMTk1MDExNzA5MDEzMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA02Y59qtAA0a6uzo7nEQcnJ26OKF+LRPwZfixBH+EbEN/Fx0gYy1jpjCP
|
||||
s5+VRNg6/WbfqAsV6X2VSjUKN59ZMnMY9ALA/Ipz0n00Huxj38EBZmX/NdNqKm7C
|
||||
qWu1q5kmIvYjKGiadfboU8wLwLcHo8ywvfgI6FiGGsEO9VMC56E/hL6Cohko11LW
|
||||
dizyvRcvg/IidazVkJQCN/4zC9PUOVyKdhW33jXy8BTg/QH927QuNk+ZzD7HH//y
|
||||
tIYxDhR6TIZsSnRjz3bOcEHxt1nsidc65mY0ejQty4hy7ioSiapw316mdbtE+RTN
|
||||
fcH9FPIFKQNBpiqfAW5Ebp3Lal3/+wIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU7coQx8Qnd75qA9XotSWT3IhvJmowgY4GA1UdIwSBhjCBg4AU7coQ
|
||||
x8Qnd75qA9XotSWT3IhvJmqhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJALZL3lrQCSTMMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAFZ1e2MnzRaXCaLwEC1pW/f0oRG8nHrlPZ9W
|
||||
OYZEWbh+QanRgaikBNDtVTwARQcZm3z+HWSkaIx3cyb6vM0DSkZuiwzm1LJ9rDPc
|
||||
aBm03SEt5v8mcc7sXWvgFjCnUpzosmky6JheCD4O1Cf8k0olZ93FQnTrbg62OK0h
|
||||
83mGCDeVKU3hLH97FYoUq+3N/IliWFDhvibAYYKFJydZLhIdlCiiB99AM6Sg53rm
|
||||
oukS3csyUxZyTU2hQfdjyo1nqW9yhvFAKjnnggiwxNKTTPZzstKW8+cnYwiiTwJN
|
||||
QpVoZdt0SfbuNnmwRUMi+QbuccXweav29QeQ3ADqjgB0CZdSRKk=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJANNPkIpcyEtIMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw
|
||||
OTAzMDdaGA8yMTk1MDQwMzA5MDMwN1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEApHQGvHvq3SVCzDrC7575BW7GWLzcj8CLqYcL3YY7Jffupz7OjcftO57Z
|
||||
4fo5Pj0CaS8DtPzh8+8vdwUSMbiJ6cDd3ooio3MnCq6DwzmsY+pY7CiI3UVG7KcH
|
||||
4TriDqr1Iii7nB5MiPJ8wTeAqX89T3SYaf6Vo+4GCb3LCDGvnkZ9TrGcz2CHkJsj
|
||||
AIGwgopFpwhIjVYm7obmuIxSIUv+oNH0wXgDL029Zd98SnIYQd/njiqkzE+lvXgk
|
||||
4h4Tu17xZIKBgFcTtWPky+POGu81DYFqiWVEyR2JKKm2/iR1dL1YsT39kbNg47xY
|
||||
aR129sS4nB5Vw3TRQA2jL0ToTIxzhQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQUgepyiONs8j+q67dmcWu+mKKDa+gwgY4GA1UdIwSBhjCBg4AUgepy
|
||||
iONs8j+q67dmcWu+mKKDa+ihYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJANNPkIpcyEtIMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAGLFWyutf1u0xcAc+kmnMPqtc/Q6b79VIX0E
|
||||
tNoKMI2KR8lcV8ZElXDb0NC6v8UeLpe1WBKjaWQtEjL1ifKg9hdY9RJj4RXIDSK7
|
||||
33qCQ8juF4vep2U5TTBd6hfWxt1Izi88xudjixmbpUU4YKr8UPbmixldYR+BEx0u
|
||||
B1KJi9l1lxvuc/Igy/xeHOAZEjAXzVvHp8Bne33VVwMiMxWECZCiJxE4I7+Y6fqJ
|
||||
pLLSFFJKbNaFyXlDiJ3kXyePEZSc1xiWeyRB2ZbTi5eu7vMG4i3AYWuFVLthaBgu
|
||||
lPfHafJpj/JDcqt2vKUKfur5edQ6j1CGdxqqjawhOTEqcN8m7us=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAJNKhJhaJOuMMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA3Mjkx
|
||||
MTM3MTdaGA8yMTk2MDEwMjExMzcxN1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAhDUh6j1ACSt057nSxAcwMaGr8Ez87VA2RW2HyY8l9XoHndnxmP50Cqld
|
||||
+26AJtltlqHpI1YdtnZ6OrVgVhXcVtbvte0lZ3ldEzC3PMvmISBhHs6A3SWHA9ln
|
||||
InHbToLX/SWqBHLOX78HkPRaG2k0COHpRy+fG9gvz8HCiQaXCbWNFDHZev9OToNI
|
||||
xhXBVzIa3AgUnGMalCYZuh5AfVRCEeALG60kxMMC8IoAN7+HG+pMdqAhJxGUcMO0
|
||||
LBvmTGGeWhi04MUZWfOkwn9JjQZuyLg6B1OD4Y6s0LB2P1MovmSJKGY4JcF8Qu3z
|
||||
xxUbl7Bh9pvzFR5gJN1pjM2n3gJEPwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAJ
|
||||
UNKM+gIIHNk0G0tzv6vZBT+o/vt+tIp8lEoZwaPQh1121iw/I7ZvhMLAigx7eyvf
|
||||
IxUt9/nf8pxWaeGzi98RbSmbap+uxYRynqe1p5rifTamOsguuPrhVpl12OgRWLcT
|
||||
rjg/K60UMXRsmg2w/cxV45pUBcyVb5h6Op5uEVAVq+CVns13ExiQL6kk3guG4+Yq
|
||||
LvP1p4DZfeC33a2Rfre2IHLsJH5D4SdWcYqBsfTpf3FQThH0l0KoacGrXtsedsxs
|
||||
9aRd7OzuSEJ+mBxmzxSjSwM84Ooh78DjkdpQgv967p3d+8NiSLt3/n7MgnUy6WwB
|
||||
KtDujDnB+ttEHwRRngX7
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAMcyoxx4U0xxMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw
|
||||
ODU4MDJaGA8yMTk1MDExNzA4NTgwMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAw45IhGZVbQcy1fHBqzROhO8CsrDzxj/WP4cRbJo/2DAnimVrCCDs5O86
|
||||
FA39Zo1xsDuJHDlwMKqeXYXkJXHYbcPWc6EYYAnR+PlLG+aNSOGUzsy202S03hT0
|
||||
B20hWPCqpPp39itIRhG4id6nbNRJOzLm6evHuepMAHR4/OV7hyGOiGaV/v9zqiNA
|
||||
pMCLhbh2xk0PO35HCVBuWt3HUjsgeks2eEsu9Ws6H3JXTCfiqp0TjyRWapM29OhA
|
||||
cRJfJ/d/+wBTz1fkWOZ7TF+EWRIN5ITEadlDTPnF1r8kBRuDcS/lIGFwrOOHLo4C
|
||||
cKoNgXkhTqDDBDu6oNBb2rS0K+sz3QIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQUqBy7D847Ya/w321Dfr+rBJGsGTwwgY4GA1UdIwSBhjCBg4AUqBy7
|
||||
D847Ya/w321Dfr+rBJGsGTyhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAMcyoxx4U0xxMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBACOoWSBf7b9AlcNrl4lr3QWWSc7k90/tUZal
|
||||
PlT0G3Obl2x9T/ZiBsQpbUvs0lfotG0XqGVVHcIxF38EbVwbw9KJGXbGSCJSEJkW
|
||||
vGCtc/jYMHXfhx67Szmftm/MTYNvnzsyQQ3v8y3Rdah+xe1NPdpFrwmfL6xe3pFF
|
||||
cY33KdHA/3PNLdn9CaEsHmcmj3ctaaXLFIzZhQyyjtsrgGfTLvXeXRokktvsLDS/
|
||||
YgKedQ+jFjzVJqgr4NjfY/Wt7/8kbbdhzaqlB5pCPjLLzv0zp/XmO6k+JvOePOGh
|
||||
JzGk5t1QrSju+MqNPFk3+1O7o910Vrhqw1QRB0gr1ExrviLbyfU=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAKD+v6LeR/WrMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw
|
||||
OTA4MTlaGA8yMTk1MDExNzA5MDgxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAka8FLhxs1cSJGK+Q+q/vTf8zVnDAPZ3U6oqppOW/cupCtpwMAQcky8DY
|
||||
Yb62GF7+C6usniaq/9W6xPn/3o//wti0cNt6MLsiUeHqNl5H/4U/Q/fR+GA8pJ+L
|
||||
npqZDG2tFi1WMvvGhGgIbScrjR4VO3TuKy+rZXMYvMRk1RXZ9gPhk6evFnviwHsE
|
||||
jV5AEjxLz3duD+u/SjPp1vloxe2KuWnyC+EKInnka909sl4ZAUh+qIYfZK85DAjm
|
||||
GJP4W036E9wTJQF2hZJrzsiB1MGyC1WI9veRISd30izZZL6VVXLXUtHwVHnVASrS
|
||||
zZDVpzj+3yD5hRXsvFigGhY0FCVFnwIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQUxC2l6pvJaRflgu3MUdN6zTuP6YcwgY4GA1UdIwSBhjCBg4AUxC2l
|
||||
6pvJaRflgu3MUdN6zTuP6YehYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAKD+v6LeR/WrMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAIK+DtbUPppJXFqQMv1f2Gky5/82ZwgbbfXa
|
||||
HBeGSii55b3tsyC3ZW5ZlMJ7Dtnr3vUkiWbV1EUaZGOUlndUFtXUMABCb/coDndw
|
||||
CAr53XTv7UwGVNe/AFO/6pQDdPxXn3xBhF0mTKPrOGdvYmjZUtQMSVb9lbMWCFfs
|
||||
w+SwDLnm5NF4yZchIcTs2fdpoyZpOHDXy0xgxO1gWhKTnYbaZOxkJvEvcckxVAwJ
|
||||
obF8NyJla0/pWdjhlHafEXEN8lyxyTTyOa0BGTuYOBD2cTYYynauVKY4fqHUkr3v
|
||||
Z6fboaHEd4RFamShM8uvSu6eEFD+qRmvqlcodbpsSOhuGNLzhOQ=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJANBx0E2bOCEPMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA4MTEx
|
||||
NDU2NDJaGA8yMTk2MDExNTE0NTY0MlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEArYS3mJLGaMrh2DmiPLbqr4Z+xWXTzBWCjOwpsuHE9H6dWUUyl2Bgnu+Z
|
||||
d8QvW306Yleec45M4F2RA3J4hWHtShzsMlOJVRt+YulGeTf9OCPr26QmIFfs5nD4
|
||||
fgsJQEry2MBSGA9Fxq3Cw6qkWcrOPsCR+bHOU0XykdKl0MnIbpBf0kTfciAupQEA
|
||||
dEHnM2J1L2iI0NTLBgKxy5PXLH9weX20BFauNmHH9/J07OpwL20SN5f8TxcM9+pj
|
||||
Lbk8h1V4KdIwVQpdWkbDL9BCGlYjyadQJxSxz1J343NzrnDM0M4h4HtVaKOS7bQo
|
||||
Bqt2ruopLRCYgcuFHck/1348iAmbRQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBG
|
||||
wujwU1Otpi3iBgmhjMClgZyMMn0aQIxMigoFNqXMUNx1Mq/e/Tx+SNaOEAu0n2FF
|
||||
aiYjvY0/hXOx75ewzZvM7/zJWIdLdsgewpUqOBH4DXFhbSk2TxggSPb0WRqTBxq5
|
||||
Ed7F7+7GRIeBbRzdLqmISDnfqey8ufW0ks51XcQNomDIRG5s9XZ5KHviDCar8FgL
|
||||
HngBCdFI04CMagM+pwTO9XN1Ivt+NzUj208ca3oP1IwEAd5KhIhPLcihBQA5/Lpi
|
||||
h1s3170z1JQ1HZbDrH1pgp+8hSI0DwwDVb3IIH8kPR/J0Qn+hvOl2HOpaUg2Ly0E
|
||||
pt1RCZe+W7/dF4zsbqwK
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJALWSfgHuT/ARMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNzA1MzEx
|
||||
MTE4MTZaGA8yMTk2MTEwMzExMTgxNlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAy5V7KDqnEvF3DrSProFcgu/oL+QYD62b1U+Naq8aPuljJe127Sm9WnWA
|
||||
EBdOSASkOaQ9fzjCPoG5SGgWKxYoZjsevHpmzjVv9+Ci+F57bSuMbjgUbvbRIFUB
|
||||
bxQojVoXQPHgK5v433ODxkQ4sjRyUbf4YV1AFdfU7zabC698YgPVOExGhXPlTvco
|
||||
8mlc631ubw2g52j0lzaozUkHPSbknTomhQIvO6kUfX0e0TDMH4jLDG2ZIrUB1L4r
|
||||
OWKG4KetduFrRZyDHF6ILZu+s6ywiMicUd+2UllDFC6oas+a8D11hmO/rpWU/ieV
|
||||
jj4rWAFrsebpn+Nhgy96iiVUGS2LuQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDE
|
||||
iYv6FQ6knXCg+svlcaQG9q59xUC5z8HvJZ1+SxzPKKC4PKQdKvIIfE8GxVXqlZG1
|
||||
cl5WKTFDMapnzb9RV/DTaVzWx3cMYT77vm1Hl1XGjhx611CGcENH1egI3lOTILsa
|
||||
+KfopuJEQQ9TDMAIkGjhA+KieU/U5Ctv9fdej6d0GC6OEuwKkTNzPWue6UMq8d4H
|
||||
2xqJboWsE1t4nybEosvZfQJcZ8jyIYcYBnsG13vCLM+ixjuU5MVVQNMY/gBJzqJB
|
||||
V+U0QiGiuT5cYgY/QihxdHt99zwGaE0ZBC7213NKrlNuLSrqhDI2NLu8NsExqOFy
|
||||
OmY0v/xVmQUQl26jJXaM
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAOrmqHuaUt0vMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw
|
||||
OTA2MTlaGA8yMTk1MDQwMzA5MDYxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAjE7nVu+aHLtzp9FYV25Qs1mvJ1JXD7J0iQ1Gs/RirW9a5ZECCtc4ssnf
|
||||
zQHq2JRVr0GRchvDrbm1HaP/avtFQR/Thvfltwu9AROVT22dUOTvERdkNzveoFCy
|
||||
hf52Rqf0DMrLXG8ZmQPPXPDFAv+sVMWCDftcChxRYZ6mP9O+TpgYNT1krD5PdvJU
|
||||
7HcXrkNHDYqbsg8A+Mu2hzl0QkvUET83Csg1ibeK54HP9w+FsD6F5W+6ZSHGJ88l
|
||||
FI+qYKs7xsjJQYgXWfEt6bbckWs1kZIaIOyMzYdPF6ClYzEec/UhIe/uJyUUNfpT
|
||||
VIsI5OltBbcPF4c7Y20jOIwwI2SgOQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQUF2DgPUZivKQR/Zl8mB/MxIkjZDUwgY4GA1UdIwSBhjCBg4AUF2Dg
|
||||
PUZivKQR/Zl8mB/MxIkjZDWhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAOrmqHuaUt0vMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAGm6+57W5brzJ3+T8/XsIdLTuiBSe5ALgSqI
|
||||
qnO5usUKAeQsa+kZIJPyEri5i8LEodh46DAF1RlXTMYgXXxl0YggX88XPmPtok17
|
||||
l4hib/D9/lu4IaFIyLzYNSzsETYWKWoGVe7ZFz60MTRTwY2u8YgJ5dec7gQgPSGj
|
||||
avB0vTIgoW41G58sfw5b+wjXCsh0nROon79RcQFFhGnvup0MZ+JbljyhZUYFzCli
|
||||
31jPZiKzqWa87xh2DbAyvj2KZrZtTe2LQ48Z4G8wWytJzxEeZdREe4NoETf+Mu5G
|
||||
4CqoaPR05KWkdNUdGNwXewydb3+agdCgfTs+uAjeXKNdSpbhMYg=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAO/+DgYF78KwMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTA0Mjky
|
||||
MDM1MjJaGA8yMTk4MTAwMjIwMzUyMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAv1ZLV+Z/P6INq+R1qLkzETBg7sFGKPiwHekbpuB6lrRxKHhj8V9vaReM
|
||||
lnv1Ur5LAPpMPYDsuJ4WoUbPYAqVqyMAo7ikJHCCM1cXgZJefgN6z9bpS+uA3YVh
|
||||
V/0ipHh/X2hc2S9wvxKWiSHu6Aq9GVpqL035tJQD+NJuqFd+nXrtcw4yGtmvA6wl
|
||||
5Bjn8WdsP3xOTKjrByYY1BhXpP/f1ohU9jE9dstsRXLa+XTgTPWcWdCS2oRTWPGR
|
||||
c5Aeh47nnDsyQfP9gLxHeYeQItV/BD9kU/2Hn6mnRg/B9/TYH8qzlRTzLapXp4/5
|
||||
iNwusrTNexGl8BgvAPrfhjDpdgYuTwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB7
|
||||
5ya11K/hKgvaRTvZwVV8GlVZt0CGPtNvOi4AR/UN6TMm51BzUB5nurB4z0R2MoYO
|
||||
Uts9sLGvSFALJ4otoB77hyNpH3drttU1CVVwal/yK/RQLSon/IoUkaGEbqalu+mH
|
||||
nYad5IG4tEbmepX456XXcO58MKmnczNbPyw3FRzUZQtI/sf94qBwJ1Xo6XbzPKMy
|
||||
xjL57LHIZCssD+XPifXay69OFlsCIgLim11HgPkRIHEOXLSf3dsW9r+4CjoZqB/Z
|
||||
jj/P4TLCxbYCLkvglwaMjgEWF40Img0fhx7yT2X92MiSrs3oncv/IqfdVTiN8OXq
|
||||
jgnq1bf+EZEZKvb6UCQV
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJALc/uRxg++EnMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA0MTAx
|
||||
NDAwMTFaGA8yMTk3MDkxMzE0MDAxMVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAzwCGJEJIxqtr2PD2a1mA6LhRzKhTBa1AZsg3eYfpETXIVlrpojMfvVoN
|
||||
qHvGshWLgrGTT6os/3gsaADheSaJKavxwX3X6tJA8fvEGqr3a1C1MffH9hBWbQqC
|
||||
LbfUTAbkwis4GdTUwOwPjT1Cm3u9R/VzilCNwkj7iQ65AFAI8Enmsw3UGldEsop4
|
||||
yChKB3KW3WI0FTh0+gD0YtjrqqYJxpGOYBpJp5vwdd3fZ4t1vidmDMs7liv4f9Bx
|
||||
p0oSmUobU4GUlFhBchK1DukICVQdnOVzdMonYm7s+HtpFbVHR8yf6QoixBKGdSal
|
||||
mBf7+y0ixjCn0pnC0VLVooGo4mi17QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDG
|
||||
4ONZiixgk2sjJctwbyD5WKLTH6+mxYcDw+3y/F0fWz561YORhP2FNnPOmEkf0Sl/
|
||||
Jqk4svzJbCbQeMzRoyaya/46d7UioXMHRZam5IaGBhOdQbi97R4VsQjwQj0RmQsq
|
||||
yDueDyuKTwWLK9KnvI+ZA6e6bRkdNGflK4N8GGKQ+fBhPwVELkbT9f16OJkezeeN
|
||||
S+F/gDADGJgmPXfjogICb4Kvshq0H5Lm/xZlDULF2g/cYhyNY6EOI/eS5m1I7R8p
|
||||
D/m6WoyZdpInxJfxW616OMkxQMRVsruLTNGtby3u1g6ScjmpFtvAMhYejBSdzKG4
|
||||
FEyxIdEjoeO1jhTsck3R
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJANZkFlQR2rKqMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTAyMDUx
|
||||
MzA2MjBaGA8yMTk4MDcxMTEzMDYyMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAy4Vnit2eBpEjKgOKBmyupJzJAiT4fr74tuGJNwwa+Is2vH12jMZn9Il1
|
||||
UpvvEUYTIboIgISpf6SJ5LmV5rCv4jT4a1Wm0kjfNbiIlkUi8SxZrPypcw24m6ke
|
||||
BVuxQZrZDs+xDUYIZifTmdgD50u5YE+TLg+YmXKnVgxBU6WZjbuK2INohi71aPBw
|
||||
2zWUR7Gr/ggIpf635JLU3KIBLNEmrkXCVSnDFlsK4eeCrB7+UNak+4BwgpuykSGG
|
||||
Op9+2vsuNqFeU1l9daQeG9roHR+4rIWSPa0opmMxv5nctgypOrE6zKXx2dNXQldd
|
||||
VULv+WH7s6Vm4+yBeG8ctPYH5GOo+QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBs
|
||||
ZcViiZdFdpcXESZP/KmZNDxB/kktlIEIhsQ+MNn29jayE5oLmtGjHj5dtA3XNKlr
|
||||
f6PVygVTKbtQLQqunRT83e8+7iCZMKI5ev7pITUQVvTUwI+Fc01JkYZxRFlVBuFA
|
||||
WGZO+98kxCS4n6tTwVt+nSuJr9BJRVC17apfHBgSS8c5OWna0VU/Cc9ka4eAfQR4
|
||||
7pYSDU3wSRE01cs30q34lXZ629IyFirSJ5TTOIc0osNL7vwMQYj8HOn4OBYqxKy8
|
||||
ZJyvfXsIPh0Na76PaBIs6ZlqAOflLrjGzxBPiwRM/XrGmF8ze4KzoUqJEnK13O6A
|
||||
KHKgfiigQZ1+gv5FlyXH
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAIFI+O5A6/ZIMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTA2MDQx
|
||||
MjQ4MDRaGA8yMTk4MTEwNzEyNDgwNFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAy7/WHBBHOrk+20aumT07g8rxrSM0UXgki3eYgKauPCG4Xx//vwQbuZwI
|
||||
oeVmR9nqnhfij2wOcQdbLandh0EGtbxerete3IoXzd1KXJb11PVmzrzyu5SPBPuP
|
||||
iCeV4qdjjkXo2YWM6t9YQ911hcG96YSp89TBXFYUh3KLxfqAdTVhuC0NRGhXpyii
|
||||
j/czo9njofHhqhTr7UEyPun8NVS2QWctLQ86N5zWR3Q0GRoVqqMrJs0cowHTrVw2
|
||||
9Qr7QBjjBOVbyYmtYxm/DtiKprYV/e6bCAVok015X1sZDd3oCOQNoGlv5XbHJe2o
|
||||
JFD8GRRy2rkWO/lNwVFDcwec6zC3QwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCE
|
||||
goqzjpCpmMgCpszFHwvRaSMbspKtK7wNImUjrSBOfBJsfFulyg1Zgn2nDCK7kQhx
|
||||
jMJmNIvXbps3yMqQ2cHUkKcKf5t+WldfeT4Vk1Rz6HSA8sd0kgVcIesIaoy2aaXU
|
||||
VEB/oQziRGyKdN1d4TGYVZXG44CkrzSDvlbmfiTq5tL+kAieznVF3bzHgPZW6hKP
|
||||
EXC3G/IXrXicFEe6YyE1Rakl62VncYSXiGe/i2XvsiNH3Qlmnx5XS7W0SCN0oAxW
|
||||
EH9twibauv82DVg1WOkQu8EwFw8hFde9X0Rkiu0qVcuU8lJgFEvPWMDFU5sGB6ZM
|
||||
gkEKTzMvlZpPbBhg99Jl
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAL2bOgb+dq9rMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw
|
||||
OTAwNTdaGA8yMTk1MDQwMzA5MDA1N1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAmRcyLWraysQS8yDC1b5Abs3TUaJabjqWu7d5gHik5Icd6dKl8EYpQSeS
|
||||
vz6pLhkgO4xBbCRGlgE8LS/OijcZ5HwdrxBiKbicR1YvIPaIyEQQvF5sX6UWkGYw
|
||||
Ma5IRGj4YbRmJkBybw+AAV9Icb5LJNOMWPi34OWM+2tMh+8L234v/JA6ogpdPuDr
|
||||
sM6YFHMZ0NWo58MQ0FnEj2D7H58Ti//vFPl0TaaPWaAIRF85zBiJtKcFJ6vPidqK
|
||||
f2/SDuAvZmyHC8ZBHg1moX9bR5FsU3QazfbW+c+JzAQWHj2AaQrGSCITxCMlS9sJ
|
||||
l51DeoZBjnx8cnRe+HCaC4YoRBiqIQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU/wHIo+r5U31VIsPoWoRVsNXGxowwgY4GA1UdIwSBhjCBg4AU/wHI
|
||||
o+r5U31VIsPoWoRVsNXGxoyhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAL2bOgb+dq9rMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBACobLvj8IxlQyORTz/9q7/VJL509/p4HAeve
|
||||
92riHp6+Moi0/dSEYPeFTgdWB9W3YCNc34Ss9TJq2D7t/zLGGlbI4wYXU6VJjL0S
|
||||
hCjWeIyBXUZOZKFCb0DSJeUElsTRSXSFuVrZ9EAwjLvHni3BaC9Ve34iP71ifr75
|
||||
8Tpk6PEj0+JwiijFH8E4GhcV5chB0/iooU6ioQqJrMwFYnwo1cVZJD5v6D0mu9bS
|
||||
TMIJLJKv4QQQqPsNdjiB7G9bfkB6trP8fUVYLHLsVlIy5lGx+tgwFEYkG1N8IOO/
|
||||
2LCawwaWm8FYAFd3IZl04RImNs/IMG7VmH1bf4swHOBHgCN1uYo=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAL9KIB7Fgvg/MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw
|
||||
OTAwMjVaGA8yMTk1MDExNzA5MDAyNVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAz0djWUcmRW85C5CiCKPFiTIvj6y2OuopFxNE5d3Wtab10bm06vnXVKXu
|
||||
tz3AndG+Dg0zIL0gMlU+QmrSR0PH2PfV9iejfLak9iwdm1WbwRrCEAj5VxPe0Q+I
|
||||
KeznOtxzqQ5Wo5NLE9bA61sziUAFNVsTFUzphEwRohcekYyd3bBC4v/RuAjCXHVx
|
||||
40z6AIksnAOGN2VABMlTeMNvPItKOCIeRLlllSqXX1gbtL1gxSW40JWdF3WPB68E
|
||||
e+/1U3F7OEr7XqmNODOL6yh92QqZ8fHjG+afOL9Y2Hc4g+P1nk4w4iohQOPABqzb
|
||||
MPjK7B2Rze0f9OEc51GBQu13kxkWWQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU5DS5IFdU/QwYbikgtWvkU3fDwRgwgY4GA1UdIwSBhjCBg4AU5DS5
|
||||
IFdU/QwYbikgtWvkU3fDwRihYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAL9KIB7Fgvg/MBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAG/N7ua8IE9IMyno0n5T57erBvLTOQ79fIJN
|
||||
Mf+mKRM7qRRsdg/eumFft0rLOKo54pJ+Kim2cngCWNhkzctRHBV567AJNt4+ZDG5
|
||||
hDgV0IxWO1+eaLE4qzqWP/9VrO+p3reuumgFZLVpvVpwXBBeBFUf2drUR14aWfI2
|
||||
L/6VGINXYs7uP8v/2VBS7r6XZRnPBUy/R4hv5efYXnjwA9gq8+a3stC2ur8m5ySl
|
||||
faKSwE4H320yAyaZWH4gpwUdbUlYgPHtm/ohRtiWPrN7KEG5Wq/REzMIjZCnxOfS
|
||||
6KR6PNjlhxBsImQhmBvz6j5PLQxOxBZIpDoiK278e/1Wqm9LrBc=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJANuCgCcHtOJhMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA5MTQx
|
||||
NTU3NDRaGA8yMTk1MDIxNzE1NTc0NFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA66iNv6pJPmGM20W8HbVYJSlKcAg2vUGx8xeAbzZIQdpGfkabVcUHGB6m
|
||||
Gy59VXDMDlrJckDDk6dxUOhmcX9z785TtVZURq1fua9QosdbTzX4kAgHGdp4xQEs
|
||||
mO6QZqg5qKjBP6xr3+PshfQ1rB8Bmwg0gXEm22CC7o77+7N7Mu2sWzWbiUR7vil4
|
||||
9FjWS8XmMNwFTlShp4l1TDTevDWW/uYmC30RThM9S4QPvTZ0rAS18hHVam8BCTxa
|
||||
LHaVCH/Yy52rsz0hM/FlghnSnK105ZKj+b+KIp3adBL8OMCjgc/Pxi0+j3HQLdYE
|
||||
32+FaXWU84D2iP2gDT28evnstzuYTQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQC1
|
||||
mA4q+12pxy7By6g3nBk1s34PmWikNRJBwOqhF8ucGRv8aiNhRRye9lokcXomwo8r
|
||||
KHbbqvtK85l0xUZp/Cx4sm4aTgcMvfJP29jGLclDzeqADIvkWEJ4+xncxSYVlS9x
|
||||
+78TvF/+8h9U2LnSl64PXaKdxHy2IsHIVRN4GtoaP2Xhpa1S0M328Jykq/571nfN
|
||||
1WRD1c/fQf1edgzRjhQ4whcAhv7WRRF+qTbfQJ/vDxy8lkiOsvU9XzUaZ0fZSfXX
|
||||
wXxZamQbONvFcxVHY/0PSiM8nQoUmkkBQuKleDwRWvkoJKYKyr3jvXK7HIWtMrO4
|
||||
jmXe0aMy3thyK6g5sJVg
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAMn1yPk22ditMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNzA3MTkx
|
||||
MTEyNThaGA8yMTk2MTIyMjExMTI1OFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEArznEYef8IjhrJoazI0QGZkmlmHm/4rEbyQbMNifxjsDE8YWtHNwaM91z
|
||||
zmyK6Sk/tKlWxcnl3g31iq305ziyFPEewe5Qbwf1iz2cMsvfNBcTh/E6u+mBPH3J
|
||||
gvGanqUJt6c4IbipdEouIjjnynyVWd4D6erLl/ENijeR1OxVpaqSW5SBK7jms49E
|
||||
pw3wtbchEl3qsE42Ip4IYmWxqjgaxB7vps91n4kfyzAjUmklcqTfMfPCkzmJCRgp
|
||||
Vh1C79vRQhmriVKD6BXwfZ8tG3a7mijeDn7kTsQzgO07Z2SAE63PIO48JK8HcObH
|
||||
tXORUQ/XF1jzi/SIaUJZT7kq3kWl8wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBj
|
||||
ThtO9dLvU2QmKuXAhxXjsIdlQgGG3ZGh/Vke4If1ymgLx95v2Vj9Moxk+gJuUSRL
|
||||
BzFte3TT6b3jPolbECgmAorjj8NxjC17N8QAAI1d0S0gI8kqkG7V8iRyPIFekv+M
|
||||
pcai1+cIv5IV5qAz8QOMGYfGdYkcoBjsgiyvMJu/2N2UbZJNGWvcEGkdjGJUYYOO
|
||||
NaspCAFm+6HA/K7BD9zXB1IKsprLgqhiIUgEaW3UFEbThJT+z8UfHG9fQjzzfN/J
|
||||
nT6vuY/0RRu1xAZPyh2gr5okN/s6rnmh2zmBHU1n8cbCc64MVfXe2g3EZ9Glq/9n
|
||||
izPrI09hMypJDP04ugQc
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAPRYyD8TtmC0MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjAzMDcx
|
||||
MDQ1MDFaGA8yMTk1MDgxMTEwNDUwMVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA0LSS5I/eCT2PM0+qusorBx67QL26BIWQHd/yF6ARtHBb/1DdFLRqE5Dj
|
||||
07Xw7eENC+T79mOxOAbeWg91KaODOzw6i9I/2/HpK0+NDEdD6sPKDA1d45jRra+v
|
||||
CqAjI+nV9Vw91wv7HjMk3RcjWGziM8/hw+3YNIutt7aQzZRwIWlBpcqx3/AFd8Eu
|
||||
2UsRMSHgkGUW6UzUF+h/U8218XfrauKNGmNKDYUhtmyBrHT+k6J0hQ4pN7fe6h+Z
|
||||
w9RVHm24BGhlLxLHLmsOIxvbrF277uX9Dxu1HfKfu5D2kimTY7xSZDNLR2dt+kNY
|
||||
/+iWdIeEFpPT0PLSILt52wP6stF+3QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBI
|
||||
E6w+WWC2gCfoJO6c9HMyGLMFEpqZmz1n5IcQt1h9iyO7Vkm1wkJiZsMhXpk73zXf
|
||||
TPxuXEacTX3SOEa07OIMCFwkusO5f6leOyFTynHCzBgZ3U0UkRVZA3WcpbNB6Dwy
|
||||
h7ysVlqyT9WZd7EOYm5j5oue2G2xdei+6etgn5UjyWm6liZGrcOF6WPTdmzqa6WG
|
||||
ApEqanpkQd/HM+hUYex/ZS6zEhd4CCDLgYkIjlrFbFb3pJ1OVLztIfSN5J4Oolpu
|
||||
JVCfIq5u1NkpzL7ys/Ub8eYipbzI6P+yxXiUSuF0v9b98ymczMYjrSQXIf1e8In3
|
||||
OP2CclCHoZ8XDQcvvKAh
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAMoxixvs3YssMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA3MjAw
|
||||
ODQ0NDRaGA8yMTk3MTIyMzA4NDQ0NFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA4T1PNsOg0FDrGlWePoHeOSmOJTA3HCRy5LSbYD33GFU2eBrOIxoU/+SM
|
||||
rInKu3GghAMfH7WxPW3etIAZiyTDDU5RLcUq2Qwdr/ZpXAWpYocNc/CEmBFtfbxF
|
||||
z4uwBIN3/drM0RSbe/wP9EcgmNUGQMMZWeAji8sMtwpOblNWAP9BniUG0Flcz6Dp
|
||||
uPovwDTLdAYT3TyhzlohKL3f6O48TR5yTaV+3Ran2SGRhyJjfh3FRpP4VC+z5LnT
|
||||
WPQHN74Kdq35UgrUxNhJraMGCzznolUuoR/tFMwR93401GsM9fVA7SW3jjCGF81z
|
||||
PSzjy+ArKyQqIpLW1YGWDFk3sf08FQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDK
|
||||
2/+C3nPMgtyOFX/I3Cyk+Pui44IgOwCsIdNGwuJysdqp5VIfnjegEu2zIMWJSKGO
|
||||
lMZoQXjffkVZZ97J7RNDW06oB7kj3WVE8a7U4WEOfnO/CbMUf/x99CckNDwpjgW+
|
||||
K8V8SzAsQDvYZs2KaE+18GFfLVF1TGUYK2rPSZMHyX+v/TIlc/qUceBycrIQ/kke
|
||||
jDFsihUMLqgmOV2hXKUpIsmiWMGrFQV4AeV0iXP8L/ZhcepLf1t5SbsGdUA3AUY1
|
||||
3If8s81uTheiQjwY5t9nM0SY/1Th/tL3+RaEI79VNEVfG1FQ8mgqCK0ar4m0oZJl
|
||||
tmmEJM7xeURdpBBx36Di
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAJVMGw5SHkcvMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw
|
||||
ODU3MTlaGA8yMTk1MDQwMzA4NTcxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAlaSSLfBl7OgmikjLReHuNhVuvM20dCsVzptUyRbut+KmIEEc24wd/xVy
|
||||
2RMIrydGedkW4tUjkUyOyfET5OAyT43jTzDPHZTkRSVkYjBdcYbe9o/0Q4P7IVS3
|
||||
XlvwrUu0qo9nSID0mxMnOoF1l8KAqnn10tQ0W+lNSTkasW7QVzcb+3okPEVhPAOq
|
||||
MnlY3vkMQGI8zX4iOKbEcSVIzf6wuIffXMGHVC/JjwihJ2USQ8fq6oy686g54P4w
|
||||
ROg415kLYcodjqThmGJPNUpAZ7MOc5Z4pymFuCHgNAZNvjhZDA842Ojecqm62zcm
|
||||
Tzh/pNMNeGCRYq2EQX0aQtYOIj7bOQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU6SSB+3qALorPMVNjToM1Bj3oJMswgY4GA1UdIwSBhjCBg4AU6SSB
|
||||
+3qALorPMVNjToM1Bj3oJMuhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAJVMGw5SHkcvMBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBAF/0dWqkIEZKg5rca8o0P0VS+tolJJE/FRZO
|
||||
atHOeaQbWzyac6NEwjYeeV2kY63skJ+QPuYbSuIBLM8p/uTRIvYM4LZYImLGUvoO
|
||||
IdtJ8mAzq8CZ3ipdMs1hRqF5GRp8lg4w2QpX+PfhnW47iIOBiqSAUkIr3Y3BDaDn
|
||||
EjeXF6qS4iPIvBaQQ0cvdddNh/pE33/ceghbkZNTYkrwMyBkQlRTTVKXFN7pCRUV
|
||||
+L9FuQ9y8mP0BYZa5e1sdkwebydU+eqVzsil98ntkhpjvRkaJ5+Drs8TjGaJWlRw
|
||||
5WuOr8unKj7YxdL1bv7//RtVYVVi296ldoRUYv4SCvJF11z0OdQ=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEEjCCAvqgAwIBAgIJAMtdyRcH51j9MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0yMjA0MDgx
|
||||
MjM5MTZaGA8yMjAxMDkxMjEyMzkxNlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAvUsKCxoH6KXRYJLeYTWAQfaBQeCwhJaR56mfUeFHJE4g8aFjWkiN4uc1
|
||||
TvOyYNnIZKTHWmzmulmdinWNbwP0GiROHb/i7ro0HhvnptyycGt8ag8affiIbx5X
|
||||
7ohdwSN2KJ6G0IKflIx7f2NEI0oAMM/9k+T1eVF+MVWzpZoiDp8frLNkqp8+RAgz
|
||||
ScZsbRfwv3u/if5xJAvdg2nCkIWDMSHEVPoz0lJo7v0ZuDtWWsL1LHnL5ozvsKEk
|
||||
+ZJyEi23r+U1hIT1NTBdp4yoigNQexedtwCSr7q36oOdDwvZpqYlkLi3uxZ4ta+a
|
||||
01pzOSTwMLgQZSbKWQrpMvsIAPrxoQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd
|
||||
BgNVHQ4EFgQU1GgnGdNpbnL3lLF30Jomg7Ji9hYwgY4GA1UdIwSBhjCBg4AU1Ggn
|
||||
GdNpbnL3lLF30Jomg7Ji9hahYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX
|
||||
YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6
|
||||
b24gV2ViIFNlcnZpY2VzIExMQ4IJAMtdyRcH51j9MBIGA1UdEwEB/wQIMAYBAf8C
|
||||
AQAwDQYJKoZIhvcNAQELBQADggEBACVl00qQlatBKVeiWMrhpczsJroxDxlZTOba
|
||||
6wTMZk7c3akb6XMOSZFbGaifkebPZqTHEhDlrClM2j9AIlYcCx6YCrTf4cuhn2mD
|
||||
gcJN33143eOWSaeRY3ee4j+V9ne98y3kO2wLz95VrRgclPFR8po2iWGzGhwUi+FG
|
||||
q8dXeCH3N0DZgQsSgQWwmdNQXZZej6RHLU/8In5trHKLY0ppnLBjn/UZQbeTyW5q
|
||||
RJB3GaveXjfgFUWj2qOcDuRGaikdS+dYaLsi5z9cA3FolHzWxx9MOs8io8vKqQzV
|
||||
XUrLTNWwuhZy88cOlqGPxnoRbw7TmifwPw/cunNrsjUUOgs6ZTk=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAPu4ssY3BlzcMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEyMDMy
|
||||
MTI5MzJaGA8yMTk1MDUwODIxMjkzMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAsOiGi4A6+YTLzCdIyP8b8SCT2M/6PGKwzKJ5XbSBoL3gsnSWiFYqPg9c
|
||||
uJPNbiy9wSA9vlyfWMd90qvTfiNrT6vewP813QdJ3EENZOx4ERcf/Wd22tV72kxD
|
||||
yw1Q3I1OMH4bOItGQAxU5OtXCjBZEEUZooOkU8RoUQOU2Pql4NTiUpzWacNutAn5
|
||||
HHS7MDc4lUlsJqbN+5QW6fFrcNG/0Mrib3JbwdFUNhrQ5j+Yq5h78HarnUivnX/3
|
||||
Ap+oPbentv1qd7wvPJu556LZuhfqI0TohiIT1Ah+yUdN5osoaMxTHKKtf/CsSJ1F
|
||||
w3qXqFJQA0VWsqjFyHXFI32I/GOupwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCn
|
||||
Um00QHvUsJSN6KATbghowLynHn3wZSQsuS8E0COpcFJFxP2SV0NYkERbXu0n/Vhi
|
||||
yq5F8v4/bRA2/xpedLWmvFs7QWlomuXhSnYFkd33Z5gnXPb9vRkLwiMSw4uXls35
|
||||
qQraczUJ9EXDhrv7VmngIk9H3YsxYrlDGEqh/oz4Ze4ULOgnfkauanHikk+BUEsg
|
||||
/jsTD+7e+niEzJPihHdsvKFDlud5pakEzyxovHwNJ1GS2I//yxrJFIL91mehjqEk
|
||||
RLPdNse7N6UvSnuXcOokwu6l6kfzigGkJBxkcq4gre3szZFdCQcUioj7Z4xtuTL8
|
||||
YMqfiDtN5cbD8R8ojw9Y
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJAOtrM5XLDSjCMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQx
|
||||
MDAxNDJaGA8yMTk1MDExNzEwMDE0MlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAvVBz+WQNdPiM9S+aUULOQEriTmNDUrjLWLr7SfaOJScBzis5D5ju0jh1
|
||||
+qJdkbuGKtFX5OTWTm8pWhInX+hIOoS3exC4BaANoa1A3o6quoG+Rsv72qQf8LLH
|
||||
sgEi6+LMlCN9TwnRKOToEabmDKorss4zFl7VSsbQJwcBSfOcIwbdRRaW9Ab6uJHu
|
||||
79L+mBR3Ea+G7vSDrVIA8goAPkae6jY9WGw9KxsOrcvNdQoEkqRVtHo4bs9fMRHU
|
||||
Etphj2gh4ObXlFN92VtvzD6QBs3CcoFWgyWGvzg+dNG5VCbsiiuRdmii3kcijZ3H
|
||||
Nv1wCcZoEAqH72etVhsuvNRC/xAP8wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA8
|
||||
ezx5LRjzUU9EYWYhyYIEShFlP1qDHs7F4L46/5lc4pL8FPoQm5CZuAF31DJhYi/b
|
||||
fcV7i3n++/ymQbCLC6kAg8DUB7NrcROll5ag8d/JXGzcTCnlDXLXx1905fPNa+jI
|
||||
0q5quTmdmiSi0taeaKZmyUdhrB+a7ohWdSdlokEIOtbH1P+g5yll3bI2leYE6Tm8
|
||||
LKbyfK/532xJPqO9abx4Ddn89ZEC6vvWVNDgTsxERg992Wi+/xoSw3XxkgAryIv1
|
||||
zQ4dQ6irFmXwCWJqc6kHg/M5W+z60S/94+wGTXmp+19U6Rkq5jVMLh16XJXrXwHe
|
||||
4KcgIS/aQGVgjM6wivVA
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJANCOF0Q6ohnuMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA5MTAx
|
||||
OTQyNDdaGA8yMTk1MDIxMzE5NDI0N1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAzIcGTzNqie3f1olrrqcfzGfbymSM2QfbTzDIOG6xXXeFrCDAmOq0wUhi
|
||||
3fRCuoeHlKOWAPu76B9os71+zgF22dIDEVkpqHCjBrGzDQZXXUwOzhm+PmBUI8Z1
|
||||
qvbVD4ZYhjCujWWzrsX6Z4yEK7PEFjtf4M4W8euw0RmiNwjy+knIFa+VxK6aQv94
|
||||
lW98URFP2fD84xedHp6ozZlr3+RZSIFZsOiyxYsgiwTbesRMI0Y7LnkKGCIHQ/XJ
|
||||
OwSISWaCddbu59BZeADnyhl4f+pWaSQpQQ1DpXvZAVBYvCH97J1oAxLfH8xcwgSQ
|
||||
/se3wtn095VBt5b7qTVjOvy6vKZazwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA/
|
||||
S8+a9csfASkdtQUOLsBynAbsBCH9Gykq2m8JS7YE4TGvqlpnWehz78rFTzQwmz4D
|
||||
fwq8byPkl6DjdF9utqZ0JUo/Fxelxom0h6oievtBlSkmZJNbgc2WYm1zi6ptViup
|
||||
Y+4S2+vWZyg/X1PXD7wyRWuETmykk73uEyeWFBYKCHWsO9sI+62O4Vf8Jkuj/cie
|
||||
1NSJX8fkervfLrZSHBYhxLbL+actVEo00tiyZz8GnhgWx5faCY38D/k4Y/j5Vz99
|
||||
7lUX/+fWHT3+lTL8ZZK7fOQWh6NQpI0wTP9KtWqfOUwMIbgFQPoxkP00TWRmdmPz
|
||||
WOwTObEf9ouTnjG9OZ20
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDOzCCAiOgAwIBAgIJALPB6hxFhay8MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
|
||||
BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0
|
||||
dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA0MTAx
|
||||
MjMyNDlaGA8yMTk3MDkxMzEyMzI0OVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT
|
||||
EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft
|
||||
YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEAva9xsI9237KYb/SPWmeCVzi7giKNron8hoRDwlwwMC9+uHPd53UxzKLb
|
||||
pTgtJWAPkZVxEdl2Gdhwr3SULoKcKmkqE6ltVFrVuPT33La1UufguT9k8ZDDuO9C
|
||||
hQNHUdSVEuVrK3bLjaSsMOS7Uxmnn7lYT990IReowvnBNBsBlcabfQTBV04xfUG0
|
||||
/m0XUiUFjOxDBqbNzkEIblW7vK7ydSJtFMSljga54UAVXibQt9EAIF7B8k9l2iLa
|
||||
mu9yEjyQy+ZQICTuAvPUEWe6va2CHVY9gYQLA31/zU0VBKZPTNExjaqK4j8bKs1/
|
||||
7dOV1so39sIGBz21cUBec1o+yCS5SwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBt
|
||||
hO2W/Lm+Nk0qsXW6mqQFsAou0cASc/vtGNCyBfoFNX6aKXsVCHxq2aq2TUKWENs+
|
||||
mKmYu1lZVhBOmLshyllh3RRoL3Ohp3jCwXytkWQ7ElcGjDzNGc0FArzB8xFyQNdK
|
||||
MNvXDi/ErzgrHGSpcvmGHiOhMf3UzChMWbIr6udoDlMbSIO7+8F+jUJkh4Xl1lKb
|
||||
YeN5fsLZp7T/6YvbFSPpmbn1YoE2vKtuGKxObRrhU3h4JHdp1Zel1pZ6lh5iM0ec
|
||||
SD11SximGIYCjfZpRqI3q50mbxCd7ckULz+UUPwLrfOds4VrVVSj+x0ZdY19Plv2
|
||||
9shw5ez6Cn7E3IfzqNHO
|
||||
-----END CERTIFICATE-----`
|
@ -1,138 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/api"
|
||||
)
|
||||
|
||||
// CLIHandler implements the CLI login handler ("vault login -method=aws")
// for the AWS auth method.
type CLIHandler struct{}
|
||||
|
||||
func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
|
||||
mount, ok := m["mount"]
|
||||
if !ok {
|
||||
mount = "aws"
|
||||
}
|
||||
|
||||
role, ok := m["role"]
|
||||
if !ok {
|
||||
role = ""
|
||||
}
|
||||
|
||||
headerValue, ok := m["header_value"]
|
||||
if !ok {
|
||||
headerValue = ""
|
||||
}
|
||||
|
||||
logVal, ok := m["log_level"]
|
||||
if !ok {
|
||||
logVal = "info"
|
||||
}
|
||||
level := hclog.LevelFromString(logVal)
|
||||
if level == hclog.NoLevel {
|
||||
return nil, fmt.Errorf("failed to parse 'log_level' value: %q", logVal)
|
||||
}
|
||||
hlogger := hclog.Default()
|
||||
hlogger.SetLevel(level)
|
||||
|
||||
creds, err := awsutil.RetrieveCreds(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], hlogger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
region := m["region"]
|
||||
switch region {
|
||||
case "":
|
||||
// The CLI has always defaulted to "us-east-1" if a region is not provided.
|
||||
region = awsutil.DefaultRegion
|
||||
case "auto":
|
||||
// Beginning in 1.10 we also accept the "auto" value, which uses the region detection logic in
|
||||
// awsutil.GetRegion() to determine the region. That behavior is triggered when region = "".
|
||||
region = ""
|
||||
}
|
||||
|
||||
loginData, err := awsutil.GenerateLoginData(creds, headerValue, region, hlogger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if loginData == nil {
|
||||
return nil, fmt.Errorf("got nil response from GenerateLoginData")
|
||||
}
|
||||
loginData["role"] = role
|
||||
path := fmt.Sprintf("auth/%s/login", mount)
|
||||
secret, err := c.Logical().Write(path, loginData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secret == nil {
|
||||
return nil, fmt.Errorf("empty response from credential provider")
|
||||
}
|
||||
|
||||
return secret, nil
|
||||
}
|
||||
|
||||
func (h *CLIHandler) Help() string {
|
||||
help := `
|
||||
Usage: vault login -method=aws [CONFIG K=V...]
|
||||
|
||||
The AWS auth method allows users to authenticate with AWS IAM
|
||||
credentials. The AWS IAM credentials, and optionally the AWS region, may be
|
||||
specified in a number of ways, listed in order of precedence below:
|
||||
|
||||
1. Explicitly via the command line (not recommended)
|
||||
|
||||
2. Via the standard AWS environment variables (AWS_ACCESS_KEY, etc.)
|
||||
|
||||
3. Via the ~/.aws/credentials file
|
||||
|
||||
4. Via EC2 instance profile
|
||||
|
||||
Authenticate using locally stored credentials:
|
||||
|
||||
$ vault login -method=aws
|
||||
|
||||
Authenticate by passing keys:
|
||||
|
||||
$ vault login -method=aws aws_access_key_id=... aws_secret_access_key=...
|
||||
|
||||
Configuration:
|
||||
|
||||
aws_access_key_id=<string>
|
||||
Explicit AWS access key ID
|
||||
|
||||
aws_secret_access_key=<string>
|
||||
Explicit AWS secret access key
|
||||
|
||||
aws_security_token=<string>
|
||||
Explicit AWS security token for temporary credentials
|
||||
|
||||
header_value=<string>
|
||||
Value for the x-vault-aws-iam-server-id header in requests
|
||||
|
||||
mount=<string>
|
||||
Path where the AWS credential method is mounted. This is usually provided
|
||||
via the -path flag in the "vault login" command, but it can be specified
|
||||
here as well. If specified here, it takes precedence over the value for
|
||||
-path. The default value is "aws".
|
||||
|
||||
region=<string>
|
||||
Explicit AWS region to reach out to for authentication request signing. A value
|
||||
of "auto" enables auto-detection of region based on the precedence described above.
|
||||
Defaults to "us-east-1" if not specified.
|
||||
|
||||
role=<string>
|
||||
Name of the role to request a token against
|
||||
|
||||
log_level=<string>
|
||||
Set logging level during AWS credential acquisition. Valid levels are
|
||||
trace, debug, info, warn, error. Defaults to info.
|
||||
`
|
||||
|
||||
return strings.TrimSpace(help)
|
||||
}
|
@ -1,304 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// getRawClientConfig creates a aws-sdk-go config, which is used to create client
|
||||
// that can interact with AWS API. This builds credentials in the following
|
||||
// order of preference:
|
||||
//
|
||||
// * Static credentials from 'config/client'
|
||||
// * Environment variables
|
||||
// * Instance metadata role
|
||||
func (b *backend) getRawClientConfig(ctx context.Context, s logical.Storage, region, clientType string) (*aws.Config, error) {
|
||||
credsConfig := &awsutil.CredentialsConfig{
|
||||
Region: region,
|
||||
Logger: b.Logger(),
|
||||
}
|
||||
|
||||
// Read the configured secret key and access key
|
||||
config, err := b.nonLockedClientConfigEntry(ctx, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
endpoint := aws.String("")
|
||||
var maxRetries int = aws.UseServiceDefaultRetries
|
||||
if config != nil {
|
||||
// Override the defaults with configured values.
|
||||
switch {
|
||||
case clientType == "ec2" && config.Endpoint != "":
|
||||
endpoint = aws.String(config.Endpoint)
|
||||
case clientType == "iam" && config.IAMEndpoint != "":
|
||||
endpoint = aws.String(config.IAMEndpoint)
|
||||
case clientType == "sts":
|
||||
if config.STSEndpoint != "" {
|
||||
endpoint = aws.String(config.STSEndpoint)
|
||||
}
|
||||
if config.STSRegion != "" {
|
||||
region = config.STSRegion
|
||||
}
|
||||
}
|
||||
|
||||
credsConfig.AccessKey = config.AccessKey
|
||||
credsConfig.SecretKey = config.SecretKey
|
||||
maxRetries = config.MaxRetries
|
||||
}
|
||||
|
||||
credsConfig.HTTPClient = cleanhttp.DefaultClient()
|
||||
|
||||
creds, err := credsConfig.GenerateCredentialChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if creds == nil {
|
||||
return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
|
||||
}
|
||||
|
||||
// Create a config that can be used to make the API calls.
|
||||
return &aws.Config{
|
||||
Credentials: creds,
|
||||
Region: aws.String(region),
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
Endpoint: endpoint,
|
||||
MaxRetries: aws.Int(maxRetries),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getClientConfig returns an aws-sdk-go config, with optionally assumed credentials
|
||||
// It uses getRawClientConfig to obtain config for the runtime environment, and if
|
||||
// stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed
|
||||
// credentials. The credentials will expire after 15 minutes but will auto-refresh.
|
||||
func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) {
|
||||
config, err := b.getRawClientConfig(ctx, s, region, clientType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config == nil {
|
||||
return nil, fmt.Errorf("could not compile valid credentials through the default provider chain")
|
||||
}
|
||||
|
||||
stsConfig, err := b.getRawClientConfig(ctx, s, region, "sts")
|
||||
if stsConfig == nil {
|
||||
return nil, fmt.Errorf("could not configure STS client")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stsRole != "" {
|
||||
sess, err := session.NewSession(stsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
assumedCredentials := stscreds.NewCredentials(sess, stsRole)
|
||||
// Test that we actually have permissions to assume the role
|
||||
if _, err = assumedCredentials.Get(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Credentials = assumedCredentials
|
||||
} else {
|
||||
if b.defaultAWSAccountID == "" {
|
||||
sess, err := session.NewSession(stsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := sts.New(sess)
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("could not obtain sts client: %w", err)
|
||||
}
|
||||
inputParams := &sts.GetCallerIdentityInput{}
|
||||
identity, err := client.GetCallerIdentityWithContext(ctx, inputParams)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to fetch current caller: %w", err)
|
||||
}
|
||||
if identity == nil {
|
||||
return nil, fmt.Errorf("got nil result from GetCallerIdentity")
|
||||
}
|
||||
b.defaultAWSAccountID = *identity.Account
|
||||
}
|
||||
if b.defaultAWSAccountID != accountID {
|
||||
return nil, fmt.Errorf("unable to fetch client for account ID %q -- default client is for account %q", accountID, b.defaultAWSAccountID)
|
||||
}
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// flushCachedEC2Clients deletes all the cached ec2 client objects from the backend.
// If the client credentials configuration is deleted or updated in the backend, all
// the cached EC2 client objects will be flushed. Config mutex lock should be
// acquired for write operation before calling this method.
func (b *backend) flushCachedEC2Clients() {
	// deleting items in map during iteration is safe
	// (the Go spec permits removal of entries while ranging over a map)
	for region := range b.EC2ClientsMap {
		delete(b.EC2ClientsMap, region)
	}
}
|
||||
|
||||
// flushCachedIAMClients deletes all the cached iam client objects from the
// backend. If the client credentials configuration is deleted or updated in
// the backend, all the cached IAM client objects will be flushed. Config mutex
// lock should be acquired for write operation before calling this method.
func (b *backend) flushCachedIAMClients() {
	// deleting items in map during iteration is safe
	// (the Go spec permits removal of entries while ranging over a map)
	for region := range b.IAMClientsMap {
		delete(b.IAMClientsMap, region)
	}
}
|
||||
|
||||
// Gets an entry out of the user ID cache
|
||||
func (b *backend) getCachedUserId(userId string) string {
|
||||
if userId == "" {
|
||||
return ""
|
||||
}
|
||||
if entry, ok := b.iamUserIdToArnCache.Get(userId); ok {
|
||||
b.iamUserIdToArnCache.SetDefault(userId, entry)
|
||||
return entry.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Sets an entry in the user ID cache
|
||||
func (b *backend) setCachedUserId(userId, arn string) {
|
||||
if userId != "" {
|
||||
b.iamUserIdToArnCache.SetDefault(userId, arn)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backend) stsRoleForAccount(ctx context.Context, s logical.Storage, accountID string) (string, error) {
|
||||
// Check if an STS configuration exists for the AWS account
|
||||
sts, err := b.lockedAwsStsEntry(ctx, s, accountID)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error fetching STS config for account ID %q: %w", accountID, err)
|
||||
}
|
||||
// An empty STS role signifies the master account
|
||||
if sts != nil {
|
||||
return sts.StsRole, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// clientEC2 creates a client to interact with AWS EC2 API. Clients are cached
// per (region, stsRole) pair in b.EC2ClientsMap, guarded by b.configMutex
// using a read-lock fast path with an upgrade-and-recheck slow path.
func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, accountID string) (*ec2.EC2, error) {
	stsRole, err := b.stsRoleForAccount(ctx, s, accountID)
	if err != nil {
		return nil, err
	}
	// Fast path: under the read lock, return an already-cached client.
	b.configMutex.RLock()
	if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
		defer b.configMutex.RUnlock()
		// If the client object was already created, return it
		return b.EC2ClientsMap[region][stsRole], nil
	}

	// Release the read lock and acquire the write lock
	b.configMutex.RUnlock()
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	// If the client gets created while switching the locks, return it
	// (another goroutine may have won the race between RUnlock and Lock).
	if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil {
		return b.EC2ClientsMap[region][stsRole], nil
	}

	// Create an AWS config object using a chain of providers
	var awsConfig *aws.Config
	awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "ec2")
	if err != nil {
		return nil, err
	}

	if awsConfig == nil {
		return nil, fmt.Errorf("could not retrieve valid assumed credentials")
	}

	// Create a new EC2 client object, cache it and return the same
	sess, err := session.NewSession(awsConfig)
	if err != nil {
		return nil, err
	}
	client := ec2.New(sess)
	if client == nil {
		return nil, fmt.Errorf("could not obtain ec2 client")
	}
	if _, ok := b.EC2ClientsMap[region]; !ok {
		b.EC2ClientsMap[region] = map[string]*ec2.EC2{stsRole: client}
	} else {
		b.EC2ClientsMap[region][stsRole] = client
	}

	return b.EC2ClientsMap[region][stsRole], nil
}
|
||||
|
||||
// clientIAM creates a client to interact with AWS IAM API
|
||||
func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, accountID string) (*iam.IAM, error) {
|
||||
stsRole, err := b.stsRoleForAccount(ctx, s, accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stsRole == "" {
|
||||
b.Logger().Debug(fmt.Sprintf("no stsRole found for %s", accountID))
|
||||
} else {
|
||||
b.Logger().Debug(fmt.Sprintf("found stsRole %s for account %s", stsRole, accountID))
|
||||
}
|
||||
b.configMutex.RLock()
|
||||
if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
|
||||
defer b.configMutex.RUnlock()
|
||||
// If the client object was already created, return it
|
||||
b.Logger().Debug(fmt.Sprintf("returning cached client for region %s and stsRole %s", region, stsRole))
|
||||
return b.IAMClientsMap[region][stsRole], nil
|
||||
}
|
||||
b.Logger().Debug(fmt.Sprintf("no cached client for region %s and stsRole %s", region, stsRole))
|
||||
|
||||
// Release the read lock and acquire the write lock
|
||||
b.configMutex.RUnlock()
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
// If the client gets created while switching the locks, return it
|
||||
if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil {
|
||||
return b.IAMClientsMap[region][stsRole], nil
|
||||
}
|
||||
|
||||
// Create an AWS config object using a chain of providers
|
||||
var awsConfig *aws.Config
|
||||
awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "iam")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if awsConfig == nil {
|
||||
return nil, fmt.Errorf("could not retrieve valid assumed credentials")
|
||||
}
|
||||
|
||||
// Create a new IAM client object, cache it and return the same
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := iam.New(sess)
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("could not obtain iam client")
|
||||
}
|
||||
if _, ok := b.IAMClientsMap[region]; !ok {
|
||||
b.IAMClientsMap[region] = map[string]*iam.IAM{stsRole: client}
|
||||
} else {
|
||||
b.IAMClientsMap[region][stsRole] = client
|
||||
}
|
||||
return b.IAMClientsMap[region][stsRole], nil
|
||||
}
|
@ -1,34 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
awsauth "github.com/hashicorp/vault/builtin/credential/aws"
|
||||
"github.com/hashicorp/vault/sdk/plugin"
|
||||
)
|
||||
|
||||
func main() {
|
||||
apiClientMeta := &api.PluginAPIClientMeta{}
|
||||
flags := apiClientMeta.FlagSet()
|
||||
flags.Parse(os.Args[1:])
|
||||
|
||||
tlsConfig := apiClientMeta.GetTLSConfig()
|
||||
tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
|
||||
|
||||
if err := plugin.ServeMultiplex(&plugin.ServeOpts{
|
||||
BackendFactoryFunc: awsauth.Factory,
|
||||
// set the TLSProviderFunc so that the plugin maintains backwards
|
||||
// compatibility with Vault versions that don’t support plugin AutoMTLS
|
||||
TLSProviderFunc: tlsProviderFunc,
|
||||
}); err != nil {
|
||||
logger := hclog.New(&hclog.LoggerOptions{})
|
||||
|
||||
logger.Error("plugin shutting down", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
@ -1,418 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// pathListCertificates creates a path that enables listing of all
|
||||
// the AWS public certificates registered with Vault.
|
||||
func (b *backend) pathListCertificates() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "config/certificates/?",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "certificate-configurations",
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.ListOperation: &framework.PathOperation{
|
||||
Callback: b.pathCertificatesList,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathListCertificatesHelpSyn,
|
||||
HelpDescription: pathListCertificatesHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// pathConfigCertificate returns the path definition for
// config/certificate/<cert_name>, which registers, reads, and deletes the
// AWS public certificates used to verify instance identity documents.
func (b *backend) pathConfigCertificate() *framework.Path {
	return &framework.Path{
		Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"cert_name": {
				Type:        framework.TypeString,
				Description: "Name of the certificate.",
			},
			"aws_public_cert": {
				Type:        framework.TypeString,
				Description: "Base64 encoded AWS Public cert required to verify PKCS7 signature of the EC2 instance metadata.",
			},
			// "type" selects which kind of document this certificate can
			// verify; it defaults to "pkcs7" (see Default below).
			"type": {
				Type:    framework.TypeString,
				Default: "pkcs7",
				Description: `
Takes the value of either "pkcs7" or "identity", indicating the type of
document which can be verified using the given certificate. The reason is that
the PKCS#7 document will have a DSA digest and the identity signature will have
an RSA signature, and accordingly the public certificates to verify those also
vary. Defaults to "pkcs7".`,
			},
		},

		// ExistenceCheck steers the framework between Create and Update.
		ExistenceCheck: b.pathConfigCertificateExistenceCheck,

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.CreateOperation: &framework.PathOperation{
				Callback: b.pathConfigCertificateCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "certificate",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigCertificateCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "certificate",
				},
			},
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigCertificateRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "certificate-configuration",
				},
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathConfigCertificateDelete,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "certificate-configuration",
				},
			},
		},

		HelpSynopsis:    pathConfigCertificateSyn,
		HelpDescription: pathConfigCertificateDesc,
	}
}
|
||||
|
||||
// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
|
||||
// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
|
||||
func (b *backend) pathConfigCertificateExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
certName := data.Get("cert_name").(string)
|
||||
if certName == "" {
|
||||
return false, fmt.Errorf("missing cert_name")
|
||||
}
|
||||
|
||||
entry, err := b.lockedAWSPublicCertificateEntry(ctx, req.Storage, certName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
// pathCertificatesList is used to list all the AWS public certificates registered with Vault
|
||||
func (b *backend) pathCertificatesList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
certs, err := req.Storage.List(ctx, "config/certificate/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return logical.ListResponse(certs), nil
|
||||
}
|
||||
|
||||
// decodePEMAndParseCertificate decodes a PEM encoded certificate and parses
// it into an x509 certificate. The input must contain exactly one PEM block:
// trailing data, a missing block, or an unparseable DER payload all yield an
// error.
func decodePEMAndParseCertificate(certificate string) (*x509.Certificate, error) {
	// Decode the PEM block and error out if a block is not detected in the first attempt
	decodedPublicCert, rest := pem.Decode([]byte(certificate))
	if len(rest) != 0 {
		return nil, fmt.Errorf("invalid certificate; should be one PEM block only")
	}
	// pem.Decode returns a nil block when no PEM data is present at all
	// (e.g. empty input, in which case rest is also empty); guard against
	// dereferencing it below.
	if decodedPublicCert == nil {
		return nil, fmt.Errorf("invalid certificate; failed to decode PEM block")
	}

	// Check if the certificate can be parsed
	publicCert, err := x509.ParseCertificate(decodedPublicCert.Bytes)
	if err != nil {
		return nil, err
	}
	if publicCert == nil {
		return nil, fmt.Errorf("invalid certificate; failed to parse certificate")
	}
	return publicCert, nil
}
|
||||
|
||||
// awsPublicCertificates returns a slice of all the parsed AWS public
|
||||
// certificates, which are used to verify either the identity, RSA 2048
|
||||
// or the PKCS7 signatures of the instance identity documents. This method will
|
||||
// append the certificates registered using `config/certificate/<cert_name>`
|
||||
// endpoint, along with the default certificates in the backend.
|
||||
func (b *backend) awsPublicCertificates(ctx context.Context, s logical.Storage, isPkcs bool) ([]*x509.Certificate, error) {
|
||||
// Lock at beginning and use internal method so that we are consistent as
|
||||
// we iterate through
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
certs := make([]*x509.Certificate, len(defaultCertificates))
|
||||
copy(certs, defaultCertificates)
|
||||
|
||||
// Get the list of all the registered certificates
|
||||
registeredCerts, err := s.List(ctx, "config/certificate/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Iterate through each certificate, parse and append it to a slice
|
||||
for _, cert := range registeredCerts {
|
||||
certEntry, err := b.nonLockedAWSPublicCertificateEntry(ctx, s, cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if certEntry == nil {
|
||||
return nil, fmt.Errorf("certificate storage has a nil entry under the name: %q", cert)
|
||||
}
|
||||
// Append relevant certificates only
|
||||
if (isPkcs && certEntry.Type == "pkcs7") ||
|
||||
(!isPkcs && certEntry.Type == "identity") {
|
||||
decodedCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certs = append(certs, decodedCert)
|
||||
}
|
||||
}
|
||||
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// lockedSetAWSPublicCertificateEntry is used to store the AWS public key in
|
||||
// the storage. This method acquires lock before creating or updating a storage
|
||||
// entry.
|
||||
func (b *backend) lockedSetAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string, certEntry *awsPublicCert) error {
|
||||
if certName == "" {
|
||||
return fmt.Errorf("missing certificate name")
|
||||
}
|
||||
|
||||
if certEntry == nil {
|
||||
return fmt.Errorf("nil AWS public key certificate")
|
||||
}
|
||||
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
return b.nonLockedSetAWSPublicCertificateEntry(ctx, s, certName, certEntry)
|
||||
}
|
||||
|
||||
// nonLockedSetAWSPublicCertificateEntry is used to store the AWS public key in
|
||||
// the storage. This method does not acquire lock before reading the storage.
|
||||
// If locking is desired, use lockedSetAWSPublicCertificateEntry instead.
|
||||
func (b *backend) nonLockedSetAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string, certEntry *awsPublicCert) error {
|
||||
if certName == "" {
|
||||
return fmt.Errorf("missing certificate name")
|
||||
}
|
||||
|
||||
if certEntry == nil {
|
||||
return fmt.Errorf("nil AWS public key certificate")
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON("config/certificate/"+certName, certEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry == nil {
|
||||
return fmt.Errorf("failed to create storage entry for AWS public key certificate")
|
||||
}
|
||||
|
||||
return s.Put(ctx, entry)
|
||||
}
|
||||
|
||||
// lockedAWSPublicCertificateEntry is used to get the configured AWS Public Key
|
||||
// that is used to verify the PKCS#7 signature of the instance identity
|
||||
// document.
|
||||
func (b *backend) lockedAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string) (*awsPublicCert, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
return b.nonLockedAWSPublicCertificateEntry(ctx, s, certName)
|
||||
}
|
||||
|
||||
// nonLockedAWSPublicCertificateEntry reads the certificate information from
|
||||
// the storage. This method does not acquire lock before reading the storage.
|
||||
// If locking is desired, use lockedAWSPublicCertificateEntry instead.
|
||||
func (b *backend) nonLockedAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string) (*awsPublicCert, error) {
|
||||
entry, err := s.Get(ctx, "config/certificate/"+certName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var certEntry awsPublicCert
|
||||
if err := entry.DecodeJSON(&certEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Handle upgrade for certificate type
|
||||
persistNeeded := false
|
||||
if certEntry.Type == "" {
|
||||
certEntry.Type = "pkcs7"
|
||||
persistNeeded = true
|
||||
}
|
||||
|
||||
if persistNeeded {
|
||||
if err := b.nonLockedSetAWSPublicCertificateEntry(ctx, s, certName, &certEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &certEntry, nil
|
||||
}
|
||||
|
||||
// pathConfigCertificateDelete is used to delete the previously configured AWS
|
||||
// Public Key that is used to verify the PKCS#7 signature of the instance
|
||||
// identity document.
|
||||
func (b *backend) pathConfigCertificateDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
certName := data.Get("cert_name").(string)
|
||||
if certName == "" {
|
||||
return logical.ErrorResponse("missing cert_name"), nil
|
||||
}
|
||||
|
||||
return nil, req.Storage.Delete(ctx, "config/certificate/"+certName)
|
||||
}
|
||||
|
||||
// pathConfigCertificateRead is used to view the configured AWS Public Key that
|
||||
// is used to verify the PKCS#7 signature of the instance identity document.
|
||||
func (b *backend) pathConfigCertificateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
certName := data.Get("cert_name").(string)
|
||||
if certName == "" {
|
||||
return logical.ErrorResponse("missing cert_name"), nil
|
||||
}
|
||||
|
||||
certificateEntry, err := b.lockedAWSPublicCertificateEntry(ctx, req.Storage, certName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if certificateEntry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"aws_public_cert": certificateEntry.AWSPublicCert,
|
||||
"type": certificateEntry.Type,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// pathConfigCertificateCreateUpdate is used to register an AWS Public Key that
// is used to verify the PKCS#7 signature of the instance identity document.
// It validates the document type, accepts the certificate either as raw PEM
// or base64-encoded PEM, parses it to verify it, and persists the entry.
func (b *backend) pathConfigCertificateCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	certName := data.Get("cert_name").(string)
	if certName == "" {
		return logical.ErrorResponse("missing certificate name"), nil
	}

	// Serialize with other configuration mutations for the whole read-
	// modify-write sequence below.
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	// Check if there is already a certificate entry registered
	certEntry, err := b.nonLockedAWSPublicCertificateEntry(ctx, req.Storage, certName)
	if err != nil {
		return nil, err
	}
	if certEntry == nil {
		certEntry = &awsPublicCert{}
	}

	// Check if type information is provided
	certTypeRaw, ok := data.GetOk("type")
	if ok {
		certEntry.Type = strings.ToLower(certTypeRaw.(string))
	} else if req.Operation == logical.CreateOperation {
		// On create with no explicit type, fall back to the field default.
		certEntry.Type = data.Get("type").(string)
	}

	// Only the two known document types are accepted.
	switch certEntry.Type {
	case "pkcs7":
	case "identity":
	default:
		return logical.ErrorResponse(fmt.Sprintf("invalid certificate type %q", certEntry.Type)), nil
	}

	// Check if the value is provided by the client
	certStrData, ok := data.GetOk("aws_public_cert")
	if ok {
		// Accept either base64-wrapped PEM (decoded here) or raw PEM
		// (used as-is when base64 decoding fails).
		if certBytes, err := base64.StdEncoding.DecodeString(certStrData.(string)); err == nil {
			certEntry.AWSPublicCert = string(certBytes)
		} else {
			certEntry.AWSPublicCert = certStrData.(string)
		}
	} else {
		// aws_public_cert should be supplied for both create and update operations.
		// If it is not provided, throw an error.
		return logical.ErrorResponse("missing aws_public_cert"), nil
	}

	// If explicitly set to empty string, error out
	if certEntry.AWSPublicCert == "" {
		return logical.ErrorResponse("invalid aws_public_cert"), nil
	}

	// Verify the certificate by decoding it and parsing it
	publicCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert)
	if err != nil {
		return nil, err
	}
	if publicCert == nil {
		return logical.ErrorResponse("invalid certificate; failed to decode and parse certificate"), nil
	}

	// If none of the checks fail, save the provided certificate
	if err := b.nonLockedSetAWSPublicCertificateEntry(ctx, req.Storage, certName, certEntry); err != nil {
		return nil, err
	}

	return nil, nil
}
|
||||
|
||||
// awsPublicCert holds one registered AWS public certificate (PEM text) used
// to verify the PKCS#7 signature of the instance identity document, together
// with the document type ("pkcs7" or "identity") it verifies.
type awsPublicCert struct {
	// AWSPublicCert is the certificate in PEM form.
	AWSPublicCert string `json:"aws_public_cert"`
	// Type is "pkcs7" or "identity"; empty entries are upgraded to "pkcs7"
	// on read.
	Type string `json:"type"`
}

// Help text surfaced on the config/certificate endpoints.
const pathConfigCertificateSyn = `
Adds the AWS Public Key that is used to verify the PKCS#7 signature of the identity document.
`

const pathConfigCertificateDesc = `
AWS Public Key which is used to verify the PKCS#7 signature of the identity document,
varies by region. The public key(s) can be found in AWS EC2 instance metadata documentation.
The default key that is used to verify the signature is the one that is applicable for
following regions: US East (N. Virginia), US West (Oregon), US West (N. California),
EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore),
Asia Pacific (Sydney), and South America (Sao Paulo).

If the instances belongs to region other than the above, the public key(s) for the
corresponding regions should be registered using this endpoint. PKCS#7 is verified
using a collection of certificates containing the default certificate and all the
certificates that are registered using this endpoint.
`

// Help text surfaced on the config/certificates list endpoint.
const pathListCertificatesHelpSyn = `
Lists all the AWS public certificates that are registered with the backend.
`

const pathListCertificatesHelpDesc = `
Certificates will be listed by their respective names that were used during registration.
`
|
@ -1,395 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// pathConfigClient returns the path definition for config/client, which
// stores the AWS credentials and endpoint/header settings the backend uses
// when making AWS API calls.
func (b *backend) pathConfigClient() *framework.Path {
	return &framework.Path{
		Pattern: "config/client$",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"access_key": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "AWS Access Key ID for the account used to make AWS API requests.",
			},

			"secret_key": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "AWS Secret Access Key for the account used to make AWS API requests.",
			},

			"endpoint": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "URL to override the default generated endpoint for making AWS EC2 API calls.",
			},

			"iam_endpoint": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "URL to override the default generated endpoint for making AWS IAM API calls.",
			},

			"sts_endpoint": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "URL to override the default generated endpoint for making AWS STS API calls.",
			},

			"sts_region": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "The region ID for the sts_endpoint, if set.",
			},

			"iam_server_id_header_value": {
				Type:        framework.TypeString,
				Default:     "",
				Description: "Value to require in the X-Vault-AWS-IAM-Server-ID request header",
			},

			"allowed_sts_header_values": {
				Type:        framework.TypeCommaStringSlice,
				Default:     nil,
				Description: "List of additional headers that are allowed to be in AWS STS request headers",
			},

			"max_retries": {
				Type:        framework.TypeInt,
				Default:     aws.UseServiceDefaultRetries,
				Description: "Maximum number of retries for recoverable exceptions of AWS APIs",
			},
		},

		// ExistenceCheck steers the framework between Create and Update.
		ExistenceCheck: b.pathConfigClientExistenceCheck,

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.CreateOperation: &framework.PathOperation{
				Callback: b.pathConfigClientCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "client",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigClientCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "client",
				},
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathConfigClientDelete,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "client-configuration",
				},
			},
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigClientRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "client-configuration",
				},
			},
		},

		HelpSynopsis:    pathConfigClientHelpSyn,
		HelpDescription: pathConfigClientHelpDesc,
	}
}
|
||||
|
||||
// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
|
||||
// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
|
||||
func (b *backend) pathConfigClientExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
entry, err := b.lockedClientConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
// Fetch the client configuration required to access the AWS API, after acquiring an exclusive lock.
|
||||
func (b *backend) lockedClientConfigEntry(ctx context.Context, s logical.Storage) (*clientConfig, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
return b.nonLockedClientConfigEntry(ctx, s)
|
||||
}
|
||||
|
||||
// Fetch the client configuration required to access the AWS API.
|
||||
func (b *backend) nonLockedClientConfigEntry(ctx context.Context, s logical.Storage) (*clientConfig, error) {
|
||||
entry, err := s.Get(ctx, "config/client")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result clientConfig
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigClientRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
clientConfig, err := b.lockedClientConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if clientConfig == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"access_key": clientConfig.AccessKey,
|
||||
"endpoint": clientConfig.Endpoint,
|
||||
"iam_endpoint": clientConfig.IAMEndpoint,
|
||||
"sts_endpoint": clientConfig.STSEndpoint,
|
||||
"sts_region": clientConfig.STSRegion,
|
||||
"iam_server_id_header_value": clientConfig.IAMServerIdHeaderValue,
|
||||
"max_retries": clientConfig.MaxRetries,
|
||||
"allowed_sts_header_values": clientConfig.AllowedSTSHeaderValues,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigClientDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
if err := req.Storage.Delete(ctx, "config/client"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove all the cached EC2 client objects in the backend.
|
||||
b.flushCachedEC2Clients()
|
||||
|
||||
// Remove all the cached EC2 client objects in the backend.
|
||||
b.flushCachedIAMClients()
|
||||
|
||||
// unset the cached default AWS account ID
|
||||
b.defaultAWSAccountID = ""
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// pathConfigClientCreateUpdate is used to register the 'aws_secret_key' and 'aws_access_key'
// that can be used to interact with AWS EC2 API. It also stores endpoint
// overrides and header settings, persisting only when something changed and
// flushing cached AWS clients when credential-affecting fields changed.
func (b *backend) pathConfigClientCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	configEntry, err := b.nonLockedClientConfigEntry(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	if configEntry == nil {
		configEntry = &clientConfig{}
	}

	// changedCreds is whether we need to flush the cached AWS clients and store in the backend
	changedCreds := false
	// changedOtherConfig is whether other config has changed that requires storing in the backend
	// but does not require flushing the cached clients
	changedOtherConfig := false

	accessKeyStr, ok := data.GetOk("access_key")
	if ok {
		if configEntry.AccessKey != accessKeyStr.(string) {
			changedCreds = true
			configEntry.AccessKey = accessKeyStr.(string)
		}
	} else if req.Operation == logical.CreateOperation {
		// Use the default
		configEntry.AccessKey = data.Get("access_key").(string)
	}

	secretKeyStr, ok := data.GetOk("secret_key")
	if ok {
		if configEntry.SecretKey != secretKeyStr.(string) {
			changedCreds = true
			configEntry.SecretKey = secretKeyStr.(string)
		}
	} else if req.Operation == logical.CreateOperation {
		configEntry.SecretKey = data.Get("secret_key").(string)
	}

	endpointStr, ok := data.GetOk("endpoint")
	if ok {
		if configEntry.Endpoint != endpointStr.(string) {
			changedCreds = true
			configEntry.Endpoint = endpointStr.(string)
		}
	} else if req.Operation == logical.CreateOperation {
		configEntry.Endpoint = data.Get("endpoint").(string)
	}

	iamEndpointStr, ok := data.GetOk("iam_endpoint")
	if ok {
		if configEntry.IAMEndpoint != iamEndpointStr.(string) {
			changedCreds = true
			configEntry.IAMEndpoint = iamEndpointStr.(string)
		}
	} else if req.Operation == logical.CreateOperation {
		configEntry.IAMEndpoint = data.Get("iam_endpoint").(string)
	}

	stsEndpointStr, ok := data.GetOk("sts_endpoint")
	if ok {
		if configEntry.STSEndpoint != stsEndpointStr.(string) {
			// We don't directly cache STS clients as they are never directly used.
			// However, they are potentially indirectly used as credential providers
			// for the EC2 and IAM clients, and thus we would be indirectly caching
			// them there. So, if we change the STS endpoint, we should flush those
			// cached clients.
			changedCreds = true
			configEntry.STSEndpoint = stsEndpointStr.(string)
		}
	} else if req.Operation == logical.CreateOperation {
		configEntry.STSEndpoint = data.Get("sts_endpoint").(string)
	}

	stsRegionStr, ok := data.GetOk("sts_region")
	if ok {
		if configEntry.STSRegion != stsRegionStr.(string) {
			// Region is used when building STS clients. As such, all the comments
			// regarding the sts_endpoint changing apply here as well.
			changedCreds = true
			configEntry.STSRegion = stsRegionStr.(string)
		}
	}

	headerValStr, ok := data.GetOk("iam_server_id_header_value")
	if ok {
		if configEntry.IAMServerIdHeaderValue != headerValStr.(string) {
			// NOT setting changedCreds here, since this isn't really cached
			configEntry.IAMServerIdHeaderValue = headerValStr.(string)
			changedOtherConfig = true
		}
	} else if req.Operation == logical.CreateOperation {
		configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string)
	}

	aHeadersValStr, ok := data.GetOk("allowed_sts_header_values")
	if ok {
		// Canonicalize the header names before comparing/storing so later
		// membership checks match regardless of the caller's casing.
		aHeadersValSl := aHeadersValStr.([]string)
		for i, v := range aHeadersValSl {
			aHeadersValSl[i] = textproto.CanonicalMIMEHeaderKey(v)
		}
		if !strutil.EquivalentSlices(configEntry.AllowedSTSHeaderValues, aHeadersValSl) {
			// NOT setting changedCreds here, since this isn't really cached
			configEntry.AllowedSTSHeaderValues = aHeadersValSl
			changedOtherConfig = true
		}
	} else if req.Operation == logical.CreateOperation {
		ah, ok := data.GetOk("allowed_sts_header_values")
		if ok {
			configEntry.AllowedSTSHeaderValues = ah.([]string)
		}
	}

	maxRetriesInt, ok := data.GetOk("max_retries")
	if ok {
		configEntry.MaxRetries = maxRetriesInt.(int)
		changedOtherConfig = true
	} else if req.Operation == logical.CreateOperation {
		configEntry.MaxRetries = data.Get("max_retries").(int)
	}

	// Since this endpoint supports both create operation and update operation,
	// the error checks for access_key and secret_key not being set are not present.
	// This allows calling this endpoint multiple times to provide the values.
	// Hence, the readers of this endpoint should do the validation on
	// the validation of keys before using them.
	entry, err := b.configClientToEntry(configEntry)
	if err != nil {
		return nil, err
	}

	// Persist only when something actually changed (or on first create).
	if changedCreds || changedOtherConfig || req.Operation == logical.CreateOperation {
		if err := req.Storage.Put(ctx, entry); err != nil {
			return nil, err
		}
	}

	// Credential-affecting changes invalidate every cached AWS client.
	if changedCreds {
		b.flushCachedEC2Clients()
		b.flushCachedIAMClients()
		b.defaultAWSAccountID = ""
	}

	return nil, nil
}
|
||||
|
||||
// configClientToEntry allows the client config code to encapsulate its
|
||||
// knowledge about where its config is stored. It also provides a way
|
||||
// for other endpoints to update the config properly.
|
||||
func (b *backend) configClientToEntry(conf *clientConfig) (*logical.StorageEntry, error) {
|
||||
entry, err := logical.StorageEntryJSON("config/client", conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// clientConfig holds the 'aws_access_key' and 'aws_secret_key' (plus
// endpoint, region, header, and retry settings) that are required to
// interact with the AWS EC2 API.
type clientConfig struct {
	AccessKey string `json:"access_key"`
	SecretKey string `json:"secret_key"`
	// Endpoint overrides the generated EC2 API endpoint when non-empty.
	Endpoint string `json:"endpoint"`
	// IAMEndpoint overrides the generated IAM API endpoint when non-empty.
	IAMEndpoint string `json:"iam_endpoint"`
	// STSEndpoint overrides the generated STS API endpoint when non-empty.
	STSEndpoint string `json:"sts_endpoint"`
	// STSRegion is the region ID for STSEndpoint, if set.
	STSRegion string `json:"sts_region"`
	// IAMServerIdHeaderValue, when set, is required in the
	// X-Vault-AWS-IAM-Server-ID request header.
	IAMServerIdHeaderValue string `json:"iam_server_id_header_value"`
	// AllowedSTSHeaderValues lists extra headers permitted in STS requests,
	// stored in canonical MIME header form.
	AllowedSTSHeaderValues []string `json:"allowed_sts_header_values"`
	// MaxRetries bounds retries for recoverable AWS API errors.
	MaxRetries int `json:"max_retries"`
}
|
||||
|
||||
func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error {
|
||||
for k := range headers {
|
||||
h := textproto.CanonicalMIMEHeaderKey(k)
|
||||
if strings.HasPrefix(h, amzHeaderPrefix) &&
|
||||
!strutil.StrListContains(defaultAllowedSTSRequestHeaders, h) &&
|
||||
!strutil.StrListContains(c.AllowedSTSHeaderValues, h) {
|
||||
return errors.New("invalid request header: " + k)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Help text surfaced on the config/client endpoint.
const pathConfigClientHelpSyn = `
Configure AWS IAM credentials that are used to query instance and role details from the AWS API.
`

const pathConfigClientHelpDesc = `
The aws-ec2 auth method makes AWS API queries to retrieve information
regarding EC2 instances that perform login operations. The 'aws_secret_key' and
'aws_access_key' parameters configured here should map to an AWS IAM user that
has permission to make the following API queries:

* ec2:DescribeInstances
* iam:GetInstanceProfile (if IAM Role binding is used)
`
|
@ -1,131 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func TestBackend_pathConfigClient(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
storage := &logical.InmemStorage{}
|
||||
config.StorageView = storage
|
||||
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = b.Setup(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// make sure we start with empty roles, which gives us confidence that the read later
|
||||
// actually is the two roles we created
|
||||
resp, err := b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// at this point, resp == nil is valid as no client config exists
|
||||
// if resp != nil, then resp.Data must have EndPoint and IAMServerIdHeaderValue as nil
|
||||
if resp != nil {
|
||||
if resp.IsError() {
|
||||
t.Fatalf("failed to read client config entry")
|
||||
} else if resp.Data["endpoint"] != nil || resp.Data["iam_server_id_header_value"] != nil {
|
||||
t.Fatalf("returned endpoint or iam_server_id_header_value non-nil")
|
||||
}
|
||||
}
|
||||
|
||||
data := map[string]interface{}{
|
||||
"sts_endpoint": "https://my-custom-sts-endpoint.example.com",
|
||||
"sts_region": "us-east-2",
|
||||
"iam_server_id_header_value": "vault_server_identification_314159",
|
||||
}
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.CreateOperation,
|
||||
Path: "config/client",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatal("failed to create the client config entry")
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil || resp.IsError() {
|
||||
t.Fatal("failed to read the client config entry")
|
||||
}
|
||||
if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
|
||||
t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
|
||||
data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
|
||||
}
|
||||
if resp.Data["sts_endpoint"] != data["sts_endpoint"] {
|
||||
t.Fatalf("expected sts_endpoint: '%#v'; returned sts_endpoint: '%#v'",
|
||||
data["sts_endpoint"], resp.Data["sts_endpoint"])
|
||||
}
|
||||
if resp.Data["sts_region"] != data["sts_region"] {
|
||||
t.Fatalf("expected sts_region: '%#v'; returned sts_region: '%#v'",
|
||||
data["sts_region"], resp.Data["sts_region"])
|
||||
}
|
||||
|
||||
data = map[string]interface{}{
|
||||
"sts_endpoint": "https://my-custom-sts-endpoint2.example.com",
|
||||
"sts_region": "us-west-1",
|
||||
"iam_server_id_header_value": "vault_server_identification_2718281",
|
||||
}
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/client",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatal("failed to update the client config entry")
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil || resp.IsError() {
|
||||
t.Fatal("failed to read the client config entry")
|
||||
}
|
||||
if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] {
|
||||
t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'",
|
||||
data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"])
|
||||
}
|
||||
if resp.Data["sts_endpoint"] != data["sts_endpoint"] {
|
||||
t.Fatalf("expected sts_endpoint: '%#v'; returned sts_endpoint: '%#v'",
|
||||
data["sts_endpoint"], resp.Data["sts_endpoint"])
|
||||
}
|
||||
if resp.Data["sts_region"] != data["sts_region"] {
|
||||
t.Fatalf("expected sts_region: '%#v'; returned sts_region: '%#v'",
|
||||
data["sts_region"], resp.Data["sts_region"])
|
||||
}
|
||||
}
|
@ -1,221 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/authmetadata"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
var (
|
||||
// iamAuthMetadataFields is a list of the default auth metadata
|
||||
// added to tokens during login. The default alias type used
|
||||
// by this back-end is the role ID. Subsequently, the default
|
||||
// fields included are expected to have a low rate of change
|
||||
// when the role ID is in use.
|
||||
iamAuthMetadataFields = &authmetadata.Fields{
|
||||
FieldName: "iam_metadata",
|
||||
Default: []string{
|
||||
"account_id",
|
||||
"auth_type",
|
||||
},
|
||||
AvailableToAdd: []string{
|
||||
"canonical_arn",
|
||||
"client_arn",
|
||||
"client_user_id",
|
||||
"inferred_aws_region",
|
||||
"inferred_entity_id",
|
||||
"inferred_entity_type",
|
||||
},
|
||||
}
|
||||
|
||||
// ec2AuthMetadataFields is a list of the default auth metadata
|
||||
// added to tokens during login. The default alias type used
|
||||
// by this back-end is the role ID. Subsequently, the default
|
||||
// fields included are expected to have a low rate of change
|
||||
// when the role ID is in use.
|
||||
ec2AuthMetadataFields = &authmetadata.Fields{
|
||||
FieldName: "ec2_metadata",
|
||||
Default: []string{
|
||||
"account_id",
|
||||
"auth_type",
|
||||
},
|
||||
AvailableToAdd: []string{
|
||||
"ami_id",
|
||||
"instance_id",
|
||||
"region",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func (b *backend) pathConfigIdentity() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "config/identity$",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
},
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"iam_alias": {
|
||||
Type: framework.TypeString,
|
||||
Default: identityAliasIAMUniqueID,
|
||||
Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasRoleID),
|
||||
},
|
||||
iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields),
|
||||
"ec2_alias": {
|
||||
Type: framework.TypeString,
|
||||
Default: identityAliasEC2InstanceID,
|
||||
Description: fmt.Sprintf("Configure how the AWS auth method generates entity alias when using EC2 auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID, identityAliasRoleID),
|
||||
},
|
||||
ec2AuthMetadataFields.FieldName: authmetadata.FieldSchema(ec2AuthMetadataFields),
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.ReadOperation: &framework.PathOperation{
|
||||
Callback: pathConfigIdentityRead,
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationSuffix: "identity-integration-configuration",
|
||||
},
|
||||
},
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: pathConfigIdentityUpdate,
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationVerb: "configure",
|
||||
OperationSuffix: "identity-integration",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathConfigIdentityHelpSyn,
|
||||
HelpDescription: pathConfigIdentityHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
func identityConfigEntry(ctx context.Context, s logical.Storage) (*identityConfig, error) {
|
||||
entryRaw, err := s.Get(ctx, "config/identity")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry := &identityConfig{
|
||||
IAMAuthMetadataHandler: authmetadata.NewHandler(iamAuthMetadataFields),
|
||||
EC2AuthMetadataHandler: authmetadata.NewHandler(ec2AuthMetadataFields),
|
||||
}
|
||||
if entryRaw != nil {
|
||||
if err := entryRaw.DecodeJSON(entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if entry.IAMAlias == "" {
|
||||
entry.IAMAlias = identityAliasRoleID
|
||||
}
|
||||
|
||||
if entry.EC2Alias == "" {
|
||||
entry.EC2Alias = identityAliasRoleID
|
||||
}
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
func pathConfigIdentityRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
|
||||
config, err := identityConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"iam_alias": config.IAMAlias,
|
||||
iamAuthMetadataFields.FieldName: config.IAMAuthMetadataHandler.AuthMetadata(),
|
||||
"ec2_alias": config.EC2Alias,
|
||||
ec2AuthMetadataFields.FieldName: config.EC2AuthMetadataHandler.AuthMetadata(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
config, err := identityConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iamAliasRaw, ok := data.GetOk("iam_alias")
|
||||
if ok {
|
||||
iamAlias := iamAliasRaw.(string)
|
||||
allowedIAMAliasValues := []string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn}
|
||||
if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) {
|
||||
return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil
|
||||
}
|
||||
config.IAMAlias = iamAlias
|
||||
}
|
||||
|
||||
ec2AliasRaw, ok := data.GetOk("ec2_alias")
|
||||
if ok {
|
||||
ec2Alias := ec2AliasRaw.(string)
|
||||
allowedEC2AliasValues := []string{identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID}
|
||||
if !strutil.StrListContains(allowedEC2AliasValues, ec2Alias) {
|
||||
return logical.ErrorResponse(fmt.Sprintf("ec2_alias of %q not in set of allowed values: %v", ec2Alias, allowedEC2AliasValues)), nil
|
||||
}
|
||||
config.EC2Alias = ec2Alias
|
||||
}
|
||||
if err := config.IAMAuthMetadataHandler.ParseAuthMetadata(data); err != nil {
|
||||
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
|
||||
}
|
||||
if err := config.EC2AuthMetadataHandler.ParseAuthMetadata(data); err != nil {
|
||||
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON("config/identity", config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = req.Storage.Put(ctx, entry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type identityConfig struct {
|
||||
IAMAlias string `json:"iam_alias"`
|
||||
IAMAuthMetadataHandler *authmetadata.Handler `json:"iam_auth_metadata_handler"`
|
||||
EC2Alias string `json:"ec2_alias"`
|
||||
EC2AuthMetadataHandler *authmetadata.Handler `json:"ec2_auth_metadata_handler"`
|
||||
}
|
||||
|
||||
const (
|
||||
identityAliasIAMUniqueID = "unique_id"
|
||||
identityAliasIAMFullArn = "full_arn"
|
||||
identityAliasEC2InstanceID = "instance_id"
|
||||
identityAliasEC2ImageID = "image_id"
|
||||
identityAliasRoleID = "role_id"
|
||||
)
|
||||
|
||||
const pathConfigIdentityHelpSyn = `
|
||||
Configure the way the AWS auth method interacts with the identity store
|
||||
`
|
||||
|
||||
const pathConfigIdentityHelpDesc = `
|
||||
The AWS auth backend defaults to aliasing an IAM principal's unique ID to the
|
||||
identity store. This path allows users to change how Vault configures the
|
||||
mapping to Identity aliases for more flexibility.
|
||||
|
||||
You can set the iam_alias parameter to one of the following values:
|
||||
|
||||
* 'unique_id': This retains Vault's default behavior
|
||||
* 'full_arn': This maps the full authenticated ARN to the identity alias, e.g.,
|
||||
"arn:aws:sts::<account_id>:assumed-role/<role_name>/<role_session_name>
|
||||
This is useful where you have an identity provder that sets role_session_name
|
||||
to a known value of a person, such as a username or email address, and allows
|
||||
you to map those roles back to entries in your identity store.
|
||||
`
|
@ -1,167 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func TestBackend_pathConfigIdentity(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
storage := &logical.InmemStorage{}
|
||||
config.StorageView = storage
|
||||
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = b.Setup(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check if default values are returned before setting the configuration
|
||||
resp, err := b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
if resp.Data["iam_alias"] == nil || resp.Data["iam_alias"] != identityAliasRoleID {
|
||||
t.Fatalf("bad: iam_alias; expected: %q, actual: %q", identityAliasIAMUniqueID, resp.Data["iam_alias"])
|
||||
}
|
||||
if resp.Data["ec2_alias"] == nil || resp.Data["ec2_alias"] != identityAliasRoleID {
|
||||
t.Fatalf("bad: ec2_alias; expected: %q, actual: %q", identityAliasIAMUniqueID, resp.Data["ec2_alias"])
|
||||
}
|
||||
|
||||
// Invalid value for iam_alias
|
||||
data := map[string]interface{}{
|
||||
"iam_alias": "invalid",
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatalf("nil response from invalid config/identity request")
|
||||
}
|
||||
if !resp.IsError() {
|
||||
t.Fatalf("received non-error response from invalid config/identity request: %#v", resp)
|
||||
}
|
||||
|
||||
// Valid value for iam_alias but invalid value for ec2_alias
|
||||
data["iam_alias"] = identityAliasIAMFullArn
|
||||
data["ec2_alias"] = "invalid"
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatalf("nil response from invalid config/identity request")
|
||||
}
|
||||
if !resp.IsError() {
|
||||
t.Fatalf("received non-error response from invalid config/identity request: %#v", resp)
|
||||
}
|
||||
|
||||
// Valid value for both iam_alias and ec2_alias
|
||||
data["ec2_alias"] = identityAliasEC2ImageID
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
|
||||
// Check if both values are stored properly
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
if resp.Data["iam_alias"] != identityAliasIAMFullArn {
|
||||
t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp.Data["iam_alias"])
|
||||
}
|
||||
if resp.Data["ec2_alias"] != identityAliasEC2ImageID {
|
||||
t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2ImageID, resp.Data["ec2_alias"])
|
||||
}
|
||||
|
||||
// Modify one field and ensure that the other one is unchanged
|
||||
data["ec2_alias"] = identityAliasEC2InstanceID
|
||||
delete(data, "iam_alias")
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
if resp.Data["iam_alias"] != identityAliasIAMFullArn {
|
||||
t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp.Data["iam_alias"])
|
||||
}
|
||||
if resp.Data["ec2_alias"] != identityAliasEC2InstanceID {
|
||||
t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2ImageID, resp.Data["ec2_alias"])
|
||||
}
|
||||
|
||||
// Update both iam_alias and ec2_alias
|
||||
data["iam_alias"] = identityAliasIAMUniqueID
|
||||
data["ec2_alias"] = identityAliasEC2InstanceID
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Data: data,
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
|
||||
// Check if updates were stored properly
|
||||
resp, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v\nresp: %#v", err, resp)
|
||||
}
|
||||
if resp.Data["iam_alias"] != identityAliasIAMUniqueID {
|
||||
t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp.Data["iam_alias"])
|
||||
}
|
||||
if resp.Data["ec2_alias"] != identityAliasEC2InstanceID {
|
||||
t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2ImageID, resp.Data["ec2_alias"])
|
||||
}
|
||||
}
|
@ -1,218 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func (b *backend) pathConfigRotateRoot() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "config/rotate-root",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationVerb: "rotate",
|
||||
OperationSuffix: "root-credentials",
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathConfigRotateRootUpdate,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathConfigRotateRootHelpSyn,
|
||||
HelpDescription: pathConfigRotateRootHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// First get the AWS key and secret and validate that we _can_ rotate them.
|
||||
// We need the read lock here to prevent anything else from mutating it while we're using it.
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
clientConf, err := b.nonLockedClientConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if clientConf == nil {
|
||||
return logical.ErrorResponse(`can't update client config because it's unset`), nil
|
||||
}
|
||||
if clientConf.AccessKey == "" {
|
||||
return logical.ErrorResponse("can't update access_key because it's unset"), nil
|
||||
}
|
||||
if clientConf.SecretKey == "" {
|
||||
return logical.ErrorResponse("can't update secret_key because it's unset"), nil
|
||||
}
|
||||
|
||||
// Getting our client through the b.clientIAM method requires values retrieved through
|
||||
// the user providing an ARN, which we don't have here, so let's just directly
|
||||
// make what we need.
|
||||
staticCreds := &credentials.StaticProvider{
|
||||
Value: credentials.Value{
|
||||
AccessKeyID: clientConf.AccessKey,
|
||||
SecretAccessKey: clientConf.SecretKey,
|
||||
},
|
||||
}
|
||||
// By default, leave the iamEndpoint nil to tell AWS it's unset. However, if it is
|
||||
// configured, populate the pointer.
|
||||
var iamEndpoint *string
|
||||
if clientConf.IAMEndpoint != "" {
|
||||
iamEndpoint = aws.String(clientConf.IAMEndpoint)
|
||||
}
|
||||
|
||||
// Attempt to retrieve the region, error out if no region is provided.
|
||||
region, err := awsutil.GetRegion("")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error retrieving region: %w", err)
|
||||
}
|
||||
|
||||
awsConfig := &aws.Config{
|
||||
Credentials: credentials.NewCredentials(staticCreds),
|
||||
Endpoint: iamEndpoint,
|
||||
|
||||
// Generally speaking, GetRegion will use the Vault server's region. However, if this
|
||||
// needs to be overridden, an easy way would be to set the AWS_DEFAULT_REGION on the Vault server
|
||||
// to the desired region. If that's still insufficient for someone's use case, in the future we
|
||||
// could add the ability to specify the region either on the client config or as part of the
|
||||
// inbound rotation call.
|
||||
Region: aws.String(region),
|
||||
|
||||
// Prevents races.
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
}
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iamClient := getIAMClient(sess)
|
||||
|
||||
// Get the current user's name since it's required to create an access key.
|
||||
// Empty input means get the current user.
|
||||
var getUserInput iam.GetUserInput
|
||||
getUserRes, err := iamClient.GetUserWithContext(ctx, &getUserInput)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error calling GetUser: %w", err)
|
||||
}
|
||||
if getUserRes == nil {
|
||||
return nil, fmt.Errorf("nil response from GetUser")
|
||||
}
|
||||
if getUserRes.User == nil {
|
||||
return nil, fmt.Errorf("nil user returned from GetUser")
|
||||
}
|
||||
if getUserRes.User.UserName == nil {
|
||||
return nil, fmt.Errorf("nil UserName returned from GetUser")
|
||||
}
|
||||
|
||||
// Create the new access key and secret.
|
||||
createAccessKeyInput := iam.CreateAccessKeyInput{
|
||||
UserName: getUserRes.User.UserName,
|
||||
}
|
||||
createAccessKeyRes, err := iamClient.CreateAccessKeyWithContext(ctx, &createAccessKeyInput)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error calling CreateAccessKey: %w", err)
|
||||
}
|
||||
if createAccessKeyRes.AccessKey == nil {
|
||||
return nil, fmt.Errorf("nil response from CreateAccessKey")
|
||||
}
|
||||
if createAccessKeyRes.AccessKey.AccessKeyId == nil || createAccessKeyRes.AccessKey.SecretAccessKey == nil {
|
||||
return nil, fmt.Errorf("nil AccessKeyId or SecretAccessKey returned from CreateAccessKey")
|
||||
}
|
||||
|
||||
// We're about to attempt to store the newly created key and secret, but just in case we can't,
|
||||
// let's clean up after ourselves.
|
||||
storedNewConf := false
|
||||
var errs error
|
||||
defer func() {
|
||||
if storedNewConf {
|
||||
return
|
||||
}
|
||||
// Attempt to delete the access key and secret we created but couldn't store and use.
|
||||
deleteAccessKeyInput := iam.DeleteAccessKeyInput{
|
||||
AccessKeyId: createAccessKeyRes.AccessKey.AccessKeyId,
|
||||
UserName: getUserRes.User.UserName,
|
||||
}
|
||||
if _, err := iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil {
|
||||
// Include this error in the errs returned by this method.
|
||||
errs = multierror.Append(errs, fmt.Errorf("error deleting newly created but unstored access key ID %s: %s", *createAccessKeyRes.AccessKey.AccessKeyId, err))
|
||||
}
|
||||
}()
|
||||
|
||||
oldAccessKey := clientConf.AccessKey
|
||||
clientConf.AccessKey = *createAccessKeyRes.AccessKey.AccessKeyId
|
||||
clientConf.SecretKey = *createAccessKeyRes.AccessKey.SecretAccessKey
|
||||
|
||||
// Now get ready to update storage, doing everything beforehand so we can minimize how long
|
||||
// we need to hold onto the lock.
|
||||
newEntry, err := b.configClientToEntry(clientConf)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("error generating new client config JSON: %w", err))
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
// Someday we may want to allow the user to send a number of seconds to wait here
|
||||
// before deleting the previous access key to allow work to complete. That would allow
|
||||
// AWS, which is eventually consistent, to finish populating the new key in all places.
|
||||
if err := req.Storage.Put(ctx, newEntry); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("error saving new client config: %w", err))
|
||||
return nil, errs
|
||||
}
|
||||
storedNewConf = true
|
||||
|
||||
// Previous cached clients need to be cleared because they may have been made using
|
||||
// the soon-to-be-obsolete credentials.
|
||||
b.IAMClientsMap = make(map[string]map[string]*iam.IAM)
|
||||
b.EC2ClientsMap = make(map[string]map[string]*ec2.EC2)
|
||||
|
||||
// Now to clean up the old key.
|
||||
deleteAccessKeyInput := iam.DeleteAccessKeyInput{
|
||||
AccessKeyId: aws.String(oldAccessKey),
|
||||
UserName: getUserRes.User.UserName,
|
||||
}
|
||||
if _, err = iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("error deleting old access key ID %s: %w", oldAccessKey, err))
|
||||
return nil, errs
|
||||
}
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"access_key": clientConf.AccessKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getIAMClient allows us to change how an IAM client is created
|
||||
// during testing. The AWS SDK doesn't easily lend itself to testing
|
||||
// using a Go httptest server because if you inject a test URL into
|
||||
// the config, the client strips important information about which
|
||||
// endpoint it's hitting. Per
|
||||
// https://aws.amazon.com/blogs/developer/mocking-out-then-aws-sdk-for-go-for-unit-testing/,
|
||||
// this is the recommended approach.
|
||||
var getIAMClient = func(sess *session.Session) iamiface.IAMAPI {
|
||||
return iam.New(sess)
|
||||
}
|
||||
|
||||
const pathConfigRotateRootHelpSyn = `
|
||||
Request to rotate the AWS credentials used by Vault
|
||||
`
|
||||
|
||||
const pathConfigRotateRootHelpDesc = `
|
||||
This path attempts to rotate the AWS credentials used by Vault for this mount.
|
||||
It is only valid if Vault has been configured to use AWS IAM credentials via the
|
||||
config/client endpoint.
|
||||
`
|
@ -1,98 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
type mockIAMClient awsutil.MockIAM
|
||||
|
||||
func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) {
|
||||
return (*awsutil.MockIAM)(m).GetUser(input)
|
||||
}
|
||||
|
||||
func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) {
|
||||
return (*awsutil.MockIAM)(m).CreateAccessKey(input)
|
||||
}
|
||||
|
||||
func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) {
|
||||
return (*awsutil.MockIAM)(m).DeleteAccessKey(input)
|
||||
}
|
||||
|
||||
func TestPathConfigRotateRoot(t *testing.T) {
|
||||
getIAMClient = func(sess *session.Session) iamiface.IAMAPI {
|
||||
return &mockIAMClient{
|
||||
CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{
|
||||
AccessKey: &iam.AccessKey{
|
||||
AccessKeyId: aws.String("fizz2"),
|
||||
SecretAccessKey: aws.String("buzz2"),
|
||||
},
|
||||
},
|
||||
GetUserOutput: &iam.GetUserOutput{
|
||||
User: &iam.User{
|
||||
UserName: aws.String("ellen"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
config := logical.TestBackendConfig()
|
||||
logical.TestBackendConfig()
|
||||
storage := &logical.InmemStorage{}
|
||||
config.StorageView = storage
|
||||
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clientConf := &clientConfig{
|
||||
AccessKey: "fizz1",
|
||||
SecretKey: "buzz1",
|
||||
}
|
||||
entry, err := logical.StorageEntryJSON("config/client", clientConf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := storage.Put(ctx, entry); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/rotate-root",
|
||||
Storage: storage,
|
||||
}
|
||||
resp, err := b.HandleRequest(ctx, req)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: resp: %#v\nerr:%v", resp, err)
|
||||
}
|
||||
if resp == nil {
|
||||
t.Fatal("expected nil response to represent a 204")
|
||||
}
|
||||
if resp.Data == nil {
|
||||
t.Fatal("expected resp.Data")
|
||||
}
|
||||
if resp.Data["access_key"].(string) != "fizz2" {
|
||||
t.Fatalf("expected new access key buzz2 but received %s", resp.Data["access_key"])
|
||||
}
|
||||
newClientConf, err := b.nonLockedClientConfigEntry(ctx, req.Storage)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.Data["access_key"].(string) != newClientConf.AccessKey {
|
||||
t.Fatalf("expected new access key buzz2 to be saved to storage but receieved %s", clientConf.AccessKey)
|
||||
}
|
||||
}
|
@ -1,274 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// awsStsEntry is used to store details of an STS role for assumption
|
||||
type awsStsEntry struct {
|
||||
StsRole string `json:"sts_role"`
|
||||
}
|
||||
|
||||
func (b *backend) pathListSts() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "config/sts/?",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "sts-role-relationships",
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.ListOperation: &framework.PathOperation{
|
||||
Callback: b.pathStsList,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathListStsHelpSyn,
|
||||
HelpDescription: pathListStsHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// pathConfigSts returns the path definition for config/sts/<account_id>,
// which associates an STS role ARN with a specific AWS account ID.
func (b *backend) pathConfigSts() *framework.Path {
	return &framework.Path{
		Pattern: "config/sts/" + framework.GenericNameRegex("account_id"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
			OperationSuffix: "sts-role",
		},

		Fields: map[string]*framework.FieldSchema{
			"account_id": {
				Type: framework.TypeString,
				Description: `AWS account ID to be associated with STS role. If set,
Vault will use assumed credentials to verify any login attempts from EC2
instances in this account.`,
			},
			"sts_role": {
				Type: framework.TypeString,
				Description: `AWS ARN for STS role to be assumed when interacting with the account specified.
The Vault server must have permissions to assume this role.`,
			},
		},

		// The existence check routes requests to CreateOperation or
		// UpdateOperation depending on whether an entry is already stored.
		ExistenceCheck: b.pathConfigStsExistenceCheck,

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.CreateOperation: &framework.PathOperation{
				Callback: b.pathConfigStsCreateUpdate,
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigStsCreateUpdate,
			},
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigStsRead,
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathConfigStsDelete,
			},
		},

		HelpSynopsis:    pathConfigStsSyn,
		HelpDescription: pathConfigStsDesc,
	}
}
|
||||
|
||||
// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
|
||||
// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
|
||||
func (b *backend) pathConfigStsExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
accountID := data.Get("account_id").(string)
|
||||
if accountID == "" {
|
||||
return false, fmt.Errorf("missing account_id")
|
||||
}
|
||||
|
||||
entry, err := b.lockedAwsStsEntry(ctx, req.Storage, accountID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
// pathStsList is used to list all the AWS STS role configurations
|
||||
func (b *backend) pathStsList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
sts, err := req.Storage.List(ctx, "config/sts/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return logical.ListResponse(sts), nil
|
||||
}
|
||||
|
||||
// nonLockedSetAwsStsEntry creates or updates an STS role association with the given accountID
|
||||
// This method does not acquire the write lock before creating or updating. If locking is
|
||||
// desired, use lockedSetAwsStsEntry instead
|
||||
func (b *backend) nonLockedSetAwsStsEntry(ctx context.Context, s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
|
||||
if accountID == "" {
|
||||
return fmt.Errorf("missing AWS account ID")
|
||||
}
|
||||
|
||||
if stsEntry == nil {
|
||||
return fmt.Errorf("missing AWS STS Role ARN")
|
||||
}
|
||||
|
||||
entry, err := logical.StorageEntryJSON("config/sts/"+accountID, stsEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if entry == nil {
|
||||
return fmt.Errorf("failed to create storage entry for AWS STS configuration")
|
||||
}
|
||||
|
||||
return s.Put(ctx, entry)
|
||||
}
|
||||
|
||||
// lockedSetAwsStsEntry creates or updates an STS role association with the given accountID
|
||||
// This method acquires the write lock before creating or updating the STS entry.
|
||||
func (b *backend) lockedSetAwsStsEntry(ctx context.Context, s logical.Storage, accountID string, stsEntry *awsStsEntry) error {
|
||||
if accountID == "" {
|
||||
return fmt.Errorf("missing AWS account ID")
|
||||
}
|
||||
|
||||
if stsEntry == nil {
|
||||
return fmt.Errorf("missing sts entry")
|
||||
}
|
||||
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
return b.nonLockedSetAwsStsEntry(ctx, s, accountID, stsEntry)
|
||||
}
|
||||
|
||||
// nonLockedAwsStsEntry returns the STS role associated with the given accountID.
|
||||
// This method does not acquire the read lock before returning information. If locking is
|
||||
// desired, use lockedAwsStsEntry instead
|
||||
func (b *backend) nonLockedAwsStsEntry(ctx context.Context, s logical.Storage, accountID string) (*awsStsEntry, error) {
|
||||
entry, err := s.Get(ctx, "config/sts/"+accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var stsEntry awsStsEntry
|
||||
if err := entry.DecodeJSON(&stsEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stsEntry, nil
|
||||
}
|
||||
|
||||
// lockedAwsStsEntry returns the STS role associated with the given accountID.
|
||||
// This method acquires the read lock before returning the association.
|
||||
func (b *backend) lockedAwsStsEntry(ctx context.Context, s logical.Storage, accountID string) (*awsStsEntry, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
return b.nonLockedAwsStsEntry(ctx, s, accountID)
|
||||
}
|
||||
|
||||
// pathConfigStsRead is used to return information about an STS role/AWS accountID association
|
||||
func (b *backend) pathConfigStsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
accountID := data.Get("account_id").(string)
|
||||
if accountID == "" {
|
||||
return logical.ErrorResponse("missing account id"), nil
|
||||
}
|
||||
|
||||
stsEntry, err := b.lockedAwsStsEntry(ctx, req.Storage, accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if stsEntry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"sts_role": stsEntry.StsRole,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// pathConfigStsCreateUpdate is used to associate an STS role with a given AWS accountID
func (b *backend) pathConfigStsCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	accountID := data.Get("account_id").(string)
	if accountID == "" {
		return logical.ErrorResponse("missing AWS account ID"), nil
	}

	// Hold the write lock across the read-modify-write sequence below.
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	// Check if an STS role is already registered
	stsEntry, err := b.nonLockedAwsStsEntry(ctx, req.Storage, accountID)
	if err != nil {
		return nil, err
	}
	if stsEntry == nil {
		stsEntry = &awsStsEntry{}
	}

	// Check that an STS role has actually been provided.
	// On create, sts_role is mandatory; on update it may be omitted to
	// retain the previously stored value.
	stsRole, ok := data.GetOk("sts_role")
	if ok {
		stsEntry.StsRole = stsRole.(string)
	} else if req.Operation == logical.CreateOperation {
		return logical.ErrorResponse("missing sts role"), nil
	}

	if stsEntry.StsRole == "" {
		return logical.ErrorResponse("sts role cannot be empty"), nil
	}

	// save the provided STS role
	if err := b.nonLockedSetAwsStsEntry(ctx, req.Storage, accountID, stsEntry); err != nil {
		return nil, err
	}

	// A nil response is rendered by the framework as a 204 No Content.
	return nil, nil
}
|
||||
|
||||
// pathConfigStsDelete is used to delete a previously configured STS configuration
|
||||
func (b *backend) pathConfigStsDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
accountID := data.Get("account_id").(string)
|
||||
if accountID == "" {
|
||||
return logical.ErrorResponse("missing account id"), nil
|
||||
}
|
||||
|
||||
return nil, req.Storage.Delete(ctx, "config/sts/"+accountID)
|
||||
}
|
||||
|
||||
// Help synopsis for the config/sts/<account_id> endpoint.
const pathConfigStsSyn = `
Specify STS roles to be assumed for certain AWS accounts.
`

// Help description for the config/sts/<account_id> endpoint.
const pathConfigStsDesc = `
Allows the explicit association of STS roles to satellite AWS accounts (i.e. those
which are not the account in which the Vault server is running.) Login attempts from
EC2 instances running in these accounts will be verified using credentials obtained
by assumption of these STS roles.

The environment in which the Vault server resides must have access to assume the
given STS roles.
`

// Help synopsis for listing config/sts entries.
const pathListStsHelpSyn = `
List all the AWS account/STS role relationships registered with Vault.
`

// Help description for listing config/sts entries.
const pathListStsHelpDesc = `
AWS accounts will be listed by account ID, along with their respective role names.
`
|
@ -1,183 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
const (
	// identityAccessListConfigStorage is the storage path for the tidy
	// configuration of identity access-list entries. The legacy "whitelist"
	// path is retained for backwards compatibility with existing data.
	identityAccessListConfigStorage = "config/tidy/identity-whitelist"
)
|
||||
|
||||
// pathConfigTidyIdentityAccessList returns the path definition for
// config/tidy/identity-accesslist, which configures the periodic tidying of
// identity access-list entries.
func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path {
	return &framework.Path{
		// NOTE(review): this Sprintf is equivalent to the literal
		// "config/tidy/identity-accesslist$" (kept as-is to preserve the
		// file's only use of the fmt import).
		Pattern: fmt.Sprintf("%s$", "config/tidy/identity-accesslist"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"safety_buffer": {
				Type:    framework.TypeDurationSecond,
				Default: 259200, // 72h
				Description: `The amount of extra time that must have passed beyond the identity's
expiration, before it is removed from the backend storage.`,
			},
			"disable_periodic_tidy": {
				Type:        framework.TypeBool,
				Default:     false,
				Description: "If set to 'true', disables the periodic tidying of the 'identity-accesslist/<instance_id>' entries.",
			},
		},

		// Routes requests to CreateOperation or UpdateOperation depending on
		// whether a configuration entry already exists.
		ExistenceCheck: b.pathConfigTidyIdentityAccessListExistenceCheck,

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.CreateOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyIdentityAccessListCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "identity-access-list-tidy-operation",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyIdentityAccessListCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "identity-access-list-tidy-operation",
				},
			},
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyIdentityAccessListRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "identity-access-list-tidy-settings",
				},
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyIdentityAccessListDelete,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "identity-access-list-tidy-settings",
				},
			},
		},

		HelpSynopsis:    pathConfigTidyIdentityAccessListHelpSyn,
		HelpDescription: pathConfigTidyIdentityAccessListHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathConfigTidyIdentityAccessListExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
entry, err := b.lockedConfigTidyIdentities(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) lockedConfigTidyIdentities(ctx context.Context, s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
return b.nonLockedConfigTidyIdentities(ctx, s)
|
||||
}
|
||||
|
||||
func (b *backend) nonLockedConfigTidyIdentities(ctx context.Context, s logical.Storage) (*tidyWhitelistIdentityConfig, error) {
|
||||
entry, err := s.Get(ctx, identityAccessListConfigStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result tidyWhitelistIdentityConfig
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// pathConfigTidyIdentityAccessListCreateUpdate creates or updates the tidy
// configuration for identity access-list entries.
func (b *backend) pathConfigTidyIdentityAccessListCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Hold the write lock across the read-modify-write sequence.
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	configEntry, err := b.nonLockedConfigTidyIdentities(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	if configEntry == nil {
		configEntry = &tidyWhitelistIdentityConfig{}
	}

	// Apply the provided value; on create, fall back to the field default.
	// On update, an omitted field keeps the previously stored value.
	safetyBufferInt, ok := data.GetOk("safety_buffer")
	if ok {
		configEntry.SafetyBuffer = safetyBufferInt.(int)
	} else if req.Operation == logical.CreateOperation {
		configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
	}

	disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
	if ok {
		configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
	} else if req.Operation == logical.CreateOperation {
		configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
	}

	entry, err := logical.StorageEntryJSON(identityAccessListConfigStorage, configEntry)
	if err != nil {
		return nil, err
	}

	if err := req.Storage.Put(ctx, entry); err != nil {
		return nil, err
	}

	// A nil response is rendered by the framework as a 204 No Content.
	return nil, nil
}
|
||||
|
||||
func (b *backend) pathConfigTidyIdentityAccessListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
clientConfig, err := b.lockedConfigTidyIdentities(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if clientConfig == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"safety_buffer": clientConfig.SafetyBuffer,
|
||||
"disable_periodic_tidy": clientConfig.DisablePeriodicTidy,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigTidyIdentityAccessListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
return nil, req.Storage.Delete(ctx, identityAccessListConfigStorage)
|
||||
}
|
||||
|
||||
// tidyWhitelistIdentityConfig holds the tidy settings for identity
// access-list entries (stored at identityAccessListConfigStorage).
type tidyWhitelistIdentityConfig struct {
	// SafetyBuffer is the extra time, in seconds, an expired entry must
	// persist before being purged.
	SafetyBuffer int `json:"safety_buffer"`
	// DisablePeriodicTidy turns off the automatic periodic tidy when true.
	DisablePeriodicTidy bool `json:"disable_periodic_tidy"`
}
|
||||
|
||||
// Help synopsis for the config/tidy/identity-accesslist endpoint.
const pathConfigTidyIdentityAccessListHelpSyn = `
Configures the periodic tidying operation of the access list identity entries.
`

// Help description for the config/tidy/identity-accesslist endpoint.
const pathConfigTidyIdentityAccessListHelpDesc = `
By default, the expired entries in the access list will be attempted to be removed
periodically. This operation will look for expired items in the list and purges them.
However, there is a safety buffer duration (defaults to 72h), purges the entries
only if they have been persisting this duration, past its expiration time.
`
|
@ -1,183 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
const (
	// roletagDenyListConfigStorage is the storage path for the tidy
	// configuration of deny-listed role tags. The legacy "blacklist" path is
	// retained for backwards compatibility with existing data.
	roletagDenyListConfigStorage = "config/tidy/roletag-blacklist"
)
|
||||
|
||||
// pathConfigTidyRoletagDenyList returns the path definition for
// config/tidy/roletag-denylist, which configures the periodic tidying of
// deny-listed role tag entries.
func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path {
	return &framework.Path{
		Pattern: "config/tidy/roletag-denylist$",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"safety_buffer": {
				Type:    framework.TypeDurationSecond,
				Default: 15552000, // 180d
				Description: `The amount of extra time that must have passed beyond the roletag
expiration, before it is removed from the backend storage.
Defaults to 4320h (180 days).`,
			},

			"disable_periodic_tidy": {
				Type:        framework.TypeBool,
				Default:     false,
				Description: "If set to 'true', disables the periodic tidying of deny listed entries.",
			},
		},

		// Routes requests to CreateOperation or UpdateOperation depending on
		// whether a configuration entry already exists.
		ExistenceCheck: b.pathConfigTidyRoletagDenyListExistenceCheck,

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.CreateOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyRoletagDenyListCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "role-tag-deny-list-tidy-operation",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyRoletagDenyListCreateUpdate,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "role-tag-deny-list-tidy-operation",
				},
			},
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyRoletagDenyListRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "role-tag-deny-list-tidy-settings",
				},
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathConfigTidyRoletagDenyListDelete,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "role-tag-deny-list-tidy-settings",
				},
			},
		},

		HelpSynopsis:    pathConfigTidyRoletagDenyListHelpSyn,
		HelpDescription: pathConfigTidyRoletagDenyListHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathConfigTidyRoletagDenyListExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
|
||||
entry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return entry != nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) lockedConfigTidyRoleTags(ctx context.Context, s logical.Storage) (*tidyDenyListRoleTagConfig, error) {
|
||||
b.configMutex.RLock()
|
||||
defer b.configMutex.RUnlock()
|
||||
|
||||
return b.nonLockedConfigTidyRoleTags(ctx, s)
|
||||
}
|
||||
|
||||
func (b *backend) nonLockedConfigTidyRoleTags(ctx context.Context, s logical.Storage) (*tidyDenyListRoleTagConfig, error) {
|
||||
entry, err := s.Get(ctx, roletagDenyListConfigStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result tidyDenyListRoleTagConfig
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// pathConfigTidyRoletagDenyListCreateUpdate creates or updates the tidy
// configuration for deny-listed role tags.
func (b *backend) pathConfigTidyRoletagDenyListCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Hold the write lock across the read-modify-write sequence.
	b.configMutex.Lock()
	defer b.configMutex.Unlock()

	configEntry, err := b.nonLockedConfigTidyRoleTags(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	if configEntry == nil {
		configEntry = &tidyDenyListRoleTagConfig{}
	}
	// Apply the provided value; on create, fall back to the field default.
	// On update, an omitted field keeps the previously stored value.
	safetyBufferInt, ok := data.GetOk("safety_buffer")
	if ok {
		configEntry.SafetyBuffer = safetyBufferInt.(int)
	} else if req.Operation == logical.CreateOperation {
		configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
	}
	disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
	if ok {
		configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
	} else if req.Operation == logical.CreateOperation {
		configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
	}

	entry, err := logical.StorageEntryJSON(roletagDenyListConfigStorage, configEntry)
	if err != nil {
		return nil, err
	}

	if err := req.Storage.Put(ctx, entry); err != nil {
		return nil, err
	}

	// A nil response is rendered by the framework as a 204 No Content.
	return nil, nil
}
|
||||
|
||||
func (b *backend) pathConfigTidyRoletagDenyListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
clientConfig, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if clientConfig == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"safety_buffer": clientConfig.SafetyBuffer,
|
||||
"disable_periodic_tidy": clientConfig.DisablePeriodicTidy,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigTidyRoletagDenyListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.configMutex.Lock()
|
||||
defer b.configMutex.Unlock()
|
||||
|
||||
return nil, req.Storage.Delete(ctx, roletagDenyListConfigStorage)
|
||||
}
|
||||
|
||||
// tidyDenyListRoleTagConfig holds the tidy settings for deny-listed role
// tags (stored at roletagDenyListConfigStorage).
type tidyDenyListRoleTagConfig struct {
	// SafetyBuffer is the extra time, in seconds, an expired entry must
	// persist before being purged.
	SafetyBuffer int `json:"safety_buffer"`
	// DisablePeriodicTidy turns off the automatic periodic tidy when true.
	DisablePeriodicTidy bool `json:"disable_periodic_tidy"`
}
|
||||
|
||||
const pathConfigTidyRoletagDenyListHelpSyn = `
|
||||
Configures the periodic tidying operation of the deny listed role tag entries.
|
||||
`
|
||||
|
||||
const pathConfigTidyRoletagDenyListHelpDesc = `
|
||||
By default, the expired entries in the deny list will be attempted to be removed
|
||||
periodically. This operation will look for expired items in the list and purges them.
|
||||
However, there is a safety buffer duration (defaults to 72h), purges the entries
|
||||
only if they have been persisting this duration, past its expiration time.
|
||||
`
|
@ -1,181 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// identityAccessListStorage is the storage prefix under which identity
// access-list entries are kept, keyed by EC2 instance ID. The legacy
// "whitelist" prefix is retained for backwards compatibility with existing data.
const identityAccessListStorage = "whitelist/identity/"
|
||||
|
||||
// pathIdentityAccessList returns the path definition for
// identity-accesslist/<instance_id>, supporting read and delete of
// individual access-list entries.
func (b *backend) pathIdentityAccessList() *framework.Path {
	return &framework.Path{
		Pattern: "identity-accesslist/" + framework.GenericNameRegex("instance_id"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
			OperationSuffix: "identity-access-list",
		},

		Fields: map[string]*framework.FieldSchema{
			"instance_id": {
				Type: framework.TypeString,
				Description: `EC2 instance ID. A successful login operation from an EC2 instance
gets cached in this accesslist, keyed off of instance ID.`,
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathIdentityAccesslistRead,
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback: b.pathIdentityAccesslistDelete,
			},
		},

		HelpSynopsis:    pathIdentityAccessListSyn,
		HelpDescription: pathIdentityAccessListDesc,
	}
}
|
||||
|
||||
// pathListIdentityAccessList returns the path definition used to list all
// entries in the identity access list.
func (b *backend) pathListIdentityAccessList() *framework.Path {
	return &framework.Path{
		Pattern: "identity-accesslist/?",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
			OperationSuffix: "identity-access-list",
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ListOperation: &framework.PathOperation{
				Callback: b.pathAccessListIdentitiesList,
			},
		},

		HelpSynopsis:    pathListIdentityAccessListHelpSyn,
		HelpDescription: pathListIdentityAccessListHelpDesc,
	}
}
|
||||
|
||||
// pathAccessListIdentitiesList is used to list all the instance IDs that are present
|
||||
// in the identity access list. This will list both valid and expired entries.
|
||||
func (b *backend) pathAccessListIdentitiesList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
identities, err := req.Storage.List(ctx, identityAccessListStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return logical.ListResponse(identities), nil
|
||||
}
|
||||
|
||||
// Fetch an item from the access list given an instance ID.
|
||||
func accessListIdentityEntry(ctx context.Context, s logical.Storage, instanceID string) (*accessListIdentity, error) {
|
||||
entry, err := s.Get(ctx, identityAccessListStorage+instanceID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result accessListIdentity
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Stores an instance ID and the information required to validate further login/renewal attempts from
|
||||
// the same instance ID.
|
||||
func setAccessListIdentityEntry(ctx context.Context, s logical.Storage, instanceID string, identity *accessListIdentity) error {
|
||||
entry, err := logical.StorageEntryJSON(identityAccessListStorage+instanceID, identity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.Put(ctx, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pathIdentityAccesslistDelete is used to delete an entry from the identity access list given an instance ID.
|
||||
func (b *backend) pathIdentityAccesslistDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
instanceID := data.Get("instance_id").(string)
|
||||
if instanceID == "" {
|
||||
return logical.ErrorResponse("missing instance_id"), nil
|
||||
}
|
||||
|
||||
return nil, req.Storage.Delete(ctx, identityAccessListStorage+instanceID)
|
||||
}
|
||||
|
||||
// pathIdentityAccesslistRead is used to view an entry in the identity access list given an instance ID.
|
||||
func (b *backend) pathIdentityAccesslistRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
instanceID := data.Get("instance_id").(string)
|
||||
if instanceID == "" {
|
||||
return logical.ErrorResponse("missing instance_id"), nil
|
||||
}
|
||||
|
||||
entry, err := accessListIdentityEntry(ctx, req.Storage, instanceID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"role": entry.Role,
|
||||
"client_nonce": entry.ClientNonce,
|
||||
"creation_time": entry.CreationTime.Format(time.RFC3339Nano),
|
||||
"disallow_reauthentication": entry.DisallowReauthentication,
|
||||
"pending_time": entry.PendingTime,
|
||||
"expiration_time": entry.ExpirationTime.Format(time.RFC3339Nano),
|
||||
"last_updated_time": entry.LastUpdatedTime.Format(time.RFC3339Nano),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Struct to represent each item in the identity access list.
type accessListIdentity struct {
	// Role is the name of the role the instance logged in against.
	Role string `json:"role"`
	// ClientNonce is the nonce supplied at first login, used to validate
	// re-authentication attempts from the same instance.
	ClientNonce string `json:"client_nonce"`
	// CreationTime is when this entry was first created.
	CreationTime time.Time `json:"creation_time"`
	// DisallowReauthentication blocks further logins from this instance.
	DisallowReauthentication bool `json:"disallow_reauthentication"`
	// PendingTime is stored as a string and surfaced verbatim on read.
	PendingTime string `json:"pending_time"`
	// ExpirationTime is when the entry becomes eligible for tidying.
	ExpirationTime time.Time `json:"expiration_time"`
	// LastUpdatedTime is when the entry was last modified.
	LastUpdatedTime time.Time `json:"last_updated_time"`
}
|
||||
|
||||
// Help synopsis for the identity-accesslist/<instance_id> endpoint.
const pathIdentityAccessListSyn = `
Read or delete entries in the identity access list.
`

// Help description for the identity-accesslist/<instance_id> endpoint.
const pathIdentityAccessListDesc = `
Each login from an EC2 instance creates/updates an entry in the identity access list.

Entries in this list can be viewed or deleted using this endpoint.

By default, a cron task will periodically look for expired entries in the access list
and deletes them. The duration to periodically run this, is one hour by default.
However, this can be configured using the 'config/tidy/identities' endpoint. This tidy
action can be triggered via the API as well, using the 'tidy/identities' endpoint.
`

// Help synopsis for listing identity-accesslist entries.
const pathListIdentityAccessListHelpSyn = `
Lists the items present in the identity access list.
`

// Help description for listing identity-accesslist entries.
const pathListIdentityAccessListHelpDesc = `
The entries in the identity access list is keyed off of the EC2 instance IDs.
This endpoint lists all the entries present in the identity access list, both
expired and un-expired entries. Use 'tidy/identities' endpoint to clean-up
the access list of identities.
`
|
File diff suppressed because it is too large
Load Diff
@ -1,709 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
|
||||
responseFromUser := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
|
||||
<GetCallerIdentityResult>
|
||||
<Arn>arn:aws:iam::123456789012:user/MyUserName</Arn>
|
||||
<UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
|
||||
<Account>123456789012</Account>
|
||||
</GetCallerIdentityResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
|
||||
</ResponseMetadata>
|
||||
</GetCallerIdentityResponse>`
|
||||
expectedUserArn := "arn:aws:iam::123456789012:user/MyUserName"
|
||||
|
||||
responseFromAssumedRole := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
|
||||
<GetCallerIdentityResult>
|
||||
<Arn>arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName</Arn>
|
||||
<UserId>ASOMETHINGSOMETHINGELSE:RoleSessionName</UserId>
|
||||
<Account>123456789012</Account>
|
||||
</GetCallerIdentityResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
|
||||
</ResponseMetadata>
|
||||
</GetCallerIdentityResponse>`
|
||||
expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
|
||||
|
||||
parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if parsedArn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedUserArn {
|
||||
t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsedArn)
|
||||
}
|
||||
|
||||
parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if parsedArn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedRoleArn {
|
||||
t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsedArn)
|
||||
}
|
||||
|
||||
_, err = parseGetCallerIdentityResponse("SomeRandomGibberish")
|
||||
if err == nil {
|
||||
t.Errorf("expected to NOT parse random giberish, but didn't get an error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_pathLogin_parseIamArn(t *testing.T) {
|
||||
testParser := func(inputArn, expectedCanonicalArn string, expectedEntity iamEntity) {
|
||||
entity, err := parseIamArn(inputArn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if expectedCanonicalArn != "" && entity.canonicalArn() != expectedCanonicalArn {
|
||||
t.Fatalf("expected to canonicalize ARN %q into %q but got %q instead", inputArn, expectedCanonicalArn, entity.canonicalArn())
|
||||
}
|
||||
if *entity != expectedEntity {
|
||||
t.Fatalf("expected to get iamEntity %#v from input ARN %q but instead got %#v", expectedEntity, inputArn, *entity)
|
||||
}
|
||||
}
|
||||
|
||||
testParser("arn:aws:iam::123456789012:user/UserPath/MyUserName",
|
||||
"arn:aws:iam::123456789012:user/MyUserName",
|
||||
iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "user", Path: "UserPath", FriendlyName: "MyUserName"},
|
||||
)
|
||||
canonicalRoleArn := "arn:aws:iam::123456789012:role/RoleName"
|
||||
testParser("arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName",
|
||||
canonicalRoleArn,
|
||||
iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "assumed-role", FriendlyName: "RoleName", SessionInfo: "RoleSessionName"},
|
||||
)
|
||||
testParser("arn:aws:iam::123456789012:role/RolePath/RoleName",
|
||||
canonicalRoleArn,
|
||||
iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "role", Path: "RolePath", FriendlyName: "RoleName"},
|
||||
)
|
||||
testParser("arn:aws:iam::123456789012:instance-profile/profilePath/InstanceProfileName",
|
||||
"",
|
||||
iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "instance-profile", Path: "profilePath", FriendlyName: "InstanceProfileName"},
|
||||
)
|
||||
|
||||
// Test that it properly handles pathological inputs...
|
||||
_, err := parseIamArn("")
|
||||
if err == nil {
|
||||
t.Error("expected error from empty input string")
|
||||
}
|
||||
|
||||
_, err = parseIamArn("arn:aws:iam::123456789012:role")
|
||||
if err == nil {
|
||||
t.Error("expected error from malformed ARN without a role name")
|
||||
}
|
||||
|
||||
_, err = parseIamArn("arn:aws:iam")
|
||||
if err == nil {
|
||||
t.Error("expected error from incomplete ARN (arn:aws:iam)")
|
||||
}
|
||||
|
||||
_, err = parseIamArn("arn:aws:iam::1234556789012:/")
|
||||
if err == nil {
|
||||
t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)")
|
||||
}
|
||||
_, err = parseIamArn("arn:aws:sts::1234556789012:assumed-role/role")
|
||||
if err == nil {
|
||||
t.Error("expected error from malformed assumed role ARN")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_validateVaultHeaderValue(t *testing.T) {
|
||||
const canaryHeaderValue = "Vault-Server"
|
||||
requestURL, err := url.Parse("https://sts.amazonaws.com/")
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing test URL: %v", err)
|
||||
}
|
||||
postHeadersMissing := http.Header{
|
||||
"Host": []string{"Foo"},
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
|
||||
}
|
||||
postHeadersInvalid := http.Header{
|
||||
"Host": []string{"Foo"},
|
||||
iamServerIdHeader: []string{"InvalidValue"},
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
|
||||
}
|
||||
postHeadersUnsigned := http.Header{
|
||||
"Host": []string{"Foo"},
|
||||
iamServerIdHeader: []string{canaryHeaderValue},
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
|
||||
}
|
||||
postHeadersValid := http.Header{
|
||||
"Host": []string{"Foo"},
|
||||
iamServerIdHeader: []string{canaryHeaderValue},
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
|
||||
}
|
||||
|
||||
postHeadersSplit := http.Header{
|
||||
"Host": []string{"Foo"},
|
||||
iamServerIdHeader: []string{canaryHeaderValue},
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
|
||||
}
|
||||
|
||||
err = validateVaultHeaderValue(postHeadersMissing, requestURL, canaryHeaderValue)
|
||||
if err == nil {
|
||||
t.Error("validated POST request with missing Vault header")
|
||||
}
|
||||
|
||||
err = validateVaultHeaderValue(postHeadersInvalid, requestURL, canaryHeaderValue)
|
||||
if err == nil {
|
||||
t.Error("validated POST request with invalid Vault header value")
|
||||
}
|
||||
|
||||
err = validateVaultHeaderValue(postHeadersUnsigned, requestURL, canaryHeaderValue)
|
||||
if err == nil {
|
||||
t.Error("validated POST request with unsigned Vault header")
|
||||
}
|
||||
|
||||
err = validateVaultHeaderValue(postHeadersValid, requestURL, canaryHeaderValue)
|
||||
if err != nil {
|
||||
t.Errorf("did NOT validate valid POST request: %v", err)
|
||||
}
|
||||
|
||||
err = validateVaultHeaderValue(postHeadersSplit, requestURL, canaryHeaderValue)
|
||||
if err != nil {
|
||||
t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers,
|
||||
// supporting both base64 encoded string and JSON headers
|
||||
func TestBackend_pathLogin_IAMHeaders(t *testing.T) {
|
||||
storage := &logical.InmemStorage{}
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = storage
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = b.Setup(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// sets up a test server to stand in for STS service
|
||||
ts := setupIAMTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
clientConfigData := map[string]interface{}{
|
||||
"iam_server_id_header_value": testVaultHeaderValue,
|
||||
"sts_endpoint": ts.URL,
|
||||
}
|
||||
clientRequest := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
Data: clientConfigData,
|
||||
}
|
||||
_, err = b.HandleRequest(context.Background(), clientRequest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Configure identity.
|
||||
_, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"iam_alias": "role_id",
|
||||
"iam_metadata": []string{
|
||||
"account_id",
|
||||
"auth_type",
|
||||
"canonical_arn",
|
||||
"client_arn",
|
||||
"client_user_id",
|
||||
"inferred_aws_region",
|
||||
"inferred_entity_id",
|
||||
"inferred_entity_type",
|
||||
},
|
||||
"ec2_alias": "role_id",
|
||||
"ec2_metadata": []string{
|
||||
"account_id",
|
||||
"ami_id",
|
||||
"instance_id",
|
||||
"region",
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// create a role entry
|
||||
roleEntry := &awsRoleEntry{
|
||||
RoleID: "foo",
|
||||
Version: currentRoleStorageVersion,
|
||||
AuthType: iamAuthType,
|
||||
}
|
||||
|
||||
if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil {
|
||||
t.Fatalf("failed to set entry: %s", err)
|
||||
}
|
||||
|
||||
// create a baseline loginData map structure, including iam_request_headers
|
||||
// already base64encoded. This is the "Default" loginData used for all tests.
|
||||
// Each sub test can override the map's iam_request_headers entry
|
||||
loginData, err := defaultLoginData()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedAuthMetadata := map[string]string{
|
||||
"account_id": "123456789012",
|
||||
"auth_type": "iam",
|
||||
"canonical_arn": "arn:aws:iam::123456789012:user/valid-role",
|
||||
"client_arn": "arn:aws:iam::123456789012:user/valid-role",
|
||||
"client_user_id": "ASOMETHINGSOMETHINGSOMETHING",
|
||||
}
|
||||
|
||||
// expected errors for certain tests
|
||||
missingHeaderErr := errors.New("error validating X-Vault-AWS-IAM-Server-ID header: missing header \"X-Vault-AWS-IAM-Server-ID\"")
|
||||
parsingErr := errors.New("error making upstream request: error parsing STS response")
|
||||
|
||||
testCases := []struct {
|
||||
Name string
|
||||
Header interface{}
|
||||
ExpectErr error
|
||||
}{
|
||||
{
|
||||
Name: "Default",
|
||||
},
|
||||
{
|
||||
Name: "Map-complete",
|
||||
Header: map[string]interface{}{
|
||||
"Content-Length": "43",
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date": "20180910T203328Z",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Map-incomplete",
|
||||
Header: map[string]interface{}{
|
||||
"Content-Length": "43",
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date": "20180910T203328Z",
|
||||
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
|
||||
},
|
||||
ExpectErr: missingHeaderErr,
|
||||
},
|
||||
{
|
||||
Name: "Map-illegal-header",
|
||||
Header: map[string]interface{}{
|
||||
"Content-Length": "43",
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date": "20180910T203328Z",
|
||||
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"X-Amz-Mallory-Header": "<?xml><h4ck0r/>",
|
||||
},
|
||||
ExpectErr: errors.New("invalid request header: X-Amz-Mallory-Header"),
|
||||
},
|
||||
{
|
||||
Name: "JSON-complete",
|
||||
Header: `{
|
||||
"Content-Length":"43",
|
||||
"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date":"20180910T203328Z",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4"
|
||||
}`,
|
||||
},
|
||||
{
|
||||
Name: "JSON-incomplete",
|
||||
Header: `{
|
||||
"Content-Length":"43",
|
||||
"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date":"20180910T203328Z",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"
|
||||
}`,
|
||||
ExpectErr: parsingErr,
|
||||
},
|
||||
{
|
||||
Name: "Base64-complete",
|
||||
Header: base64Complete(),
|
||||
},
|
||||
{
|
||||
Name: "Base64-incomplete-missing-header",
|
||||
Header: base64MissingVaultID(),
|
||||
ExpectErr: missingHeaderErr,
|
||||
},
|
||||
{
|
||||
Name: "Base64-incomplete-missing-auth-sig",
|
||||
Header: base64MissingAuthField(),
|
||||
ExpectErr: parsingErr,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
if tc.Header != nil {
|
||||
loginData["iam_request_headers"] = tc.Header
|
||||
}
|
||||
|
||||
loginRequest := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "login",
|
||||
Storage: storage,
|
||||
Data: loginData,
|
||||
Connection: &logical.Connection{},
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(context.Background(), loginRequest)
|
||||
if err != nil || resp == nil || resp.IsError() {
|
||||
if tc.ExpectErr != nil && tc.ExpectErr.Error() == resp.Error().Error() {
|
||||
return
|
||||
}
|
||||
t.Errorf("un expected failed login:\nresp: %#v\n\nerr: %v", resp, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedAuthMetadata, resp.Auth.Alias.Metadata) {
|
||||
t.Errorf("expected metadata (%#v) to match (%#v)", expectedAuthMetadata, resp.Auth.Alias.Metadata)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackend_pathLogin_IAMRoleResolution tests role resolution for an Iam login
|
||||
func TestBackend_pathLogin_IAMRoleResolution(t *testing.T) {
|
||||
storage := &logical.InmemStorage{}
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = storage
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = b.Setup(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// sets up a test server to stand in for STS service
|
||||
ts := setupIAMTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
clientConfigData := map[string]interface{}{
|
||||
"iam_server_id_header_value": testVaultHeaderValue,
|
||||
"sts_endpoint": ts.URL,
|
||||
}
|
||||
clientRequest := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
Data: clientConfigData,
|
||||
}
|
||||
_, err = b.HandleRequest(context.Background(), clientRequest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Configure identity.
|
||||
_, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"iam_alias": "role_id",
|
||||
"iam_metadata": []string{
|
||||
"account_id",
|
||||
"auth_type",
|
||||
"canonical_arn",
|
||||
"client_arn",
|
||||
"client_user_id",
|
||||
"inferred_aws_region",
|
||||
"inferred_entity_id",
|
||||
"inferred_entity_type",
|
||||
},
|
||||
"ec2_alias": "role_id",
|
||||
"ec2_metadata": []string{
|
||||
"account_id",
|
||||
"ami_id",
|
||||
"instance_id",
|
||||
"region",
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// create a role entry
|
||||
roleEntry := &awsRoleEntry{
|
||||
RoleID: "foo",
|
||||
Version: currentRoleStorageVersion,
|
||||
AuthType: iamAuthType,
|
||||
}
|
||||
|
||||
if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil {
|
||||
t.Fatalf("failed to set entry: %s", err)
|
||||
}
|
||||
|
||||
// create a baseline loginData map structure, including iam_request_headers
|
||||
// already base64encoded. This is the "Default" loginData used for all tests.
|
||||
// Each sub test can override the map's iam_request_headers entry
|
||||
loginData, err := defaultLoginData()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
loginRequest := &logical.Request{
|
||||
Operation: logical.ResolveRoleOperation,
|
||||
Path: "login",
|
||||
Storage: storage,
|
||||
Data: loginData,
|
||||
Connection: &logical.Connection{},
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(context.Background(), loginRequest)
|
||||
if err != nil || resp == nil || resp.IsError() {
|
||||
t.Errorf("unexpected failed role resolution:\nresp: %#v\n\nerr: %v", resp, err)
|
||||
}
|
||||
if resp.Data["role"] != testValidRoleName {
|
||||
t.Fatalf("Role was not as expected. Expected %s, received %s", testValidRoleName, resp.Data["role"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_defaultAliasMetadata(t *testing.T) {
|
||||
storage := &logical.InmemStorage{}
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = storage
|
||||
b, err := Backend(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = b.Setup(context.Background(), config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// sets up a test server to stand in for STS service
|
||||
ts := setupIAMTestServer()
|
||||
defer ts.Close()
|
||||
|
||||
clientConfigData := map[string]interface{}{
|
||||
"iam_server_id_header_value": testVaultHeaderValue,
|
||||
"sts_endpoint": ts.URL,
|
||||
}
|
||||
clientRequest := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/client",
|
||||
Storage: storage,
|
||||
Data: clientConfigData,
|
||||
}
|
||||
_, err = b.HandleRequest(context.Background(), clientRequest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Configure identity.
|
||||
_, err = b.HandleRequest(context.Background(), &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/identity",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"iam_alias": "role_id",
|
||||
"ec2_alias": "role_id",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// create a role entry
|
||||
roleEntry := &awsRoleEntry{
|
||||
RoleID: "foo",
|
||||
Version: currentRoleStorageVersion,
|
||||
AuthType: iamAuthType,
|
||||
}
|
||||
|
||||
if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil {
|
||||
t.Fatalf("failed to set entry: %s", err)
|
||||
}
|
||||
|
||||
// create a baseline loginData map structure, including iam_request_headers
|
||||
// already base64encoded. This is the "Default" loginData used for all tests.
|
||||
// Each sub test can override the map's iam_request_headers entry
|
||||
loginData, err := defaultLoginData()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedAliasMetadata := map[string]string{
|
||||
"account_id": "123456789012",
|
||||
"auth_type": "iam",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
Name string
|
||||
Header interface{}
|
||||
ExpectErr error
|
||||
}{
|
||||
{
|
||||
Name: "Default",
|
||||
},
|
||||
{
|
||||
Name: "Map-complete",
|
||||
Header: map[string]interface{}{
|
||||
"Content-Length": "43",
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date": "20180910T203328Z",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "JSON-complete",
|
||||
Header: `{
|
||||
"Content-Length":"43",
|
||||
"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
|
||||
"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
|
||||
"X-Amz-Date":"20180910T203328Z",
|
||||
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
|
||||
"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4"
|
||||
}`,
|
||||
},
|
||||
{
|
||||
Name: "Base64-complete",
|
||||
Header: base64Complete(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
if tc.Header != nil {
|
||||
loginData["iam_request_headers"] = tc.Header
|
||||
}
|
||||
|
||||
loginRequest := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "login",
|
||||
Storage: storage,
|
||||
Data: loginData,
|
||||
Connection: &logical.Connection{},
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(context.Background(), loginRequest)
|
||||
if err != nil || resp == nil || resp.IsError() {
|
||||
if tc.ExpectErr != nil && tc.ExpectErr.Error() == resp.Error().Error() {
|
||||
return
|
||||
}
|
||||
t.Errorf("un expected failed login:\nresp: %#v\n\nerr: %v", resp, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectedAliasMetadata, resp.Auth.Alias.Metadata) {
|
||||
t.Errorf("expected metadata (%#v) to match (%#v)", expectedAliasMetadata, resp.Auth.Alias.Metadata)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func defaultLoginData() (map[string]interface{}, error) {
|
||||
awsSession, err := session.NewSession()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create session: %s", err)
|
||||
}
|
||||
|
||||
stsService := sts.New(awsSession)
|
||||
stsInputParams := &sts.GetCallerIdentityInput{}
|
||||
stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams)
|
||||
stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue)
|
||||
stsRequestValid.HTTPRequest.Header.Add("Authorization", fmt.Sprintf("%s,%s,%s",
|
||||
"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request",
|
||||
"SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id",
|
||||
"Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"))
|
||||
stsRequestValid.Sign()
|
||||
|
||||
return buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testValidRoleName)
|
||||
}
|
||||
|
||||
// setupIAMTestServer configures an httptest server to intercept and respond
// to the IAM login path's invocation of submitCallerIdentityRequest (which
// does not use the AWS SDK). It receives the mocked GetCallerIdentity
// response containing user information matching the test role; if the
// incoming Authorization header is missing any expected SigV4 component,
// it replies with a sentinel error body instead.
func setupIAMTestServer() *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		responseString := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetCallerIdentityResult>
<Arn>arn:aws:iam::123456789012:user/valid-role</Arn>
<UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
<Account>123456789012</Account>
</GetCallerIdentityResult>
<ResponseMetadata>
<RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
</ResponseMetadata>
</GetCallerIdentityResponse>
`

		// Collect the key (text before the first '=') of every
		// comma-separated component of the Authorization header.
		rawParts := strings.Split(r.Header.Get("Authorization"), ",")
		keys := make([]string, 0, len(rawParts))
		for _, part := range rawParts {
			part = strings.TrimSpace(part)
			keys = append(keys, strings.SplitN(part, "=", 2)[0])
		}

		// verify the "Authorization" header contains all the expected parts
		expectedAuthParts := []string{"AWS4-HMAC-SHA256 Credential", "SignedHeaders", "Signature"}
		matchingCount := 0
		for _, k := range keys {
			for _, want := range expectedAuthParts {
				if want == k {
					matchingCount++
				}
			}
		}
		if matchingCount != len(expectedAuthParts) {
			responseString = "missing auth parts"
		}

		w.Header().Add("Content-Type", "text/xml")
		fmt.Fprintln(w, responseString)
	}))
}
|
||||
|
||||
// base64Complete returns a complete set of request headers, as the JSON map
// the login path accepts, including the Vault server-ID header and a fully
// formed Authorization header. (The login path accepts this either as-is or
// base64-encoded; callers here pass it through iam_request_headers.)
func base64Complete() string {
	return `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
}
|
||||
|
||||
// base64MissingVaultID returns the same header-map JSON as base64Complete,
// but with the X-Vault-Aws-Iam-Server-Id header omitted, to exercise the
// missing-header rejection path.
func base64MissingVaultID() string {
	return `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"]}`
}
|
||||
|
||||
// base64MissingAuthField returns the same header-map JSON as base64Complete,
// but with the "Signature" component omitted from the Authorization header,
// to exercise the STS-response parsing error path.
func base64MissingAuthField() string {
	return `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
}
|
File diff suppressed because it is too large
Load Diff
@ -1,447 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/policyutil"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// roleTagVersion is the format-version string stamped into newly created
// role tags (via the roleTag.Version field in pathRoleTagUpdate).
const roleTagVersion = "v1"
|
||||
|
||||
func (b *backend) pathRoleTag() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "role-tag",
|
||||
},
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"role": {
|
||||
Type: framework.TypeString,
|
||||
Description: "Name of the role.",
|
||||
},
|
||||
|
||||
"instance_id": {
|
||||
Type: framework.TypeString,
|
||||
Description: `Instance ID for which this tag is intended for.
|
||||
If set, the created tag can only be used by the instance with the given ID.`,
|
||||
},
|
||||
|
||||
"policies": {
|
||||
Type: framework.TypeCommaStringSlice,
|
||||
Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.",
|
||||
},
|
||||
|
||||
"max_ttl": {
|
||||
Type: framework.TypeDurationSecond,
|
||||
Default: 0,
|
||||
Description: "If set, specifies the maximum allowed token lifetime.",
|
||||
},
|
||||
|
||||
"allow_instance_migration": {
|
||||
Type: framework.TypeBool,
|
||||
Default: false,
|
||||
Description: "If set, allows migration of the underlying instance where the client resides. This keys off of pendingTime in the metadata document, so essentially, this disables the client nonce check whenever the instance is migrated to a new host and pendingTime is newer than the previously-remembered time. Use with caution.",
|
||||
},
|
||||
|
||||
"disallow_reauthentication": {
|
||||
Type: framework.TypeBool,
|
||||
Default: false,
|
||||
Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in access list for the instance ID needs to be cleared using the 'auth/aws-ec2/identity-accesslist/<instance_id>' endpoint.",
|
||||
},
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathRoleTagUpdate,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathRoleTagSyn,
|
||||
HelpDescription: pathRoleTagDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// pathRoleTagUpdate is used to create an EC2 instance tag which will
|
||||
// identify the Vault resources that the instance will be authorized for.
|
||||
func (b *backend) pathRoleTagUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
roleName := strings.ToLower(data.Get("role").(string))
|
||||
if roleName == "" {
|
||||
return logical.ErrorResponse("missing role"), nil
|
||||
}
|
||||
|
||||
// Fetch the role entry
|
||||
roleEntry, err := b.role(ctx, req.Storage, roleName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if roleEntry == nil {
|
||||
return logical.ErrorResponse(fmt.Sprintf("entry not found for role %s", roleName)), nil
|
||||
}
|
||||
|
||||
// If RoleTag is empty, disallow creation of tag.
|
||||
if roleEntry.RoleTag == "" {
|
||||
return logical.ErrorResponse("tag creation is not enabled for this role"), nil
|
||||
}
|
||||
|
||||
// There should be a HMAC key present in the role entry
|
||||
if roleEntry.HMACKey == "" {
|
||||
// Not being able to find the HMACKey is an internal error
|
||||
return nil, fmt.Errorf("failed to find the HMAC key")
|
||||
}
|
||||
|
||||
resp := &logical.Response{}
|
||||
|
||||
// Instance ID is an optional field.
|
||||
instanceID := strings.ToLower(data.Get("instance_id").(string))
|
||||
|
||||
// If no policies field was not supplied, then the tag should inherit all the policies
|
||||
// on the role. But, it was provided, but set to empty explicitly, only "default" policy
|
||||
// should be inherited. So, by leaving the policies var unset to anything when it is not
|
||||
// supplied, we ensure that it inherits all the policies on the role.
|
||||
var policies []string
|
||||
policiesRaw, ok := data.GetOk("policies")
|
||||
if ok {
|
||||
policies = policyutil.ParsePolicies(policiesRaw)
|
||||
}
|
||||
if !strutil.StrListSubset(roleEntry.TokenPolicies, policies) {
|
||||
resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.")
|
||||
}
|
||||
|
||||
// This is an optional field.
|
||||
disallowReauthentication := data.Get("disallow_reauthentication").(bool)
|
||||
|
||||
// This is an optional field.
|
||||
allowInstanceMigration := data.Get("allow_instance_migration").(bool)
|
||||
if allowInstanceMigration && !roleEntry.AllowInstanceMigration {
|
||||
resp.AddWarning("Role does not allow instance migration. Login will not be allowed with this tag unless the role value is updated.")
|
||||
}
|
||||
|
||||
if disallowReauthentication && allowInstanceMigration {
|
||||
return logical.ErrorResponse("cannot set both disallow_reauthentication and allow_instance_migration"), nil
|
||||
}
|
||||
|
||||
// max_ttl for the role tag should be less than the max_ttl set on the role.
|
||||
maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
|
||||
|
||||
// max_ttl on the tag should not be greater than the system view's max_ttl value.
|
||||
if maxTTL > b.System().MaxLeaseTTL() {
|
||||
resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the mount maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, b.System().MaxLeaseTTL()/time.Second))
|
||||
}
|
||||
// If max_ttl is set for the role, check the bounds for tag's max_ttl value using that.
|
||||
if roleEntry.TokenMaxTTL != time.Duration(0) && maxTTL > roleEntry.TokenMaxTTL {
|
||||
resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.TokenMaxTTL/time.Second))
|
||||
}
|
||||
|
||||
if maxTTL < time.Duration(0) {
|
||||
return logical.ErrorResponse("max_ttl cannot be negative"), nil
|
||||
}
|
||||
|
||||
// Create a random nonce.
|
||||
nonce, err := createRoleTagNonce()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create a role tag out of all the information provided.
|
||||
rTagValue, err := createRoleTagValue(&roleTag{
|
||||
Version: roleTagVersion,
|
||||
Role: roleName,
|
||||
Nonce: nonce,
|
||||
Policies: policies,
|
||||
MaxTTL: maxTTL,
|
||||
InstanceID: instanceID,
|
||||
DisallowReauthentication: disallowReauthentication,
|
||||
AllowInstanceMigration: allowInstanceMigration,
|
||||
}, roleEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the key to be used for the tag and the value to be used for that tag key.
|
||||
// This key value pair should be set on the EC2 instance.
|
||||
resp.Data = map[string]interface{}{
|
||||
"tag_key": roleEntry.RoleTag,
|
||||
"tag_value": rTagValue,
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// createRoleTagValue prepares the plaintext version of the role tag,
|
||||
// and appends a HMAC of the plaintext value to it, before returning.
|
||||
func createRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (string, error) {
|
||||
if rTag == nil {
|
||||
return "", fmt.Errorf("nil role tag")
|
||||
}
|
||||
|
||||
if roleEntry == nil {
|
||||
return "", fmt.Errorf("nil role entry")
|
||||
}
|
||||
|
||||
// Attach version, nonce, policies and maxTTL to the role tag value.
|
||||
rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Attach HMAC to tag's plaintext and return.
|
||||
return appendHMAC(rTagPlaintext, roleEntry)
|
||||
}
|
||||
|
||||
// Takes in the plaintext part of the role tag, creates a HMAC of it and returns
|
||||
// a role tag value containing both the plaintext part and the HMAC part.
|
||||
func appendHMAC(rTagPlaintext string, roleEntry *awsRoleEntry) (string, error) {
|
||||
if rTagPlaintext == "" {
|
||||
return "", fmt.Errorf("empty role tag plaintext string")
|
||||
}
|
||||
|
||||
if roleEntry == nil {
|
||||
return "", fmt.Errorf("nil role entry")
|
||||
}
|
||||
|
||||
// Create the HMAC of the value
|
||||
hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// attach the HMAC to the value
|
||||
rTagValue := fmt.Sprintf("%s:%s", rTagPlaintext, hmacB64)
|
||||
|
||||
// This limit of 255 is enforced on the EC2 instance. Hence complying to that here.
|
||||
if len(rTagValue) > 255 {
|
||||
return "", fmt.Errorf("role tag 'value' exceeding the limit of 255 characters")
|
||||
}
|
||||
|
||||
return rTagValue, nil
|
||||
}
|
||||
|
||||
// verifyRoleTagValue rebuilds the role tag's plaintext part, computes the HMAC
|
||||
// from it using the role specific HMAC key and compares it with the received HMAC.
|
||||
func verifyRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (bool, error) {
|
||||
if rTag == nil {
|
||||
return false, fmt.Errorf("nil role tag")
|
||||
}
|
||||
|
||||
if roleEntry == nil {
|
||||
return false, fmt.Errorf("nil role entry")
|
||||
}
|
||||
|
||||
// Fetch the plaintext part of role tag
|
||||
rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Compute the HMAC of the plaintext
|
||||
hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return subtle.ConstantTimeCompare([]byte(rTag.HMAC), []byte(hmacB64)) == 1, nil
|
||||
}
|
||||
|
||||
// prepareRoleTagPlaintextValue builds the role tag value without the HMAC in it.
|
||||
func prepareRoleTagPlaintextValue(rTag *roleTag) (string, error) {
|
||||
if rTag == nil {
|
||||
return "", fmt.Errorf("nil role tag")
|
||||
}
|
||||
if rTag.Version == "" {
|
||||
return "", fmt.Errorf("missing version")
|
||||
}
|
||||
if rTag.Nonce == "" {
|
||||
return "", fmt.Errorf("missing nonce")
|
||||
}
|
||||
if rTag.Role == "" {
|
||||
return "", fmt.Errorf("missing role")
|
||||
}
|
||||
|
||||
// Attach Version, Nonce, Role, DisallowReauthentication and AllowInstanceMigration
|
||||
// fields to the role tag.
|
||||
value := fmt.Sprintf("%s:%s:r=%s:d=%s:m=%s", rTag.Version, rTag.Nonce, rTag.Role, strconv.FormatBool(rTag.DisallowReauthentication), strconv.FormatBool(rTag.AllowInstanceMigration))
|
||||
|
||||
// Attach the policies only if they are specified.
|
||||
if len(rTag.Policies) != 0 {
|
||||
value = fmt.Sprintf("%s:p=%s", value, strings.Join(rTag.Policies, ","))
|
||||
}
|
||||
|
||||
// Attach instance_id if set.
|
||||
if rTag.InstanceID != "" {
|
||||
value = fmt.Sprintf("%s:i=%s", value, rTag.InstanceID)
|
||||
}
|
||||
|
||||
// Attach max_ttl if it is provided.
|
||||
if int(rTag.MaxTTL.Seconds()) > 0 {
|
||||
value = fmt.Sprintf("%s:t=%d", value, int(rTag.MaxTTL.Seconds()))
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Parses the tag from string form into a struct form. This method
|
||||
// also verifies the correctness of the parsed role tag.
|
||||
func (b *backend) parseAndVerifyRoleTagValue(ctx context.Context, s logical.Storage, tag string) (*roleTag, error) {
|
||||
tagItems := strings.Split(tag, ":")
|
||||
|
||||
// Tag must contain version, nonce, policies and HMAC
|
||||
if len(tagItems) < 4 {
|
||||
return nil, fmt.Errorf("invalid tag")
|
||||
}
|
||||
|
||||
rTag := &roleTag{}
|
||||
|
||||
// Cache the HMAC value. The last item in the collection.
|
||||
rTag.HMAC = tagItems[len(tagItems)-1]
|
||||
|
||||
// Remove the HMAC from the list.
|
||||
tagItems = tagItems[:len(tagItems)-1]
|
||||
|
||||
// Version will be the first element.
|
||||
rTag.Version = tagItems[0]
|
||||
if rTag.Version != roleTagVersion {
|
||||
return nil, fmt.Errorf("invalid role tag version")
|
||||
}
|
||||
|
||||
// Nonce will be the second element.
|
||||
rTag.Nonce = tagItems[1]
|
||||
|
||||
// Delete the version and nonce from the list.
|
||||
tagItems = tagItems[2:]
|
||||
|
||||
for _, tagItem := range tagItems {
|
||||
var err error
|
||||
switch {
|
||||
case strings.HasPrefix(tagItem, "i="):
|
||||
rTag.InstanceID = strings.TrimPrefix(tagItem, "i=")
|
||||
case strings.HasPrefix(tagItem, "r="):
|
||||
rTag.Role = strings.TrimPrefix(tagItem, "r=")
|
||||
case strings.HasPrefix(tagItem, "p="):
|
||||
rTag.Policies = strings.Split(strings.TrimPrefix(tagItem, "p="), ",")
|
||||
case strings.HasPrefix(tagItem, "d="):
|
||||
rTag.DisallowReauthentication, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "d="))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case strings.HasPrefix(tagItem, "m="):
|
||||
rTag.AllowInstanceMigration, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "m="))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case strings.HasPrefix(tagItem, "t="):
|
||||
rTag.MaxTTL, err = parseutil.ParseDurationSecond(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t=")))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized item %q in tag", tagItem)
|
||||
}
|
||||
}
|
||||
|
||||
if rTag.Role == "" {
|
||||
return nil, fmt.Errorf("missing role name")
|
||||
}
|
||||
|
||||
roleEntry, err := b.role(ctx, s, rTag.Role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if roleEntry == nil {
|
||||
return nil, fmt.Errorf("entry not found for %q", rTag.Role)
|
||||
}
|
||||
|
||||
// Create a HMAC of the plaintext value of role tag and compare it with the given value.
|
||||
verified, err := verifyRoleTagValue(rTag, roleEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !verified {
|
||||
return nil, fmt.Errorf("role tag signature verification failed")
|
||||
}
|
||||
|
||||
return rTag, nil
|
||||
}
|
||||
|
||||
// Creates base64 encoded HMAC using a per-role key.
|
||||
func createRoleTagHMACBase64(key, value string) (string, error) {
|
||||
if key == "" {
|
||||
return "", fmt.Errorf("invalid HMAC key")
|
||||
}
|
||||
hm := hmac.New(sha256.New, []byte(key))
|
||||
hm.Write([]byte(value))
|
||||
|
||||
// base64 encode the hmac bytes.
|
||||
return base64.StdEncoding.EncodeToString(hm.Sum(nil)), nil
|
||||
}
|
||||
|
||||
// Creates a base64 encoded random nonce.
|
||||
func createRoleTagNonce() (string, error) {
|
||||
if uuidBytes, err := uuid.GenerateRandomBytes(8); err != nil {
|
||||
return "", err
|
||||
} else {
|
||||
return base64.StdEncoding.EncodeToString(uuidBytes), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Struct roleTag represents a role tag in a struct form.
|
||||
type roleTag struct {
|
||||
Version string `json:"version"`
|
||||
InstanceID string `json:"instance_id"`
|
||||
Nonce string `json:"nonce"`
|
||||
Policies []string `json:"policies"`
|
||||
MaxTTL time.Duration `json:"max_ttl"`
|
||||
Role string `json:"role"`
|
||||
HMAC string `json:"hmac"`
|
||||
DisallowReauthentication bool `json:"disallow_reauthentication"`
|
||||
AllowInstanceMigration bool `json:"allow_instance_migration"`
|
||||
}
|
||||
|
||||
func (rTag1 *roleTag) Equal(rTag2 *roleTag) bool {
|
||||
return rTag1 != nil &&
|
||||
rTag2 != nil &&
|
||||
rTag1.Version == rTag2.Version &&
|
||||
rTag1.Nonce == rTag2.Nonce &&
|
||||
policyutil.EquivalentPolicies(rTag1.Policies, rTag2.Policies) &&
|
||||
rTag1.MaxTTL == rTag2.MaxTTL &&
|
||||
rTag1.Role == rTag2.Role &&
|
||||
rTag1.HMAC == rTag2.HMAC &&
|
||||
rTag1.InstanceID == rTag2.InstanceID &&
|
||||
rTag1.DisallowReauthentication == rTag2.DisallowReauthentication &&
|
||||
rTag1.AllowInstanceMigration == rTag2.AllowInstanceMigration
|
||||
}
|
||||
|
||||
const pathRoleTagSyn = `
|
||||
Create a tag on a role in order to be able to further restrict the capabilities of a role.
|
||||
`
|
||||
|
||||
const pathRoleTagDesc = `
|
||||
If there are needs to apply only a subset of role's capabilities to any specific
|
||||
instance, create a role tag using this endpoint and attach the tag on the instance
|
||||
before performing login.
|
||||
|
||||
To be able to create a role tag, the 'role_tag' option on the role should be
|
||||
enabled via the endpoint 'role/<role>'. Also, the policies to be associated
|
||||
with the tag should be a subset of the policies associated with the registered role.
|
||||
|
||||
This endpoint will return both the 'key' and the 'value' of the tag to be set
|
||||
on the EC2 instance.
|
||||
`
|
File diff suppressed because it is too large
Load Diff
@ -1,274 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func (b *backend) pathRoletagDenyList() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "roletag-denylist/(?P<role_tag>.*)",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "role-tag-deny-list",
|
||||
},
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"role_tag": {
|
||||
Type: framework.TypeString,
|
||||
Description: `Role tag to be deny listed. The tag can be supplied as-is. In order
|
||||
to avoid any encoding problems, it can be base64 encoded.`,
|
||||
},
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathRoletagDenyListUpdate,
|
||||
},
|
||||
logical.ReadOperation: &framework.PathOperation{
|
||||
Callback: b.pathRoletagDenyListRead,
|
||||
},
|
||||
logical.DeleteOperation: &framework.PathOperation{
|
||||
Callback: b.pathRoletagDenyListDelete,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathRoletagBlacklistSyn,
|
||||
HelpDescription: pathRoletagBlacklistDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// Path to list all the deny listed tags.
|
||||
func (b *backend) pathListRoletagDenyList() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "roletag-denylist/?",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "role-tag-deny-lists",
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.ListOperation: &framework.PathOperation{
|
||||
Callback: b.pathRoletagDenyListsList,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathListRoletagDenyListHelpSyn,
|
||||
HelpDescription: pathListRoletagDenyListHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// Lists all the deny listed role tags.
|
||||
func (b *backend) pathRoletagDenyListsList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.denyListMutex.RLock()
|
||||
defer b.denyListMutex.RUnlock()
|
||||
|
||||
tags, err := req.Storage.List(ctx, denyListRoletagStorage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Tags are base64 encoded before indexing to avoid problems
|
||||
// with the path separators being present in the tag.
|
||||
// Reverse it before returning the list response.
|
||||
for i, keyB64 := range tags {
|
||||
if key, err := base64.StdEncoding.DecodeString(keyB64); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
// Overwrite the result with the decoded string.
|
||||
tags[i] = string(key)
|
||||
}
|
||||
}
|
||||
return logical.ListResponse(tags), nil
|
||||
}
|
||||
|
||||
// Fetch an entry from the role tag deny list for a given tag.
|
||||
// This method takes a role tag in its original form and not a base64 encoded form.
|
||||
func (b *backend) lockedDenyLististRoleTagEntry(ctx context.Context, s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
|
||||
b.denyListMutex.RLock()
|
||||
defer b.denyListMutex.RUnlock()
|
||||
|
||||
return b.nonLockedDenyListRoleTagEntry(ctx, s, tag)
|
||||
}
|
||||
|
||||
func (b *backend) nonLockedDenyListRoleTagEntry(ctx context.Context, s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
|
||||
entry, err := s.Get(ctx, denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result roleTagBlacklistEntry
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// Deletes an entry from the role tag deny list for a given tag.
|
||||
func (b *backend) pathRoletagDenyListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.denyListMutex.Lock()
|
||||
defer b.denyListMutex.Unlock()
|
||||
|
||||
tag := data.Get("role_tag").(string)
|
||||
if tag == "" {
|
||||
return logical.ErrorResponse("missing role_tag"), nil
|
||||
}
|
||||
|
||||
return nil, req.Storage.Delete(ctx, denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag)))
|
||||
}
|
||||
|
||||
// If the given role tag is deny listed, returns the details of the deny list entry.
|
||||
// Returns 'nil' otherwise.
|
||||
func (b *backend) pathRoletagDenyListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
tag := data.Get("role_tag").(string)
|
||||
if tag == "" {
|
||||
return logical.ErrorResponse("missing role_tag"), nil
|
||||
}
|
||||
|
||||
entry, err := b.lockedDenyLististRoleTagEntry(ctx, req.Storage, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"creation_time": entry.CreationTime.Format(time.RFC3339Nano),
|
||||
"expiration_time": entry.ExpirationTime.Format(time.RFC3339Nano),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// pathRoletagDenyListUpdate is used to deny list a given role tag.
|
||||
// Before a role tag is added to the deny list, the correctness of the plaintext part
|
||||
// in the role tag is verified using the associated HMAC.
|
||||
func (b *backend) pathRoletagDenyListUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// The role_tag value provided, optionally can be base64 encoded.
|
||||
tagInput := data.Get("role_tag").(string)
|
||||
if tagInput == "" {
|
||||
return logical.ErrorResponse("missing role_tag"), nil
|
||||
}
|
||||
|
||||
tag := ""
|
||||
|
||||
// Try to base64 decode the value.
|
||||
tagBytes, err := base64.StdEncoding.DecodeString(tagInput)
|
||||
if err != nil {
|
||||
// If the decoding failed, use the value as-is.
|
||||
tag = tagInput
|
||||
} else {
|
||||
// If the decoding succeeded, use the decoded value.
|
||||
tag = string(tagBytes)
|
||||
}
|
||||
|
||||
// Parse and verify the role tag from string form to a struct form and verify it.
|
||||
rTag, err := b.parseAndVerifyRoleTagValue(ctx, req.Storage, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rTag == nil {
|
||||
return logical.ErrorResponse("failed to verify the role tag and parse it"), nil
|
||||
}
|
||||
|
||||
// Get the entry for the role mentioned in the role tag.
|
||||
roleEntry, err := b.role(ctx, req.Storage, rTag.Role)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if roleEntry == nil {
|
||||
return logical.ErrorResponse("role entry not found"), nil
|
||||
}
|
||||
|
||||
b.denyListMutex.Lock()
|
||||
defer b.denyListMutex.Unlock()
|
||||
|
||||
// Check if the role tag is already deny listed. If yes, update it.
|
||||
blEntry, err := b.nonLockedDenyListRoleTagEntry(ctx, req.Storage, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if blEntry == nil {
|
||||
blEntry = &roleTagBlacklistEntry{}
|
||||
}
|
||||
|
||||
currentTime := time.Now()
|
||||
|
||||
// Check if this is a creation of deny list entry.
|
||||
if blEntry.CreationTime.IsZero() {
|
||||
// Set the creation time for the deny list entry.
|
||||
// This should not be updated after setting it once.
|
||||
// If deny list operation is invoked more than once, only update the expiration time.
|
||||
blEntry.CreationTime = currentTime
|
||||
}
|
||||
|
||||
// Decide the expiration time based on the max_ttl values. Since this is
|
||||
// restricting access, use the greatest duration, not the least.
|
||||
maxDur := rTag.MaxTTL
|
||||
if roleEntry.TokenMaxTTL > maxDur {
|
||||
maxDur = roleEntry.TokenMaxTTL
|
||||
}
|
||||
if b.System().MaxLeaseTTL() > maxDur {
|
||||
maxDur = b.System().MaxLeaseTTL()
|
||||
}
|
||||
|
||||
blEntry.ExpirationTime = currentTime.Add(maxDur)
|
||||
|
||||
entry, err := logical.StorageEntryJSON(denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag)), blEntry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Store the deny list entry.
|
||||
if err := req.Storage.Put(ctx, entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type roleTagBlacklistEntry struct {
|
||||
CreationTime time.Time `json:"creation_time"`
|
||||
ExpirationTime time.Time `json:"expiration_time"`
|
||||
}
|
||||
|
||||
const pathRoletagBlacklistSyn = `
|
||||
Blacklist a previously created role tag.
|
||||
`
|
||||
|
||||
const pathRoletagBlacklistDesc = `
|
||||
Add a role tag to the deny list so that it cannot be used by any EC2 instance to perform further
|
||||
logins. This can be used if the role tag is suspected or believed to be possessed by
|
||||
an unintended party.
|
||||
|
||||
By default, a cron task will periodically look for expired entries in the deny list
|
||||
and deletes them. The duration to periodically run this, is one hour by default.
|
||||
However, this can be configured using the 'config/tidy/roletags' endpoint. This tidy
|
||||
action can be triggered via the API as well, using the 'tidy/roletags' endpoint.
|
||||
|
||||
Also note that delete operation is supported on this endpoint to remove specific
|
||||
entries from the deny list.
|
||||
`
|
||||
|
||||
const pathListRoletagDenyListHelpSyn = `
|
||||
Lists the deny list role tags.
|
||||
`
|
||||
|
||||
const pathListRoletagDenyListHelpDesc = `
|
||||
Lists all the entries present in the deny list. This will show both the valid
|
||||
entries and the expired entries in the deny list. Use 'tidy/roletags' endpoint
|
||||
to clean-up the deny list of role tags based on expiration time.
|
||||
`
|
@ -1,136 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func (b *backend) pathTidyIdentityAccessList() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "tidy/identity-accesslist$",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "identity-access-list",
|
||||
OperationVerb: "tidy",
|
||||
},
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"safety_buffer": {
|
||||
Type: framework.TypeDurationSecond,
|
||||
Default: 259200,
|
||||
Description: `The amount of extra time that must have passed beyond the identity's
|
||||
expiration, before it is removed from the backend storage.`,
|
||||
},
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathTidyIdentityAccessListUpdate,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathTidyIdentityAccessListSyn,
|
||||
HelpDescription: pathTidyIdentityAccessListDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// tidyAccessListIdentity is used to delete entries in the access list that are expired.
|
||||
func (b *backend) tidyAccessListIdentity(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) {
|
||||
// If we are a performance standby forward the request to the active node
|
||||
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
|
||||
return nil, logical.ErrReadOnly
|
||||
}
|
||||
|
||||
if !atomic.CompareAndSwapUint32(b.tidyAccessListCASGuard, 0, 1) {
|
||||
resp := &logical.Response{}
|
||||
resp.AddWarning("Tidy operation already in progress.")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
s := req.Storage
|
||||
|
||||
go func() {
|
||||
defer atomic.StoreUint32(b.tidyAccessListCASGuard, 0)
|
||||
|
||||
// Don't cancel when the original client request goes away
|
||||
ctx = context.Background()
|
||||
|
||||
logger := b.Logger().Named("wltidy")
|
||||
|
||||
bufferDuration := time.Duration(safetyBuffer) * time.Second
|
||||
|
||||
doTidy := func() error {
|
||||
identities, err := s.List(ctx, identityAccessListStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instanceID := range identities {
|
||||
identityEntry, err := s.Get(ctx, identityAccessListStorage+instanceID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error fetching identity of instanceID %q: %w", instanceID, err)
|
||||
}
|
||||
|
||||
if identityEntry == nil {
|
||||
return fmt.Errorf("identity entry for instanceID %q is nil", instanceID)
|
||||
}
|
||||
|
||||
if identityEntry.Value == nil || len(identityEntry.Value) == 0 {
|
||||
return fmt.Errorf("found identity entry for instanceID %q but actual identity is empty", instanceID)
|
||||
}
|
||||
|
||||
var result accessListIdentity
|
||||
if err := identityEntry.DecodeJSON(&result); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
|
||||
if err := s.Delete(ctx, identityAccessListStorage+instanceID); err != nil {
|
||||
return fmt.Errorf("error deleting identity of instanceID %q from storage: %w", instanceID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := doTidy(); err != nil {
|
||||
logger.Error("error running access list tidy", "error", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
resp := &logical.Response{}
|
||||
resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.")
|
||||
return logical.RespondWithStatusCode(resp, req, http.StatusAccepted)
|
||||
}
|
||||
|
||||
// pathTidyIdentityAccessListUpdate is used to delete entries in the access list that are expired.
|
||||
func (b *backend) pathTidyIdentityAccessListUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
return b.tidyAccessListIdentity(ctx, req, data.Get("safety_buffer").(int))
|
||||
}
|
||||
|
||||
const pathTidyIdentityAccessListSyn = `
|
||||
Clean-up the access list instance identity entries.
|
||||
`
|
||||
|
||||
const pathTidyIdentityAccessListDesc = `
|
||||
When an instance identity is in the access list, the expiration time of the access list
|
||||
entry is set based on the maximum 'max_ttl' value set on: the role, the role tag
|
||||
and the backend's mount.
|
||||
|
||||
When this endpoint is invoked, all the entries that are expired will be deleted.
|
||||
A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of
|
||||
only those entries that are expired before 'safety_buffer' seconds.
|
||||
`
|
@ -1,140 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package awsauth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
const (
|
||||
denyListRoletagStorage = "blacklist/roletag/"
|
||||
)
|
||||
|
||||
func (b *backend) pathTidyRoletagDenyList() *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "tidy/roletag-denylist$",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "role-tag-deny-list",
|
||||
OperationVerb: "tidy",
|
||||
},
|
||||
|
||||
Fields: map[string]*framework.FieldSchema{
|
||||
"safety_buffer": {
|
||||
Type: framework.TypeDurationSecond,
|
||||
Default: 259200, // 72h
|
||||
Description: `The amount of extra time that must have passed beyond the roletag
|
||||
expiration, before it is removed from the backend storage.`,
|
||||
},
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathTidyRoletagDenylistUpdate,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathTidyRoletagDenylistSyn,
|
||||
HelpDescription: pathTidyRoletagDenylistDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// tidyDenyListRoleTag is used to clean-up the entries in the role tag deny list.
|
||||
func (b *backend) tidyDenyListRoleTag(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) {
|
||||
// If we are a performance standby forward the request to the active node
|
||||
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
|
||||
return nil, logical.ErrReadOnly
|
||||
}
|
||||
|
||||
if !atomic.CompareAndSwapUint32(b.tidyDenyListCASGuard, 0, 1) {
|
||||
resp := &logical.Response{}
|
||||
resp.AddWarning("Tidy operation already in progress.")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
s := req.Storage
|
||||
|
||||
go func() {
|
||||
defer atomic.StoreUint32(b.tidyDenyListCASGuard, 0)
|
||||
|
||||
// Don't cancel when the original client request goes away
|
||||
ctx = context.Background()
|
||||
|
||||
logger := b.Logger().Named("bltidy")
|
||||
|
||||
bufferDuration := time.Duration(safetyBuffer) * time.Second
|
||||
|
||||
doTidy := func() error {
|
||||
tags, err := s.List(ctx, denyListRoletagStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, tag := range tags {
|
||||
tagEntry, err := s.Get(ctx, denyListRoletagStorage+tag)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error fetching tag %q: %w", tag, err)
|
||||
}
|
||||
|
||||
if tagEntry == nil {
|
||||
return fmt.Errorf("tag entry for tag %q is nil", tag)
|
||||
}
|
||||
|
||||
if tagEntry.Value == nil || len(tagEntry.Value) == 0 {
|
||||
return fmt.Errorf("found entry for tag %q but actual tag is empty", tag)
|
||||
}
|
||||
|
||||
var result roleTagBlacklistEntry
|
||||
if err := tagEntry.DecodeJSON(&result); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if time.Now().After(result.ExpirationTime.Add(bufferDuration)) {
|
||||
if err := s.Delete(ctx, denyListRoletagStorage+tag); err != nil {
|
||||
return fmt.Errorf("error deleting tag %q from storage: %w", tag, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := doTidy(); err != nil {
|
||||
logger.Error("error running deny list tidy", "error", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
resp := &logical.Response{}
|
||||
resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.")
|
||||
return logical.RespondWithStatusCode(resp, req, http.StatusAccepted)
|
||||
}
|
||||
|
||||
// pathTidyRoletagDenylistUpdate is used to clean-up the entries in the role tag deny list.
|
||||
func (b *backend) pathTidyRoletagDenylistUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
return b.tidyDenyListRoleTag(ctx, req, data.Get("safety_buffer").(int))
|
||||
}
|
||||
|
||||
const pathTidyRoletagDenylistSyn = `
|
||||
Clean-up the deny list role tag entries.
|
||||
`
|
||||
|
||||
const pathTidyRoletagDenylistDesc = `
|
||||
When a role tag is deny listed, the expiration time of the deny list entry is
|
||||
set based on the maximum 'max_ttl' value set on: the role, the role tag and the
|
||||
backend's mount.
|
||||
|
||||
When this endpoint is invoked, all the entries that are expired will be deleted.
|
||||
A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of
|
||||
only those entries that are expired before 'safety_buffer' seconds.
|
||||
`
|
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Andrew Smith
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@ -1,5 +0,0 @@
|
||||
# PKCS7
|
||||
|
||||
This code is used to verify PKCS7 signatures for the EC2 auth method. The code
|
||||
was forked from [mozilla-services/pkcs7](https://github.com/mozilla-services/pkcs7)
|
||||
and modified for Vault.
|
@ -1,271 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// encodeIndent tracks recursion depth for the commented-out debug
// prints in the EncodeTo methods below.
var encodeIndent = 0

// asn1Object is one node of a parsed BER structure that knows how to
// serialize itself in DER form.
type asn1Object interface {
	EncodeTo(writer *bytes.Buffer) error
}

// asn1Structured is a constructed (compound) ASN.1 value: a tag
// followed by a sequence of child objects.
type asn1Structured struct {
	tagBytes []byte
	content  []asn1Object
}
|
||||
|
||||
func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
|
||||
// fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
|
||||
encodeIndent++
|
||||
inner := new(bytes.Buffer)
|
||||
for _, obj := range s.content {
|
||||
err := obj.EncodeTo(inner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
encodeIndent--
|
||||
out.Write(s.tagBytes)
|
||||
encodeLength(out, inner.Len())
|
||||
out.Write(inner.Bytes())
|
||||
return nil
|
||||
}
|
||||
|
||||
// asn1Primitive is a primitive ASN.1 value: a tag, a declared length,
// and the raw content bytes.
type asn1Primitive struct {
	tagBytes []byte
	length   int
	content  []byte
}
|
||||
|
||||
func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
|
||||
_, err := out.Write(p.tagBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = encodeLength(out, p.length); err != nil {
|
||||
return err
|
||||
}
|
||||
// fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
|
||||
// fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
|
||||
out.Write(p.content)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ber2der(ber []byte) ([]byte, error) {
|
||||
if len(ber) == 0 {
|
||||
return nil, errors.New("ber2der: input ber is empty")
|
||||
}
|
||||
// fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
|
||||
out := new(bytes.Buffer)
|
||||
|
||||
obj, _, err := readObject(ber, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj.EncodeTo(out)
|
||||
|
||||
// if offset < len(ber) {
|
||||
// return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
|
||||
//}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
// encodes lengths that are longer than 127 into string of bytes
|
||||
func marshalLongLength(out *bytes.Buffer, i int) (err error) {
|
||||
n := lengthLength(i)
|
||||
|
||||
for ; n > 0; n-- {
|
||||
err = out.WriteByte(byte(i >> uint((n-1)*8)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// lengthLength reports how many bytes the big-endian encoding of i
// occupies (always at least one byte, even for zero).
func lengthLength(i int) int {
	n := 1
	for i > 255 {
		i >>= 8
		n++
	}
	return n
}
|
||||
|
||||
// encodes the length in DER format
|
||||
// If the length fits in 7 bits, the value is encoded directly.
|
||||
//
|
||||
// Otherwise, the number of bytes to encode the length is first determined.
|
||||
// This number is likely to be 4 or less for a 32bit length. This number is
|
||||
// added to 0x80. The length is encoded in big endian encoding follow after
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// length | byte 1 | bytes n
|
||||
// 0 | 0x00 | -
|
||||
// 120 | 0x78 | -
|
||||
// 200 | 0x81 | 0xC8
|
||||
// 500 | 0x82 | 0x01 0xF4
|
||||
func encodeLength(out *bytes.Buffer, length int) (err error) {
|
||||
if length >= 128 {
|
||||
l := lengthLength(length)
|
||||
err = out.WriteByte(0x80 | byte(l))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = marshalLongLength(out, length)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = out.WriteByte(byte(length))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// readObject parses a single BER object (primitive or constructed)
// starting at offset and returns it together with the offset just past
// its content. Constructed objects are parsed recursively; indefinite
// length encodings (length byte 0x80) are consumed up to their
// 0x00 0x00 terminator.
func readObject(ber []byte, offset int) (asn1Object, int, error) {
	berLen := len(ber)
	if offset >= berLen {
		return nil, 0, errors.New("ber2der: offset is after end of ber data")
	}
	tagStart := offset
	b := ber[offset]
	offset++
	if offset >= berLen {
		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
	}
	tag := b & 0x1F // last 5 bits
	if tag == 0x1F {
		// High-tag-number form: tag continues in base-128 bytes with the
		// top bit set on all but the last byte.
		tag = 0
		for ber[offset] >= 0x80 {
			tag = tag*128 + ber[offset] - 0x80
			offset++
			if offset >= berLen {
				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
			}
		}
		// jvehent 20170227: this doesn't appear to be used anywhere...
		// tag = tag*128 + ber[offset] - 0x80
		offset++
		if offset >= berLen {
			return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
		}
	}
	tagEnd := offset

	// Bit 6 of the identifier octet distinguishes primitive (0) from
	// constructed (non-zero) encodings.
	kind := b & 0x20
	if kind == 0 {
		debugprint("--> Primitive\n")
	} else {
		debugprint("--> Constructed\n")
	}
	// read length
	var length int
	l := ber[offset]
	offset++
	if offset >= berLen {
		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
	}
	indefinite := false
	if l > 0x80 {
		// Long form: low 7 bits give the number of length bytes.
		numberOfBytes := (int)(l & 0x7F)
		if numberOfBytes > 4 { // int is only guaranteed to be 32bit
			return nil, 0, errors.New("ber2der: BER tag length too long")
		}
		if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
			return nil, 0, errors.New("ber2der: BER tag length is negative")
		}
		if (int)(ber[offset]) == 0x0 {
			return nil, 0, errors.New("ber2der: BER tag length has leading zero")
		}
		debugprint("--> (compute length) indicator byte: %x\n", l)
		debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
		for i := 0; i < numberOfBytes; i++ {
			length = length*256 + (int)(ber[offset])
			offset++
			if offset >= berLen {
				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
			}
		}
	} else if l == 0x80 {
		// Indefinite form: content runs until a 0x00 0x00 terminator.
		indefinite = true
	} else {
		// Short form: the byte is the length itself.
		length = (int)(l)
	}
	if length < 0 {
		return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length")
	}
	// fmt.Printf("--> length        : %d\n", length)
	contentEnd := offset + length
	if contentEnd > len(ber) {
		return nil, 0, errors.New("ber2der: BER tag length is more than available data")
	}
	debugprint("--> content start : %d\n", offset)
	debugprint("--> content end   : %d\n", contentEnd)
	debugprint("--> content       : % X\n", ber[offset:contentEnd])
	var obj asn1Object
	if indefinite && kind == 0 {
		return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
	}
	if kind == 0 {
		obj = asn1Primitive{
			tagBytes: ber[tagStart:tagEnd],
			length:   length,
			content:  ber[offset:contentEnd],
		}
	} else {
		// Constructed: recursively parse children until the declared end
		// (definite form) or the end-of-contents marker (indefinite form).
		var subObjects []asn1Object
		for (offset < contentEnd) || indefinite {
			var subObj asn1Object
			var err error
			subObj, offset, err = readObject(ber, offset)
			if err != nil {
				return nil, 0, err
			}
			subObjects = append(subObjects, subObj)

			if indefinite {
				terminated, err := isIndefiniteTermination(ber, offset)
				if err != nil {
					return nil, 0, err
				}

				if terminated {
					break
				}
			}
		}
		obj = asn1Structured{
			tagBytes: ber[tagStart:tagEnd],
			content:  subObjects,
		}
	}

	// Apply indefinite form length with 0x0000 terminator.
	if indefinite {
		contentEnd = offset + 2
	}

	return obj, contentEnd, nil
}
|
||||
|
||||
// isIndefiniteTermination reports whether the two end-of-contents
// octets (0x00 0x00) of an indefinite-length encoding begin at offset.
func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
	if len(ber)-offset < 2 {
		return false, errors.New("ber2der: Invalid BER format")
	}

	return bytes.HasPrefix(ber[offset:], []byte{0x00, 0x00}), nil
}
|
||||
|
||||
// debugprint is a no-op tracing hook for the BER parser; uncomment the
// body to get verbose output during development.
func debugprint(format string, a ...interface{}) {
	// fmt.Printf(format, a)
}
|
@ -1,151 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBer2Der checks that an indefinite-length BER SEQUENCE is rewritten
// into definite-length DER, that the conversion is idempotent on DER
// input, and that the output parses cleanly with encoding/asn1.
func TestBer2Der(t *testing.T) {
	// indefinite length fixture
	ber := []byte{0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00}
	expected := []byte{0x30, 0x03, 0x02, 0x01, 0x01}
	der, err := ber2der(ber)
	if err != nil {
		t.Fatalf("ber2der failed with error: %v", err)
	}
	if !bytes.Equal(der, expected) {
		t.Errorf("ber2der result did not match.\n\tExpected: % X\n\tActual: % X", expected, der)
	}

	// Converting already-canonical DER must be a no-op (idempotence).
	if der2, err := ber2der(der); err != nil {
		t.Errorf("ber2der on DER bytes failed with error: %v", err)
	} else {
		if !bytes.Equal(der, der2) {
			t.Error("ber2der is not idempotent")
		}
	}
	var thing struct {
		Number int
	}
	// The produced DER must be fully consumable by the stdlib parser.
	rest, err := asn1.Unmarshal(der, &thing)
	if err != nil {
		t.Errorf("Cannot parse resulting DER because: %v", err)
	} else if len(rest) > 0 {
		t.Errorf("Resulting DER has trailing data: % X", rest)
	}
}
|
||||
|
||||
func TestBer2Der_Negatives(t *testing.T) {
|
||||
fixtures := []struct {
|
||||
Input []byte
|
||||
ErrorContains string
|
||||
}{
|
||||
{[]byte{0x30, 0x85}, "end of ber data reached"},
|
||||
{[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, "length is negative"},
|
||||
{[]byte{0x30, 0x82, 0x0, 0x1}, "length has leading zero"},
|
||||
{[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, "Invalid BER format"},
|
||||
{[]byte{0x30, 0x80, 0x1, 0x2}, "end of ber data reached"},
|
||||
{[]byte{0x30, 0x03, 0x01, 0x02}, "length is more than available data"},
|
||||
{[]byte{0x30}, "end of ber data reached"},
|
||||
{[]byte("?0"), "end of ber data reached"},
|
||||
}
|
||||
|
||||
for _, fixture := range fixtures {
|
||||
_, err := ber2der(fixture.Input)
|
||||
if err == nil {
|
||||
t.Errorf("No error thrown. Expected: %s", fixture.ErrorContains)
|
||||
}
|
||||
if !strings.Contains(err.Error(), fixture.ErrorContains) {
|
||||
t.Errorf("Unexpected error thrown.\n\tExpected: /%s/\n\tActual: %s", fixture.ErrorContains, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestVerifyIndefiniteLengthBer checks that a real-world PKCS7 payload
// that uses indefinite-length encodings is accepted by ber2der.
func TestVerifyIndefiniteLengthBer(t *testing.T) {
	decoded := mustDecodePEM([]byte(testPKCS7))

	_, err := ber2der(decoded)
	if err != nil {
		t.Errorf("cannot parse indefinite length ber: %v", err)
	}
}
|
||||
|
||||
func mustDecodePEM(data []byte) []byte {
|
||||
var block *pem.Block
|
||||
block, rest := pem.Decode(data)
|
||||
if len(rest) != 0 {
|
||||
panic(fmt.Errorf("unexpected remaining PEM block during decode"))
|
||||
}
|
||||
return block.Bytes
|
||||
}
|
||||
|
||||
const testPKCS7 = `
|
||||
-----BEGIN PKCS7-----
|
||||
MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0B
|
||||
BwGggCSABIIDfXsiQWdlbnRBY3Rpb25PdmVycmlkZXMiOnsiQWdlbnRPdmVycmlk
|
||||
ZXMiOnsiRmlsZUV4aXN0c0JlaGF2aW9yIjoiT1ZFUldSSVRFIn19LCJBcHBsaWNh
|
||||
dGlvbklkIjoiZTA0NDIzZTQtN2E2Ny00ZjljLWIyOTEtOTllNjNjMWMyMTU4Iiwi
|
||||
QXBwbGljYXRpb25OYW1lIjoibWthbmlhLXhyZF9zYW0uY2R3c19lY2hvc2VydmVy
|
||||
IiwiRGVwbG95bWVudENyZWF0b3IiOiJ1c2VyIiwiRGVwbG95bWVudEdyb3VwSWQi
|
||||
OiJmYWI5MjEwZi1mNmM3LTQyODUtYWEyZC03Mzc2MGQ4ODE3NmEiLCJEZXBsb3lt
|
||||
ZW50R3JvdXBOYW1lIjoibWthbmlhLXhyZF9zYW0uY2R3c19lY2hvc2VydmVyX2Rn
|
||||
IiwiRGVwbG95bWVudElkIjoiZC1UREUxVTNXREEiLCJEZXBsb3ltZW50VHlwZSI6
|
||||
IklOX1BMQUNFIiwiR2l0SHViQWNjZXNzVG9rZW4iOm51bGwsIkluc3RhbmNlR3Jv
|
||||
dXBJZCI6ImZhYjkyMTBmLWY2YzctNDI4NS1hYTJkLTczNzYwZDg4MTc2YSIsIlJl
|
||||
dmlzaW9uIjp7IkFwcFNwZWNDb250ZW50IjpudWxsLCJDb2RlQ29tbWl0UmV2aXNp
|
||||
b24iOm51bGwsIkdpdEh1YlJldmlzaW9uIjpudWxsLCJHaXRSZXZpc2lvbiI6bnVs
|
||||
bCwiUmV2aXNpb25UeXBlIjoiUzMiLCJTM1JldmlzaW9uIjp7IkJ1Y2tldCI6Im1r
|
||||
YW5pYS1jZHdzLWRlcGxveS1idWNrZXQiLCJCdW5kbGVUeXBlIjoiemlwIiwiRVRh
|
||||
ZyI6bnVsbCwiS2V5IjoieHJkOjpzYW0uY2R3czo6ZWNob3NlcnZlcjo6MTo6Lnpp
|
||||
cCIsIlZlcnNpb24iOm51bGx9fSwiUzNSZXZpc2lvbiI6eyJCdWNrZXQiOiJta2Fu
|
||||
aWEtY2R3cy1kZXBsb3ktYnVja2V0IiwiQnVuZGxlVHlwZSI6InppcCIsIkVUYWci
|
||||
Om51bGwsIktleSI6InhyZDo6c2FtLmNkd3M6OmVjaG9zZXJ2ZXI6OjE6Oi56aXAi
|
||||
LCJWZXJzaW9uIjpudWxsfSwiVGFyZ2V0UmV2aXNpb24iOm51bGx9AAAAAAAAoIAw
|
||||
ggWbMIIEg6ADAgECAhAGrjFMK45t2jcNHtjY1DjEMA0GCSqGSIb3DQEBCwUAMEYx
|
||||
CzAJBgNVBAYTAlVTMQ8wDQYDVQQKEwZBbWF6b24xFTATBgNVBAsTDFNlcnZlciBD
|
||||
QSAxQjEPMA0GA1UEAxMGQW1hem9uMB4XDTIwMTExMjAwMDAwMFoXDTIxMTAxNTIz
|
||||
NTk1OVowNDEyMDAGA1UEAxMpY29kZWRlcGxveS1zaWduZXItdXMtZWFzdC0yLmFt
|
||||
YXpvbmF3cy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDit4f+
|
||||
I4BSv4rBV/8bJ+f4KqBwTCt9iJeau/r9liQfMgj/C1M2E+aa++u8BtY/LQstB44v
|
||||
v6KqcaiOyWpkD9OsUty9qb4eNTPF2Y4jpNsi/Hfw0phsd9gLun2foppILmL4lZIG
|
||||
lBhTeEwv6qV4KbyXOG9abHOX32+jVFtM1rbzHNFvz90ysfZp16TBAi7IRKEZeXvd
|
||||
MvlJJMAJtAoblxiDIS3A1csY1G4XHYET8xIoCop3mqEZEtAxUUP2epdXXdhD2U0G
|
||||
7alSRS54o91QW1Dp3A13lu1A1nds9CkWlPkDTpKSUG/qN5y5+6dCCGaydgL5krMs
|
||||
R79bCrR1sEKm5hi1AgMBAAGjggKVMIICkTAfBgNVHSMEGDAWgBRZpGYGUqB7lZI8
|
||||
o5QHJ5Z0W/k90DAdBgNVHQ4EFgQUPF5qTbnTDYhmp7tGmmL/jTmLoHMwNAYDVR0R
|
||||
BC0wK4IpY29kZWRlcGxveS1zaWduZXItdXMtZWFzdC0yLmFtYXpvbmF3cy5jb20w
|
||||
DgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA7
|
||||
BgNVHR8ENDAyMDCgLqAshipodHRwOi8vY3JsLnNjYTFiLmFtYXpvbnRydXN0LmNv
|
||||
bS9zY2ExYi5jcmwwIAYDVR0gBBkwFzALBglghkgBhv1sAQIwCAYGZ4EMAQIBMHUG
|
||||
CCsGAQUFBwEBBGkwZzAtBggrBgEFBQcwAYYhaHR0cDovL29jc3Auc2NhMWIuYW1h
|
||||
em9udHJ1c3QuY29tMDYGCCsGAQUFBzAChipodHRwOi8vY3J0LnNjYTFiLmFtYXpv
|
||||
bnRydXN0LmNvbS9zY2ExYi5jcnQwDAYDVR0TAQH/BAIwADCCAQQGCisGAQQB1nkC
|
||||
BAIEgfUEgfIA8AB2APZclC/RdzAiFFQYCDCUVo7jTRMZM7/fDC8gC8xO8WTjAAAB
|
||||
dboejIcAAAQDAEcwRQIgeqoKXbST17TCEzM1BMWx/jjyVQVBIN3LG17U4OaV364C
|
||||
IQDPUSJZhJm7uqGea6+VwqeDe/vGuGSuJzkDwTIOeIXPaAB2AFzcQ5L+5qtFRLFe
|
||||
mtRW5hA3+9X6R9yhc5SyXub2xw7KAAABdboejNQAAAQDAEcwRQIgEKIAwwhjUcq2
|
||||
iwzBAagdy+fTiKnBY1Yjf6wOeRpwXfMCIQC8wM3nxiWrGgIpdzzgDvFhZZTV3N81
|
||||
JWcYAu+srIVOhTANBgkqhkiG9w0BAQsFAAOCAQEAer9kml53XFy4ZSVzCbdsIFYP
|
||||
Ohu7LDf5iffHBVZFnGOEVOmiPYYkNwi9R6EHIYaAs7G7GGLCp/6tdc+G4eF1j6wB
|
||||
IkmXZcxMTxk/87R+S+36yDLg1GBZvqttLfexj0TRVAfVLJc7FjLXAW2+wi7YyNe8
|
||||
X17lWBwHxa1r5KgweJshGzYVUsgMTSx0aJ+93ZnqplBp9x+9DSQNqqNlBgxFANxs
|
||||
ux+dfpduyLd8VLqtlECGC07tYE4mBaAjMiNjCZRWMp8ya/Z6J/bJZ27IDGA4dXzm
|
||||
l9NNnlbuUDAenAByUqE+0b78J6EmmdAVf+N8siriMg02FdP3lAXJLE8tDeZp8AAA
|
||||
MYICIDCCAhwCAQEwWjBGMQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRUw
|
||||
EwYDVQQLEwxTZXJ2ZXIgQ0EgMUIxDzANBgNVBAMTBkFtYXpvbgIQBq4xTCuObdo3
|
||||
DR7Y2NQ4xDANBglghkgBZQMEAgEFAKCBmDAYBgkqhkiG9w0BCQMxCwYJKoZIhvcN
|
||||
AQcBMBwGCSqGSIb3DQEJBTEPFw0yMTA2MjQxOTU1MzFaMC0GCSqGSIb3DQEJNDEg
|
||||
MB4wDQYJYIZIAWUDBAIBBQChDQYJKoZIhvcNAQELBQAwLwYJKoZIhvcNAQkEMSIE
|
||||
IP7gMuT2H0/AhgPgj3Eo0NWCIdQOBjJO18coNKIaOnJYMA0GCSqGSIb3DQEBCwUA
|
||||
BIIBAJX+e87q0YvRon9/ENTvE0FoYMzYblID2Reek6L217ZlZ6pUuRsc4ghhJ5Yh
|
||||
WZeOCaLwi4mrnQ5/+DGKkJ4a/w5sqFTwtJIGIIAuDCn/uDm8kIDUVkbeznSOLoPA
|
||||
67cxiqgIdqZ5pqUoid2YsDj20owrGDG4wUF6ZvhM9g/5va3CAhxqvTE2HwjhHTfz
|
||||
Cgl8Nlvalz7YxXEf2clFEiEVa1fVaGMl9pCyedAmTfd6hoivcpAsopvXfVaaaR2y
|
||||
iuZidpUfFhSk+Ls7TU/kB74ckfUGj5q/5HcKJgb/S+FYUV7eu0ewzTyW1uRl/d0U
|
||||
Tb7e7EjgDGJsjOTMdTrMfv8ho8kAAAAAAAA=
|
||||
-----END PKCS7-----
|
||||
`
|
@ -1,176 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrUnsupportedAlgorithm is returned when the content- or
// key-encryption algorithm is not one this package can decrypt.
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")

// ErrNotEncryptedContent is returned when attempting to Decrypt a
// PKCS7 structure whose content is not an encrypted data type.
var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")
|
||||
|
||||
// Decrypt decrypts encrypted content info for recipient cert and private key
|
||||
func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) {
|
||||
data, ok := p7.raw.(envelopedData)
|
||||
if !ok {
|
||||
return nil, ErrNotEncryptedContent
|
||||
}
|
||||
recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
|
||||
if recipient.EncryptedKey == nil {
|
||||
return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
|
||||
}
|
||||
switch pkey := pkey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
var contentKey []byte
|
||||
contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data.EncryptedContentInfo.decrypt(contentKey)
|
||||
}
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// DecryptUsingPSK decrypts encrypted data using caller provided
|
||||
// pre-shared secret
|
||||
func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) {
|
||||
data, ok := p7.raw.(encryptedData)
|
||||
if !ok {
|
||||
return nil, ErrNotEncryptedContent
|
||||
}
|
||||
return data.EncryptedContentInfo.decrypt(key)
|
||||
}
|
||||
|
||||
// decrypt decrypts the encrypted content with key, dispatching on the
// content-encryption algorithm OID. Supports DES-CBC, 3DES-CBC,
// AES-128/256-CBC and AES-128/256-GCM; CBC modes have their PKCS#7
// padding removed.
func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
	alg := eci.ContentEncryptionAlgorithm.Algorithm
	if !alg.Equal(OIDEncryptionAlgorithmDESCBC) &&
		!alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) &&
		!alg.Equal(OIDEncryptionAlgorithmAES256CBC) &&
		!alg.Equal(OIDEncryptionAlgorithmAES128CBC) &&
		!alg.Equal(OIDEncryptionAlgorithmAES128GCM) &&
		!alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
		return nil, ErrUnsupportedAlgorithm
	}

	// EncryptedContent can either be constructed of multiple OCTET STRINGs
	// or _be_ a tagged OCTET STRING
	var cyphertext []byte
	if eci.EncryptedContent.IsCompound {
		// Complex case: concatenate all of the children OCTET STRINGs.
		var buf bytes.Buffer
		cypherbytes := eci.EncryptedContent.Bytes
		for {
			var part []byte
			cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
			buf.Write(part)
			if cypherbytes == nil {
				break
			}
		}
		cyphertext = buf.Bytes()
	} else {
		// Simple case, the bytes _are_ the cyphertext
		cyphertext = eci.EncryptedContent.Bytes
	}

	// Build the block cipher for the negotiated algorithm.
	var block cipher.Block
	var err error

	switch {
	case alg.Equal(OIDEncryptionAlgorithmDESCBC):
		block, err = des.NewCipher(key)
	case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC):
		block, err = des.NewTripleDESCipher(key)
	case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM):
		fallthrough
	case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC):
		block, err = aes.NewCipher(key)
	}

	if err != nil {
		return nil, err
	}

	// GCM modes carry their nonce and tag length in the algorithm
	// parameters; validate both before opening.
	if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
		params := aesGCMParameters{}
		paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes

		_, err := asn1.Unmarshal(paramBytes, &params)
		if err != nil {
			return nil, err
		}

		gcm, err := cipher.NewGCM(block)
		if err != nil {
			return nil, err
		}

		if len(params.Nonce) != gcm.NonceSize() {
			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
		}
		if params.ICVLen != gcm.Overhead() {
			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
		}

		plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
		if err != nil {
			return nil, err
		}

		return plaintext, nil
	}

	// CBC modes: the algorithm parameters are the IV.
	iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
	if len(iv) != block.BlockSize() {
		return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	plaintext := make([]byte, len(cyphertext))
	mode.CryptBlocks(plaintext, cyphertext)
	if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
		return nil, err
	}
	return plaintext, nil
}
|
||||
|
||||
// unpad strips PKCS#7 padding from data, verifying that every padding
// byte equals the padding length.
//
// It returns an error when data is empty, not a multiple of blocklen,
// or the padding is malformed.
func unpad(data []byte, blocklen int) ([]byte, error) {
	if blocklen < 1 {
		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
	}
	if len(data)%blocklen != 0 || len(data) == 0 {
		return nil, fmt.Errorf("invalid data len %d", len(data))
	}

	// the last byte is the length of padding
	padlen := int(data[len(data)-1])

	// Bug fix: a pad length of zero is invalid PKCS#7 padding (the
	// original silently returned the data unchanged), and a pad length
	// larger than the data made the slice below panic.
	if padlen == 0 || padlen > len(data) {
		return nil, errors.New("invalid padding")
	}

	// check padding integrity, all bytes should be the same
	pad := data[len(data)-padlen:]
	for _, padbyte := range pad {
		if padbyte != byte(padlen) {
			return nil, errors.New("invalid padding")
		}
	}

	return data[:len(data)-padlen], nil
}
|
||||
|
||||
func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
|
||||
for _, recp := range recipients {
|
||||
if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
|
||||
return recp
|
||||
}
|
||||
}
|
||||
return recipientInfo{}
|
||||
}
|
@ -1,61 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestDecrypt parses the enveloped fixture below and checks that
// decrypting with the bundled certificate and private key yields the
// original plaintext.
func TestDecrypt(t *testing.T) {
	fixture := UnmarshalTestFixture(EncryptedTestFixture)
	p7, err := Parse(fixture.Input)
	if err != nil {
		t.Fatal(err)
	}
	content, err := p7.Decrypt(fixture.Certificate, fixture.PrivateKey)
	if err != nil {
		t.Errorf("Cannot Decrypt with error: %v", err)
	}
	expected := []byte("This is a test")
	if !bytes.Equal(content, expected) {
		t.Errorf("Decrypted result does not match.\n\tExpected:%s\n\tActual:%s", expected, content)
	}
}
|
||||
|
||||
// echo -n "This is a test" > test.txt
|
||||
// openssl cms -encrypt -in test.txt cert.pem
|
||||
var EncryptedTestFixture = `
|
||||
-----BEGIN PKCS7-----
|
||||
MIIBGgYJKoZIhvcNAQcDoIIBCzCCAQcCAQAxgcwwgckCAQAwMjApMRAwDgYDVQQK
|
||||
EwdBY21lIENvMRUwEwYDVQQDEwxFZGRhcmQgU3RhcmsCBQDL+CvWMA0GCSqGSIb3
|
||||
DQEBAQUABIGAyFz7bfI2noUs4FpmYfztm1pVjGyB00p9x0H3gGHEYNXdqlq8VG8d
|
||||
iq36poWtEkatnwsOlURWZYECSi0g5IAL0U9sj82EN0xssZNaK0S5FTGnB3DPvYgt
|
||||
HJvcKq7YvNLKMh4oqd17C6GB4oXyEBDj0vZnL7SUoCAOAWELPeC8CTUwMwYJKoZI
|
||||
hvcNAQcBMBQGCCqGSIb3DQMHBAhEowTkot3a7oAQFD//J/IhFnk+JbkH7HZQFA==
|
||||
-----END PKCS7-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIB1jCCAUGgAwIBAgIFAMv4K9YwCwYJKoZIhvcNAQELMCkxEDAOBgNVBAoTB0Fj
|
||||
bWUgQ28xFTATBgNVBAMTDEVkZGFyZCBTdGFyazAeFw0xNTA1MDYwMzU2NDBaFw0x
|
||||
NjA1MDYwMzU2NDBaMCUxEDAOBgNVBAoTB0FjbWUgQ28xETAPBgNVBAMTCEpvbiBT
|
||||
bm93MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDK6NU0R0eiCYVquU4RcjKc
|
||||
LzGfx0aa1lMr2TnLQUSeLFZHFxsyyMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg
|
||||
8+Zg2r8xnnney7abxcuv0uATWSIeKlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP
|
||||
+Zxp2ni5qHNraf3wE2VPIQIDAQABoxIwEDAOBgNVHQ8BAf8EBAMCAKAwCwYJKoZI
|
||||
hvcNAQELA4GBAIr2F7wsqmEU/J/kLyrCgEVXgaV/sKZq4pPNnzS0tBYk8fkV3V18
|
||||
sBJyHKRLL/wFZASvzDcVGCplXyMdAOCyfd8jO3F9Ac/xdlz10RrHJT75hNu3a7/n
|
||||
9KNwKhfN4A1CQv2x372oGjRhCW5bHNCWx4PIVeNzCyq/KZhyY9sxHE6f
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIICXgIBAAKBgQDK6NU0R0eiCYVquU4RcjKcLzGfx0aa1lMr2TnLQUSeLFZHFxsy
|
||||
yMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg8+Zg2r8xnnney7abxcuv0uATWSIe
|
||||
KlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP+Zxp2ni5qHNraf3wE2VPIQIDAQAB
|
||||
AoGBALyvnSt7KUquDen7nXQtvJBudnf9KFPt//OjkdHHxNZNpoF/JCSqfQeoYkeu
|
||||
MdAVYNLQGMiRifzZz4dDhA9xfUAuy7lcGQcMCxEQ1dwwuFaYkawbS0Tvy2PFlq2d
|
||||
H5/HeDXU4EDJ3BZg0eYj2Bnkt1sJI35UKQSxblQ0MY2q0uFBAkEA5MMOogkgUx1C
|
||||
67S1tFqMUSM8D0mZB0O5vOJZC5Gtt2Urju6vywge2ArExWRXlM2qGl8afFy2SgSv
|
||||
Xk5eybcEiQJBAOMRwwbEoW5NYHuFFbSJyWll4n71CYuWuQOCzehDPyTb80WFZGLV
|
||||
i91kFIjeERyq88eDE5xVB3ZuRiXqaShO/9kCQQCKOEkpInaDgZSjskZvuJ47kByD
|
||||
6CYsO4GIXQMMeHML8ncFH7bb6AYq5ybJVb2NTU7QLFJmfeYuhvIm+xdOreRxAkEA
|
||||
o5FC5Jg2FUfFzZSDmyZ6IONUsdF/i78KDV5nRv1R+hI6/oRlWNCtTNBv/lvBBd6b
|
||||
dseUE9QoaQZsn5lpILEvmQJAZ0B+Or1rAYjnbjnUhdVZoy9kC4Zov+4UH3N/BtSy
|
||||
KJRWUR0wTWfZBPZ5hAYZjTBEAFULaYCXlQKsODSp0M1aQA==
|
||||
-----END PRIVATE KEY-----`
|
@ -1,399 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// envelopedData is the ASN.1 EnvelopedData structure: content encrypted
// under a symmetric key, which is itself encrypted for each recipient.
type envelopedData struct {
	Version              int
	RecipientInfos       []recipientInfo `asn1:"set"`
	EncryptedContentInfo encryptedContentInfo
}

// encryptedData is the ASN.1 EncryptedData structure: content encrypted
// under a pre-shared key, with no per-recipient key material.
type encryptedData struct {
	Version              int
	EncryptedContentInfo encryptedContentInfo
}

// recipientInfo carries the content-encryption key encrypted for one
// recipient, identified by issuer and serial number.
type recipientInfo struct {
	Version                int
	IssuerAndSerialNumber  issuerAndSerial
	KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedKey           []byte
}

// encryptedContentInfo describes the ciphertext: its content type, the
// encryption algorithm (whose parameters hold the IV or GCM nonce), and
// the encrypted bytes.
type encryptedContentInfo struct {
	ContentType                asn1.ObjectIdentifier
	ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedContent           asn1.RawValue `asn1:"tag:0,optional"`
}
|
||||
|
||||
// Selector values for the package-level ContentEncryptionAlgorithm.
const (
	// EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm
	EncryptionAlgorithmDESCBC = iota

	// EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm
	// Avoid this algorithm unless required for interoperability; use AES GCM instead.
	EncryptionAlgorithmAES128CBC

	// EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm
	// Avoid this algorithm unless required for interoperability; use AES GCM instead.
	EncryptionAlgorithmAES256CBC

	// EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm
	EncryptionAlgorithmAES128GCM

	// EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm
	EncryptionAlgorithmAES256GCM
)
|
||||
|
||||
// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
// plaintext message. Change the value of this variable to change which
// algorithm is used in the Encrypt() function.
var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC

// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
// content with an unsupported algorithm.
var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported")

// ErrPSKNotProvided is returned when attempting to encrypt
// using a PSK without actually providing the PSK.
var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided")

// nonceSize is the GCM nonce length in bytes used by encryptAESGCM.
const nonceSize = 12

// aesGCMParameters mirrors the GCMParameters ASN.1 structure: the nonce
// and the ICV (authentication tag) length in bytes.
type aesGCMParameters struct {
	Nonce  []byte `asn1:"tag:4"`
	ICVLen int
}
|
||||
|
||||
// encryptAESGCM encrypts content with AES-GCM using the key size chosen
// by the package-level ContentEncryptionAlgorithm. When key is nil a
// random key of the proper length is generated. It returns the key that
// was used and the encryptedContentInfo describing the ciphertext.
func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
	var keyLen int
	var algID asn1.ObjectIdentifier
	switch ContentEncryptionAlgorithm {
	case EncryptionAlgorithmAES128GCM:
		keyLen = 16
		algID = OIDEncryptionAlgorithmAES128GCM
	case EncryptionAlgorithmAES256GCM:
		keyLen = 32
		algID = OIDEncryptionAlgorithmAES256GCM
	default:
		return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm)
	}
	if key == nil {
		// Create AES key
		key = make([]byte, keyLen)

		_, err := rand.Read(key)
		if err != nil {
			return nil, nil, err
		}
	}

	// Create nonce
	nonce := make([]byte, nonceSize)

	_, err := rand.Read(nonce)
	if err != nil {
		return nil, nil, err
	}

	// Encrypt content
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}

	ciphertext := gcm.Seal(nil, nonce, content, nil)

	// Prepare ASN.1 Encrypted Content Info: the nonce and tag length
	// travel in the algorithm parameters.
	paramSeq := aesGCMParameters{
		Nonce:  nonce,
		ICVLen: gcm.Overhead(),
	}

	paramBytes, err := asn1.Marshal(paramSeq)
	if err != nil {
		return nil, nil, err
	}

	eci := encryptedContentInfo{
		ContentType: OIDData,
		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm: algID,
			Parameters: asn1.RawValue{
				Tag:   asn1.TagSequence,
				Bytes: paramBytes,
			},
		},
		EncryptedContent: marshalEncryptedContent(ciphertext),
	}

	return key, &eci, nil
}
|
||||
|
||||
// encryptDESCBC encrypts content with single-DES in CBC mode, padding
// the plaintext first. When key is nil a random 8-byte key is
// generated. It returns the key that was used and the
// encryptedContentInfo describing the ciphertext.
//
// NOTE(review): single DES is cryptographically weak; it is retained
// here for interoperability only.
func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
	if key == nil {
		// Create DES key
		key = make([]byte, 8)

		_, err := rand.Read(key)
		if err != nil {
			return nil, nil, err
		}
	}

	// Create CBC IV
	iv := make([]byte, des.BlockSize)
	_, err := rand.Read(iv)
	if err != nil {
		return nil, nil, err
	}

	// Encrypt padded content
	block, err := des.NewCipher(key)
	if err != nil {
		return nil, nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	plaintext, err := pad(content, mode.BlockSize())
	if err != nil {
		return nil, nil, err
	}
	cyphertext := make([]byte, len(plaintext))
	mode.CryptBlocks(cyphertext, plaintext)

	// Prepare ASN.1 Encrypted Content Info: the IV travels in the
	// algorithm parameters.
	eci := encryptedContentInfo{
		ContentType: OIDData,
		ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
			Algorithm:  OIDEncryptionAlgorithmDESCBC,
			Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
		},
		EncryptedContent: marshalEncryptedContent(cyphertext),
	}

	return key, &eci, nil
}
|
||||
|
||||
func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) {
|
||||
var keyLen int
|
||||
var algID asn1.ObjectIdentifier
|
||||
switch ContentEncryptionAlgorithm {
|
||||
case EncryptionAlgorithmAES128CBC:
|
||||
keyLen = 16
|
||||
algID = OIDEncryptionAlgorithmAES128CBC
|
||||
case EncryptionAlgorithmAES256CBC:
|
||||
keyLen = 32
|
||||
algID = OIDEncryptionAlgorithmAES256CBC
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm)
|
||||
}
|
||||
|
||||
if key == nil {
|
||||
// Create AES key
|
||||
key = make([]byte, keyLen)
|
||||
|
||||
_, err := rand.Read(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Create CBC IV
|
||||
iv := make([]byte, aes.BlockSize)
|
||||
_, err := rand.Read(iv)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Encrypt padded content
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
mode := cipher.NewCBCEncrypter(block, iv)
|
||||
plaintext, err := pad(content, mode.BlockSize())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cyphertext := make([]byte, len(plaintext))
|
||||
mode.CryptBlocks(cyphertext, plaintext)
|
||||
|
||||
// Prepare ASN.1 Encrypted Content Info
|
||||
eci := encryptedContentInfo{
|
||||
ContentType: OIDData,
|
||||
ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: algID,
|
||||
Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
|
||||
},
|
||||
EncryptedContent: marshalEncryptedContent(cyphertext),
|
||||
}
|
||||
|
||||
return key, &eci, nil
|
||||
}
|
||||
|
||||
// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
|
||||
// recipient keys for each recipient public key.
|
||||
//
|
||||
// The algorithm used to perform encryption is determined by the current value
|
||||
// of the global ContentEncryptionAlgorithm package variable. By default, the
|
||||
// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
|
||||
// value before calling Encrypt(). For example:
|
||||
//
|
||||
// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
|
||||
//
|
||||
// TODO(fullsailor): Add support for encrypting content with other algorithms
|
||||
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
|
||||
var eci *encryptedContentInfo
|
||||
var key []byte
|
||||
var err error
|
||||
|
||||
// Apply chosen symmetric encryption method
|
||||
switch ContentEncryptionAlgorithm {
|
||||
case EncryptionAlgorithmDESCBC:
|
||||
key, eci, err = encryptDESCBC(content, nil)
|
||||
case EncryptionAlgorithmAES128CBC:
|
||||
fallthrough
|
||||
case EncryptionAlgorithmAES256CBC:
|
||||
key, eci, err = encryptAESCBC(content, nil)
|
||||
case EncryptionAlgorithmAES128GCM:
|
||||
fallthrough
|
||||
case EncryptionAlgorithmAES256GCM:
|
||||
key, eci, err = encryptAESGCM(content, nil)
|
||||
|
||||
default:
|
||||
return nil, ErrUnsupportedEncryptionAlgorithm
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare each recipient's encrypted cipher key
|
||||
recipientInfos := make([]recipientInfo, len(recipients))
|
||||
for i, recipient := range recipients {
|
||||
encrypted, err := encryptKey(key, recipient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ias, err := cert2issuerAndSerial(recipient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info := recipientInfo{
|
||||
Version: 0,
|
||||
IssuerAndSerialNumber: ias,
|
||||
KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: OIDEncryptionAlgorithmRSA,
|
||||
},
|
||||
EncryptedKey: encrypted,
|
||||
}
|
||||
recipientInfos[i] = info
|
||||
}
|
||||
|
||||
// Prepare envelope content
|
||||
envelope := envelopedData{
|
||||
EncryptedContentInfo: *eci,
|
||||
Version: 0,
|
||||
RecipientInfos: recipientInfos,
|
||||
}
|
||||
innerContent, err := asn1.Marshal(envelope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare outer payload structure
|
||||
wrapper := contentInfo{
|
||||
ContentType: OIDEnvelopedData,
|
||||
Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
|
||||
}
|
||||
|
||||
return asn1.Marshal(wrapper)
|
||||
}
|
||||
|
||||
// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure,
|
||||
// encrypted using caller provided pre-shared secret.
|
||||
func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) {
|
||||
var eci *encryptedContentInfo
|
||||
var err error
|
||||
|
||||
if key == nil {
|
||||
return nil, ErrPSKNotProvided
|
||||
}
|
||||
|
||||
// Apply chosen symmetric encryption method
|
||||
switch ContentEncryptionAlgorithm {
|
||||
case EncryptionAlgorithmDESCBC:
|
||||
_, eci, err = encryptDESCBC(content, key)
|
||||
|
||||
case EncryptionAlgorithmAES128GCM:
|
||||
fallthrough
|
||||
case EncryptionAlgorithmAES256GCM:
|
||||
_, eci, err = encryptAESGCM(content, key)
|
||||
|
||||
default:
|
||||
return nil, ErrUnsupportedEncryptionAlgorithm
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare encrypted-data content
|
||||
ed := encryptedData{
|
||||
Version: 0,
|
||||
EncryptedContentInfo: *eci,
|
||||
}
|
||||
innerContent, err := asn1.Marshal(ed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare outer payload structure
|
||||
wrapper := contentInfo{
|
||||
ContentType: OIDEncryptedData,
|
||||
Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
|
||||
}
|
||||
|
||||
return asn1.Marshal(wrapper)
|
||||
}
|
||||
|
||||
func marshalEncryptedContent(content []byte) asn1.RawValue {
|
||||
asn1Content, _ := asn1.Marshal(content)
|
||||
return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
|
||||
}
|
||||
|
||||
func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
|
||||
if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil {
|
||||
return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
|
||||
}
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
func pad(data []byte, blocklen int) ([]byte, error) {
|
||||
if blocklen < 1 {
|
||||
return nil, fmt.Errorf("invalid blocklen %d", blocklen)
|
||||
}
|
||||
padlen := blocklen - (len(data) % blocklen)
|
||||
if padlen == 0 {
|
||||
padlen = blocklen
|
||||
}
|
||||
pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
|
||||
return append(data, pad...), nil
|
||||
}
|
@ -1,101 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/x509"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncrypt(t *testing.T) {
|
||||
modes := []int{
|
||||
EncryptionAlgorithmDESCBC,
|
||||
EncryptionAlgorithmAES128CBC,
|
||||
EncryptionAlgorithmAES256CBC,
|
||||
EncryptionAlgorithmAES128GCM,
|
||||
EncryptionAlgorithmAES256GCM,
|
||||
}
|
||||
sigalgs := []x509.SignatureAlgorithm{
|
||||
x509.SHA256WithRSA,
|
||||
x509.SHA512WithRSA,
|
||||
}
|
||||
for _, mode := range modes {
|
||||
for _, sigalg := range sigalgs {
|
||||
ContentEncryptionAlgorithm = mode
|
||||
|
||||
plaintext := []byte("Hello Secret World!")
|
||||
cert, err := createTestCertificate(sigalg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
encrypted, err := Encrypt(plaintext, []*x509.Certificate{cert.Certificate})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
p7, err := Parse(encrypted)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot Parse encrypted result: %s", err)
|
||||
}
|
||||
result, err := p7.Decrypt(cert.Certificate, *cert.PrivateKey)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot Decrypt encrypted result: %s", err)
|
||||
}
|
||||
if !bytes.Equal(plaintext, result) {
|
||||
t.Errorf("encrypted data does not match plaintext:\n\tExpected: %s\n\tActual: %s", plaintext, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncryptUsingPSK(t *testing.T) {
|
||||
modes := []int{
|
||||
EncryptionAlgorithmDESCBC,
|
||||
EncryptionAlgorithmAES128GCM,
|
||||
}
|
||||
|
||||
for _, mode := range modes {
|
||||
ContentEncryptionAlgorithm = mode
|
||||
plaintext := []byte("Hello Secret World!")
|
||||
var key []byte
|
||||
|
||||
switch mode {
|
||||
case EncryptionAlgorithmDESCBC:
|
||||
key = []byte("64BitKey")
|
||||
case EncryptionAlgorithmAES128GCM:
|
||||
key = []byte("128BitKey4AESGCM")
|
||||
}
|
||||
ciphertext, err := EncryptUsingPSK(plaintext, key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
p7, _ := Parse(ciphertext)
|
||||
result, err := p7.DecryptUsingPSK(key)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot Decrypt encrypted result: %s", err)
|
||||
}
|
||||
if !bytes.Equal(plaintext, result) {
|
||||
t.Errorf("encrypted data does not match plaintext:\n\tExpected: %s\n\tActual: %s", plaintext, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPad(t *testing.T) {
|
||||
tests := []struct {
|
||||
Original []byte
|
||||
Expected []byte
|
||||
BlockSize int
|
||||
}{
|
||||
{[]byte{0x1, 0x2, 0x3, 0x10}, []byte{0x1, 0x2, 0x3, 0x10, 0x4, 0x4, 0x4, 0x4}, 8},
|
||||
{[]byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0}, []byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8}, 8},
|
||||
}
|
||||
for _, test := range tests {
|
||||
padded, err := pad(test.Original, test.BlockSize)
|
||||
if err != nil {
|
||||
t.Errorf("pad encountered error: %s", err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(test.Expected, padded) {
|
||||
t.Errorf("pad results mismatch:\n\tExpected: %X\n\tActual: %X", test.Expected, padded)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,290 +0,0 @@
|
||||
// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
_ "crypto/sha1" // for crypto.SHA1
|
||||
)
|
||||
|
||||
// PKCS7 Represents a PKCS7 structure
|
||||
type PKCS7 struct {
|
||||
Content []byte
|
||||
Certificates []*x509.Certificate
|
||||
CRLs []pkix.CertificateList
|
||||
Signers []signerInfo
|
||||
raw interface{}
|
||||
}
|
||||
|
||||
type contentInfo struct {
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"explicit,optional,tag:0"`
|
||||
}
|
||||
|
||||
// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
|
||||
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
|
||||
// and Enveloped Data are supported (1.2.840.113549.1.7.3)
|
||||
var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")
|
||||
|
||||
type unsignedData []byte
|
||||
|
||||
var (
|
||||
// Signed Data OIDs
|
||||
OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
|
||||
OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
|
||||
OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
|
||||
OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
|
||||
OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
|
||||
OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
|
||||
OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
|
||||
|
||||
// Digest Algorithms
|
||||
OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
|
||||
OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
|
||||
OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
|
||||
OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
|
||||
|
||||
OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
|
||||
OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
|
||||
|
||||
OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
|
||||
OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
|
||||
OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
|
||||
OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
|
||||
|
||||
// Signature Algorithms
|
||||
OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
|
||||
OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
|
||||
OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
|
||||
OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
|
||||
OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
|
||||
|
||||
OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
|
||||
OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
|
||||
OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
|
||||
|
||||
// Encryption Algorithms
|
||||
OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
|
||||
OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
|
||||
OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
|
||||
OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
|
||||
OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}
|
||||
OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46}
|
||||
)
|
||||
|
||||
func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
|
||||
switch {
|
||||
case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1),
|
||||
oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1),
|
||||
oid.Equal(OIDEncryptionAlgorithmRSA):
|
||||
return crypto.SHA1, nil
|
||||
case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256):
|
||||
return crypto.SHA256, nil
|
||||
case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384):
|
||||
return crypto.SHA384, nil
|
||||
case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512):
|
||||
return crypto.SHA512, nil
|
||||
}
|
||||
return crypto.Hash(0), ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm
|
||||
// and returns the corresponding OID digest algorithm
|
||||
func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) {
|
||||
switch digestAlg {
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
|
||||
return OIDDigestAlgorithmSHA1, nil
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
|
||||
return OIDDigestAlgorithmSHA256, nil
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
return OIDDigestAlgorithmSHA384, nil
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
return OIDDigestAlgorithmSHA512, nil
|
||||
}
|
||||
return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm")
|
||||
}
|
||||
|
||||
// getOIDForEncryptionAlgorithm takes the private key type of the signer and
|
||||
// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm
|
||||
func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) {
|
||||
switch pkey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
switch {
|
||||
default:
|
||||
return OIDEncryptionAlgorithmRSA, nil
|
||||
case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA):
|
||||
return OIDEncryptionAlgorithmRSA, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
|
||||
return OIDEncryptionAlgorithmRSASHA1, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
|
||||
return OIDEncryptionAlgorithmRSASHA256, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
|
||||
return OIDEncryptionAlgorithmRSASHA384, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
|
||||
return OIDEncryptionAlgorithmRSASHA512, nil
|
||||
}
|
||||
case *ecdsa.PrivateKey:
|
||||
switch {
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1):
|
||||
return OIDDigestAlgorithmECDSASHA1, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256):
|
||||
return OIDDigestAlgorithmECDSASHA256, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384):
|
||||
return OIDDigestAlgorithmECDSASHA384, nil
|
||||
case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512):
|
||||
return OIDDigestAlgorithmECDSASHA512, nil
|
||||
}
|
||||
case *dsa.PrivateKey:
|
||||
return OIDDigestAlgorithmDSA, nil
|
||||
}
|
||||
return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey)
|
||||
}
|
||||
|
||||
// Parse decodes a DER encoded PKCS7 package
|
||||
func Parse(data []byte) (p7 *PKCS7, err error) {
|
||||
if len(data) == 0 {
|
||||
return nil, errors.New("pkcs7: input data is empty")
|
||||
}
|
||||
var info contentInfo
|
||||
der, err := ber2der(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rest, err := asn1.Unmarshal(der, &info)
|
||||
if len(rest) > 0 {
|
||||
err = asn1.SyntaxError{Msg: "trailing data"}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// fmt.Printf("--> Content Type: %s", info.ContentType)
|
||||
switch {
|
||||
case info.ContentType.Equal(OIDSignedData):
|
||||
return parseSignedData(info.Content.Bytes)
|
||||
case info.ContentType.Equal(OIDEnvelopedData):
|
||||
return parseEnvelopedData(info.Content.Bytes)
|
||||
case info.ContentType.Equal(OIDEncryptedData):
|
||||
return parseEncryptedData(info.Content.Bytes)
|
||||
}
|
||||
return nil, ErrUnsupportedContentType
|
||||
}
|
||||
|
||||
func parseEnvelopedData(data []byte) (*PKCS7, error) {
|
||||
var ed envelopedData
|
||||
if _, err := asn1.Unmarshal(data, &ed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &PKCS7{
|
||||
raw: ed,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseEncryptedData(data []byte) (*PKCS7, error) {
|
||||
var ed encryptedData
|
||||
if _, err := asn1.Unmarshal(data, &ed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &PKCS7{
|
||||
raw: ed,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
|
||||
if len(raw.Raw) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var val asn1.RawValue
|
||||
if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return x509.ParseCertificates(val.Bytes)
|
||||
}
|
||||
|
||||
func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
|
||||
return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
|
||||
}
|
||||
|
||||
// Attribute represents a key value pair attribute. Value must be marshalable byte
|
||||
// `encoding/asn1`
|
||||
type Attribute struct {
|
||||
Type asn1.ObjectIdentifier
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
type attributes struct {
|
||||
types []asn1.ObjectIdentifier
|
||||
values []interface{}
|
||||
}
|
||||
|
||||
// Add adds the attribute, maintaining insertion order
|
||||
func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
|
||||
attrs.types = append(attrs.types, attrType)
|
||||
attrs.values = append(attrs.values, value)
|
||||
}
|
||||
|
||||
type sortableAttribute struct {
|
||||
SortKey []byte
|
||||
Attribute attribute
|
||||
}
|
||||
|
||||
type attributeSet []sortableAttribute
|
||||
|
||||
func (sa attributeSet) Len() int {
|
||||
return len(sa)
|
||||
}
|
||||
|
||||
func (sa attributeSet) Less(i, j int) bool {
|
||||
return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
|
||||
}
|
||||
|
||||
func (sa attributeSet) Swap(i, j int) {
|
||||
sa[i], sa[j] = sa[j], sa[i]
|
||||
}
|
||||
|
||||
func (sa attributeSet) Attributes() []attribute {
|
||||
attrs := make([]attribute, len(sa))
|
||||
for i, attr := range sa {
|
||||
attrs[i] = attr.Attribute
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
func (attrs *attributes) ForMarshalling() ([]attribute, error) {
|
||||
sortables := make(attributeSet, len(attrs.types))
|
||||
for i := range sortables {
|
||||
attrType := attrs.types[i]
|
||||
attrValue := attrs.values[i]
|
||||
asn1Value, err := asn1.Marshal(attrValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attr := attribute{
|
||||
Type: attrType,
|
||||
Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
|
||||
}
|
||||
encoded, err := asn1.Marshal(attr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sortables[i] = sortableAttribute{
|
||||
SortKey: encoded,
|
||||
Attribute: attr,
|
||||
}
|
||||
}
|
||||
sort.Sort(sortables)
|
||||
return sortables.Attributes(), nil
|
||||
}
|
@ -1,283 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
var test1024Key, test2048Key, test3072Key, test4096Key *rsa.PrivateKey
|
||||
|
||||
func init() {
|
||||
test1024Key = &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: fromBase10("123024078101403810516614073341068864574068590522569345017786163424062310013967742924377390210586226651760719671658568413826602264886073432535341149584680111145880576802262550990305759285883150470245429547886689754596541046564560506544976611114898883158121012232676781340602508151730773214407220733898059285561"),
|
||||
E: 65537,
|
||||
},
|
||||
D: fromBase10("118892427340746627750435157989073921703209000249285930635312944544706203626114423392257295670807166199489096863209592887347935991101581502404113203993092422730000157893515953622392722273095289787303943046491132467130346663160540744582438810535626328230098940583296878135092036661410664695896115177534496784545"),
|
||||
Primes: []*big.Int{
|
||||
fromBase10("12172745919282672373981903347443034348576729562395784527365032103134165674508405592530417723266847908118361582847315228810176708212888860333051929276459099"),
|
||||
fromBase10("10106518193772789699356660087736308350857919389391620140340519320928952625438936098550728858345355053201610649202713962702543058578827268756755006576249339"),
|
||||
},
|
||||
}
|
||||
test1024Key.Precompute()
|
||||
test2048Key = &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: fromBase10("14314132931241006650998084889274020608918049032671858325988396851334124245188214251956198731333464217832226406088020736932173064754214329009979944037640912127943488972644697423190955557435910767690712778463524983667852819010259499695177313115447116110358524558307947613422897787329221478860907963827160223559690523660574329011927531289655711860504630573766609239332569210831325633840174683944553667352219670930408593321661375473885147973879086994006440025257225431977751512374815915392249179976902953721486040787792801849818254465486633791826766873076617116727073077821584676715609985777563958286637185868165868520557"),
|
||||
E: 3,
|
||||
},
|
||||
D: fromBase10("9542755287494004433998723259516013739278699355114572217325597900889416163458809501304132487555642811888150937392013824621448709836142886006653296025093941418628992648429798282127303704957273845127141852309016655778568546006839666463451542076964744073572349705538631742281931858219480985907271975884773482372966847639853897890615456605598071088189838676728836833012254065983259638538107719766738032720239892094196108713378822882383694456030043492571063441943847195939549773271694647657549658603365629458610273821292232646334717612674519997533901052790334279661754176490593041941863932308687197618671528035670452762731"),
|
||||
Primes: []*big.Int{
|
||||
fromBase10("130903255182996722426771613606077755295583329135067340152947172868415809027537376306193179624298874215608270802054347609836776473930072411958753044562214537013874103802006369634761074377213995983876788718033850153719421695468704276694983032644416930879093914927146648402139231293035971427838068945045019075433"),
|
||||
fromBase10("109348945610485453577574767652527472924289229538286649661240938988020367005475727988253438647560958573506159449538793540472829815903949343191091817779240101054552748665267574271163617694640513549693841337820602726596756351006149518830932261246698766355347898158548465400674856021497190430791824869615170301029"),
|
||||
},
|
||||
}
|
||||
test2048Key.Precompute()
|
||||
test3072Key = &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: fromBase10("4799422180968749215324244710281712119910779465109490663934897082847293004098645365195947978124390029272750644394844443980065532911010718425428791498896288210928474905407341584968381379157418577471272697781778686372450913810019702928839200328075568223462554606149618941566459398862673532997592879359280754226882565483298027678735544377401276021471356093819491755877827249763065753555051973844057308627201762456191918852016986546071426986328720794061622370410645440235373576002278045257207695462423797272017386006110722769072206022723167102083033531426777518054025826800254337147514768377949097720074878744769255210076910190151785807232805749219196645305822228090875616900385866236956058984170647782567907618713309775105943700661530312800231153745705977436176908325539234432407050398510090070342851489496464612052853185583222422124535243967989533830816012180864309784486694786581956050902756173889941244024888811572094961378021"),
|
||||
E: 65537,
|
||||
},
|
||||
D: fromBase10("4068124900056380177006532461065648259352178312499768312132802353620854992915205894105621345694615110794369150964768050224096623567443679436821868510233726084582567244003894477723706516831312989564775159596496449435830457803384416702014837685962523313266832032687145914871879794104404800823188153886925022171560391765913739346955738372354826804228989767120353182641396181570533678315099748218734875742705419933837638038793286534641711407564379950728858267828581787483317040753987167237461567332386718574803231955771633274184646232632371006762852623964054645811527580417392163873708539175349637050049959954373319861427407953413018816604365474462455009323937599275324390953644555294418021286807661559165324810415569396577697316798600308544755741549699523972971375304826663847015905713096287495342701286542193782001358775773848824496321550110946106870685499577993864871847542645561943034990484973293461948058147956373115641615329"),
|
||||
Primes: []*big.Int{
|
||||
fromBase10("2378529069722721185825622840841310902793949682948530343491428052737890236476884657507685118578733560141370511507721598189068683665232991988491561624429938984370132428230072355214627085652359350722926394699707232921674771664421591347888367477300909202851476404132163673865768760147403525700174918450753162242834161458300343282159799476695001920226357456953682236859505243928716782707623075239350380352265954107362618991716602898266999700316937680986690964564264877"),
|
||||
fromBase10("2017811025336026464312837780072272578817919741496395062543647660689775637351085991504709917848745137013798005682591633910555599626950744674459976829106750083386168859581016361317479081273480343110649405858059581933773354781034946787147300862495438979895430001323443224335618577322449133208754541656374335100929456885995320929464029817626916719434010943205170760536768893924932021302887114400922813817969176636993508191950649313115712159241971065134077636674146073"),
|
||||
},
|
||||
}
|
||||
test3072Key.Precompute()
|
||||
test4096Key = &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: fromBase10("633335480064287130853997429184971616419051348693342219741748040433588285601270210251206421401040394238592139790962887290698043839174341843721930134010306454716566698330215646704263665452264344664385995704186692432827662862845900348526672531755932642433662686500295989783595767573119607065791980381547677840410600100715146047382485989885183858757974681241303484641390718944520330953604501686666386926996348457928415093305041429178744778762826377713889019740060910363468343855830206640274442887621960581569183233822878661711798998132931623726434336448716605363514220760343097572198620479297583609779817750646169845195672483600293522186340560792255595411601450766002877850696008003794520089358819042318331840490155176019070646738739580486357084733208876620846449161909966690602374519398451042362690200166144326179405976024265116931974936425064291406950542193873313447617169603706868220189295654943247311295475722243471700112334609817776430552541319671117235957754556272646031356496763094955985615723596562217985372503002989591679252640940571608314743271809251568670314461039035793703429977801961867815257832671786542212589906513979094156334941265621017752516999186481477500481433634914622735206243841674973785078408289183000133399026553"),
|
||||
E: 65537,
|
||||
},
|
||||
D: fromBase10("439373650557744155078930178606343279553665694488479749802070836418412881168612407941793966086633543867614175621952769177088930851151267623886678906158545451731745754402575409204816390946376103491325109185445659065122640946673660760274557781540431107937331701243915001777636528502669576801704352961341634812275635811512806966908648671988644114352046582195051714797831307925775689566757438907578527366568747104508496278929566712224252103563340770696548181508180254674236716995730292431858611476396845443056967589437890065663497768422598977743046882539288481002449571403783500529740184608873520856954837631427724158592309018382711485601884461168736465751756282510065053161144027097169985941910909130083273691945578478173708396726266170473745329617793866669307716920992380350270584929908460462802627239204245339385636926433446418108504614031393494119344916828744888432279343816084433424594432427362258172264834429525166677273382617457205387388293888430391895615438030066428745187333897518037597413369705720436392869403948934993623418405908467147848576977008003556716087129242155836114780890054057743164411952731290520995017097151300091841286806603044227906213832083363876549637037625314539090155417589796428888619937329669464810549362433"),
|
||||
Primes: []*big.Int{
|
||||
fromBase10("25745433817240673759910623230144796182285844101796353869339294232644316274580053211056707671663014355388701931204078502829809738396303142990312095225333440050808647355535878394534263839500592870406002873182360027755750148248672968563366185348499498613479490545488025779331426515670185366021612402246813511722553210128074701620113404560399242413747318161403908617342170447610792422053460359960010544593668037305465806912471260799852789913123044326555978680190904164976511331681163576833618899773550873682147782263100803907156362439021929408298804955194748640633152519828940133338948391986823456836070708197320166146761"),
|
||||
fromBase10("24599914864909676687852658457515103765368967514652318497893275892114442089314173678877914038802355565271545910572804267918959612739009937926962653912943833939518967731764560204997062096919833970670512726396663920955497151415639902788974842698619579886297871162402643104696160155894685518587660015182381685605752989716946154299190561137541792784125356553411300817844325739404126956793095254412123887617931225840421856505925283322918693259047428656823141903489964287619982295891439430302405252447010728112098326033634688757933930065610737780413018498561434074501822951716586796047404555397992425143397497639322075233073"),
|
||||
},
|
||||
}
|
||||
test4096Key.Precompute()
|
||||
}
|
||||
|
||||
// fromBase10 parses a base-10 string into a *big.Int. It panics on any
// malformed input, which keeps the package-level test-key literals free of
// per-call error handling.
func fromBase10(base10 string) *big.Int {
	value := new(big.Int)
	if _, ok := value.SetString(base10, 10); !ok {
		panic("bad number: " + base10)
	}
	return value
}
|
||||
|
||||
// certKeyPair bundles a parsed certificate with its private key.
// PrivateKey is a pointer to the crypto.PrivateKey interface so a single
// field can hold RSA, ECDSA, or DSA keys interchangeably (callers
// dereference it when signing).
type certKeyPair struct {
	Certificate *x509.Certificate
	PrivateKey  *crypto.PrivateKey
}
|
||||
|
||||
func createTestCertificate(sigAlg x509.SignatureAlgorithm) (certKeyPair, error) {
|
||||
signer, err := createTestCertificateByIssuer("Eddard Stark", nil, sigAlg, true)
|
||||
if err != nil {
|
||||
return certKeyPair{}, err
|
||||
}
|
||||
pair, err := createTestCertificateByIssuer("Jon Snow", signer, sigAlg, false)
|
||||
if err != nil {
|
||||
return certKeyPair{}, err
|
||||
}
|
||||
return *pair, nil
|
||||
}
|
||||
|
||||
func createTestCertificateByIssuer(name string, issuer *certKeyPair, sigAlg x509.SignatureAlgorithm, isCA bool) (*certKeyPair, error) {
|
||||
var (
|
||||
err error
|
||||
priv crypto.PrivateKey
|
||||
derCert []byte
|
||||
issuerCert *x509.Certificate
|
||||
issuerKey crypto.PrivateKey
|
||||
)
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 32)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
CommonName: name,
|
||||
Organization: []string{"Acme Co"},
|
||||
},
|
||||
NotBefore: time.Now().Add(-1 * time.Second),
|
||||
NotAfter: time.Now().AddDate(1, 0, 0),
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageEmailProtection},
|
||||
}
|
||||
if issuer != nil {
|
||||
issuerCert = issuer.Certificate
|
||||
issuerKey = *issuer.PrivateKey
|
||||
}
|
||||
switch sigAlg {
|
||||
case x509.SHA256WithRSA:
|
||||
priv = test2048Key
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
case x509.SHA384WithRSA:
|
||||
priv = test3072Key
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA384WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA384
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
case x509.SHA512WithRSA:
|
||||
priv = test4096Key
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA512WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA512
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
case x509.ECDSAWithSHA256:
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA256WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA256
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
case x509.ECDSAWithSHA384:
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA384WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA384
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
case x509.ECDSAWithSHA512:
|
||||
priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.SHA512WithRSA
|
||||
case *ecdsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.ECDSAWithSHA512
|
||||
case *dsa.PrivateKey:
|
||||
template.SignatureAlgorithm = x509.DSAWithSHA256
|
||||
}
|
||||
}
|
||||
if isCA {
|
||||
template.IsCA = true
|
||||
template.KeyUsage |= x509.KeyUsageCertSign
|
||||
template.BasicConstraintsValid = true
|
||||
}
|
||||
if issuer == nil {
|
||||
// no issuer given,make this a self-signed root cert
|
||||
issuerCert = &template
|
||||
issuerKey = priv
|
||||
}
|
||||
|
||||
log.Println("creating cert", name, "issued by", issuerCert.Subject.CommonName, "with sigalg", sigAlg)
|
||||
switch priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*rsa.PrivateKey).Public(), issuerKey.(*rsa.PrivateKey))
|
||||
case *ecdsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*rsa.PrivateKey).Public(), issuerKey.(*ecdsa.PrivateKey))
|
||||
case *dsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*rsa.PrivateKey).Public(), issuerKey.(*dsa.PrivateKey))
|
||||
}
|
||||
case *ecdsa.PrivateKey:
|
||||
switch issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*rsa.PrivateKey))
|
||||
case *ecdsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*ecdsa.PrivateKey))
|
||||
case *dsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*dsa.PrivateKey))
|
||||
}
|
||||
case *dsa.PrivateKey:
|
||||
pub := &priv.(*dsa.PrivateKey).PublicKey
|
||||
switch issuerKey := issuerKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, pub, issuerKey)
|
||||
case *ecdsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*dsa.PublicKey), issuerKey)
|
||||
case *dsa.PrivateKey:
|
||||
derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*dsa.PublicKey), issuerKey)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(derCert) == 0 {
|
||||
return nil, fmt.Errorf("no certificate created, probably due to wrong keys. types were %T and %T", priv, issuerKey)
|
||||
}
|
||||
cert, err := x509.ParseCertificate(derCert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
||||
return &certKeyPair{
|
||||
Certificate: cert,
|
||||
PrivateKey: &priv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TestFixture is a decoded test vector: the raw PKCS7 payload bytes plus
// the certificate and RSA private key needed to exercise it. Fields are
// populated by UnmarshalTestFixture; absent PEM blocks leave the
// corresponding field at its zero value.
type TestFixture struct {
	Input       []byte
	Certificate *x509.Certificate
	PrivateKey  *rsa.PrivateKey
}
|
||||
|
||||
func UnmarshalTestFixture(testPEMBlock string) TestFixture {
|
||||
var result TestFixture
|
||||
var derBlock *pem.Block
|
||||
pemBlock := []byte(testPEMBlock)
|
||||
for {
|
||||
derBlock, pemBlock = pem.Decode(pemBlock)
|
||||
if derBlock == nil {
|
||||
break
|
||||
}
|
||||
switch derBlock.Type {
|
||||
case "PKCS7":
|
||||
result.Input = derBlock.Bytes
|
||||
case "CERTIFICATE":
|
||||
result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes)
|
||||
case "PRIVATE KEY":
|
||||
result.PrivateKey, _ = x509.ParsePKCS1PrivateKey(derBlock.Bytes)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
@ -1,435 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/internal"
|
||||
)
|
||||
|
||||
// init applies a package-wide patch via internal.PatchSha1 before any pkcs7
// code runs. NOTE(review): based on the name this presumably re-enables
// SHA-1 signature support that newer Go releases restrict — confirm against
// the internal package before relying on it.
func init() {
	internal.PatchSha1()
}
|
||||
|
||||
// SignedData is an opaque data structure for creating signed data payloads.
type SignedData struct {
	sd                  signedData                // wire-format structure being assembled
	certs               []*x509.Certificate       // certificates to embed alongside the signature
	data, messageDigest []byte                    // raw content and its digest (set when a signer is added)
	digestOid           asn1.ObjectIdentifier     // digest algorithm OID (default set in NewSignedData)
	encryptionOid       asn1.ObjectIdentifier     // signature/encryption algorithm OID, if overridden
}
|
||||
|
||||
// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
|
||||
// ready to be signed via AddSigner. The digest algorithm is set to SHA-256 by default
|
||||
// and can be changed by calling SetDigestAlgorithm.
|
||||
func NewSignedData(data []byte) (*SignedData, error) {
|
||||
content, err := asn1.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ci := contentInfo{
|
||||
ContentType: OIDData,
|
||||
Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
|
||||
}
|
||||
sd := signedData{
|
||||
ContentInfo: ci,
|
||||
Version: 1,
|
||||
}
|
||||
return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA256}, nil
|
||||
}
|
||||
|
||||
// SignerInfoConfig are optional values to include when adding a signer.
type SignerInfoConfig struct {
	// ExtraSignedAttributes are covered by the signature (authenticated).
	ExtraSignedAttributes []Attribute
	// ExtraUnsignedAttributes are carried but not signed (unauthenticated).
	ExtraUnsignedAttributes []Attribute
}
|
||||
|
||||
// signedData is the ASN.1 wire structure for a PKCS7 SignedData content
// type (RFC 2315 §9.1). Field order and asn1 tags define the encoding —
// do not reorder.
type signedData struct {
	Version                    int                        `asn1:"default:1"`
	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
	ContentInfo                contentInfo
	Certificates               rawCertificates        `asn1:"optional,tag:0"`
	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
	SignerInfos                []signerInfo           `asn1:"set"`
}
|
||||
|
||||
// signerInfo is the ASN.1 wire structure for a PKCS7 SignerInfo
// (RFC 2315 §9.2): it identifies the signer by issuer+serial and carries
// the (optionally attribute-covered) signature. Field order and asn1 tags
// define the encoding — do not reorder.
type signerInfo struct {
	Version                   int `asn1:"default:1"`
	IssuerAndSerialNumber     issuerAndSerial
	DigestAlgorithm           pkix.AlgorithmIdentifier
	AuthenticatedAttributes   []attribute `asn1:"optional,omitempty,tag:0"`
	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
	EncryptedDigest           []byte
	UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"`
}
|
||||
|
||||
// attribute is the wire form of a single PKCS7 attribute: an OID paired
// with a SET-encoded value.
type attribute struct {
	Type  asn1.ObjectIdentifier
	Value asn1.RawValue `asn1:"set"`
}
|
||||
|
||||
func marshalAttributes(attrs []attribute) ([]byte, error) {
|
||||
encodedAttributes, err := asn1.Marshal(struct {
|
||||
A []attribute `asn1:"set"`
|
||||
}{A: attrs})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove the leading sequence octets
|
||||
var raw asn1.RawValue
|
||||
asn1.Unmarshal(encodedAttributes, &raw)
|
||||
return raw.Bytes, nil
|
||||
}
|
||||
|
||||
// rawCertificates holds the pre-encoded certificate set, including its
// context tag and length, as asn1.RawContent so marshalling emits it
// verbatim (see marshalCertificateBytes for why the wrapper is required).
type rawCertificates struct {
	Raw asn1.RawContent
}
|
||||
|
||||
// issuerAndSerial identifies a certificate by its issuer's distinguished
// name (kept as raw bytes to preserve the exact RDN sequence) and its
// serial number, per RFC 2315 §6.7.
type issuerAndSerial struct {
	IssuerName   asn1.RawValue
	SerialNumber *big.Int
}
|
||||
|
||||
// SetDigestAlgorithm sets the digest algorithm to be used in the signing process.
//
// This should be called before adding signers
func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
	sd.digestOid = d
}
|
||||
|
||||
// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
//
// This should be called before adding signers
func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
	sd.encryptionOid = d
}
|
||||
|
||||
// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent.
|
||||
func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
|
||||
var parents []*x509.Certificate
|
||||
return sd.AddSignerChain(ee, pkey, parents, config)
|
||||
}
|
||||
|
||||
// AddSignerChain signs attributes about the content and adds certificates
// and signers infos to the Signed Data. The certificate and private key
// of the end-entity signer are used to issue the signature, and any
// parent of that end-entity that need to be added to the list of
// certifications can be specified in the parents slice.
//
// The signature algorithm used to hash the data is the one of the end-entity
// certificate.
func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
	// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
	// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
	// section of the SignedData.SignerInfo, alongside the serial number of
	// the end-entity.
	var ias issuerAndSerial
	ias.SerialNumber = ee.SerialNumber
	if len(parents) == 0 {
		// no parent, the issuer is the end-entity cert itself
		ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
	} else {
		// sanity-check the supplied chain before trusting parents[0]
		err := verifyPartialChain(ee, parents)
		if err != nil {
			return err
		}
		// the first parent is the issuer
		ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject}
	}
	// Record the digest algorithm used by this signer in the outer set.
	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers,
		pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
	)
	hash, err := getHashForOID(sd.digestOid)
	if err != nil {
		return err
	}
	// Digest the content; the digest becomes a signed attribute below.
	h := hash.New()
	h.Write(sd.data)
	sd.messageDigest = h.Sum(nil)
	encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
	if err != nil {
		return err
	}
	// Mandatory signed attributes: content type, message digest, signing time.
	attrs := &attributes{}
	attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType)
	attrs.Add(OIDAttributeMessageDigest, sd.messageDigest)
	attrs.Add(OIDAttributeSigningTime, time.Now().UTC())
	for _, attr := range config.ExtraSignedAttributes {
		attrs.Add(attr.Type, attr.Value)
	}
	finalAttrs, err := attrs.ForMarshalling()
	if err != nil {
		return err
	}
	unsignedAttrs := &attributes{}
	for _, attr := range config.ExtraUnsignedAttributes {
		unsignedAttrs.Add(attr.Type, attr.Value)
	}
	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
	if err != nil {
		return err
	}
	// create signature of signed attributes
	signature, err := signAttributes(finalAttrs, pkey, hash)
	if err != nil {
		return err
	}
	signer := signerInfo{
		AuthenticatedAttributes:   finalAttrs,
		UnauthenticatedAttributes: finalUnsignedAttrs,
		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid},
		IssuerAndSerialNumber:     ias,
		EncryptedDigest:           signature,
		Version:                   1,
	}
	// Embed the end-entity cert (and any parents) so verifiers can build the chain.
	sd.certs = append(sd.certs, ee)
	if len(parents) > 0 {
		sd.certs = append(sd.certs, parents...)
	}
	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
	return nil
}
|
||||
|
||||
// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData.
// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone
// and does not include any signed attributes like timestamp and so on.
//
// This function is needed to sign old Android APKs, something you probably
// shouldn't do unless you're maintaining backward compatibility for old
// applications.
func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
	var signature []byte
	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid})
	hash, err := getHashForOID(sd.digestOid)
	if err != nil {
		return err
	}
	// Digest the raw content directly — no attribute wrapping here.
	h := hash.New()
	h.Write(sd.data)
	sd.messageDigest = h.Sum(nil)
	switch pkey := pkey.(type) {
	case *dsa.PrivateKey:
		// dsa doesn't implement crypto.Signer so we make a special case
		// https://github.com/golang/go/issues/27889
		r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest)
		if err != nil {
			return err
		}
		signature, err = asn1.Marshal(dsaSignature{r, s})
		if err != nil {
			return err
		}
	default:
		key, ok := pkey.(crypto.Signer)
		if !ok {
			return errors.New("pkcs7: private key does not implement crypto.Signer")
		}
		signature, err = key.Sign(rand.Reader, sd.messageDigest, hash)
		if err != nil {
			return err
		}
	}
	var ias issuerAndSerial
	ias.SerialNumber = ee.SerialNumber
	// no parent, the issuer is the end-entity cert itself
	ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
	if sd.encryptionOid == nil {
		// if the encryption algorithm wasn't set by SetEncryptionAlgorithm,
		// infer it from the digest algorithm
		sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
	}
	// NOTE: this check only fires for the assignment just above; err is
	// known nil here on every other path (each earlier failure returned).
	if err != nil {
		return err
	}
	signer := signerInfo{
		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid},
		IssuerAndSerialNumber:     ias,
		EncryptedDigest:           signature,
		Version:                   1,
	}
	// register the signer cert and its signerInfo on the payload
	sd.certs = append(sd.certs, ee)
	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
	return nil
}
|
||||
|
||||
func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error {
|
||||
unsignedAttrs := &attributes{}
|
||||
for _, attr := range extraUnsignedAttrs {
|
||||
unsignedAttrs.Add(attr.Type, attr.Value)
|
||||
}
|
||||
finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
si.UnauthenticatedAttributes = finalUnsignedAttrs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddCertificate adds the certificate to the payload. Useful for parent certificates
func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
	sd.certs = append(sd.certs, cert)
}
|
||||
|
||||
// Detach removes content from the signed data struct to make it a detached signature.
// This must be called right before Finish()
func (sd *SignedData) Detach() {
	// Replacing ContentInfo with a content-free stub drops the embedded data.
	sd.sd.ContentInfo = contentInfo{ContentType: OIDData}
}
|
||||
|
||||
// GetSignedData returns the private Signed Data
// NOTE(review): an exported method returning a pointer to the unexported
// signedData type limits what callers outside the package can do with it.
func (sd *SignedData) GetSignedData() *signedData {
	return &sd.sd
}
|
||||
|
||||
// Finish marshals the content and its signers
|
||||
func (sd *SignedData) Finish() ([]byte, error) {
|
||||
sd.sd.Certificates = marshalCertificates(sd.certs)
|
||||
inner, err := asn1.Marshal(sd.sd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outer := contentInfo{
|
||||
ContentType: OIDSignedData,
|
||||
Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
|
||||
}
|
||||
return asn1.Marshal(outer)
|
||||
}
|
||||
|
||||
// RemoveAuthenticatedAttributes removes authenticated attributes from signedData
// similar to OpenSSL's PKCS7_NOATTR or -noattr flags
func (sd *SignedData) RemoveAuthenticatedAttributes() {
	for i := range sd.sd.SignerInfos {
		sd.sd.SignerInfos[i].AuthenticatedAttributes = nil
	}
}
|
||||
|
||||
// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData
func (sd *SignedData) RemoveUnauthenticatedAttributes() {
	for i := range sd.sd.SignerInfos {
		sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil
	}
}
|
||||
|
||||
// verifyPartialChain checks that a given cert is issued by the first parent in the list,
|
||||
// then continue down the path. It doesn't require the last parent to be a root CA,
|
||||
// or to be trusted in any truststore. It simply verifies that the chain provided, albeit
|
||||
// partial, makes sense.
|
||||
func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error {
|
||||
if len(parents) == 0 {
|
||||
return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName)
|
||||
}
|
||||
err := cert.CheckSignatureFrom(parents[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err)
|
||||
}
|
||||
if len(parents) == 1 {
|
||||
// there is no more parent to check, return
|
||||
return nil
|
||||
}
|
||||
return verifyPartialChain(parents[0], parents[1:])
|
||||
}
|
||||
|
||||
// cert2issuerAndSerial extracts the raw issuer name and serial number from
// a certificate into the issuerAndSerial wire structure. The error return
// is always nil today; it is kept for signature stability.
func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
	var ias issuerAndSerial
	// The issuer RDNSequence has to match exactly the sequence in the certificate
	// We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
	ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
	ias.SerialNumber = cert.SerialNumber

	return ias, nil
}
|
||||
|
||||
// signAttributes signs the DER encoded form of the attributes with the
// private key: the attributes are marshalled, digested with digestAlg, and
// the digest is signed. DSA keys get a dedicated path; everything else must
// implement crypto.Signer.
func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) {
	attrBytes, err := marshalAttributes(attrs)
	if err != nil {
		return nil, err
	}
	h := digestAlg.New()
	h.Write(attrBytes)
	hash := h.Sum(nil)

	// dsa doesn't implement crypto.Signer so we make a special case
	// https://github.com/golang/go/issues/27889
	switch pkey := pkey.(type) {
	case *dsa.PrivateKey:
		r, s, err := dsa.Sign(rand.Reader, pkey, hash)
		if err != nil {
			return nil, err
		}
		// DSA signatures are the ASN.1 encoding of the (r, s) pair.
		return asn1.Marshal(dsaSignature{r, s})
	}

	key, ok := pkey.(crypto.Signer)
	if !ok {
		return nil, errors.New("pkcs7: private key does not implement crypto.Signer")
	}
	return key.Sign(rand.Reader, hash, digestAlg)
}
|
||||
|
||||
// dsaSignature is the ASN.1 structure for a DSA signature: the (R, S)
// integer pair produced by dsa.Sign.
type dsaSignature struct {
	R, S *big.Int
}
|
||||
|
||||
// concats and wraps the certificates in the RawValue structure
|
||||
func marshalCertificates(certs []*x509.Certificate) rawCertificates {
|
||||
var buf bytes.Buffer
|
||||
for _, cert := range certs {
|
||||
buf.Write(cert.Raw)
|
||||
}
|
||||
rawCerts, _ := marshalCertificateBytes(buf.Bytes())
|
||||
return rawCerts
|
||||
}
|
||||
|
||||
// Even though, the tag & length are stripped out during marshalling the
|
||||
// RawContent, we have to encode it into the RawContent. If its missing,
|
||||
// then `asn1.Marshal()` will strip out the certificate wrapper instead.
|
||||
func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
|
||||
val := asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
|
||||
b, err := asn1.Marshal(val)
|
||||
if err != nil {
|
||||
return rawCertificates{}, err
|
||||
}
|
||||
return rawCertificates{Raw: b}, nil
|
||||
}
|
||||
|
||||
// DegenerateCertificate creates a signed data structure containing only the
// provided certificate or certificate chain. A "degenerate" SignedData has
// no content and no signers — it is the standard PKCS7 container for
// shipping certificates.
func DegenerateCertificate(cert []byte) ([]byte, error) {
	rawCert, err := marshalCertificateBytes(cert)
	if err != nil {
		return nil, err
	}
	// Empty content: only the certificate set is carried.
	emptyContent := contentInfo{ContentType: OIDData}
	sd := signedData{
		Version:      1,
		ContentInfo:  emptyContent,
		Certificates: rawCert,
		CRLs:         []pkix.CertificateList{},
	}
	content, err := asn1.Marshal(sd)
	if err != nil {
		return nil, err
	}
	// Wrap in the outer ContentInfo envelope with the SignedData type.
	signedContent := contentInfo{
		ContentType: OIDSignedData,
		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
	}
	return asn1.Marshal(signedContent)
}
|
@ -1,271 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/dsa"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/big"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSign exercises the full sign/parse/verify round trip over every
// combination of root, intermediate, and signer signature algorithm, both
// attached and detached. It asserts the content survives, the chain
// verifies against the root truststore, and the signer's digest OID is
// preserved.
func TestSign(t *testing.T) {
	content := []byte("Hello World")
	sigalgs := []x509.SignatureAlgorithm{
		x509.SHA256WithRSA,
		x509.SHA512WithRSA,
		x509.ECDSAWithSHA256,
		x509.ECDSAWithSHA384,
		x509.ECDSAWithSHA512,
	}
	for _, sigalgroot := range sigalgs {
		rootCert, err := createTestCertificateByIssuer("PKCS7 Test Root CA", nil, sigalgroot, true)
		if err != nil {
			t.Fatalf("test %s: cannot generate root cert: %s", sigalgroot, err)
		}
		truststore := x509.NewCertPool()
		truststore.AddCert(rootCert.Certificate)
		for _, sigalginter := range sigalgs {
			interCert, err := createTestCertificateByIssuer("PKCS7 Test Intermediate Cert", rootCert, sigalginter, true)
			if err != nil {
				t.Fatalf("test %s/%s: cannot generate intermediate cert: %s", sigalgroot, sigalginter, err)
			}
			var parents []*x509.Certificate
			parents = append(parents, interCert.Certificate)
			for _, sigalgsigner := range sigalgs {
				signerCert, err := createTestCertificateByIssuer("PKCS7 Test Signer Cert", interCert, sigalgsigner, false)
				if err != nil {
					t.Fatalf("test %s/%s/%s: cannot generate signer cert: %s", sigalgroot, sigalginter, sigalgsigner, err)
				}
				for _, testDetach := range []bool{false, true} {
					log.Printf("test %s/%s/%s detached %t\n", sigalgroot, sigalginter, sigalgsigner, testDetach)
					toBeSigned, err := NewSignedData(content)
					if err != nil {
						t.Fatalf("test %s/%s/%s: cannot initialize signed data: %s", sigalgroot, sigalginter, sigalgsigner, err)
					}

					// Set the digest to match the end entity cert
					signerDigest, _ := getDigestOIDForSignatureAlgorithm(signerCert.Certificate.SignatureAlgorithm)
					toBeSigned.SetDigestAlgorithm(signerDigest)

					if err := toBeSigned.AddSignerChain(signerCert.Certificate, *signerCert.PrivateKey, parents, SignerInfoConfig{}); err != nil {
						t.Fatalf("test %s/%s/%s: cannot add signer: %s", sigalgroot, sigalginter, sigalgsigner, err)
					}
					if testDetach {
						toBeSigned.Detach()
					}
					signed, err := toBeSigned.Finish()
					if err != nil {
						t.Fatalf("test %s/%s/%s: cannot finish signing data: %s", sigalgroot, sigalginter, sigalgsigner, err)
					}
					pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed})
					p7, err := Parse(signed)
					if err != nil {
						t.Fatalf("test %s/%s/%s: cannot parse signed data: %s", sigalgroot, sigalginter, sigalgsigner, err)
					}
					if testDetach {
						// detached signatures carry no content; re-attach it for verification
						p7.Content = content
					}
					if !bytes.Equal(content, p7.Content) {
						t.Errorf("test %s/%s/%s: content was not found in the parsed data:\n\tExpected: %s\n\tActual: %s", sigalgroot, sigalginter, sigalgsigner, content, p7.Content)
					}
					if err := p7.VerifyWithChain(truststore); err != nil {
						t.Errorf("test %s/%s/%s: cannot verify signed data: %s", sigalgroot, sigalginter, sigalgsigner, err)
					}
					if !signerDigest.Equal(p7.Signers[0].DigestAlgorithm.Algorithm) {
						t.Errorf("test %s/%s/%s: expected digest algorithm %q but got %q",
							sigalgroot, sigalginter, sigalgsigner, signerDigest, p7.Signers[0].DigestAlgorithm.Algorithm)
					}
				}
			}
		}
	}
}
|
||||
|
||||
func TestDSASignAndVerifyWithOpenSSL(t *testing.T) {
|
||||
t.Log(runtime.Version())
|
||||
content := []byte("Hello World")
|
||||
// write the content to a temp file
|
||||
tmpContentFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_content")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpContentFile.Name(), content, 0o755)
|
||||
|
||||
block, _ := pem.Decode(dsaPublicCert)
|
||||
if block == nil {
|
||||
t.Fatal("failed to parse certificate PEM")
|
||||
}
|
||||
signerCert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
t.Fatal("failed to parse certificate: " + err.Error())
|
||||
}
|
||||
|
||||
// write the signer cert to a temp file
|
||||
tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_signer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755)
|
||||
|
||||
priv := dsa.PrivateKey{
|
||||
PublicKey: dsa.PublicKey{
|
||||
Parameters: dsa.Parameters{
|
||||
P: fromHex("fd7f53811d75122952df4a9c2eece4e7f611b7523cef4400c31e3f80b6512669455d402251fb593d8d58fabfc5f5ba30f6cb9b556cd7813b801d346ff26660b76b9950a5a49f9fe8047b1022c24fbba9d7feb7c61bf83b57e7c6a8a6150f04fb83f6d3c51ec3023554135a169132f675f3ae2b61d72aeff22203199dd14801c7"),
|
||||
Q: fromHex("9760508F15230BCCB292B982A2EB840BF0581CF5"),
|
||||
G: fromHex("F7E1A085D69B3DDECBBCAB5C36B857B97994AFBBFA3AEA82F9574C0B3D0782675159578EBAD4594FE67107108180B449167123E84C281613B7CF09328CC8A6E13C167A8B547C8D28E0A3AE1E2BB3A675916EA37F0BFA213562F1FB627A01243BCCA4F1BEA8519089A883DFE15AE59F06928B665E807B552564014C3BFECF492A"),
|
||||
},
|
||||
},
|
||||
X: fromHex("7D6E1A3DD4019FD809669D8AB8DA73807CEF7EC1"),
|
||||
}
|
||||
toBeSigned, err := NewSignedData(content)
|
||||
if err != nil {
|
||||
t.Fatalf("test case: cannot initialize signed data: %s", err)
|
||||
}
|
||||
// openssl DSA only supports SHA1 for our 1024-bit DSA key, since that is all the standard officially supports
|
||||
toBeSigned.digestOid = OIDDigestAlgorithmSHA1
|
||||
if err := toBeSigned.SignWithoutAttr(signerCert, &priv, SignerInfoConfig{}); err != nil {
|
||||
t.Fatalf("Cannot add signer: %s", err)
|
||||
}
|
||||
toBeSigned.Detach()
|
||||
signed, err := toBeSigned.Finish()
|
||||
if err != nil {
|
||||
t.Fatalf("test case: cannot finish signing data: %s", err)
|
||||
}
|
||||
|
||||
// write the signature to a temp file
|
||||
tmpSignatureFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_signature")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpSignatureFile.Name(), pem.EncodeToMemory(&pem.Block{Type: "PKCS7", Bytes: signed}), 0o755)
|
||||
|
||||
// call openssl to verify the signature on the content using the root
|
||||
opensslCMD := exec.Command("openssl", "smime", "-verify", "-noverify",
|
||||
"-in", tmpSignatureFile.Name(), "-inform", "PEM",
|
||||
"-content", tmpContentFile.Name())
|
||||
out, err := opensslCMD.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Errorf("Command: %s", opensslCMD.Args)
|
||||
t.Fatalf("test case: openssl command failed with %s: %s", err, out)
|
||||
}
|
||||
os.Remove(tmpSignatureFile.Name()) // clean up
|
||||
os.Remove(tmpContentFile.Name()) // clean up
|
||||
os.Remove(tmpSignerCertFile.Name()) // clean up
|
||||
}
|
||||
|
||||
// ExampleSignedData demonstrates the basic signing flow: create a signer
// certificate, initialize a SignedData payload, add the signer, detach the
// content, and emit the resulting signature as PEM.
func ExampleSignedData() {
	// generate a signing cert or load a key pair
	cert, err := createTestCertificate(x509.SHA256WithRSA)
	if err != nil {
		fmt.Printf("Cannot create test certificates: %s", err)
	}

	// Initialize a SignedData struct with content to be signed
	signedData, err := NewSignedData([]byte("Example data to be signed"))
	if err != nil {
		fmt.Printf("Cannot initialize signed data: %s", err)
	}

	// Add the signing cert and private key
	if err := signedData.AddSigner(cert.Certificate, cert.PrivateKey, SignerInfoConfig{}); err != nil {
		fmt.Printf("Cannot add signer: %s", err)
	}

	// Call Detach() if you want to remove content from the signature
	// and generate an S/MIME detached signature
	signedData.Detach()

	// Finish() to obtain the signature bytes
	detachedSignature, err := signedData.Finish()
	if err != nil {
		fmt.Printf("Cannot finish signing data: %s", err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: detachedSignature})
}
|
||||
|
||||
func TestUnmarshalSignedAttribute(t *testing.T) {
|
||||
cert, err := createTestCertificate(x509.SHA512WithRSA)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
content := []byte("Hello World")
|
||||
toBeSigned, err := NewSignedData(content)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot initialize signed data: %s", err)
|
||||
}
|
||||
oidTest := asn1.ObjectIdentifier{2, 3, 4, 5, 6, 7}
|
||||
testValue := "TestValue"
|
||||
if err := toBeSigned.AddSigner(cert.Certificate, *cert.PrivateKey, SignerInfoConfig{
|
||||
ExtraSignedAttributes: []Attribute{{Type: oidTest, Value: testValue}},
|
||||
}); err != nil {
|
||||
t.Fatalf("Cannot add signer: %s", err)
|
||||
}
|
||||
signed, err := toBeSigned.Finish()
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot finish signing data: %s", err)
|
||||
}
|
||||
p7, err := Parse(signed)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot parse signed data: %v", err)
|
||||
}
|
||||
var actual string
|
||||
err = p7.UnmarshalSignedAttribute(oidTest, &actual)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot unmarshal test value: %s", err)
|
||||
}
|
||||
if testValue != actual {
|
||||
t.Errorf("Attribute does not match test value\n\tExpected: %s\n\tActual: %s", testValue, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDegenerateCertificate(t *testing.T) {
|
||||
cert, err := createTestCertificate(x509.SHA256WithRSA)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
deg, err := DegenerateCertificate(cert.Certificate.Raw)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testOpenSSLParse(t, deg)
|
||||
pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: deg})
|
||||
}
|
||||
|
||||
// writes the cert to a temporary file and tests that openssl can read it.
|
||||
func testOpenSSLParse(t *testing.T, certBytes []byte) {
|
||||
tmpCertFile, err := ioutil.TempFile("", "testCertificate")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(tmpCertFile.Name()) // clean up
|
||||
|
||||
if _, err := tmpCertFile.Write(certBytes); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
opensslCMD := exec.Command("openssl", "pkcs7", "-inform", "der", "-in", tmpCertFile.Name())
|
||||
_, err = opensslCMD.Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tmpCertFile.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func fromHex(s string) *big.Int {
|
||||
result, ok := new(big.Int).SetString(s, 16)
|
||||
if !ok {
|
||||
panic(s)
|
||||
}
|
||||
return result
|
||||
}
|
@ -1,400 +0,0 @@
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/subtle"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Verify is a wrapper around VerifyWithChain() that initializes an empty
|
||||
// trust store, effectively disabling certificate verification when validating
|
||||
// a signature.
|
||||
func (p7 *PKCS7) Verify() (err error) {
|
||||
return p7.VerifyWithChain(nil)
|
||||
}
|
||||
|
||||
// VerifyWithChain checks the signatures of a PKCS7 object.
|
||||
//
|
||||
// If truststore is not nil, it also verifies the chain of trust of
|
||||
// the end-entity signer cert to one of the roots in the
|
||||
// truststore. When the PKCS7 object includes the signing time
|
||||
// authenticated attr verifies the chain at that time and UTC now
|
||||
// otherwise.
|
||||
func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
|
||||
if len(p7.Signers) == 0 {
|
||||
return errors.New("pkcs7: Message has no signers")
|
||||
}
|
||||
for _, signer := range p7.Signers {
|
||||
if err := verifySignature(p7, signer, truststore); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
|
||||
//
|
||||
// If truststore is not nil, it also verifies the chain of trust of
|
||||
// the end-entity signer cert to a root in the truststore at
|
||||
// currentTime. It does not use the signing time authenticated
|
||||
// attribute.
|
||||
func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) {
|
||||
if len(p7.Signers) == 0 {
|
||||
return errors.New("pkcs7: Message has no signers")
|
||||
}
|
||||
for _, signer := range p7.Signers {
|
||||
if err := verifySignatureAtTime(p7, signer, truststore, currentTime); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) {
|
||||
signedData := p7.Content
|
||||
ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
|
||||
if ee == nil {
|
||||
return errors.New("pkcs7: No certificate for signer")
|
||||
}
|
||||
if len(signer.AuthenticatedAttributes) > 0 {
|
||||
// TODO(fullsailor): First check the content type match
|
||||
var (
|
||||
digest []byte
|
||||
signingTime time.Time
|
||||
)
|
||||
err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := hash.New()
|
||||
h.Write(p7.Content)
|
||||
computed := h.Sum(nil)
|
||||
if subtle.ConstantTimeCompare(digest, computed) != 1 {
|
||||
return &MessageDigestMismatchError{
|
||||
ExpectedDigest: digest,
|
||||
ActualDigest: computed,
|
||||
}
|
||||
}
|
||||
signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
|
||||
if err == nil {
|
||||
// signing time found, performing validity check
|
||||
if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
|
||||
return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
|
||||
signingTime.Format(time.RFC3339),
|
||||
ee.NotBefore.Format(time.RFC3339),
|
||||
ee.NotAfter.Format(time.RFC3339))
|
||||
}
|
||||
}
|
||||
}
|
||||
if truststore != nil {
|
||||
_, err = verifyCertChain(ee, p7.Certificates, truststore, currentTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch sigalg {
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return dsaCheckSignature(sigalg, signedData, signer.EncryptedDigest, ee.PublicKey)
|
||||
default:
|
||||
return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
|
||||
}
|
||||
}
|
||||
|
||||
// dsaSignature verifies the DSA signature on a PKCS7 document. DSA support was
|
||||
// removed from Go's crypto/x509 support prior to Go 1.16. This allows
|
||||
// verifying legacy signatures until affected applications can be migrated off
|
||||
// of DSA.
|
||||
func dsaCheckSignature(algo x509.SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) error {
|
||||
dsaKey, ok := publicKey.(*dsa.PublicKey)
|
||||
if !ok {
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
var hashType crypto.Hash
|
||||
switch algo {
|
||||
case x509.DSAWithSHA1:
|
||||
hashType = crypto.SHA1
|
||||
case x509.DSAWithSHA256:
|
||||
hashType = crypto.SHA256
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
h := hashType.New()
|
||||
h.Write(signed)
|
||||
signed = h.Sum(nil)
|
||||
|
||||
dsaSig := new(dsaSignature)
|
||||
if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil {
|
||||
return err
|
||||
} else if len(rest) != 0 {
|
||||
return errors.New("x509: trailing data after DSA signature")
|
||||
}
|
||||
if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
|
||||
return errors.New("x509: DSA signature contained zero or negative values")
|
||||
}
|
||||
// According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer
|
||||
// than the key length, but crypto/dsa doesn't do it automatically.
|
||||
if maxHashLen := dsaKey.Q.BitLen() / 8; maxHashLen < len(signed) {
|
||||
signed = signed[:maxHashLen]
|
||||
}
|
||||
if !dsa.Verify(dsaKey, signed, dsaSig.R, dsaSig.S) {
|
||||
return errors.New("x509: DSA verification failure")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) {
|
||||
signedData := p7.Content
|
||||
ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
|
||||
if ee == nil {
|
||||
return errors.New("pkcs7: No certificate for signer")
|
||||
}
|
||||
signingTime := time.Now().UTC()
|
||||
if len(signer.AuthenticatedAttributes) > 0 {
|
||||
// TODO(fullsailor): First check the content type match
|
||||
var digest []byte
|
||||
err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h := hash.New()
|
||||
h.Write(p7.Content)
|
||||
computed := h.Sum(nil)
|
||||
if subtle.ConstantTimeCompare(digest, computed) != 1 {
|
||||
return &MessageDigestMismatchError{
|
||||
ExpectedDigest: digest,
|
||||
ActualDigest: computed,
|
||||
}
|
||||
}
|
||||
signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
|
||||
if err == nil {
|
||||
// signing time found, performing validity check
|
||||
if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
|
||||
return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
|
||||
signingTime.Format(time.RFC3339),
|
||||
ee.NotBefore.Format(time.RFC3339),
|
||||
ee.NotAfter.Format(time.RFC3339))
|
||||
}
|
||||
}
|
||||
}
|
||||
if truststore != nil {
|
||||
_, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch sigalg {
|
||||
case x509.DSAWithSHA1, x509.DSAWithSHA256:
|
||||
return dsaCheckSignature(sigalg, signedData, signer.EncryptedDigest, ee.PublicKey)
|
||||
default:
|
||||
return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
|
||||
}
|
||||
}
|
||||
|
||||
// GetOnlySigner returns an x509.Certificate for the first signer of the signed
|
||||
// data payload. If there are more or less than one signer, nil is returned
|
||||
func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
|
||||
if len(p7.Signers) != 1 {
|
||||
return nil
|
||||
}
|
||||
signer := p7.Signers[0]
|
||||
return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
|
||||
}
|
||||
|
||||
// UnmarshalSignedAttribute decodes a single attribute from the signer info
|
||||
func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
|
||||
sd, ok := p7.raw.(signedData)
|
||||
if !ok {
|
||||
return errors.New("pkcs7: payload is not signedData content")
|
||||
}
|
||||
if len(sd.SignerInfos) < 1 {
|
||||
return errors.New("pkcs7: payload has no signers")
|
||||
}
|
||||
attributes := sd.SignerInfos[0].AuthenticatedAttributes
|
||||
return unmarshalAttribute(attributes, attributeType, out)
|
||||
}
|
||||
|
||||
func parseSignedData(data []byte) (*PKCS7, error) {
|
||||
var sd signedData
|
||||
asn1.Unmarshal(data, &sd)
|
||||
certs, err := sd.Certificates.Parse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// fmt.Printf("--> Signed Data Version %d\n", sd.Version)
|
||||
|
||||
var compound asn1.RawValue
|
||||
var content unsignedData
|
||||
|
||||
// The Content.Bytes maybe empty on PKI responses.
|
||||
if len(sd.ContentInfo.Content.Bytes) > 0 {
|
||||
if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Compound octet string
|
||||
if compound.IsCompound {
|
||||
if compound.Tag == 4 {
|
||||
if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
content = compound.Bytes
|
||||
}
|
||||
} else {
|
||||
// assuming this is tag 04
|
||||
content = compound.Bytes
|
||||
}
|
||||
return &PKCS7{
|
||||
Content: content,
|
||||
Certificates: certs,
|
||||
CRLs: sd.CRLs,
|
||||
Signers: sd.SignerInfos,
|
||||
raw: sd,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// verifyCertChain takes an end-entity certs, a list of potential intermediates and a
|
||||
// truststore, and built all potential chains between the EE and a trusted root.
|
||||
//
|
||||
// When verifying chains that may have expired, currentTime can be set to a past date
|
||||
// to allow the verification to pass. If unset, currentTime is set to the current UTC time.
|
||||
func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) {
|
||||
intermediates := x509.NewCertPool()
|
||||
for _, intermediate := range certs {
|
||||
intermediates.AddCert(intermediate)
|
||||
}
|
||||
verifyOptions := x509.VerifyOptions{
|
||||
Roots: truststore,
|
||||
Intermediates: intermediates,
|
||||
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
|
||||
CurrentTime: currentTime,
|
||||
}
|
||||
chains, err = ee.Verify(verifyOptions)
|
||||
if err != nil {
|
||||
return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MessageDigestMismatchError is returned when the signer data digest does not
|
||||
// match the computed digest for the contained content
|
||||
type MessageDigestMismatchError struct {
|
||||
ExpectedDigest []byte
|
||||
ActualDigest []byte
|
||||
}
|
||||
|
||||
func (err *MessageDigestMismatchError) Error() string {
|
||||
return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest)
|
||||
}
|
||||
|
||||
func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) {
|
||||
switch {
|
||||
case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1):
|
||||
return x509.ECDSAWithSHA1, nil
|
||||
case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256):
|
||||
return x509.ECDSAWithSHA256, nil
|
||||
case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384):
|
||||
return x509.ECDSAWithSHA384, nil
|
||||
case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512):
|
||||
return x509.ECDSAWithSHA512, nil
|
||||
case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512):
|
||||
switch {
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
|
||||
return x509.SHA1WithRSA, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
|
||||
return x509.SHA256WithRSA, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384):
|
||||
return x509.SHA384WithRSA, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512):
|
||||
return x509.SHA512WithRSA, nil
|
||||
default:
|
||||
return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
|
||||
digest.Algorithm.String(), digestEncryption.Algorithm.String())
|
||||
}
|
||||
case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA),
|
||||
digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1):
|
||||
switch {
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
|
||||
return x509.DSAWithSHA1, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
|
||||
return x509.DSAWithSHA256, nil
|
||||
default:
|
||||
return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
|
||||
digest.Algorithm.String(), digestEncryption.Algorithm.String())
|
||||
}
|
||||
case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384),
|
||||
digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521):
|
||||
switch {
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
|
||||
return x509.ECDSAWithSHA1, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
|
||||
return x509.ECDSAWithSHA256, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384):
|
||||
return x509.ECDSAWithSHA384, nil
|
||||
case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512):
|
||||
return x509.ECDSAWithSHA512, nil
|
||||
default:
|
||||
return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
|
||||
digest.Algorithm.String(), digestEncryption.Algorithm.String())
|
||||
}
|
||||
default:
|
||||
return -1, fmt.Errorf("pkcs7: unsupported algorithm %q",
|
||||
digestEncryption.Algorithm.String())
|
||||
}
|
||||
}
|
||||
|
||||
func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
|
||||
for _, cert := range certs {
|
||||
if isCertMatchForIssuerAndSerial(cert, ias) {
|
||||
return cert
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error {
|
||||
for _, attr := range attrs {
|
||||
if attr.Type.Equal(attributeType) {
|
||||
_, err := asn1.Unmarshal(attr.Value.Bytes, out)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return errors.New("pkcs7: attribute type not in attributes")
|
||||
}
|
@ -1,181 +0,0 @@
|
||||
//go:build go1.13 || go1.14 || go1.15
|
||||
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestVerifyEC2(t *testing.T) {
|
||||
fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture)
|
||||
p7, err := Parse(fixture.Input)
|
||||
if err != nil {
|
||||
t.Errorf("Parse encountered unexpected error: %v", err)
|
||||
}
|
||||
p7.Certificates = []*x509.Certificate{fixture.Certificate}
|
||||
if err := p7.Verify(); err != nil {
|
||||
t.Errorf("Verify failed with error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var EC2IdentityDocumentFixture = `
|
||||
-----BEGIN PKCS7-----
|
||||
MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA
|
||||
JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh
|
||||
eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1
|
||||
cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh
|
||||
bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs
|
||||
bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg
|
||||
OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK
|
||||
ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj
|
||||
aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy
|
||||
YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA
|
||||
AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n
|
||||
dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi
|
||||
IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B
|
||||
CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG
|
||||
CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w
|
||||
LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA
|
||||
AAAAAA==
|
||||
-----END PKCS7-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
|
||||
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
|
||||
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
|
||||
ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
|
||||
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
|
||||
cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
|
||||
ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
|
||||
VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
|
||||
hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
|
||||
k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
|
||||
hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
|
||||
lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
|
||||
MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
|
||||
MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
|
||||
vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
|
||||
7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
|
||||
-----END CERTIFICATE-----`
|
||||
|
||||
func TestDSASignWithOpenSSLAndVerify(t *testing.T) {
|
||||
content := []byte(`
|
||||
A ship in port is safe,
|
||||
but that's not what ships are built for.
|
||||
-- Grace Hopper`)
|
||||
// write the content to a temp file
|
||||
tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpContentFile.Name(), content, 0o755)
|
||||
|
||||
// write the signer cert to a temp file
|
||||
tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755)
|
||||
|
||||
// write the signer key to a temp file
|
||||
tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0o755)
|
||||
|
||||
tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// call openssl to sign the content
|
||||
opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1",
|
||||
"-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(),
|
||||
"-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(),
|
||||
"-certfile", tmpSignerCertFile.Name(), "-outform", "PEM")
|
||||
out, err := opensslCMD.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("openssl command failed with %s: %s", err, out)
|
||||
}
|
||||
|
||||
// verify the signed content
|
||||
pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("%s\n", pemSignature)
|
||||
derBlock, _ := pem.Decode(pemSignature)
|
||||
if derBlock == nil {
|
||||
t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name())
|
||||
}
|
||||
p7, err := Parse(derBlock.Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Parse encountered unexpected error: %v", err)
|
||||
}
|
||||
if err := p7.Verify(); err != nil {
|
||||
t.Fatalf("Verify failed with error: %v", err)
|
||||
}
|
||||
os.Remove(tmpSignerCertFile.Name()) // clean up
|
||||
os.Remove(tmpSignerKeyFile.Name()) // clean up
|
||||
os.Remove(tmpContentFile.Name()) // clean up
|
||||
}
|
||||
|
||||
var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY-----
|
||||
MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS
|
||||
PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl
|
||||
pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith
|
||||
1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L
|
||||
vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3
|
||||
zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo
|
||||
g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE=
|
||||
-----END PRIVATE KEY-----`)
|
||||
|
||||
var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE-----
|
||||
MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV
|
||||
bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
|
||||
VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du
|
||||
MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r
|
||||
bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE
|
||||
ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC
|
||||
AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD
|
||||
Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE
|
||||
exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
|
||||
Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4
|
||||
V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI
|
||||
puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
|
||||
nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp
|
||||
rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt
|
||||
1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT
|
||||
ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G
|
||||
CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688
|
||||
qzy/7yePTlhlpj+ahMM=
|
||||
-----END CERTIFICATE-----`)
|
||||
|
||||
type DSATestFixture struct {
|
||||
Input []byte
|
||||
Certificate *x509.Certificate
|
||||
}
|
||||
|
||||
func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture {
|
||||
var result DSATestFixture
|
||||
var derBlock *pem.Block
|
||||
pemBlock := []byte(testPEMBlock)
|
||||
for {
|
||||
derBlock, pemBlock = pem.Decode(pemBlock)
|
||||
if derBlock == nil {
|
||||
break
|
||||
}
|
||||
switch derBlock.Type {
|
||||
case "PKCS7":
|
||||
result.Input = derBlock.Bytes
|
||||
case "CERTIFICATE":
|
||||
result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
@ -1,183 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
"github.com/aws/aws-sdk-go/service/sts/stsiface"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/sdk/queue"
|
||||
)
|
||||
|
||||
const (
|
||||
rootConfigPath = "config/root"
|
||||
minAwsUserRollbackAge = 5 * time.Minute
|
||||
operationPrefixAWS = "aws"
|
||||
operationPrefixAWSASD = "aws-config"
|
||||
)
|
||||
|
||||
func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
|
||||
b := Backend(conf)
|
||||
if err := b.Setup(ctx, conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func Backend(conf *logical.BackendConfig) *backend {
|
||||
var b backend
|
||||
b.credRotationQueue = queue.New()
|
||||
b.Backend = &framework.Backend{
|
||||
Help: strings.TrimSpace(backendHelp),
|
||||
|
||||
PathsSpecial: &logical.Paths{
|
||||
LocalStorage: []string{
|
||||
framework.WALPrefix,
|
||||
},
|
||||
SealWrapStorage: []string{
|
||||
rootConfigPath,
|
||||
pathStaticCreds + "/",
|
||||
},
|
||||
},
|
||||
|
||||
Paths: []*framework.Path{
|
||||
pathConfigRoot(&b),
|
||||
pathConfigRotateRoot(&b),
|
||||
pathConfigLease(&b),
|
||||
pathRoles(&b),
|
||||
pathListRoles(&b),
|
||||
pathStaticRoles(&b),
|
||||
pathStaticCredentials(&b),
|
||||
pathUser(&b),
|
||||
},
|
||||
|
||||
Secrets: []*framework.Secret{
|
||||
secretAccessKeys(&b),
|
||||
},
|
||||
|
||||
Invalidate: b.invalidate,
|
||||
WALRollback: b.walRollback,
|
||||
WALRollbackMinAge: minAwsUserRollbackAge,
|
||||
PeriodicFunc: func(ctx context.Context, req *logical.Request) error {
|
||||
repState := conf.System.ReplicationState()
|
||||
if (conf.System.LocalMount() ||
|
||||
!repState.HasState(consts.ReplicationPerformanceSecondary)) &&
|
||||
!repState.HasState(consts.ReplicationDRSecondary) &&
|
||||
!repState.HasState(consts.ReplicationPerformanceStandby) {
|
||||
return b.rotateExpiredStaticCreds(ctx, req)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
BackendType: logical.TypeLogical,
|
||||
}
|
||||
|
||||
return &b
|
||||
}
|
||||
|
||||
type backend struct {
|
||||
*framework.Backend
|
||||
|
||||
// Mutex to protect access to reading and writing policies
|
||||
roleMutex sync.RWMutex
|
||||
|
||||
// Mutex to protect access to iam/sts clients and client configs
|
||||
clientMutex sync.RWMutex
|
||||
|
||||
// iamClient and stsClient hold configured iam and sts clients for reuse, and
|
||||
// to enable mocking with AWS iface for tests
|
||||
iamClient iamiface.IAMAPI
|
||||
stsClient stsiface.STSAPI
|
||||
|
||||
// the age of a static role's credential is tracked by a priority queue and handled
|
||||
// by the PeriodicFunc
|
||||
credRotationQueue *queue.PriorityQueue
|
||||
}
|
||||
|
||||
const backendHelp = `
|
||||
The AWS backend dynamically generates AWS access keys for a set of
|
||||
IAM policies. The AWS access keys have a configurable lease set and
|
||||
are automatically revoked at the end of the lease.
|
||||
|
||||
After mounting this backend, credentials to generate IAM keys must
|
||||
be configured with the "root" path and policies must be written using
|
||||
the "roles/" endpoints before any access keys can be generated.
|
||||
`
|
||||
|
||||
func (b *backend) invalidate(ctx context.Context, key string) {
|
||||
switch {
|
||||
case key == rootConfigPath:
|
||||
b.clearClients()
|
||||
}
|
||||
}
|
||||
|
||||
// clearClients clears the backend's IAM and STS clients
|
||||
func (b *backend) clearClients() {
|
||||
b.clientMutex.Lock()
|
||||
defer b.clientMutex.Unlock()
|
||||
b.iamClient = nil
|
||||
b.stsClient = nil
|
||||
}
|
||||
|
||||
// clientIAM returns the configured IAM client. If nil, it constructs a new one
|
||||
// and returns it, setting it the internal variable
|
||||
func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IAMAPI, error) {
|
||||
b.clientMutex.RLock()
|
||||
if b.iamClient != nil {
|
||||
b.clientMutex.RUnlock()
|
||||
return b.iamClient, nil
|
||||
}
|
||||
|
||||
// Upgrade the lock for writing
|
||||
b.clientMutex.RUnlock()
|
||||
b.clientMutex.Lock()
|
||||
defer b.clientMutex.Unlock()
|
||||
|
||||
// check client again, in the event that a client was being created while we
|
||||
// waited for Lock()
|
||||
if b.iamClient != nil {
|
||||
return b.iamClient, nil
|
||||
}
|
||||
|
||||
iamClient, err := nonCachedClientIAM(ctx, s, b.Logger())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.iamClient = iamClient
|
||||
|
||||
return b.iamClient, nil
|
||||
}
|
||||
|
||||
// clientSTS returns the configured STS client, constructing and caching a new
// one when none exists yet. Mirrors the locking discipline of clientIAM.
func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.STSAPI, error) {
	// Fast path: hand back the cached client under the read lock.
	b.clientMutex.RLock()
	if b.stsClient != nil {
		b.clientMutex.RUnlock()
		return b.stsClient, nil
	}

	// Upgrade the lock for writing. Not atomic — another goroutine may build
	// the client between RUnlock and Lock; the re-check below covers that.
	b.clientMutex.RUnlock()
	b.clientMutex.Lock()
	defer b.clientMutex.Unlock()

	// check client again, in the event that a client was being created while we
	// waited for Lock()
	if b.stsClient != nil {
		return b.stsClient, nil
	}

	// Build a fresh client from the stored root configuration and cache it.
	stsClient, err := nonCachedClientSTS(ctx, s, b.Logger())
	if err != nil {
		return nil, err
	}
	b.stsClient = stsClient

	return b.stsClient, nil
}
|
File diff suppressed because it is too large
Load Diff
@ -1,107 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// NOTE: The caller is required to ensure that b.clientMutex is at least read locked
|
||||
func getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) {
|
||||
credsConfig := &awsutil.CredentialsConfig{}
|
||||
var endpoint string
|
||||
var maxRetries int = aws.UseServiceDefaultRetries
|
||||
|
||||
entry, err := s.Get(ctx, "config/root")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil {
|
||||
var config rootConfig
|
||||
if err := entry.DecodeJSON(&config); err != nil {
|
||||
return nil, fmt.Errorf("error reading root configuration: %w", err)
|
||||
}
|
||||
|
||||
credsConfig.AccessKey = config.AccessKey
|
||||
credsConfig.SecretKey = config.SecretKey
|
||||
credsConfig.Region = config.Region
|
||||
maxRetries = config.MaxRetries
|
||||
switch {
|
||||
case clientType == "iam" && config.IAMEndpoint != "":
|
||||
endpoint = *aws.String(config.IAMEndpoint)
|
||||
case clientType == "sts" && config.STSEndpoint != "":
|
||||
endpoint = *aws.String(config.STSEndpoint)
|
||||
}
|
||||
}
|
||||
|
||||
if credsConfig.Region == "" {
|
||||
credsConfig.Region = os.Getenv("AWS_REGION")
|
||||
if credsConfig.Region == "" {
|
||||
credsConfig.Region = os.Getenv("AWS_DEFAULT_REGION")
|
||||
if credsConfig.Region == "" {
|
||||
credsConfig.Region = "us-east-1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
credsConfig.HTTPClient = cleanhttp.DefaultClient()
|
||||
|
||||
credsConfig.Logger = logger
|
||||
|
||||
creds, err := credsConfig.GenerateCredentialChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &aws.Config{
|
||||
Credentials: creds,
|
||||
Region: aws.String(credsConfig.Region),
|
||||
Endpoint: &endpoint,
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
MaxRetries: aws.Int(maxRetries),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) {
|
||||
awsConfig, err := getRootConfig(ctx, s, "iam", logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := iam.New(sess)
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("could not obtain iam client")
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) {
|
||||
awsConfig, err := getRootConfig(ctx, s, "sts", logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sess, err := session.NewSession(awsConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := sts.New(sess)
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("could not obtain sts client")
|
||||
}
|
||||
return client, nil
|
||||
}
|
@ -1,34 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/builtin/logical/aws"
|
||||
"github.com/hashicorp/vault/sdk/plugin"
|
||||
)
|
||||
|
||||
// main launches the AWS secrets engine as a standalone, multiplexed Vault
// plugin process.
func main() {
	apiClientMeta := &api.PluginAPIClientMeta{}
	flags := apiClientMeta.FlagSet()
	// NOTE(review): the error returned by Parse is ignored here — presumably
	// the flag set is configured to exit/report on error itself; confirm its
	// error-handling mode before relying on this.
	flags.Parse(os.Args[1:])

	tlsConfig := apiClientMeta.GetTLSConfig()
	tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)

	if err := plugin.ServeMultiplex(&plugin.ServeOpts{
		BackendFactoryFunc: aws.Factory,
		// set the TLSProviderFunc so that the plugin maintains backwards
		// compatibility with Vault versions that don’t support plugin AutoMTLS
		TLSProviderFunc: tlsProviderFunc,
	}); err != nil {
		// Serving failed: log via a fresh default logger and exit non-zero so
		// the host process observes the failure.
		logger := hclog.New(&hclog.LoggerOptions{})

		logger.Error("plugin shutting down", "error", err)
		os.Exit(1)
	}
}
|
@ -1,144 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// PolicyDocument represents an IAM policy document
|
||||
type PolicyDocument struct {
|
||||
Version string `json:"Version"`
|
||||
Statements StatementEntries `json:"Statement"`
|
||||
}
|
||||
|
||||
// StatementEntries is a slice of statements that make up a PolicyDocument
|
||||
type StatementEntries []interface{}
|
||||
|
||||
// UnmarshalJSON is defined here for StatementEntries because the Statement
|
||||
// portion of an IAM Policy can either be a list or a single element, so if it's
|
||||
// a single element this wraps it in a []interface{} so that it's easy to
|
||||
// combine with other policy statements:
|
||||
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_statement.html
|
||||
func (se *StatementEntries) UnmarshalJSON(b []byte) error {
|
||||
var out StatementEntries
|
||||
|
||||
var data interface{}
|
||||
if err := json.Unmarshal(b, &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch t := data.(type) {
|
||||
case []interface{}:
|
||||
out = t
|
||||
case interface{}:
|
||||
out = []interface{}{t}
|
||||
default:
|
||||
return fmt.Errorf("unsupported data type %T for StatementEntries", t)
|
||||
}
|
||||
*se = out
|
||||
return nil
|
||||
}
|
||||
|
||||
// getGroupPolicies takes a list of IAM Group names and returns a list of their
// inline policy documents, and a list of the attached managed policy ARNs.
func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGroups []string) ([]string, []string, error) {
	var groupPolicies []string
	var groupPolicyARNs []string
	var err error
	var agp *iam.ListAttachedGroupPoliciesOutput
	var inlinePolicies *iam.ListGroupPoliciesOutput
	var inlinePolicyDoc *iam.GetGroupPolicyOutput
	var iamClient iamiface.IAMAPI

	// Return early if there are no groups, to avoid creating an IAM client
	// needlessly
	if len(iamGroups) == 0 {
		return nil, nil, nil
	}

	iamClient, err = b.clientIAM(ctx, s)
	if err != nil {
		return nil, nil, err
	}

	for _, g := range iamGroups {
		// Collect managed policy ARNs from the IAM Group
		agp, err = iamClient.ListAttachedGroupPoliciesWithContext(ctx, &iam.ListAttachedGroupPoliciesInput{
			GroupName: aws.String(g),
		})
		if err != nil {
			return nil, nil, err
		}
		for _, p := range agp.AttachedPolicies {
			groupPolicyARNs = append(groupPolicyARNs, *p.PolicyArn)
		}

		// Collect inline policy names from the IAM Group
		inlinePolicies, err = iamClient.ListGroupPoliciesWithContext(ctx, &iam.ListGroupPoliciesInput{
			GroupName: aws.String(g),
		})
		if err != nil {
			return nil, nil, err
		}
		// Fetch each inline policy document by name.
		for _, iP := range inlinePolicies.PolicyNames {
			inlinePolicyDoc, err = iamClient.GetGroupPolicyWithContext(ctx, &iam.GetGroupPolicyInput{
				GroupName:  &g,
				PolicyName: iP,
			})
			if err != nil {
				return nil, nil, err
			}
			if inlinePolicyDoc != nil && inlinePolicyDoc.PolicyDocument != nil {
				// The returned document is URL-encoded (hence the unescape
				// here) — decode before handing it back to callers.
				var policyStr string
				if policyStr, err = url.QueryUnescape(*inlinePolicyDoc.PolicyDocument); err != nil {
					return nil, nil, err
				}
				groupPolicies = append(groupPolicies, policyStr)
			}
		}
	}
	return groupPolicies, groupPolicyARNs, nil
}
|
||||
|
||||
// combinePolicyDocuments takes policy strings as input, and combines them into
|
||||
// a single policy document string
|
||||
func combinePolicyDocuments(policies ...string) (string, error) {
|
||||
var policy string
|
||||
var err error
|
||||
var policyBytes []byte
|
||||
newPolicy := PolicyDocument{
|
||||
// 2012-10-17 is the current version of the AWS policy language:
|
||||
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
|
||||
Version: "2012-10-17",
|
||||
}
|
||||
newPolicy.Statements = make(StatementEntries, 0, len(policies))
|
||||
|
||||
for _, p := range policies {
|
||||
if len(p) == 0 {
|
||||
continue
|
||||
}
|
||||
var tmpDoc PolicyDocument
|
||||
err = json.Unmarshal([]byte(p), &tmpDoc)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
newPolicy.Statements = append(newPolicy.Statements, tmpDoc.Statements...)
|
||||
}
|
||||
|
||||
policyBytes, err = json.Marshal(&newPolicy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
policy = string(policyBytes)
|
||||
return policy, nil
|
||||
}
|
@ -1,264 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// ec2DescribePolicy is a minimal IAM policy used as a test fixture.
const ec2DescribePolicy = `{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}]}`

// ec2AllPolicy also uses a string instead of a list for the Action
const ec2AllPolicy = `{"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}`

// ec2SingleStatement is an example of the Statement portion containing a single statement that's not a list
const ec2SingleStatement = `{"Version": "2012-10-17", "Statement": {"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}}`

// mockGroupIAMClient is a canned-response IAM client for tests: each overridden
// method returns the corresponding Resp field and never errors. Embedding
// iamiface.IAMAPI satisfies the rest of the interface.
type mockGroupIAMClient struct {
	iamiface.IAMAPI
	ListAttachedGroupPoliciesResp iam.ListAttachedGroupPoliciesOutput
	ListGroupPoliciesResp         iam.ListGroupPoliciesOutput
	GetGroupPolicyResp            iam.GetGroupPolicyOutput
}

// ListAttachedGroupPoliciesWithContext returns the canned response, ignoring input.
func (m mockGroupIAMClient) ListAttachedGroupPoliciesWithContext(_ aws.Context, in *iam.ListAttachedGroupPoliciesInput, _ ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) {
	return &m.ListAttachedGroupPoliciesResp, nil
}

// ListGroupPoliciesWithContext returns the canned response, ignoring input.
func (m mockGroupIAMClient) ListGroupPoliciesWithContext(_ aws.Context, in *iam.ListGroupPoliciesInput, _ ...request.Option) (*iam.ListGroupPoliciesOutput, error) {
	return &m.ListGroupPoliciesResp, nil
}

// GetGroupPolicyWithContext returns the canned response, ignoring input.
func (m mockGroupIAMClient) GetGroupPolicyWithContext(_ aws.Context, in *iam.GetGroupPolicyInput, _ ...request.Option) (*iam.GetGroupPolicyOutput, error) {
	return &m.GetGroupPolicyResp, nil
}
|
||||
|
||||
// Test_getGroupPolicies table-tests getGroupPolicies against a mock IAM
// client, covering managed-only, inline-only, both, neither, and empty-input
// cases.
func Test_getGroupPolicies(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		description         string
		listAGPResp         iam.ListAttachedGroupPoliciesOutput
		listGPResp          iam.ListGroupPoliciesOutput
		getGPResp           iam.GetGroupPolicyOutput
		iamGroupArg         []string
		wantGroupPolicies   []string
		wantGroupPolicyARNs []string
		wantErr             bool
	}{
		{
			description: "All IAM calls respond with data",
			listAGPResp: iam.ListAttachedGroupPoliciesOutput{
				AttachedPolicies: []*iam.AttachedPolicy{
					{
						PolicyArn:  aws.String("abcdefghijklmnopqrst"),
						PolicyName: aws.String("test policy"),
					},
				},
			},
			listGPResp: iam.ListGroupPoliciesOutput{
				PolicyNames: []*string{
					aws.String("inline policy"),
				},
			},
			getGPResp: iam.GetGroupPolicyOutput{
				GroupName:      aws.String("inline policy"),
				PolicyDocument: aws.String(ec2DescribePolicy),
				PolicyName:     aws.String("ec2 describe"),
			},
			iamGroupArg:         []string{"testgroup1"},
			wantGroupPolicies:   []string{ec2DescribePolicy},
			wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"},
			wantErr:             false,
		},
		{
			// The mock returns the same canned response for every group, so
			// two groups yield the inline policy twice.
			description: "No managed policies",
			listAGPResp: iam.ListAttachedGroupPoliciesOutput{},
			listGPResp: iam.ListGroupPoliciesOutput{
				PolicyNames: []*string{
					aws.String("inline policy"),
				},
			},
			getGPResp: iam.GetGroupPolicyOutput{
				GroupName:      aws.String("inline policy"),
				PolicyDocument: aws.String(ec2DescribePolicy),
				PolicyName:     aws.String("ec2 describe"),
			},
			iamGroupArg:         []string{"testgroup1", "testgroup2"},
			wantGroupPolicies:   []string{ec2DescribePolicy, ec2DescribePolicy},
			wantGroupPolicyARNs: []string(nil),
			wantErr:             false,
		},
		{
			description: "No inline policies",
			listAGPResp: iam.ListAttachedGroupPoliciesOutput{
				AttachedPolicies: []*iam.AttachedPolicy{
					{
						PolicyArn:  aws.String("abcdefghijklmnopqrst"),
						PolicyName: aws.String("test policy"),
					},
				},
			},
			listGPResp:          iam.ListGroupPoliciesOutput{},
			getGPResp:           iam.GetGroupPolicyOutput{},
			iamGroupArg:         []string{"testgroup1"},
			wantGroupPolicies:   []string(nil),
			wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"},
			wantErr:             false,
		},
		{
			description:         "No policies",
			listAGPResp:         iam.ListAttachedGroupPoliciesOutput{},
			listGPResp:          iam.ListGroupPoliciesOutput{},
			getGPResp:           iam.GetGroupPolicyOutput{},
			iamGroupArg:         []string{"testgroup1"},
			wantGroupPolicies:   []string(nil),
			wantGroupPolicyARNs: []string(nil),
			wantErr:             false,
		},
		{
			description:         "empty iam_groups arg",
			listAGPResp:         iam.ListAttachedGroupPoliciesOutput{},
			listGPResp:          iam.ListGroupPoliciesOutput{},
			getGPResp:           iam.GetGroupPolicyOutput{},
			iamGroupArg:         []string{},
			wantGroupPolicies:   []string(nil),
			wantGroupPolicyARNs: []string(nil),
			wantErr:             false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// configure backend and iam client
			config := logical.TestBackendConfig()
			config.StorageView = &logical.InmemStorage{}

			b := Backend(config)
			if err := b.Setup(context.Background(), config); err != nil {
				t.Fatal(err)
			}
			// Inject the mock so getGroupPolicies never hits real AWS.
			b.iamClient = &mockGroupIAMClient{
				ListAttachedGroupPoliciesResp: tc.listAGPResp,
				ListGroupPoliciesResp:         tc.listGPResp,
				GetGroupPolicyResp:            tc.getGPResp,
			}

			// run the test and compare results
			groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(context.TODO(), config.StorageView, tc.iamGroupArg)
			assert.Equal(t, tc.wantGroupPolicies, groupPolicies)
			assert.Equal(t, tc.wantGroupPolicyARNs, groupPolicyARNs)
			assert.Equal(t, tc.wantErr, err != nil)
		})
	}
}
|
||||
|
||||
func Test_combinePolicyDocuments(t *testing.T) {
|
||||
t.Parallel()
|
||||
testCases := []struct {
|
||||
description string
|
||||
input []string
|
||||
expectedOutput string
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
description: "one policy",
|
||||
input: []string{
|
||||
ec2AllPolicy,
|
||||
},
|
||||
expectedOutput: `{"Version":"2012-10-17","Statement":[{"Action":"ec2:*","Effect":"Allow","Resource":"*"}]}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "two policies",
|
||||
input: []string{
|
||||
ec2AllPolicy,
|
||||
ec2DescribePolicy,
|
||||
},
|
||||
expectedOutput: `{"Version": "2012-10-17", "Statement":[
|
||||
{"Action": "ec2:*", "Effect": "Allow", "Resource": "*"},
|
||||
{"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "two policies, one with empty statement",
|
||||
input: []string{
|
||||
ec2AllPolicy,
|
||||
`{"Version": "2012-10-17", "Statement": []}`,
|
||||
},
|
||||
expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": "ec2:*", "Effect": "Allow", "Resource": "*"}]}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "malformed json",
|
||||
input: []string{
|
||||
`"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}`,
|
||||
`{"Version": "2012-10-17", "Statement": []}`,
|
||||
},
|
||||
expectedOutput: ``,
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
description: "not action",
|
||||
input: []string{
|
||||
`{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`,
|
||||
},
|
||||
expectedOutput: `{"Version": "2012-10-17","Statement":[{"Effect": "Allow","NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "one blank policy",
|
||||
input: []string{
|
||||
"",
|
||||
`{"Version": "2012-10-17", "Statement": []}`,
|
||||
},
|
||||
expectedOutput: `{"Version": "2012-10-17", "Statement": []}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "when statement is not a list",
|
||||
input: []string{
|
||||
ec2SingleStatement,
|
||||
},
|
||||
expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`,
|
||||
expectedErr: false,
|
||||
},
|
||||
{
|
||||
description: "statement is malformed json",
|
||||
input: []string{
|
||||
`{"Version": "2012-10-17", "Statement": {true}`,
|
||||
},
|
||||
expectedOutput: "",
|
||||
expectedErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
policyOut, err := combinePolicyDocuments(tc.input...)
|
||||
if (err != nil) != tc.expectedErr {
|
||||
t.Fatalf("got unexpected error: %s", err)
|
||||
}
|
||||
if (err != nil) != tc.expectedErr {
|
||||
t.Fatalf("got unexpected error: %s", err)
|
||||
}
|
||||
// remove whitespace
|
||||
if tc.expectedOutput != "" {
|
||||
tc.expectedOutput, err = compactJSON(tc.expectedOutput)
|
||||
if err != nil {
|
||||
t.Fatalf("error compacting JSON: %s", err)
|
||||
}
|
||||
}
|
||||
if policyOut != tc.expectedOutput {
|
||||
t.Fatalf("did not receive expected output: want %s, got %s", tc.expectedOutput, policyOut)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,146 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// pathConfigLease defines the config/lease endpoint: read returns the stored
// lease configuration, update (write) stores new default/max lease durations.
func pathConfigLease(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "config/lease",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"lease": {
				Type:        framework.TypeString,
				Description: "Default lease for roles.",
			},

			"lease_max": {
				Type:        framework.TypeString,
				Description: "Maximum time a credential is valid for.",
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathLeaseRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "lease-configuration",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathLeaseWrite,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "lease",
				},
			},
		},

		HelpSynopsis:    pathConfigLeaseHelpSyn,
		HelpDescription: pathConfigLeaseHelpDesc,
	}
}
|
||||
|
||||
// Lease returns the lease information
|
||||
func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) {
|
||||
entry, err := s.Get(ctx, "config/lease")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var result configLease
|
||||
if err := entry.DecodeJSON(&result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
leaseRaw := d.Get("lease").(string)
|
||||
leaseMaxRaw := d.Get("lease_max").(string)
|
||||
|
||||
if len(leaseRaw) == 0 {
|
||||
return logical.ErrorResponse("'lease' is a required parameter"), nil
|
||||
}
|
||||
if len(leaseMaxRaw) == 0 {
|
||||
return logical.ErrorResponse("'lease_max' is a required parameter"), nil
|
||||
}
|
||||
|
||||
lease, err := parseutil.ParseDurationSecond(leaseRaw)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(fmt.Sprintf(
|
||||
"Invalid lease: %s", err)), nil
|
||||
}
|
||||
leaseMax, err := parseutil.ParseDurationSecond(leaseMaxRaw)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(fmt.Sprintf(
|
||||
"Invalid lease_max: %s", err)), nil
|
||||
}
|
||||
|
||||
// Store it
|
||||
entry, err := logical.StorageEntryJSON("config/lease", &configLease{
|
||||
Lease: lease,
|
||||
LeaseMax: leaseMax,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := req.Storage.Put(ctx, entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
lease, err := b.Lease(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lease == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: map[string]interface{}{
|
||||
"lease": lease.Lease.String(),
|
||||
"lease_max": lease.LeaseMax.String(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// configLease holds the default and maximum lease durations applied to
// credentials generated by this backend, stored under config/lease.
type configLease struct {
	Lease    time.Duration // default TTL for generated credentials
	LeaseMax time.Duration // upper bound on a credential's TTL
}

const pathConfigLeaseHelpSyn = `
Configure the default lease information for generated credentials.
`

const pathConfigLeaseHelpDesc = `
This configures the default lease information used for credentials
generated by this backend. The lease specifies the duration that a
credential will be valid for, as well as the maximum session for
a set of credentials.

The format for the lease is "1h" or integer and then unit. The longest
unit is hour.
`
|
@ -1,168 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// A single default template that supports both the different credential types (IAM/STS) that are capped at differing length limits (64 chars/32 chars respectively)
const defaultUserNameTemplate = `{{ if (eq .Type "STS") }}{{ printf "vault-%s-%s" (unix_time) (random 20) | truncate 32 }}{{ else }}{{ printf "vault-%s-%s-%s" (printf "%s-%s" (.DisplayName) (.PolicyName) | truncate 42) (unix_time) (random 20) | truncate 64 }}{{ end }}`

// pathConfigRoot defines the config/root endpoint: update stores the root IAM
// credentials plus client settings (region, endpoints, retries, username
// template), read returns them with the secret key omitted.
func pathConfigRoot(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "config/root",

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
		},

		Fields: map[string]*framework.FieldSchema{
			"access_key": {
				Type:        framework.TypeString,
				Description: "Access key with permission to create new keys.",
			},

			"secret_key": {
				Type:        framework.TypeString,
				Description: "Secret key with permission to create new keys.",
			},

			"region": {
				Type:        framework.TypeString,
				Description: "Region for API calls.",
			},
			"iam_endpoint": {
				Type:        framework.TypeString,
				Description: "Endpoint to custom IAM server URL",
			},
			"sts_endpoint": {
				Type:        framework.TypeString,
				Description: "Endpoint to custom STS server URL",
			},
			"max_retries": {
				Type:        framework.TypeInt,
				Default:     aws.UseServiceDefaultRetries,
				Description: "Maximum number of retries for recoverable exceptions of AWS APIs",
			},
			"username_template": {
				Type:        framework.TypeString,
				Description: "Template to generate custom IAM usernames",
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathConfigRootRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "root-iam-credentials-configuration",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathConfigRootWrite,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationVerb:   "configure",
					OperationSuffix: "root-iam-credentials",
				},
			},
		},

		HelpSynopsis:    pathConfigRootHelpSyn,
		HelpDescription: pathConfigRootHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
b.clientMutex.RLock()
|
||||
defer b.clientMutex.RUnlock()
|
||||
|
||||
entry, err := req.Storage.Get(ctx, "config/root")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var config rootConfig
|
||||
|
||||
if err := entry.DecodeJSON(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
configData := map[string]interface{}{
|
||||
"access_key": config.AccessKey,
|
||||
"region": config.Region,
|
||||
"iam_endpoint": config.IAMEndpoint,
|
||||
"sts_endpoint": config.STSEndpoint,
|
||||
"max_retries": config.MaxRetries,
|
||||
"username_template": config.UsernameTemplate,
|
||||
}
|
||||
return &logical.Response{
|
||||
Data: configData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
region := data.Get("region").(string)
|
||||
iamendpoint := data.Get("iam_endpoint").(string)
|
||||
stsendpoint := data.Get("sts_endpoint").(string)
|
||||
maxretries := data.Get("max_retries").(int)
|
||||
usernameTemplate := data.Get("username_template").(string)
|
||||
if usernameTemplate == "" {
|
||||
usernameTemplate = defaultUserNameTemplate
|
||||
}
|
||||
|
||||
b.clientMutex.Lock()
|
||||
defer b.clientMutex.Unlock()
|
||||
|
||||
entry, err := logical.StorageEntryJSON("config/root", rootConfig{
|
||||
AccessKey: data.Get("access_key").(string),
|
||||
SecretKey: data.Get("secret_key").(string),
|
||||
IAMEndpoint: iamendpoint,
|
||||
STSEndpoint: stsendpoint,
|
||||
Region: region,
|
||||
MaxRetries: maxretries,
|
||||
UsernameTemplate: usernameTemplate,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := req.Storage.Put(ctx, entry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// clear possible cached IAM / STS clients after successfully updating
|
||||
// config/root
|
||||
b.iamClient = nil
|
||||
b.stsClient = nil
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// rootConfig is the JSON-serialized shape of the config/root storage entry:
// root IAM credentials plus per-mount AWS client settings.
type rootConfig struct {
	AccessKey        string `json:"access_key"`
	SecretKey        string `json:"secret_key"` // never returned by config/root reads
	IAMEndpoint      string `json:"iam_endpoint"`
	STSEndpoint      string `json:"sts_endpoint"`
	Region           string `json:"region"`
	MaxRetries       int    `json:"max_retries"`
	UsernameTemplate string `json:"username_template"`
}

const pathConfigRootHelpSyn = `
Configure the root credentials that are used to manage IAM.
`

const pathConfigRootHelpDesc = `
Before doing anything, the AWS backend needs credentials that are able
to manage IAM policies, users, access keys, etc. This endpoint is used
to configure those credentials. They don't necessarily need to be root
keys as long as they have permission to manage IAM.
`
|
@ -1,58 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// TestBackend_PathConfigRoot writes a config/root entry through the backend,
// reads it back, and verifies the response matches the written data with the
// secret key redacted.
func TestBackend_PathConfigRoot(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	b := Backend(config)
	if err := b.Setup(context.Background(), config); err != nil {
		t.Fatal(err)
	}

	configData := map[string]interface{}{
		"access_key":        "AKIAEXAMPLE",
		"secret_key":        "RandomData",
		"region":            "us-west-2",
		"iam_endpoint":      "https://iam.amazonaws.com",
		"sts_endpoint":      "https://sts.us-west-2.amazonaws.com",
		"max_retries":       10,
		"username_template": defaultUserNameTemplate,
	}

	configReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Storage:   config.StorageView,
		Path:      "config/root",
		Data:      configData,
	}

	resp, err := b.HandleRequest(context.Background(), configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: config writing failed: resp:%#v\n err: %v", resp, err)
	}

	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.ReadOperation,
		Storage:   config.StorageView,
		Path:      "config/root",
	})
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: config reading failed: resp:%#v\n err: %v", resp, err)
	}

	// The read handler omits secret_key, so drop it from the expectation
	// before comparing.
	delete(configData, "secret_key")
	if !reflect.DeepEqual(resp.Data, configData) {
		t.Errorf("bad: expected to read config root as %#v, got %#v instead", configData, resp.Data)
	}
}
|
@ -1,137 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func pathConfigRotateRoot(b *backend) *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "config/rotate-root",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "root-iam-credentials",
|
||||
OperationVerb: "rotate",
|
||||
},
|
||||
|
||||
Operations: map[logical.Operation]framework.OperationHandler{
|
||||
logical.UpdateOperation: &framework.PathOperation{
|
||||
Callback: b.pathConfigRotateRootUpdate,
|
||||
ForwardPerformanceStandby: true,
|
||||
ForwardPerformanceSecondary: true,
|
||||
},
|
||||
},
|
||||
|
||||
HelpSynopsis: pathConfigRotateRootHelpSyn,
|
||||
HelpDescription: pathConfigRotateRootHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// pathConfigRotateRootUpdate rotates the root IAM credentials stored at
// config/root: it creates a new access key for the current IAM user, persists
// the new key pair, invalidates the cached AWS clients, and finally deletes
// the old access key. Only the new access key ID (never the secret) is
// returned in the response.
//
// Ordering matters: the new key is persisted BEFORE the old one is deleted,
// so a failure partway through leaves Vault with working credentials.
func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// have to get the client config first because that takes out a read lock
	client, err := b.clientIAM(ctx, req.Storage)
	if err != nil {
		return nil, err
	}
	if client == nil {
		return nil, fmt.Errorf("nil IAM client")
	}

	// Hold the client mutex for the whole rotation so no other request can
	// build a client from credentials that are about to be invalidated.
	b.clientMutex.Lock()
	defer b.clientMutex.Unlock()

	rawRootConfig, err := req.Storage.Get(ctx, "config/root")
	if err != nil {
		return nil, err
	}
	if rawRootConfig == nil {
		return nil, fmt.Errorf("no configuration found for config/root")
	}
	var config rootConfig
	if err := rawRootConfig.DecodeJSON(&config); err != nil {
		return nil, fmt.Errorf("error reading root configuration: %w", err)
	}

	// Rotation only makes sense for static credentials; instance-profile or
	// env-derived credentials have no stored key pair to rotate.
	if config.AccessKey == "" || config.SecretKey == "" {
		return logical.ErrorResponse("Cannot call config/rotate-root when either access_key or secret_key is empty"), nil
	}

	var getUserInput iam.GetUserInput // empty input means get current user
	getUserRes, err := client.GetUserWithContext(ctx, &getUserInput)
	if err != nil {
		return nil, fmt.Errorf("error calling GetUser: %w", err)
	}
	if getUserRes == nil {
		return nil, fmt.Errorf("nil response from GetUser")
	}
	if getUserRes.User == nil {
		return nil, fmt.Errorf("nil user returned from GetUser")
	}
	if getUserRes.User.UserName == nil {
		return nil, fmt.Errorf("nil UserName returned from GetUser")
	}

	createAccessKeyInput := iam.CreateAccessKeyInput{
		UserName: getUserRes.User.UserName,
	}
	createAccessKeyRes, err := client.CreateAccessKeyWithContext(ctx, &createAccessKeyInput)
	if err != nil {
		return nil, fmt.Errorf("error calling CreateAccessKey: %w", err)
	}
	if createAccessKeyRes.AccessKey == nil {
		return nil, fmt.Errorf("nil response from CreateAccessKey")
	}
	if createAccessKeyRes.AccessKey.AccessKeyId == nil || createAccessKeyRes.AccessKey.SecretAccessKey == nil {
		return nil, fmt.Errorf("nil AccessKeyId or SecretAccessKey returned from CreateAccessKey")
	}

	// Remember the old key so it can be deleted once the new one is saved.
	oldAccessKey := config.AccessKey

	config.AccessKey = *createAccessKeyRes.AccessKey.AccessKeyId
	config.SecretKey = *createAccessKeyRes.AccessKey.SecretAccessKey

	newEntry, err := logical.StorageEntryJSON("config/root", config)
	if err != nil {
		return nil, fmt.Errorf("error generating new config/root JSON: %w", err)
	}
	if err := req.Storage.Put(ctx, newEntry); err != nil {
		return nil, fmt.Errorf("error saving new config/root: %w", err)
	}

	// Drop the cached clients so subsequent requests rebuild them with the
	// new credentials.
	b.iamClient = nil
	b.stsClient = nil

	// NOTE(review): if this delete fails, the new config is already saved and
	// the old access key is left dangling in IAM; it must be cleaned up
	// manually.
	deleteAccessKeyInput := iam.DeleteAccessKeyInput{
		AccessKeyId: aws.String(oldAccessKey),
		UserName:    getUserRes.User.UserName,
	}
	_, err = client.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput)
	if err != nil {
		return nil, fmt.Errorf("error deleting old access key: %w", err)
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"access_key": config.AccessKey,
		},
	}, nil
}
|
||||
|
||||
// Help text surfaced by the framework for the config/rotate-root endpoint.
const pathConfigRotateRootHelpSyn = `
Request to rotate the AWS credentials used by Vault
`

const pathConfigRotateRootHelpDesc = `
This path attempts to rotate the AWS credentials used by Vault for this mount.
It is only valid if Vault has been configured to use AWS IAM credentials via the
config/root endpoint.
`
|
@ -1,633 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/arn"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// userPathRegex validates the user_path role parameter: it must start and end
// with "/" and the interior may be up to 510 printable ASCII characters,
// mirroring IAM's path constraints.
var userPathRegex = regexp.MustCompile(`^\/([\x21-\x7F]{0,510}\/)?$`)
|
||||
|
||||
func pathListRoles(b *backend) *framework.Path {
|
||||
return &framework.Path{
|
||||
Pattern: "roles/?$",
|
||||
|
||||
DisplayAttrs: &framework.DisplayAttributes{
|
||||
OperationPrefix: operationPrefixAWS,
|
||||
OperationSuffix: "roles",
|
||||
},
|
||||
|
||||
Callbacks: map[logical.Operation]framework.OperationFunc{
|
||||
logical.ListOperation: b.pathRoleList,
|
||||
},
|
||||
|
||||
HelpSynopsis: pathListRolesHelpSyn,
|
||||
HelpDescription: pathListRolesHelpDesc,
|
||||
}
|
||||
}
|
||||
|
||||
// pathRoles returns the framework.Path for roles/<name>, the CRUD endpoint
// for individual role definitions. The field schema below documents both the
// current parameters and the deprecated legacy "arn"/"policy" parameters that
// are still accepted for backwards compatibility.
func pathRoles(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "roles/" + framework.GenericNameWithAtRegex("name"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
			OperationSuffix: "role",
		},

		Fields: map[string]*framework.FieldSchema{
			"name": {
				Type:        framework.TypeString,
				Description: "Name of the role",
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Role Name",
				},
			},

			"credential_type": {
				Type:        framework.TypeString,
				Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred),
			},

			"role_arns": {
				Type:        framework.TypeCommaStringSlice,
				Description: "ARNs of AWS roles allowed to be assumed. Only valid when credential_type is " + assumedRoleCred,
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Role ARNs",
				},
			},

			"policy_arns": {
				Type: framework.TypeCommaStringSlice,
				Description: fmt.Sprintf(`ARNs of AWS policies. Behavior varies by credential_type. When credential_type is
%s, then it will attach the specified policies to the generated IAM user.
When credential_type is %s or %s, the policies will be passed as the
PolicyArns parameter, acting as a filter on permissions available.`, iamUserCred, assumedRoleCred, federationTokenCred),
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Policy ARNs",
				},
			},

			"policy_document": {
				Type: framework.TypeString,
				Description: `JSON-encoded IAM policy document. Behavior varies by credential_type. When credential_type is
iam_user, then it will attach the contents of the policy_document to the IAM
user generated. When credential_type is assumed_role or federation_token, this
will be passed in as the Policy parameter to the AssumeRole or
GetFederationToken API call, acting as a filter on permissions available.`,
			},

			"iam_groups": {
				Type: framework.TypeCommaStringSlice,
				Description: `Names of IAM groups that generated IAM users will be added to. For a credential
type of assumed_role or federation_token, the policies sent to the
corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the
policies from each group in iam_groups combined with the policy_document
and policy_arns parameters.`,
				DisplayAttrs: &framework.DisplayAttributes{
					Name:  "IAM Groups",
					Value: "group1,group2",
				},
			},

			"iam_tags": {
				Type: framework.TypeKVPairs,
				Description: `IAM tags to be set for any users created by this role. These must be presented
as Key-Value pairs. This can be represented as a map or a list of equal sign
delimited key pairs.`,
				DisplayAttrs: &framework.DisplayAttributes{
					Name:  "IAM Tags",
					Value: "[key1=value1, key2=value2]",
				},
			},

			"default_sts_ttl": {
				Type:        framework.TypeDurationSecond,
				Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred),
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Default STS TTL",
				},
			},

			"max_sts_ttl": {
				Type:        framework.TypeDurationSecond,
				Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred),
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Max STS TTL",
				},
			},

			"permissions_boundary_arn": {
				Type:        framework.TypeString,
				Description: "ARN of an IAM policy to attach as a permissions boundary on IAM user credentials; only valid when credential_type is" + iamUserCred,
				DisplayAttrs: &framework.DisplayAttributes{
					Name: "Permissions Boundary ARN",
				},
			},

			// Legacy parameter, superseded by role_arns / policy_arns.
			"arn": {
				Type:        framework.TypeString,
				Description: `Use role_arns or policy_arns instead.`,
				Deprecated:  true,
			},

			// Legacy parameter, superseded by policy_document.
			"policy": {
				Type:        framework.TypeString,
				Description: "Use policy_document instead.",
				Deprecated:  true,
			},

			"user_path": {
				Type:        framework.TypeString,
				Description: "Path for IAM User. Only valid when credential_type is " + iamUserCred,
				DisplayAttrs: &framework.DisplayAttributes{
					Name:  "User Path",
					Value: "/",
				},
				Default: "/",
			},
		},

		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.DeleteOperation: b.pathRolesDelete,
			logical.ReadOperation:   b.pathRolesRead,
			logical.UpdateOperation: b.pathRolesWrite,
		},

		HelpSynopsis:    pathRolesHelpSyn,
		HelpDescription: pathRolesHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
b.roleMutex.RLock()
|
||||
defer b.roleMutex.RUnlock()
|
||||
entries, err := req.Storage.List(ctx, "role/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
legacyEntries, err := req.Storage.List(ctx, "policy/")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return logical.ListResponse(append(entries, legacyEntries...)), nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRolesDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
for _, prefix := range []string{"policy/", "role/"} {
|
||||
err := req.Storage.Delete(ctx, prefix+d.Get("name").(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathRolesRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
entry, err := b.roleRead(ctx, req.Storage, d.Get("name").(string), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: entry.toResponseData(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func legacyRoleData(d *framework.FieldData) (string, error) {
|
||||
policy := d.Get("policy").(string)
|
||||
arn := d.Get("arn").(string)
|
||||
|
||||
switch {
|
||||
case policy == "" && arn == "":
|
||||
return "", nil
|
||||
case policy != "" && arn != "":
|
||||
return "", errors.New("only one of policy or arn should be provided")
|
||||
case policy != "":
|
||||
return policy, nil
|
||||
default:
|
||||
return arn, nil
|
||||
}
|
||||
}
|
||||
|
||||
// pathRolesWrite creates or updates a role. It merges the supplied parameters
// into any existing entry, rejects mixing the deprecated "arn"/"policy"
// parameters with the modern ones, validates the result, and persists it.
// When a legacy parameter is used, the whole entry is rebuilt from it via
// upgradeLegacyPolicyEntry and a deprecation warning is returned.
func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	var resp logical.Response

	roleName := d.Get("name").(string)
	if roleName == "" {
		return logical.ErrorResponse("missing role name"), nil
	}

	// Write lock for the whole read-modify-write cycle.
	b.roleMutex.Lock()
	defer b.roleMutex.Unlock()
	// shouldLock=false: we already hold the write lock.
	roleEntry, err := b.roleRead(ctx, req.Storage, roleName, false)
	if err != nil {
		return nil, err
	}
	if roleEntry == nil {
		roleEntry = &awsRoleEntry{}
	} else if roleEntry.InvalidData != "" {
		// A previous legacy entry failed to parse; discard the bad data now
		// that the caller is rewriting the role.
		resp.AddWarning(fmt.Sprintf("Invalid data of %q cleared out of role", roleEntry.InvalidData))
		roleEntry.InvalidData = ""
	}

	legacyRole, err := legacyRoleData(d)
	if err != nil {
		return nil, err
	}

	// Each modern parameter is mutually exclusive with the legacy ones.
	if credentialTypeRaw, ok := d.GetOk("credential_type"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with an explicit credential_type"), nil
		}
		roleEntry.CredentialTypes = []string{credentialTypeRaw.(string)}
	}

	if roleArnsRaw, ok := d.GetOk("role_arns"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with role_arns"), nil
		}
		roleEntry.RoleArns = roleArnsRaw.([]string)
	}

	if policyArnsRaw, ok := d.GetOk("policy_arns"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with policy_arns"), nil
		}
		roleEntry.PolicyArns = policyArnsRaw.([]string)
	}

	if policyDocumentRaw, ok := d.GetOk("policy_document"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with policy_document"), nil
		}
		// Store the policy document in compacted JSON form; compaction also
		// validates that it is well-formed JSON.
		compacted := policyDocumentRaw.(string)
		if len(compacted) > 0 {
			compacted, err = compactJSON(policyDocumentRaw.(string))
			if err != nil {
				return logical.ErrorResponse(fmt.Sprintf("cannot parse policy document: %q", policyDocumentRaw.(string))), nil
			}
		}
		roleEntry.PolicyDocument = compacted
	}

	if defaultSTSTTLRaw, ok := d.GetOk("default_sts_ttl"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with default_sts_ttl"), nil
		}
		roleEntry.DefaultSTSTTL = time.Duration(defaultSTSTTLRaw.(int)) * time.Second
	}

	if maxSTSTTLRaw, ok := d.GetOk("max_sts_ttl"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with max_sts_ttl"), nil
		}
		roleEntry.MaxSTSTTL = time.Duration(maxSTSTTLRaw.(int)) * time.Second
	}

	if userPathRaw, ok := d.GetOk("user_path"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with user_path"), nil
		}

		roleEntry.UserPath = userPathRaw.(string)
	}

	if permissionsBoundaryARNRaw, ok := d.GetOk("permissions_boundary_arn"); ok {
		if legacyRole != "" {
			return logical.ErrorResponse("cannot supply deprecated role or policy parameters with permissions_boundary_arn"), nil
		}
		roleEntry.PermissionsBoundaryARN = permissionsBoundaryARNRaw.(string)
	}

	if iamGroups, ok := d.GetOk("iam_groups"); ok {
		roleEntry.IAMGroups = iamGroups.([]string)
	}

	if iamTags, ok := d.GetOk("iam_tags"); ok {
		roleEntry.IAMTags = iamTags.(map[string]string)
	}

	if legacyRole != "" {
		// A legacy parameter replaces the entire entry rather than merging.
		roleEntry = upgradeLegacyPolicyEntry(legacyRole)
		if roleEntry.InvalidData != "" {
			return logical.ErrorResponse(fmt.Sprintf("unable to parse supplied data: %q", roleEntry.InvalidData)), nil
		}
		resp.AddWarning("Detected use of legacy role or policy parameter. Please upgrade to use the new parameters.")
	} else {
		roleEntry.ProhibitFlexibleCredPath = false
	}

	err = roleEntry.validate()
	if err != nil {
		return logical.ErrorResponse(fmt.Sprintf("error(s) validating supplied role data: %q", err)), nil
	}

	err = setAwsRole(ctx, req.Storage, roleName, roleEntry)
	if err != nil {
		return nil, err
	}

	// Only return a response body when there are warnings to surface.
	if len(resp.Warnings) == 0 {
		return nil, nil
	}

	return &resp, nil
}
|
||||
|
||||
// roleRead fetches a role entry from storage, lazily upgrading legacy entries
// stored under the "policy/" prefix to the current "role/" format. shouldLock
// controls whether this function manages b.roleMutex itself; callers that
// already hold the write lock (e.g. pathRolesWrite) pass false.
//
// The locking follows a double-checked pattern: a fast path under the read
// lock, then — only if an upgrade may be needed — a re-check under the write
// lock before mutating storage. The statement order is deliberate; do not
// reorder.
func (b *backend) roleRead(ctx context.Context, s logical.Storage, roleName string, shouldLock bool) (*awsRoleEntry, error) {
	if roleName == "" {
		return nil, fmt.Errorf("missing role name")
	}
	// Fast path: look up the modern entry under the read lock.
	if shouldLock {
		b.roleMutex.RLock()
	}
	entry, err := s.Get(ctx, "role/"+roleName)
	if shouldLock {
		b.roleMutex.RUnlock()
	}
	if err != nil {
		return nil, err
	}
	var roleEntry awsRoleEntry
	if entry != nil {
		if err := entry.DecodeJSON(&roleEntry); err != nil {
			return nil, err
		}
		return &roleEntry, nil
	}

	// Slow path: take the write lock and re-check, since another goroutine
	// may have completed the upgrade between the locks.
	if shouldLock {
		b.roleMutex.Lock()
		defer b.roleMutex.Unlock()
	}
	entry, err = s.Get(ctx, "role/"+roleName)
	if err != nil {
		return nil, err
	}

	if entry != nil {
		if err := entry.DecodeJSON(&roleEntry); err != nil {
			return nil, err
		}
		return &roleEntry, nil
	}

	// No modern entry; fall back to the legacy "policy/" prefix.
	legacyEntry, err := s.Get(ctx, "policy/"+roleName)
	if err != nil {
		return nil, err
	}
	if legacyEntry == nil {
		return nil, nil
	}

	newRoleEntry := upgradeLegacyPolicyEntry(string(legacyEntry.Value))
	// Only persist the upgraded entry when this node is allowed to write:
	// local mounts always can; replicated mounts must not write on
	// performance secondaries/standbys.
	if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
		err = setAwsRole(ctx, s, roleName, newRoleEntry)
		if err != nil {
			return nil, err
		}
		// This can leave legacy data around in the policy/ path if it fails for some reason,
		// but should be pretty rare for this to fail but prior writes to succeed, so not worrying
		// about cleaning it up in case of error
		err = s.Delete(ctx, "policy/"+roleName)
		if err != nil {
			return nil, err
		}
	}
	return newRoleEntry, nil
}
|
||||
|
||||
func upgradeLegacyPolicyEntry(entry string) *awsRoleEntry {
|
||||
var newRoleEntry *awsRoleEntry
|
||||
if strings.HasPrefix(entry, "arn:") {
|
||||
parsedArn, err := arn.Parse(entry)
|
||||
if err != nil {
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
InvalidData: entry,
|
||||
Version: 1,
|
||||
}
|
||||
return newRoleEntry
|
||||
}
|
||||
resourceParts := strings.Split(parsedArn.Resource, "/")
|
||||
resourceType := resourceParts[0]
|
||||
switch resourceType {
|
||||
case "role":
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
CredentialTypes: []string{assumedRoleCred},
|
||||
RoleArns: []string{entry},
|
||||
ProhibitFlexibleCredPath: true,
|
||||
Version: 1,
|
||||
}
|
||||
case "policy":
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
CredentialTypes: []string{iamUserCred},
|
||||
PolicyArns: []string{entry},
|
||||
ProhibitFlexibleCredPath: true,
|
||||
Version: 1,
|
||||
}
|
||||
default:
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
InvalidData: entry,
|
||||
Version: 1,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
compacted, err := compactJSON(entry)
|
||||
if err != nil {
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
InvalidData: entry,
|
||||
Version: 1,
|
||||
}
|
||||
} else {
|
||||
// unfortunately, this is ambiguous between the cred types, so allow both
|
||||
newRoleEntry = &awsRoleEntry{
|
||||
CredentialTypes: []string{iamUserCred, federationTokenCred},
|
||||
PolicyDocument: compacted,
|
||||
ProhibitFlexibleCredPath: true,
|
||||
Version: 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newRoleEntry
|
||||
}
|
||||
|
||||
func validateAWSManagedPolicy(policyARN string) error {
|
||||
parsedARN, err := arn.Parse(policyARN)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if parsedARN.Service != "iam" {
|
||||
return fmt.Errorf("expected a service of iam but got %s", parsedARN.Service)
|
||||
}
|
||||
if !strings.HasPrefix(parsedARN.Resource, "policy/") {
|
||||
return fmt.Errorf("expected a resource type of policy but got %s", parsedARN.Resource)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setAwsRole(ctx context.Context, s logical.Storage, roleName string, roleEntry *awsRoleEntry) error {
|
||||
if roleName == "" {
|
||||
return fmt.Errorf("empty role name")
|
||||
}
|
||||
if roleEntry == nil {
|
||||
return fmt.Errorf("nil roleEntry")
|
||||
}
|
||||
entry, err := logical.StorageEntryJSON("role/"+roleName, roleEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry == nil {
|
||||
return fmt.Errorf("nil result when writing to storage")
|
||||
}
|
||||
if err := s.Put(ctx, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// awsRoleEntry is the stored representation of a role on this mount. It
// covers both credential configuration (which credential types are allowed
// and what policies/ARNs apply) and bookkeeping fields used by the legacy
// "policy/" upgrade path.
type awsRoleEntry struct {
	CredentialTypes []string `json:"credential_types"` // Entries must all be in the set of ("iam_user", "assumed_role", "federation_token")
	PolicyArns      []string `json:"policy_arns"`      // ARNs of managed policies to attach to an IAM user
	RoleArns        []string `json:"role_arns"`        // ARNs of roles to assume for AssumedRole credentials
	PolicyDocument  string   `json:"policy_document"`  // JSON-serialized inline policy to attach to IAM users and/or to specify as the Policy parameter in AssumeRole calls
	IAMGroups       []string `json:"iam_groups"`       // Names of IAM groups that generated IAM users will be added to
	IAMTags         map[string]string `json:"iam_tags"` // IAM tags that will be added to the generated IAM users
	InvalidData     string   `json:"invalid_data,omitempty"` // Invalid role data. Exists to support converting the legacy role data into the new format
	ProhibitFlexibleCredPath bool `json:"prohibit_flexible_cred_path,omitempty"` // Disallow accessing STS credentials via the creds path and vice verse
	Version         int      `json:"version"`          // Version number of the role format
	DefaultSTSTTL   time.Duration `json:"default_sts_ttl"` // Default TTL for STS credentials
	MaxSTSTTL       time.Duration `json:"max_sts_ttl"`     // Max allowed TTL for STS credentials
	UserPath        string   `json:"user_path"`        // The path for the IAM user when using "iam_user" credential type
	PermissionsBoundaryARN string `json:"permissions_boundary_arn"` // ARN of an IAM policy to attach as a permissions boundary
}
|
||||
|
||||
func (r *awsRoleEntry) toResponseData() map[string]interface{} {
|
||||
respData := map[string]interface{}{
|
||||
"credential_type": strings.Join(r.CredentialTypes, ","),
|
||||
"policy_arns": r.PolicyArns,
|
||||
"role_arns": r.RoleArns,
|
||||
"policy_document": r.PolicyDocument,
|
||||
"iam_groups": r.IAMGroups,
|
||||
"iam_tags": r.IAMTags,
|
||||
"default_sts_ttl": int64(r.DefaultSTSTTL.Seconds()),
|
||||
"max_sts_ttl": int64(r.MaxSTSTTL.Seconds()),
|
||||
"user_path": r.UserPath,
|
||||
"permissions_boundary_arn": r.PermissionsBoundaryARN,
|
||||
}
|
||||
|
||||
if r.InvalidData != "" {
|
||||
respData["invalid_data"] = r.InvalidData
|
||||
}
|
||||
return respData
|
||||
}
|
||||
|
||||
func (r *awsRoleEntry) validate() error {
|
||||
var errors *multierror.Error
|
||||
|
||||
if len(r.CredentialTypes) == 0 {
|
||||
errors = multierror.Append(errors, fmt.Errorf("did not supply credential_type"))
|
||||
}
|
||||
|
||||
allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred}
|
||||
for _, credType := range r.CredentialTypes {
|
||||
if !strutil.StrListContains(allowedCredentialTypes, credType) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("unrecognized credential type: %s", credType))
|
||||
}
|
||||
}
|
||||
|
||||
if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred))
|
||||
}
|
||||
|
||||
if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred))
|
||||
}
|
||||
|
||||
if r.MaxSTSTTL > 0 &&
|
||||
r.DefaultSTSTTL > 0 &&
|
||||
r.DefaultSTSTTL > r.MaxSTSTTL {
|
||||
errors = multierror.Append(errors, fmt.Errorf(`"default_sts_ttl" value must be less than or equal to "max_sts_ttl" value`))
|
||||
}
|
||||
|
||||
if r.UserPath != "" {
|
||||
if !strutil.StrListContains(r.CredentialTypes, iamUserCred) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("user_path parameter only valid for %s credential type", iamUserCred))
|
||||
}
|
||||
if !userPathRegex.MatchString(r.UserPath) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("The specified value for user_path is invalid. It must match %q regexp", userPathRegex.String()))
|
||||
}
|
||||
}
|
||||
|
||||
if r.PermissionsBoundaryARN != "" {
|
||||
if !strutil.StrListContains(r.CredentialTypes, iamUserCred) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("cannot supply permissions_boundary_arn when credential_type isn't %s", iamUserCred))
|
||||
}
|
||||
if err := validateAWSManagedPolicy(r.PermissionsBoundaryARN); err != nil {
|
||||
errors = multierror.Append(fmt.Errorf("invalid permissions_boundary_arn parameter: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(r.RoleArns) > 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) {
|
||||
errors = multierror.Append(errors, fmt.Errorf("cannot supply role_arns when credential_type isn't %s", assumedRoleCred))
|
||||
}
|
||||
|
||||
return errors.ErrorOrNil()
|
||||
}
|
||||
|
||||
// compactJSON returns input with all insignificant whitespace removed, or an
// error when input is not valid JSON.
func compactJSON(input string) (string, error) {
	var out bytes.Buffer
	if err := json.Compact(&out, []byte(input)); err != nil {
		return out.String(), err
	}
	return out.String(), nil
}
|
||||
|
||||
// The three credential types a role may issue; these are the only values
// accepted in awsRoleEntry.CredentialTypes.
const (
	assumedRoleCred     = "assumed_role"
	iamUserCred         = "iam_user"
	federationTokenCred = "federation_token"
)
|
||||
|
||||
// Help text surfaced by the framework for the roles endpoints.
const pathListRolesHelpSyn = `List the existing roles in this backend`

const pathListRolesHelpDesc = `Roles will be listed by the role name.`

const pathRolesHelpSyn = `
Read, write and reference IAM policies that access keys can be made for.
`

const pathRolesHelpDesc = `
This path allows you to read and write roles that are used to
create access keys. These roles are associated with IAM policies that
map directly to the route to read the access keys. For example, if the
backend is mounted at "aws" and you create a role at "aws/roles/deploy"
then a user could request access credentials at "aws/creds/deploy".

You can either supply a user inline policy (via the policy argument), or
provide a reference to an existing AWS policy by supplying the full arn
reference (via the arn argument). Inline user policies written are normal
IAM policies. Vault will not attempt to parse these except to validate
that they're basic JSON. No validation is performed on arn references.

To validate the keys, attempt to read an access key after writing the policy.
`
|
@ -1,451 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// adminAccessPolicyARN is the AWS-managed AdministratorAccess policy ARN,
// used by tests below as a well-known managed policy.
const adminAccessPolicyARN = "arn:aws:iam::aws:policy/AdministratorAccess"
|
||||
|
||||
// TestBackend_PathListRoles writes ten roles and verifies that listing via
// both "roles" and "roles/" returns all of them.
func TestBackend_PathListRoles(t *testing.T) {
	var resp *logical.Response
	var err error
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	b := Backend(config)
	if err := b.Setup(context.Background(), config); err != nil {
		t.Fatal(err)
	}

	roleData := map[string]interface{}{
		"role_arns":       []string{"arn:aws:iam::123456789012:role/path/RoleName"},
		"credential_type": assumedRoleCred,
		"default_sts_ttl": 3600,
		"max_sts_ttl":     3600,
	}

	roleReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Storage:   config.StorageView,
		Data:      roleData,
	}

	// Create testrole1..testrole10, reusing the same request with a new path.
	for i := 1; i <= 10; i++ {
		roleReq.Path = "roles/testrole" + strconv.Itoa(i)
		resp, err = b.HandleRequest(context.Background(), roleReq)
		if err != nil || (resp != nil && resp.IsError()) {
			t.Fatalf("bad: role creation failed. resp:%#v\n err:%v", resp, err)
		}
	}

	// List without a trailing slash.
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.ListOperation,
		Path:      "roles",
		Storage:   config.StorageView,
	})
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
	}

	if len(resp.Data["keys"].([]string)) != 10 {
		t.Fatalf("failed to list all 10 roles")
	}

	// List with a trailing slash; must behave identically.
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.ListOperation,
		Path:      "roles/",
		Storage:   config.StorageView,
	})
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err)
	}

	if len(resp.Data["keys"].([]string)) != 10 {
		t.Fatalf("failed to list all 10 roles")
	}
}
|
||||
|
||||
// TestUpgradeLegacyPolicyEntry exercises upgradeLegacyPolicyEntry against each
// shape of legacy role storage: a role ARN, a customer-managed policy ARN, an
// AWS-managed policy ARN, an inline JSON policy document, and finally a
// malformed ARN that must round-trip via the InvalidData field.
func TestUpgradeLegacyPolicyEntry(t *testing.T) {
	var input string
	var expected awsRoleEntry
	var output *awsRoleEntry

	// A role ARN upgrades to an assumed_role credential type.
	input = "arn:aws:iam::123456789012:role/path/RoleName"
	expected = awsRoleEntry{
		CredentialTypes:          []string{assumedRoleCred},
		RoleArns:                 []string{input},
		ProhibitFlexibleCredPath: true,
		Version:                  1,
	}
	output = upgradeLegacyPolicyEntry(input)
	if output.InvalidData != "" {
		t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData)
	}
	if !reflect.DeepEqual(*output, expected) {
		t.Fatalf("bad: expected %#v; received %#v", expected, *output)
	}

	// A customer-managed policy ARN upgrades to an iam_user credential type.
	input = "arn:aws:iam::123456789012:policy/MyPolicy"
	expected = awsRoleEntry{
		CredentialTypes:          []string{iamUserCred},
		PolicyArns:               []string{input},
		ProhibitFlexibleCredPath: true,
		Version:                  1,
	}
	output = upgradeLegacyPolicyEntry(input)
	if output.InvalidData != "" {
		t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData)
	}
	if !reflect.DeepEqual(*output, expected) {
		t.Fatalf("bad: expected %#v; received %#v", expected, *output)
	}

	// An AWS-managed policy ARN behaves the same way; only PolicyArns changes
	// relative to the previous expectation.
	input = "arn:aws:iam::aws:policy/AWSManagedPolicy"
	expected.PolicyArns = []string{input}
	output = upgradeLegacyPolicyEntry(input)
	if output.InvalidData != "" {
		t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData)
	}
	if !reflect.DeepEqual(*output, expected) {
		t.Fatalf("bad: expected %#v; received %#v", expected, *output)
	}

	// An inline policy document upgrades to both iam_user and federation_token
	// credential types, with the document stored in compacted form.
	input = `
{
	"Version": "2012-10-07",
	"Statement": [
	{
		"Effect": "Allow",
		"Action": "ec2:Describe*",
		"Resource": "*"
	}
	]
}`
	compacted, err := compactJSON(input)
	if err != nil {
		t.Fatalf("error parsing JSON: %v", err)
	}
	expected = awsRoleEntry{
		CredentialTypes:          []string{iamUserCred, federationTokenCred},
		PolicyDocument:           compacted,
		ProhibitFlexibleCredPath: true,
		Version:                  1,
	}
	output = upgradeLegacyPolicyEntry(input)
	if output.InvalidData != "" {
		t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData)
	}
	if !reflect.DeepEqual(*output, expected) {
		t.Fatalf("bad: expected %#v; received %#v", expected, *output)
	}

	// Due to lack of prior input validation, this could exist in the storage, and we need
	// to be able to read it out in some fashion, so have to handle this in a poor fashion
	input = "arn:gobbledygook"
	expected = awsRoleEntry{
		InvalidData: input,
		Version:     1,
	}
	output = upgradeLegacyPolicyEntry(input)
	if !reflect.DeepEqual(*output, expected) {
		t.Fatalf("bad: expected %#v; received %#v", expected, *output)
	}
}
|
||||
|
||||
func TestUserPathValidity(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
userPath string
|
||||
isValid bool
|
||||
}{
|
||||
{
|
||||
description: "Default",
|
||||
userPath: "/",
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "Empty",
|
||||
userPath: "",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "Valid",
|
||||
userPath: "/path/",
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "Missing leading slash",
|
||||
userPath: "path/",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "Missing trailing slash",
|
||||
userPath: "/path",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "Invalid character",
|
||||
userPath: "/šiauliai/",
|
||||
isValid: false,
|
||||
},
|
||||
{
|
||||
description: "Max length",
|
||||
userPath: "/" + strings.Repeat("a", 510) + "/",
|
||||
isValid: true,
|
||||
},
|
||||
{
|
||||
description: "Too long",
|
||||
userPath: "/" + strings.Repeat("a", 511) + "/",
|
||||
isValid: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
if tc.isValid != userPathRegex.MatchString(tc.userPath) {
|
||||
t.Fatalf("bad: expected %s", strconv.FormatBool(tc.isValid))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestRoleCRUDWithPermissionsBoundary writes a role carrying a
// permissions_boundary_arn, reads it back, and checks that both the credential
// type and the boundary ARN survive the round trip through storage.
func TestRoleCRUDWithPermissionsBoundary(t *testing.T) {
	roleName := "test_perm_boundary"

	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	b := Backend(config)
	if err := b.Setup(context.Background(), config); err != nil {
		t.Fatal(err)
	}

	permissionsBoundaryARN := "arn:aws:iam::aws:policy/EC2FullAccess"

	// Create the role with a permissions boundary attached.
	roleData := map[string]interface{}{
		"credential_type":          iamUserCred,
		"policy_arns":              []string{adminAccessPolicyARN},
		"permissions_boundary_arn": permissionsBoundaryARN,
	}
	request := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/" + roleName,
		Storage:   config.StorageView,
		Data:      roleData,
	}
	resp, err := b.HandleRequest(context.Background(), request)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: role creation failed. resp:%#v\nerr:%v", resp, err)
	}

	// Read the role back and verify the stored fields.
	request = &logical.Request{
		Operation: logical.ReadOperation,
		Path:      "roles/" + roleName,
		Storage:   config.StorageView,
	}
	resp, err = b.HandleRequest(context.Background(), request)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: reading role failed. resp:%#v\nerr:%v", resp, err)
	}
	if resp.Data["credential_type"] != iamUserCred {
		t.Errorf("bad: expected credential_type of %s, got %s instead", iamUserCred, resp.Data["credential_type"])
	}
	if resp.Data["permissions_boundary_arn"] != permissionsBoundaryARN {
		t.Errorf("bad: expected permissions_boundary_arn of %s, got %s instead", permissionsBoundaryARN, resp.Data["permissions_boundary_arn"])
	}
}
|
||||
|
||||
// TestRoleWithPermissionsBoundaryValidation checks that role writes are
// rejected when permissions_boundary_arn is combined with a credential type
// other than iam_user, or when the boundary ARN itself is malformed.
func TestRoleWithPermissionsBoundaryValidation(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	b := Backend(config)
	if err := b.Setup(context.Background(), config); err != nil {
		t.Fatal(err)
	}

	// Boundary ARN with the wrong credential type must be rejected.
	roleData := map[string]interface{}{
		"credential_type":          assumedRoleCred, // only iamUserCred supported with permissions_boundary_arn
		"role_arns":                []string{"arn:aws:iam::123456789012:role/VaultRole"},
		"permissions_boundary_arn": "arn:aws:iam::aws:policy/FooBar",
	}
	request := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/test_perm_boundary",
		Storage:   config.StorageView,
		Data:      roleData,
	}
	resp, err := b.HandleRequest(context.Background(), request)
	if err == nil && (resp == nil || !resp.IsError()) {
		t.Fatalf("bad: expected role creation to fail due to bad credential_type, but it didn't. resp:%#v\nerr:%v", resp, err)
	}

	// A non-IAM ARN is not a valid permissions boundary and must be rejected.
	roleData = map[string]interface{}{
		"credential_type":          iamUserCred,
		"policy_arns":              []string{adminAccessPolicyARN},
		"permissions_boundary_arn": "arn:aws:notiam::aws:policy/FooBar",
	}
	request.Data = roleData
	resp, err = b.HandleRequest(context.Background(), request)
	if err == nil && (resp == nil || !resp.IsError()) {
		t.Fatalf("bad: expected role creation to fail due to malformed permissions_boundary_arn, but it didn't. resp:%#v\nerr:%v", resp, err)
	}
}
|
||||
|
||||
func TestValidateAWSManagedPolicy(t *testing.T) {
|
||||
expectErr := func(arn string) {
|
||||
err := validateAWSManagedPolicy(arn)
|
||||
if err == nil {
|
||||
t.Errorf("bad: expected arn of %s to return an error but it didn't", arn)
|
||||
}
|
||||
}
|
||||
|
||||
expectErr("not_an_arn")
|
||||
expectErr("notarn:aws:iam::aws:policy/FooBar")
|
||||
expectErr("arn:aws:notiam::aws:policy/FooBar")
|
||||
expectErr("arn:aws:iam::aws:notpolicy/FooBar")
|
||||
expectErr("arn:aws:iam::aws:policynot/FooBar")
|
||||
|
||||
arn := "arn:aws:iam::aws:policy/FooBar"
|
||||
err := validateAWSManagedPolicy(arn)
|
||||
if err != nil {
|
||||
t.Errorf("bad: expected arn of %s to not return an error but it did: %#v", arn, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoleEntryValidationCredTypes(t *testing.T) {
|
||||
roleEntry := awsRoleEntry{
|
||||
CredentialTypes: []string{},
|
||||
PolicyArns: []string{adminAccessPolicyARN},
|
||||
}
|
||||
if roleEntry.validate() == nil {
|
||||
t.Errorf("bad: invalid roleEntry with no CredentialTypes %#v passed validation", roleEntry)
|
||||
}
|
||||
roleEntry.CredentialTypes = []string{"invalid_type"}
|
||||
if roleEntry.validate() == nil {
|
||||
t.Errorf("bad: invalid roleEntry with invalid CredentialTypes %#v passed validation", roleEntry)
|
||||
}
|
||||
roleEntry.CredentialTypes = []string{iamUserCred, "invalid_type"}
|
||||
if roleEntry.validate() == nil {
|
||||
t.Errorf("bad: invalid roleEntry with invalid CredentialTypes %#v passed validation", roleEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRoleEntryValidationIamUserCred checks validate() for the iam_user
// credential type: policy ARNs / documents / boundary ARNs are allowed (and
// optional), role ARNs are not, and the STS TTL fields are rejected.
func TestRoleEntryValidationIamUserCred(t *testing.T) {
	allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}`
	roleEntry := awsRoleEntry{
		CredentialTypes:        []string{iamUserCred},
		PolicyArns:             []string{adminAccessPolicyARN},
		PermissionsBoundaryARN: adminAccessPolicyARN,
	}
	err := roleEntry.validate()
	if err != nil {
		t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err)
	}
	// Adding an inline policy document is still valid.
	roleEntry.PolicyDocument = allowAllPolicyDocument
	err = roleEntry.validate()
	if err != nil {
		t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err)
	}
	// Policy ARNs are optional for iam_user.
	roleEntry.PolicyArns = []string{}
	err = roleEntry.validate()
	if err != nil {
		t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err)
	}

	// RoleArns are not valid for iam_user credentials.
	roleEntry = awsRoleEntry{
		CredentialTypes: []string{iamUserCred},
		RoleArns:        []string{"arn:aws:iam::123456789012:role/SomeRole"},
	}
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with invalid RoleArns parameter %#v passed validation", roleEntry)
	}

	// Neither STS TTL field applies to iam_user credentials.
	roleEntry = awsRoleEntry{
		CredentialTypes: []string{iamUserCred},
		PolicyArns:      []string{adminAccessPolicyARN},
		DefaultSTSTTL:   1,
	}
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized DefaultSTSTTL %#v passed validation", roleEntry)
	}
	roleEntry.DefaultSTSTTL = 0
	roleEntry.MaxSTSTTL = 1
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized MaxSTSTTL %#v passed validation", roleEntry)
	}
}
|
||||
|
||||
// TestRoleEntryValidationAssumedRoleCred checks validate() for the
// assumed_role credential type: STS TTLs are allowed (Max >= Default), while
// UserPath and PermissionsBoundaryARN are rejected for this type.
func TestRoleEntryValidationAssumedRoleCred(t *testing.T) {
	allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}`
	roleEntry := awsRoleEntry{
		CredentialTypes: []string{assumedRoleCred},
		RoleArns:        []string{"arn:aws:iam::123456789012:role/SomeRole"},
		PolicyArns:      []string{adminAccessPolicyARN},
		PolicyDocument:  allowAllPolicyDocument,
		DefaultSTSTTL:   2,
		MaxSTSTTL:       3,
	}
	if err := roleEntry.validate(); err != nil {
		t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err)
	}

	// MaxSTSTTL must not be smaller than DefaultSTSTTL.
	roleEntry.MaxSTSTTL = 1
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with MaxSTSTTL < DefaultSTSTTL %#v passed validation", roleEntry)
	}
	roleEntry.MaxSTSTTL = 0
	// UserPath is not valid for assumed_role credentials.
	roleEntry.UserPath = "/foobar/"
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized UserPath %#v passed validation", roleEntry)
	}
	roleEntry.UserPath = ""
	// Permissions boundaries are not valid for assumed_role credentials.
	roleEntry.PermissionsBoundaryARN = adminAccessPolicyARN
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized PermissionsBoundary %#v passed validation", roleEntry)
	}
}
|
||||
|
||||
// TestRoleEntryValidationFederationTokenCred checks validate() for the
// federation_token credential type: policy documents/ARNs and STS TTLs are
// allowed, while RoleArns, UserPath, and PermissionsBoundaryARN are rejected.
func TestRoleEntryValidationFederationTokenCred(t *testing.T) {
	allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}`
	roleEntry := awsRoleEntry{
		CredentialTypes: []string{federationTokenCred},
		PolicyDocument:  allowAllPolicyDocument,
		PolicyArns:      []string{adminAccessPolicyARN},
		DefaultSTSTTL:   2,
		MaxSTSTTL:       3,
	}
	if err := roleEntry.validate(); err != nil {
		t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err)
	}

	// RoleArns are not valid for federation_token credentials.
	roleEntry.RoleArns = []string{"arn:aws:iam::123456789012:role/SomeRole"}
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized RoleArns %#v passed validation", roleEntry)
	}
	roleEntry.RoleArns = []string{}
	// UserPath is not valid for federation_token credentials.
	roleEntry.UserPath = "/foobar/"
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized UserPath %#v passed validation", roleEntry)
	}

	roleEntry.UserPath = ""
	// MaxSTSTTL must not be smaller than DefaultSTSTTL.
	roleEntry.MaxSTSTTL = 1
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with MaxSTSTTL < DefaultSTSTTL %#v passed validation", roleEntry)
	}
	roleEntry.MaxSTSTTL = 0
	// Permissions boundaries are not valid for federation_token credentials.
	roleEntry.PermissionsBoundaryARN = adminAccessPolicyARN
	if roleEntry.validate() == nil {
		t.Errorf("bad: invalid roleEntry with unrecognized PermissionsBoundary %#v passed validation", roleEntry)
	}
}
|
@ -1,102 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
const (
	// pathStaticCreds is the storage/API path prefix for static credentials.
	pathStaticCreds = "static-creds"

	// Response field names for the static credential endpoint.
	paramAccessKeyID      = "access_key"
	paramSecretsAccessKey = "secret_key"
)

// awsCredentials is the storage representation of one static role's AWS
// access key pair.
type awsCredentials struct {
	AccessKeyID     string `json:"access_key" structs:"access_key" mapstructure:"access_key"`
	SecretAccessKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"`
}
|
||||
|
||||
// pathStaticCredentials defines the static-creds/<role name> endpoint, which
// serves the currently-stored access key pair for a static role. Only read is
// supported; the keys themselves change only through rotation.
func pathStaticCredentials(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: fmt.Sprintf("%s/%s", pathStaticCreds, framework.GenericNameWithAtRegex(paramRoleName)),
		Fields: map[string]*framework.FieldSchema{
			paramRoleName: {
				Type:        framework.TypeString,
				Description: descRoleName,
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathStaticCredsRead,
				Responses: map[int][]framework.Response{
					http.StatusOK: {{
						Description: http.StatusText(http.StatusOK),
						Fields: map[string]*framework.FieldSchema{
							paramAccessKeyID: {
								Type:        framework.TypeString,
								Description: descAccessKeyID,
							},
							paramSecretsAccessKey: {
								Type:        framework.TypeString,
								Description: descSecretAccessKey,
							},
						},
					}},
				},
			},
		},

		HelpSynopsis:    pathStaticCredsHelpSyn,
		HelpDescription: pathStaticCredsHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathStaticCredsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
roleName, ok := data.GetOk(paramRoleName)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing %q parameter", paramRoleName)
|
||||
}
|
||||
|
||||
entry, err := req.Storage.Get(ctx, formatCredsStoragePath(roleName.(string)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read credentials for role %q: %w", roleName, err)
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var credentials awsCredentials
|
||||
if err := entry.DecodeJSON(&credentials); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode credentials: %w", err)
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: structs.New(credentials).Map(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func formatCredsStoragePath(roleName string) string {
|
||||
return fmt.Sprintf("%s/%s", pathStaticCreds, roleName)
|
||||
}
|
||||
|
||||
// pathStaticCredsHelpSyn is the one-line help text for the static-creds endpoint.
const pathStaticCredsHelpSyn = `Retrieve static credentials from the named role.`

// pathStaticCredsHelpDesc is the long-form help text for the static-creds endpoint.
const pathStaticCredsHelpDesc = `
This path reads AWS credentials for a certain static role. The keys are rotated
periodically according to their configuration, and will return the same password
until they are rotated.`

// Field descriptions for the static-creds response schema.
const (
	descAccessKeyID     = "The access key of the AWS Credential"
	descSecretAccessKey = "The secret key of the AWS Credential"
)
|
@ -1,95 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/fatih/structs"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// TestStaticCredsRead verifies that we can correctly read a cred that exists, and correctly _not read_
// a cred that does not exist.
func TestStaticCredsRead(t *testing.T) {
	// setup
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	bgCTX := context.Background() // for brevity later

	// insert a cred to get
	creds := &awsCredentials{
		AccessKeyID:     "foo",
		SecretAccessKey: "bar",
	}
	entry, err := logical.StorageEntryJSON(formatCredsStoragePath("test"), creds)
	if err != nil {
		t.Fatal(err)
	}
	err = config.StorageView.Put(bgCTX, entry)
	if err != nil {
		t.Fatal(err)
	}

	// cases
	cases := []struct {
		name             string
		roleName         string
		expectedError    error
		expectedResponse *logical.Response
	}{
		{
			name:     "get existing creds",
			roleName: "test",
			expectedResponse: &logical.Response{
				Data: structs.New(creds).Map(),
			},
		},
		{
			name:     "get non-existent creds",
			roleName: "this-doesnt-exist",
			// returns nil, nil
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			b := Backend(config)

			req := &logical.Request{
				Storage: config.StorageView,
				Data: map[string]interface{}{
					"name": c.roleName,
				},
			}
			// Call the handler directly (not via HandleRequest) so the
			// response can be compared structurally against the expectation.
			resp, err := b.pathStaticCredsRead(bgCTX, req, staticCredsFieldData(req.Data))

			if err != c.expectedError {
				t.Fatalf("got error %q, but expected %q", err, c.expectedError)
			}
			if !reflect.DeepEqual(resp, c.expectedResponse) {
				t.Fatalf("got response %v, but expected %v", resp, c.expectedResponse)
			}
		})
	}
}
|
||||
|
||||
func staticCredsFieldData(data map[string]interface{}) *framework.FieldData {
|
||||
schema := map[string]*framework.FieldSchema{
|
||||
paramRoleName: {
|
||||
Type: framework.TypeString,
|
||||
Description: descRoleName,
|
||||
},
|
||||
}
|
||||
|
||||
return &framework.FieldData{
|
||||
Raw: data,
|
||||
Schema: schema,
|
||||
}
|
||||
}
|
@ -1,334 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/fatih/structs"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/sdk/queue"
|
||||
)
|
||||
|
||||
const (
	// pathStaticRole is the storage/API path prefix for static roles.
	pathStaticRole = "static-roles"

	// Request/response field names for the static role endpoints.
	paramRoleName       = "name"
	paramUsername       = "username"
	paramRotationPeriod = "rotation_period"
)

// staticRoleEntry is the storage representation of one static role: a Vault
// role bound to a single pre-existing IAM user whose access keys Vault
// rotates on a schedule.
type staticRoleEntry struct {
	Name string `json:"name" structs:"name" mapstructure:"name"`
	// ID is the AWS-assigned user ID, captured on create and used as an
	// immutable cross-check on update (see validateIAMUserExists).
	ID             string        `json:"id" structs:"id" mapstructure:"id"`
	Username       string        `json:"username" structs:"username" mapstructure:"username"`
	RotationPeriod time.Duration `json:"rotation_period" structs:"rotation_period" mapstructure:"rotation_period"`
}
|
||||
|
||||
// pathStaticRoles defines the static-roles/<name> endpoint with read, write,
// and delete operations. Writes and deletes are forwarded from performance
// secondaries and standbys so only the active node mutates role state.
func pathStaticRoles(b *backend) *framework.Path {
	// roleResponse is shared by the read and write operations, which both
	// return the full role configuration.
	roleResponse := map[int][]framework.Response{
		http.StatusOK: {{
			Description: http.StatusText(http.StatusOK),
			Fields: map[string]*framework.FieldSchema{
				paramRoleName: {
					Type:        framework.TypeString,
					Description: descRoleName,
				},
				paramUsername: {
					Type:        framework.TypeString,
					Description: descUsername,
				},
				paramRotationPeriod: {
					Type:        framework.TypeDurationSecond,
					Description: descRotationPeriod,
				},
			},
		}},
	}

	return &framework.Path{
		Pattern: fmt.Sprintf("%s/%s", pathStaticRole, framework.GenericNameWithAtRegex(paramRoleName)),
		Fields: map[string]*framework.FieldSchema{
			paramRoleName: {
				Type:        framework.TypeString,
				Description: descRoleName,
			},
			paramUsername: {
				Type:        framework.TypeString,
				Description: descUsername,
			},
			paramRotationPeriod: {
				Type:        framework.TypeDurationSecond,
				Description: descRotationPeriod,
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathStaticRolesRead,
				Responses: roleResponse,
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback:                    b.pathStaticRolesWrite,
				ForwardPerformanceSecondary: true,
				ForwardPerformanceStandby:   true,
				Responses:                   roleResponse,
			},
			logical.DeleteOperation: &framework.PathOperation{
				Callback:                    b.pathStaticRolesDelete,
				ForwardPerformanceSecondary: true,
				ForwardPerformanceStandby:   true,
				Responses: map[int][]framework.Response{
					http.StatusNoContent: {{
						Description: http.StatusText(http.StatusNoContent),
					}},
				},
			},
		},

		HelpSynopsis:    pathStaticRolesHelpSyn,
		HelpDescription: pathStaticRolesHelpDesc,
	}
}
|
||||
|
||||
func (b *backend) pathStaticRolesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
roleName, ok := data.GetOk(paramRoleName)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing %q parameter", paramRoleName)
|
||||
}
|
||||
|
||||
b.roleMutex.RLock()
|
||||
defer b.roleMutex.RUnlock()
|
||||
|
||||
entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read configuration for static role %q: %w", roleName, err)
|
||||
}
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var config staticRoleEntry
|
||||
if err := entry.DecodeJSON(&config); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode configuration for static role %q: %w", roleName, err)
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: formatResponse(config),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
// Create & validate config from request parameters
|
||||
config := staticRoleEntry{}
|
||||
isCreate := req.Operation == logical.CreateOperation
|
||||
|
||||
if rawRoleName, ok := data.GetOk(paramRoleName); ok {
|
||||
config.Name = rawRoleName.(string)
|
||||
|
||||
if err := b.validateRoleName(config.Name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return logical.ErrorResponse("missing %q parameter", paramRoleName), nil
|
||||
}
|
||||
|
||||
// retrieve old role value
|
||||
entry, err := req.Storage.Get(ctx, formatRoleStoragePath(config.Name))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't check storage for pre-existing role: %w", err)
|
||||
}
|
||||
|
||||
if entry != nil {
|
||||
err = entry.DecodeJSON(&config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't convert existing role into config struct: %w", err)
|
||||
}
|
||||
} else {
|
||||
// if we couldn't find an entry, this is a create event
|
||||
isCreate = true
|
||||
}
|
||||
|
||||
// other params are optional if we're not Creating
|
||||
|
||||
if rawUsername, ok := data.GetOk(paramUsername); ok {
|
||||
config.Username = rawUsername.(string)
|
||||
|
||||
if err := b.validateIAMUserExists(ctx, req.Storage, &config, isCreate); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if isCreate {
|
||||
return logical.ErrorResponse("missing %q parameter", paramUsername), nil
|
||||
}
|
||||
|
||||
if rawRotationPeriod, ok := data.GetOk(paramRotationPeriod); ok {
|
||||
config.RotationPeriod = time.Duration(rawRotationPeriod.(int)) * time.Second
|
||||
|
||||
if err := b.validateRotationPeriod(config.RotationPeriod); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if isCreate {
|
||||
return logical.ErrorResponse("missing %q parameter", paramRotationPeriod), nil
|
||||
}
|
||||
|
||||
b.roleMutex.Lock()
|
||||
defer b.roleMutex.Unlock()
|
||||
|
||||
// Upsert role config
|
||||
newRole, err := logical.StorageEntryJSON(formatRoleStoragePath(config.Name), config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal object to JSON: %w", err)
|
||||
}
|
||||
err = req.Storage.Put(ctx, newRole)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to save object in storage: %w", err)
|
||||
}
|
||||
|
||||
// Bootstrap initial set of keys if they did not exist before. AWS Secret Access Keys can only be obtained on creation,
|
||||
// so we need to boostrap new roles with a new initial set of keys to be able to serve valid credentials to Vault clients.
|
||||
existingCreds, err := req.Storage.Get(ctx, formatCredsStoragePath(config.Name))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to verify if credentials already exist for role %q: %w", config.Name, err)
|
||||
}
|
||||
if existingCreds == nil {
|
||||
err := b.createCredential(ctx, req.Storage, config, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new credentials for role %q: %w", config.Name, err)
|
||||
}
|
||||
|
||||
err = b.credRotationQueue.Push(&queue.Item{
|
||||
Key: config.Name,
|
||||
Value: config,
|
||||
Priority: time.Now().Add(config.RotationPeriod).Unix(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return &logical.Response{
|
||||
Data: formatResponse(config),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *backend) pathStaticRolesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
roleName, ok := data.GetOk(paramRoleName)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing %q parameter", paramRoleName)
|
||||
}
|
||||
|
||||
b.roleMutex.Lock()
|
||||
defer b.roleMutex.Unlock()
|
||||
|
||||
entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't locate role in storage due to error: %w", err)
|
||||
}
|
||||
// no entry in storage, but no error either, congrats, it's deleted!
|
||||
if entry == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var cfg staticRoleEntry
|
||||
err = entry.DecodeJSON(&cfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't convert storage entry to role config")
|
||||
}
|
||||
|
||||
err = b.deleteCredential(ctx, req.Storage, cfg, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to clean credentials while deleting role %q: %w", roleName.(string), err)
|
||||
}
|
||||
|
||||
// delete from the queue
|
||||
_, err = b.credRotationQueue.PopByKey(cfg.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't delete key from queue: %w", err)
|
||||
}
|
||||
|
||||
return nil, req.Storage.Delete(ctx, formatRoleStoragePath(roleName.(string)))
|
||||
}
|
||||
|
||||
func (b *backend) validateRoleName(name string) error {
|
||||
if name == "" {
|
||||
return errors.New("empty role name attribute given")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateIAMUser checks the user information we have for the role against the information on AWS. On a create, it uses the username
|
||||
// to retrieve the user information and _sets_ the userID. On update, it validates the userID and username.
|
||||
func (b *backend) validateIAMUserExists(ctx context.Context, storage logical.Storage, entry *staticRoleEntry, isCreate bool) error {
|
||||
c, err := b.clientIAM(ctx, storage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
|
||||
}
|
||||
|
||||
// we don't really care about the content of the result, just that it's not an error
|
||||
out, err := c.GetUser(&iam.GetUserInput{
|
||||
UserName: aws.String(entry.Username),
|
||||
})
|
||||
if err != nil || out.User == nil {
|
||||
return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
|
||||
}
|
||||
if *out.User.UserName != entry.Username {
|
||||
return fmt.Errorf("AWS GetUser returned a username, but it didn't match: %q was requested, but %q was returned", entry.Username, *out.User.UserName)
|
||||
}
|
||||
|
||||
if !isCreate && *out.User.UserId != entry.ID {
|
||||
return fmt.Errorf("AWS GetUser returned a user, but the ID did not match: %q was requested, but %q was returned", entry.ID, *out.User.UserId)
|
||||
} else {
|
||||
// if this is an insert, store the userID. This is the immutable part of an IAM user, but it's not exactly user-friendly.
|
||||
// So, we allow users to specify usernames, but on updates we'll use the ID as a verification cross-check.
|
||||
entry.ID = *out.User.UserId
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
minAllowableRotationPeriod = 1 * time.Minute
|
||||
)
|
||||
|
||||
func (b *backend) validateRotationPeriod(period time.Duration) error {
|
||||
if period < minAllowableRotationPeriod {
|
||||
return fmt.Errorf("role rotation period out of range: must be greater than %.2f seconds", minAllowableRotationPeriod.Seconds())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func formatResponse(cfg staticRoleEntry) map[string]interface{} {
|
||||
response := structs.New(cfg).Map()
|
||||
response[paramRotationPeriod] = int64(cfg.RotationPeriod.Seconds())
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
func formatRoleStoragePath(roleName string) string {
|
||||
return fmt.Sprintf("%s/%s", pathStaticRole, roleName)
|
||||
}
|
||||
|
||||
// pathStaticRolesHelpSyn is the short help synopsis for the static-roles path.
const pathStaticRolesHelpSyn = `
Manage static roles for AWS.
`

// pathStaticRolesHelpDesc is the long-form help shown for the static-roles path.
const pathStaticRolesHelpDesc = `
This path lets you manage static roles (users) for the AWS secret backend.
A static role is associated with a single IAM user, and manages the access
keys based on a rotation period, automatically rotating the credential. If
the IAM user has multiple access keys, the oldest key will be rotated.
`
|
||||
|
||||
// Field descriptions surfaced in the API help output for static-role paths.
const (
	descRoleName       = "The name of this role."
	descUsername       = "The IAM user to adopt as a static role."
	descRotationPeriod = `Period by which to rotate the backing credential of the adopted user.
This can be a Go duration (e.g., '1m', '24h'), or an integer number of seconds.`
	// Note: the example list previously read "(e.g, '1m', 24h')" — missing
	// the period after "e.g." and the opening quote around '24h'.
)
|
@ -1,493 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/queue"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
// TestStaticRolesValidation verifies that valid requests pass validation and that invalid requests fail validation.
// This includes the user already existing in IAM roles, and the rotation period being sufficiently long.
func TestStaticRolesValidation(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	bgCTX := context.Background() // for brevity

	cases := []struct {
		name        string
		opts        []awsutil.MockIAMOption // canned IAM responses the backend will see
		requestData map[string]interface{}  // payload for the static-role write request
		isError     bool                    // whether pathStaticRolesWrite should fail
	}{
		{
			name: "all good",
			opts: []awsutil.MockIAMOption{
				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}),
				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
					AccessKey: &iam.AccessKey{
						AccessKeyId:     aws.String("abcdefghijklmnopqrstuvwxyz"),
						SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"),
						UserName:        aws.String("jane-doe"),
					},
				}),
				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
					AccessKeyMetadata: []*iam.AccessKeyMetadata{},
					IsTruncated:       aws.Bool(false),
				}),
			},
			requestData: map[string]interface{}{
				"name":            "test",
				"username":        "jane-doe",
				"rotation_period": "1d",
			},
		},
		{
			// GetUser fails outright, so the IAM user can't be validated.
			name: "bad user",
			opts: []awsutil.MockIAMOption{
				awsutil.WithGetUserError(errors.New("oh no")),
			},
			requestData: map[string]interface{}{
				"name":            "test",
				"username":        "jane-doe",
				"rotation_period": "24h",
			},
			isError: true,
		},
		{
			// GetUser returns a different user than the one requested.
			name: "user mismatch",
			opts: []awsutil.MockIAMOption{
				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("ms-impostor"), UserId: aws.String("fake-id")}}),
			},
			requestData: map[string]interface{}{
				"name":            "test",
				"username":        "jane-doe",
				"rotation_period": "1d2h",
			},
			isError: true,
		},
		{
			// Rotation period is below the backend's allowed minimum.
			name: "bad rotation period",
			opts: []awsutil.MockIAMOption{
				awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}),
			},
			requestData: map[string]interface{}{
				"name":            "test",
				"username":        "jane-doe",
				"rotation_period": "45s",
			},
			isError: true,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			b := Backend(config)
			miam, err := awsutil.NewMockIAM(c.opts...)(nil)
			if err != nil {
				t.Fatal(err)
			}
			b.iamClient = miam
			if err := b.Setup(bgCTX, config); err != nil {
				t.Fatal(err)
			}
			req := &logical.Request{
				Operation: logical.UpdateOperation,
				Storage:   config.StorageView,
				Data:      c.requestData,
				Path:      "static-roles/test",
			}
			_, err = b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data))
			if c.isError && err == nil {
				t.Fatal("expected an error but didn't get one")
			} else if !c.isError && err != nil {
				t.Fatalf("got an unexpected error: %s", err)
			}
		})
	}
}
|
||||
|
||||
// TestStaticRolesWrite validates that we can write a new entry for a new static role, and that we correctly
|
||||
// do not write if the request is invalid in some way.
|
||||
func TestStaticRolesWrite(t *testing.T) {
|
||||
bgCTX := context.Background()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
opts []awsutil.MockIAMOption
|
||||
data map[string]interface{}
|
||||
expectedError bool
|
||||
findUser bool
|
||||
isUpdate bool
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
opts: []awsutil.MockIAMOption{
|
||||
awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}),
|
||||
awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
|
||||
AccessKeyMetadata: []*iam.AccessKeyMetadata{},
|
||||
IsTruncated: aws.Bool(false),
|
||||
}),
|
||||
awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
|
||||
AccessKey: &iam.AccessKey{
|
||||
AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"),
|
||||
SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"),
|
||||
UserName: aws.String("jane-doe"),
|
||||
},
|
||||
}),
|
||||
},
|
||||
data: map[string]interface{}{
|
||||
"name": "test",
|
||||
"username": "jane-doe",
|
||||
"rotation_period": "1d",
|
||||
},
|
||||
// writes role, writes cred
|
||||
findUser: true,
|
||||
},
|
||||
{
|
||||
name: "no aws user",
|
||||
opts: []awsutil.MockIAMOption{
|
||||
awsutil.WithGetUserError(errors.New("no such user, etc etc")),
|
||||
},
|
||||
data: map[string]interface{}{
|
||||
"name": "test",
|
||||
"username": "a-nony-mous",
|
||||
"rotation_period": "15s",
|
||||
},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "update existing user",
|
||||
opts: []awsutil.MockIAMOption{
|
||||
awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}),
|
||||
awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
|
||||
AccessKeyMetadata: []*iam.AccessKeyMetadata{},
|
||||
IsTruncated: aws.Bool(false),
|
||||
}),
|
||||
awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
|
||||
AccessKey: &iam.AccessKey{
|
||||
AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"),
|
||||
SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"),
|
||||
UserName: aws.String("john-doe"),
|
||||
},
|
||||
}),
|
||||
},
|
||||
data: map[string]interface{}{
|
||||
"name": "johnny",
|
||||
"rotation_period": "19m",
|
||||
},
|
||||
findUser: true,
|
||||
isUpdate: true,
|
||||
},
|
||||
}
|
||||
|
||||
// if a user exists (user doesn't exist is tested in validation)
|
||||
// we'll check how many keys the user has - if it's two, we delete one.
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
|
||||
miam, err := awsutil.NewMockIAM(
|
||||
c.opts...,
|
||||
)(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := Backend(config)
|
||||
b.iamClient = miam
|
||||
if err := b.Setup(bgCTX, config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// put a role in storage for update tests
|
||||
staticRole := staticRoleEntry{
|
||||
Name: "johnny",
|
||||
Username: "john-doe",
|
||||
ID: "unique-id",
|
||||
RotationPeriod: 24 * time.Hour,
|
||||
}
|
||||
entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = config.StorageView.Put(bgCTX, entry)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Storage: config.StorageView,
|
||||
Data: c.data,
|
||||
Path: "static-roles/" + c.data["name"].(string),
|
||||
}
|
||||
|
||||
r, err := b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data))
|
||||
if c.expectedError && err == nil {
|
||||
t.Fatal(err)
|
||||
} else if c.expectedError {
|
||||
return // save us some if statements
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("got an error back unexpectedly: %s", err)
|
||||
}
|
||||
|
||||
if c.findUser && r == nil {
|
||||
t.Fatal("response was nil, but it shouldn't have been")
|
||||
}
|
||||
|
||||
role, err := config.StorageView.Get(bgCTX, req.Path)
|
||||
if c.findUser && (err != nil || role == nil) {
|
||||
t.Fatalf("couldn't find the role we should have stored: %s", err)
|
||||
}
|
||||
var actualData staticRoleEntry
|
||||
err = role.DecodeJSON(&actualData)
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't convert storage data to role entry: %s", err)
|
||||
}
|
||||
|
||||
// construct expected data
|
||||
var expectedData staticRoleEntry
|
||||
fieldData := staticRoleFieldData(c.data)
|
||||
if c.isUpdate {
|
||||
// data is johnny + c.data
|
||||
expectedData = staticRole
|
||||
}
|
||||
|
||||
if u, ok := fieldData.GetOk("username"); ok {
|
||||
expectedData.Username = u.(string)
|
||||
}
|
||||
if r, ok := fieldData.GetOk("rotation_period"); ok {
|
||||
expectedData.RotationPeriod = time.Duration(r.(int)) * time.Second
|
||||
}
|
||||
if n, ok := fieldData.GetOk("name"); ok {
|
||||
expectedData.Name = n.(string)
|
||||
}
|
||||
|
||||
// validate fields
|
||||
if eu, au := expectedData.Username, actualData.Username; eu != au {
|
||||
t.Fatalf("mismatched username, expected %q but got %q", eu, au)
|
||||
}
|
||||
if er, ar := expectedData.RotationPeriod, actualData.RotationPeriod; er != ar {
|
||||
t.Fatalf("mismatched rotation period, expected %q but got %q", er, ar)
|
||||
}
|
||||
if en, an := expectedData.Name, actualData.Name; en != an {
|
||||
t.Fatalf("mismatched role name, expected %q, but got %q", en, an)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestStaticRoleRead validates that we can read a configured role and correctly do not read anything if we
|
||||
// request something that doesn't exist.
|
||||
func TestStaticRoleRead(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
bgCTX := context.Background()
|
||||
|
||||
// test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe"
|
||||
cases := []struct {
|
||||
name string
|
||||
roleName string
|
||||
found bool
|
||||
}{
|
||||
{
|
||||
name: "role name exists",
|
||||
roleName: "test",
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "role name not found",
|
||||
roleName: "toast",
|
||||
found: false, // implied, but set for clarity
|
||||
},
|
||||
}
|
||||
|
||||
staticRole := staticRoleEntry{
|
||||
Name: "test",
|
||||
Username: "jane-doe",
|
||||
RotationPeriod: 24 * time.Hour,
|
||||
}
|
||||
entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = config.StorageView.Put(bgCTX, entry)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
req := &logical.Request{
|
||||
Operation: logical.ReadOperation,
|
||||
Storage: config.StorageView,
|
||||
Data: map[string]interface{}{
|
||||
"name": c.roleName,
|
||||
},
|
||||
Path: formatRoleStoragePath(c.roleName),
|
||||
}
|
||||
|
||||
b := Backend(config)
|
||||
|
||||
r, err := b.pathStaticRolesRead(bgCTX, req, staticRoleFieldData(req.Data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c.found {
|
||||
if r == nil {
|
||||
t.Fatal("response was nil, but it shouldn't have been")
|
||||
}
|
||||
} else {
|
||||
if r != nil {
|
||||
t.Fatal("response should have been nil on a non-existent role")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestStaticRoleDelete validates that we correctly remove a role on a delete request, and that we correctly do not
// remove anything if a role does not exist with that name.
func TestStaticRoleDelete(t *testing.T) {
	bgCTX := context.Background()

	// test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe"
	cases := []struct {
		name  string
		role  string
		found bool
	}{
		{
			name:  "role found",
			role:  "test",
			found: true,
		},
		{
			name:  "role not found",
			role:  "tossed",
			found: false,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			config := logical.TestBackendConfig()
			config.StorageView = &logical.InmemStorage{}

			// fake an IAM
			// When the role shouldn't be found, the mock makes DeleteAccessKey
			// fail so any unexpected deletion surfaces as a test failure.
			var iamfunc awsutil.IAMAPIFunc
			if !c.found {
				iamfunc = awsutil.NewMockIAM(awsutil.WithDeleteAccessKeyError(errors.New("shouldn't have called delete")))
			} else {
				iamfunc = awsutil.NewMockIAM()
			}
			miam, err := iamfunc(nil)
			if err != nil {
				t.Fatalf("couldn't initialize mockiam: %s", err)
			}

			b := Backend(config)
			b.iamClient = miam

			// put in storage
			staticRole := staticRoleEntry{
				Name:           "test",
				Username:       "jane-doe",
				RotationPeriod: 24 * time.Hour,
			}
			entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
			if err != nil {
				t.Fatal(err)
			}
			err = config.StorageView.Put(bgCTX, entry)
			if err != nil {
				t.Fatal(err)
			}

			l, err := config.StorageView.List(bgCTX, "")
			if err != nil || len(l) != 1 {
				t.Fatalf("couldn't add an entry to storage during test setup: %s", err)
			}

			// put in queue
			// The priority is far in the future so the rotation worker won't touch it.
			err = b.credRotationQueue.Push(&queue.Item{
				Key:      staticRole.Name,
				Value:    staticRole,
				Priority: time.Now().Add(90 * time.Hour).Unix(),
			})
			if err != nil {
				t.Fatalf("couldn't add items to pq")
			}

			req := &logical.Request{
				Operation: logical.ReadOperation,
				Storage:   config.StorageView,
				Data: map[string]interface{}{
					"name": c.role,
				},
				Path: formatRoleStoragePath(c.role),
			}

			r, err := b.pathStaticRolesDelete(bgCTX, req, staticRoleFieldData(req.Data))
			if err != nil {
				t.Fatal(err)
			}
			if r != nil {
				t.Fatal("response wasn't nil, but it should have been")
			}

			// Deleting an existing role must empty both storage and the rotation
			// queue; deleting a missing role must leave both untouched.
			l, err = config.StorageView.List(bgCTX, "")
			if err != nil {
				t.Fatal(err)
			}
			if c.found && len(l) != 0 {
				t.Fatal("size of role storage is non zero after delete")
			} else if !c.found && len(l) != 1 {
				t.Fatal("size of role storage changed after what should have been no deletion")
			}

			if c.found && b.credRotationQueue.Len() != 0 {
				t.Fatal("size of queue is non-zero after delete")
			} else if !c.found && b.credRotationQueue.Len() != 1 {
				t.Fatal("size of queue changed after what should have been no deletion")
			}
		})
	}
}
|
||||
|
||||
func staticRoleFieldData(data map[string]interface{}) *framework.FieldData {
|
||||
schema := map[string]*framework.FieldSchema{
|
||||
paramRoleName: {
|
||||
Type: framework.TypeString,
|
||||
Description: descRoleName,
|
||||
},
|
||||
paramUsername: {
|
||||
Type: framework.TypeString,
|
||||
Description: descUsername,
|
||||
},
|
||||
paramRotationPeriod: {
|
||||
Type: framework.TypeDurationSecond,
|
||||
Description: descRotationPeriod,
|
||||
},
|
||||
}
|
||||
|
||||
return &framework.FieldData{
|
||||
Raw: data,
|
||||
Schema: schema,
|
||||
}
|
||||
}
|
@ -1,314 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/go-secure-stdlib/strutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
// pathUser returns the framework.Path for generating credentials from a
// Vault role, served under both "creds/<name>" and "sts/<name>". Read and
// update both dispatch to pathCredsRead; update allows passing parameters
// (role_arn, ttl, role_session_name) in the request body.
func pathUser(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"),

		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixAWS,
			OperationVerb:   "generate",
		},

		Fields: map[string]*framework.FieldSchema{
			"name": {
				Type:        framework.TypeString,
				Description: "Name of the role",
			},
			"role_arn": {
				Type:        framework.TypeString,
				Description: "ARN of role to assume when credential_type is " + assumedRoleCred,
			},
			"ttl": {
				Type:        framework.TypeDurationSecond,
				Description: "Lifetime of the returned credentials in seconds",
				Default:     3600,
			},
			"role_session_name": {
				Type:        framework.TypeString,
				Description: "Session name to use when assuming role. Max chars: 64",
			},
		},

		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathCredsRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "credentials|sts-credentials",
				},
			},
			logical.UpdateOperation: &framework.PathOperation{
				Callback: b.pathCredsRead,
				DisplayAttrs: &framework.DisplayAttributes{
					OperationSuffix: "credentials-with-parameters|sts-credentials-with-parameters",
				},
			},
		},

		HelpSynopsis:    pathUserHelpSyn,
		HelpDescription: pathUserHelpDesc,
	}
}
|
||||
|
||||
// pathCredsRead handles read/update requests on "creds/<name>" and
// "sts/<name>": it loads the Vault role, resolves the effective TTL and
// credential type, and issues the corresponding AWS credentials (IAM user
// keys, assumed-role STS creds, or a federation token).
func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	roleName := d.Get("name").(string)

	// Read the policy
	role, err := b.roleRead(ctx, req.Storage, roleName, true)
	if err != nil {
		return nil, fmt.Errorf("error retrieving role: %w", err)
	}
	if role == nil {
		return logical.ErrorResponse(fmt.Sprintf(
			"Role %q not found", roleName)), nil
	}

	// Resolve the TTL: an explicit request value wins, then the role's
	// default STS TTL, then the field's default.
	var ttl int64
	ttlRaw, ok := d.GetOk("ttl")
	switch {
	case ok:
		ttl = int64(ttlRaw.(int))
	case role.DefaultSTSTTL > 0:
		ttl = int64(role.DefaultSTSTTL.Seconds())
	default:
		ttl = int64(d.Get("ttl").(int))
	}

	// Cap the TTL at the role's max STS TTL if set, otherwise at the
	// mount's max lease TTL.
	var maxTTL int64
	if role.MaxSTSTTL > 0 {
		maxTTL = int64(role.MaxSTSTTL.Seconds())
	} else {
		maxTTL = int64(b.System().MaxLeaseTTL().Seconds())
	}

	if ttl > maxTTL {
		ttl = maxTTL
	}

	roleArn := d.Get("role_arn").(string)
	roleSessionName := d.Get("role_session_name").(string)

	var credentialType string
	switch {
	case len(role.CredentialTypes) == 1:
		credentialType = role.CredentialTypes[0]
	// There is only one way for the CredentialTypes to contain more than one entry, and that's an upgrade path
	// where it contains iamUserCred and federationTokenCred
	// This ambiguity can be resolved based on req.Path, so resolve it assuming CredentialTypes only has those values
	case len(role.CredentialTypes) > 1:
		if strings.HasPrefix(req.Path, "creds") {
			credentialType = iamUserCred
		} else {
			credentialType = federationTokenCred
		}
		// sanity check on the assumption above
		if !strutil.StrListContains(role.CredentialTypes, credentialType) {
			return logical.ErrorResponse(fmt.Sprintf("requested credential type %q not in allowed credential types %#v", credentialType, role.CredentialTypes)), nil
		}
	}

	// creds requested through the sts path shouldn't be allowed to get iamUserCred type creds
	// when the role is created from legacy data because they might have more privileges in AWS.
	// See https://github.com/hashicorp/vault/issues/4229#issuecomment-380316788 for details.
	if role.ProhibitFlexibleCredPath {
		if credentialType == iamUserCred && strings.HasPrefix(req.Path, "sts") {
			return logical.ErrorResponse(fmt.Sprintf("attempted to retrieve %s credentials through the sts path; this is not allowed for legacy roles", iamUserCred)), nil
		}
		if credentialType != iamUserCred && strings.HasPrefix(req.Path, "creds") {
			return logical.ErrorResponse(fmt.Sprintf("attempted to retrieve %s credentials through the creds path; this is not allowed for legacy roles", credentialType)), nil
		}
	}

	// Dispatch to the issuer for the resolved credential type.
	switch credentialType {
	case iamUserCred:
		return b.secretAccessKeysCreate(ctx, req.Storage, req.DisplayName, roleName, role)
	case assumedRoleCred:
		// Pick the role ARN: the caller may supply one only if it's in the
		// role's allow-list; otherwise the role must define exactly one.
		switch {
		case roleArn == "":
			if len(role.RoleArns) != 1 {
				return logical.ErrorResponse("did not supply a role_arn parameter and unable to determine one"), nil
			}
			roleArn = role.RoleArns[0]
		case !strutil.StrListContains(role.RoleArns, roleArn):
			return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil
		}
		return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName)
	case federationTokenCred:
		return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl)
	default:
		return logical.ErrorResponse(fmt.Sprintf("unknown credential_type: %q", credentialType)), nil
	}
}
|
||||
|
||||
// pathUserRollback is the WAL rollback handler for "user" entries. It tears
// down an IAM user that Vault created (or may have created): it revokes the
// user's access keys, detaches managed policies, deletes inline policies,
// removes the user from all groups, and finally deletes the user itself —
// in that order, since AWS refuses to delete a user with attachments.
func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _kind string, data interface{}) error {
	var entry walUser
	if err := mapstructure.Decode(data, &entry); err != nil {
		return err
	}
	username := entry.UserName

	// Get the client
	client, err := b.clientIAM(ctx, req.Storage)
	if err != nil {
		return err
	}

	// Get information about this user
	groupsResp, err := client.ListGroupsForUserWithContext(ctx, &iam.ListGroupsForUserInput{
		UserName: aws.String(username),
		MaxItems: aws.Int64(1000),
	})
	if err != nil {
		// This isn't guaranteed to be perfect; for example, an IAM user
		// might have gotten put into the WAL but then the IAM user creation
		// failed (e.g., Vault didn't have permissions) and then the WAL
		// deletion failed as well. Then, if Vault doesn't have access to
		// call iam:ListGroupsForUser, AWS will return an access denied error
		// and the WAL will never get cleaned up. But this is better than
		// just having Vault "forget" about a user it actually created.
		//
		// BEWARE a potential race condition -- where this is called
		// immediately after a user is created. AWS eventual consistency
		// might say the user doesn't exist when the user does in fact
		// exist, and this could cause Vault to forget about the user.
		// This won't happen if the user creation fails (because the WAL
		// minimum age is 5 minutes, and AWS eventual consistency is, in
		// practice, never that long), but it could happen if a lease holder
		// asks immediately after getting a user to revoke the lease, causing
		// Vault to leak the secret, which would be a Very Bad Thing to allow.
		// So we make sure that, if there's an associated lease, it must be at
		// least 5 minutes old as well.
		if aerr, ok := err.(awserr.Error); ok {
			acceptMissingIamUsers := false
			if req.Secret == nil || time.Since(req.Secret.IssueTime) > time.Duration(minAwsUserRollbackAge) {
				// WAL rollback
				acceptMissingIamUsers = true
			}
			if aerr.Code() == iam.ErrCodeNoSuchEntityException && acceptMissingIamUsers {
				return nil
			}
		}
		return err
	}
	groups := groupsResp.Groups

	// Inline (user) policies
	policiesResp, err := client.ListUserPoliciesWithContext(ctx, &iam.ListUserPoliciesInput{
		UserName: aws.String(username),
		MaxItems: aws.Int64(1000),
	})
	if err != nil {
		return err
	}
	policies := policiesResp.PolicyNames

	// Attached managed policies
	manPoliciesResp, err := client.ListAttachedUserPoliciesWithContext(ctx, &iam.ListAttachedUserPoliciesInput{
		UserName: aws.String(username),
		MaxItems: aws.Int64(1000),
	})
	if err != nil {
		return err
	}
	manPolicies := manPoliciesResp.AttachedPolicies

	keysResp, err := client.ListAccessKeysWithContext(ctx, &iam.ListAccessKeysInput{
		UserName: aws.String(username),
		MaxItems: aws.Int64(1000),
	})
	if err != nil {
		return err
	}
	keys := keysResp.AccessKeyMetadata

	// Revoke all keys
	for _, k := range keys {
		_, err = client.DeleteAccessKeyWithContext(ctx, &iam.DeleteAccessKeyInput{
			AccessKeyId: k.AccessKeyId,
			UserName:    aws.String(username),
		})
		if err != nil {
			return err
		}
	}

	// Detach managed policies
	for _, p := range manPolicies {
		_, err = client.DetachUserPolicyWithContext(ctx, &iam.DetachUserPolicyInput{
			UserName:  aws.String(username),
			PolicyArn: p.PolicyArn,
		})
		if err != nil {
			return err
		}
	}

	// Delete any inline (user) policies
	for _, p := range policies {
		_, err = client.DeleteUserPolicyWithContext(ctx, &iam.DeleteUserPolicyInput{
			UserName:   aws.String(username),
			PolicyName: p,
		})
		if err != nil {
			return err
		}
	}

	// Remove the user from all their groups
	for _, g := range groups {
		_, err = client.RemoveUserFromGroupWithContext(ctx, &iam.RemoveUserFromGroupInput{
			GroupName: g.GroupName,
			UserName:  aws.String(username),
		})
		if err != nil {
			return err
		}
	}

	// Delete the user
	_, err = client.DeleteUserWithContext(ctx, &iam.DeleteUserInput{
		UserName: aws.String(username),
	})
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// walUser is the write-ahead-log entry recorded when an IAM user is created,
// so the user can be cleaned up by pathUserRollback if the operation fails.
type walUser struct {
	UserName string
}
|
||||
|
||||
// pathUserHelpSyn is the short help synopsis for the creds/sts paths.
const pathUserHelpSyn = `
Generate AWS credentials from a specific Vault role.
`

// pathUserHelpDesc is the long-form help for the creds/sts paths.
const pathUserHelpDesc = `
This path will generate new, never before used AWS credentials for
accessing AWS. The IAM policy used to back this key pair will be
the "name" parameter. For example, if this backend is mounted at "aws",
then "aws/creds/deploy" would generate access keys for the "deploy" role.

The access keys will have a lease associated with them. The access keys
can be revoked by using the lease ID when using the iam_user credential type.
When using AWS STS credential types (assumed_role or federation_token),
revoking the lease does not revoke the access keys.
`
|
@ -1,30 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
)
|
||||
|
||||
func (b *backend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
|
||||
walRollbackMap := map[string]framework.WALRollbackFunc{
|
||||
"user": b.pathUserRollback,
|
||||
}
|
||||
|
||||
if !b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, ok := walRollbackMap[kind]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown type to rollback")
|
||||
}
|
||||
|
||||
return f(ctx, req, kind, data)
|
||||
}
|
@ -1,191 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/sdk/queue"
|
||||
)
|
||||
|
||||
// rotateExpiredStaticCreds will pop expired credentials (credentials whose priority
|
||||
// represents a time before the present), rotate the associated credential, and push
|
||||
// them back onto the queue with the new priority.
|
||||
func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Request) error {
|
||||
var errs *multierror.Error
|
||||
|
||||
for {
|
||||
keepGoing, err := b.rotateCredential(ctx, req.Storage)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
}
|
||||
if !keepGoing {
|
||||
if errs.ErrorOrNil() != nil {
|
||||
return fmt.Errorf("error(s) occurred while rotating expired static credentials: %w", errs)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// rotateCredential pops an element from the priority queue, and if it is expired, rotate and re-push.
|
||||
// If a cred was rotated, it returns true, otherwise false.
|
||||
func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (rotated bool, err error) {
|
||||
// If queue is empty or first item does not need a rotation (priority is next rotation timestamp) there is nothing to do
|
||||
item, err := b.credRotationQueue.Pop()
|
||||
if err != nil {
|
||||
// the queue is just empty, which is fine.
|
||||
if err == queue.ErrEmpty {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to pop from queue for role %q: %w", item.Key, err)
|
||||
}
|
||||
if item.Priority > time.Now().Unix() {
|
||||
// no rotation required
|
||||
// push the item back into priority queue
|
||||
err = b.credRotationQueue.Push(item)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", item.Key, err)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
cfg := item.Value.(staticRoleEntry)
|
||||
|
||||
err = b.createCredential(ctx, storage, cfg, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// set new priority and re-queue
|
||||
item.Priority = time.Now().Add(cfg.RotationPeriod).Unix()
|
||||
err = b.credRotationQueue.Push(item)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// createCredential will create a new iam credential, deleting the oldest one if necessary.
//
// It verifies the IAM user still matches the configured username/ID, trims the
// user down to below the key quota (deleting the oldest key), creates a fresh
// key pair, and persists it to storage under the role's creds path. When
// shouldLockStorage is true the storage write is serialized on b.roleMutex.
func (b *backend) createCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
	iamClient, err := b.clientIAM(ctx, storage)
	if err != nil {
		return fmt.Errorf("unable to get the AWS IAM client: %w", err)
	}

	// IAM users can have a most 2 sets of keys at a time.
	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html)
	// Ideally we would get this value through an api check, but I'm not sure one exists.
	const maxAllowedKeys = 2

	err = b.validateIAMUserExists(ctx, storage, &cfg, false)
	if err != nil {
		return fmt.Errorf("iam user didn't exist, or username/userid didn't match: %w", err)
	}

	accessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{
		UserName: aws.String(cfg.Username),
	})
	if err != nil {
		return fmt.Errorf("unable to list existing access keys for IAM user %q: %w", cfg.Username, err)
	}

	// If we have the maximum number of keys, we have to delete one to make another (so we can get the credentials).
	// We'll delete the oldest one.
	//
	// Since this check relies on a pre-coded maximum, it's a bit fragile. If the number goes up, we risk deleting
	// a key when we didn't need to. If this number goes down, we'll start throwing errors because we think we're
	// allowed to create a key and aren't. In either case, adjusting the constant should be sufficient to fix things.
	if len(accessKeys.AccessKeyMetadata) >= maxAllowedKeys {
		// Linear scan for the key with the earliest CreateDate.
		oldestKey := accessKeys.AccessKeyMetadata[0]

		for i := 1; i < len(accessKeys.AccessKeyMetadata); i++ {
			if accessKeys.AccessKeyMetadata[i].CreateDate.Before(*oldestKey.CreateDate) {
				oldestKey = accessKeys.AccessKeyMetadata[i]
			}
		}

		_, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
			AccessKeyId: oldestKey.AccessKeyId,
			UserName:    oldestKey.UserName,
		})
		if err != nil {
			return fmt.Errorf("unable to delete oldest access keys for user %q: %w", cfg.Username, err)
		}
	}

	// Create new set of keys
	out, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{
		UserName: aws.String(cfg.Username),
	})
	if err != nil {
		return fmt.Errorf("unable to create new access keys for user %q: %w", cfg.Username, err)
	}

	// Persist new keys
	entry, err := logical.StorageEntryJSON(formatCredsStoragePath(cfg.Name), &awsCredentials{
		AccessKeyID:     *out.AccessKey.AccessKeyId,
		SecretAccessKey: *out.AccessKey.SecretAccessKey,
	})
	if err != nil {
		return fmt.Errorf("failed to marshal object to JSON: %w", err)
	}
	// NOTE(review): the lock guards only the storage write, not the AWS calls
	// above — presumably callers passing shouldLockStorage=false already hold
	// b.roleMutex; confirm against callers.
	if shouldLockStorage {
		b.roleMutex.Lock()
		defer b.roleMutex.Unlock()
	}
	err = storage.Put(ctx, entry)
	if err != nil {
		return fmt.Errorf("failed to save object in storage: %w", err)
	}

	return nil
}
|
||||
|
||||
// delete credential will remove the credential associated with the role from storage.
//
// It first removes Vault's stored copy of the credential, then deletes the
// corresponding access key from IAM. A missing storage entry is treated as
// already-deleted and is not an error.
func (b *backend) deleteCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
	// synchronize storage access if we didn't in the caller.
	if shouldLockStorage {
		b.roleMutex.Lock()
		defer b.roleMutex.Unlock()
	}

	key, err := storage.Get(ctx, formatCredsStoragePath(cfg.Name))
	if err != nil {
		return fmt.Errorf("couldn't find key in storage: %w", err)
	}
	// no entry, so i guess we deleted it already
	if key == nil {
		return nil
	}
	var creds awsCredentials
	err = key.DecodeJSON(&creds)
	if err != nil {
		return fmt.Errorf("couldn't decode storage entry to a valid credential: %w", err)
	}

	// Drop Vault's copy before touching IAM so a partial failure never leaves
	// a stored credential that no longer exists on the AWS side.
	err = storage.Delete(ctx, formatCredsStoragePath(cfg.Name))
	if err != nil {
		return fmt.Errorf("couldn't delete from storage: %w", err)
	}

	// because we have the information, this is the one we created, so it's safe for us to delete.
	_, err = b.iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
		AccessKeyId: aws.String(creds.AccessKeyID),
		UserName:    aws.String(cfg.Username),
	})
	if err != nil {
		return fmt.Errorf("couldn't delete from IAM: %w", err)
	}

	return nil
}
|
@ -1,351 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/iam/iamiface"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/sdk/queue"
|
||||
)
|
||||
|
||||
// TestRotation verifies that the rotation code and priority queue correctly selects and rotates credentials
// for static secrets.
//
// Each subtest seeds the backend with pre-aged credentials, swaps in a mock
// IAM client that returns a distinguishable "new" secret, runs the rotation
// sweep, and then checks which stored credentials changed.
func TestRotation(t *testing.T) {
	bgCTX := context.Background()

	type credToInsert struct {
		config staticRoleEntry // role configuration from a normal createRole request
		age    time.Duration   // how old the cred should be - if this is longer than the config.RotationPeriod,
		// the cred is 'pre-expired'

		changed bool // whether we expect the cred to change - this is technically redundant to a comparison between
		// rotationPeriod and age.
	}

	// due to a limitation with the mockIAM implementation, any cred you want to rotate must have
	// username jane-doe and userid unique-id, since we can only pre-can one exact response to GetUser
	cases := []struct {
		name  string
		creds []credToInsert
	}{
		{
			name: "refresh one",
			creds: []credToInsert{
				{
					config: staticRoleEntry{
						Name:           "test",
						Username:       "jane-doe",
						ID:             "unique-id",
						RotationPeriod: 2 * time.Second,
					},
					age:     5 * time.Second,
					changed: true,
				},
			},
		},
		{
			name: "refresh none",
			creds: []credToInsert{
				{
					config: staticRoleEntry{
						Name:           "test",
						Username:       "jane-doe",
						ID:             "unique-id",
						RotationPeriod: 1 * time.Minute,
					},
					age:     5 * time.Second,
					changed: false,
				},
			},
		},
		{
			name: "refresh one of two",
			creds: []credToInsert{
				{
					config: staticRoleEntry{
						Name:           "toast",
						Username:       "john-doe",
						ID:             "other-id",
						RotationPeriod: 1 * time.Minute,
					},
					age:     5 * time.Second,
					changed: false,
				},
				{
					config: staticRoleEntry{
						Name:           "test",
						Username:       "jane-doe",
						ID:             "unique-id",
						RotationPeriod: 1 * time.Second,
					},
					age:     5 * time.Second,
					changed: true,
				},
			},
		},
		{
			name:  "no creds to rotate",
			creds: []credToInsert{},
		},
	}

	ak := "long-access-key-id"
	oldSecret := "abcdefghijklmnopqrstuvwxyz"
	newSecret := "zyxwvutsrqponmlkjihgfedcba"

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			config := logical.TestBackendConfig()
			config.StorageView = &logical.InmemStorage{}

			b := Backend(config)

			// insert all our creds
			for i, cred := range c.creds {

				// all the creds will be the same for every user, but that's okay
				// since what we care about is whether they changed on a single-user basis.
				miam, err := awsutil.NewMockIAM(
					// blank list for existing user
					awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
						AccessKeyMetadata: []*iam.AccessKeyMetadata{
							{},
						},
					}),
					// initial key to store
					awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
						AccessKey: &iam.AccessKey{
							AccessKeyId:     aws.String(ak),
							SecretAccessKey: aws.String(oldSecret),
						},
					}),
					awsutil.WithGetUserOutput(&iam.GetUserOutput{
						User: &iam.User{
							UserId:   aws.String(cred.config.ID),
							UserName: aws.String(cred.config.Username),
						},
					}),
				)(nil)
				if err != nil {
					t.Fatalf("couldn't initialze mock IAM handler: %s", err)
				}
				b.iamClient = miam

				err = b.createCredential(bgCTX, config.StorageView, cred.config, true)
				if err != nil {
					t.Fatalf("couldn't insert credential %d: %s", i, err)
				}

				// Back-date the queue priority so the cred appears `age` old.
				item := &queue.Item{
					Key:      cred.config.Name,
					Value:    cred.config,
					Priority: time.Now().Add(-1 * cred.age).Add(cred.config.RotationPeriod).Unix(),
				}
				err = b.credRotationQueue.Push(item)
				if err != nil {
					t.Fatalf("couldn't push item onto queue: %s", err)
				}
			}

			// update aws responses, same argument for why it's okay every cred will be the same
			miam, err := awsutil.NewMockIAM(
				// old key
				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
					AccessKeyMetadata: []*iam.AccessKeyMetadata{
						{
							AccessKeyId: aws.String(ak),
						},
					},
				}),
				// new key
				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
					AccessKey: &iam.AccessKey{
						AccessKeyId:     aws.String(ak),
						SecretAccessKey: aws.String(newSecret),
					},
				}),
				awsutil.WithGetUserOutput(&iam.GetUserOutput{
					User: &iam.User{
						UserId:   aws.String("unique-id"),
						UserName: aws.String("jane-doe"),
					},
				}),
			)(nil)
			if err != nil {
				t.Fatalf("couldn't initialze mock IAM handler: %s", err)
			}
			b.iamClient = miam

			// Run the rotation sweep under test.
			req := &logical.Request{
				Storage: config.StorageView,
			}
			err = b.rotateExpiredStaticCreds(bgCTX, req)
			if err != nil {
				t.Fatalf("got an error rotating credentials: %s", err)
			}

			// check our credentials
			for i, cred := range c.creds {
				entry, err := config.StorageView.Get(bgCTX, formatCredsStoragePath(cred.config.Name))
				if err != nil {
					t.Fatalf("got an error retrieving credentials %d", i)
				}
				var out awsCredentials
				err = entry.DecodeJSON(&out)
				if err != nil {
					t.Fatalf("could not unmarshal storage view entry for cred %d to an aws credential: %s", i, err)
				}

				if cred.changed && out.SecretAccessKey != newSecret {
					t.Fatalf("expected the key for cred %d to have changed, but it hasn't", i)
				} else if !cred.changed && out.SecretAccessKey != oldSecret {
					t.Fatalf("expected the key for cred %d to have stayed the same, but it changed", i)
				}
			}
		})
	}
}
|
||||
|
||||
// fakeIAM wraps an iamiface.IAMAPI and records every DeleteAccessKey request
// it receives, so tests can assert which keys were deleted.
type fakeIAM struct {
	iamiface.IAMAPI
	// delReqs collects, in call order, the inputs of all DeleteAccessKey calls.
	delReqs []*iam.DeleteAccessKeyInput
}
|
||||
|
||||
// DeleteAccessKey records the deletion request before delegating to the
// embedded IAMAPI implementation.
func (f *fakeIAM) DeleteAccessKey(r *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) {
	f.delReqs = append(f.delReqs, r)
	return f.IAMAPI.DeleteAccessKey(r)
}
|
||||
|
||||
// TestCreateCredential verifies that credential creation firstly only deletes credentials if it needs to (i.e., two
// or more credentials on IAM), and secondly correctly deletes the oldest one.
func TestCreateCredential(t *testing.T) {
	cases := []struct {
		name       string
		username   string
		id         string
		deletedKey string // access key ID expected to be deleted; "" means no delete expected
		opts       []awsutil.MockIAMOption
	}{
		{
			name:     "zero keys",
			username: "jane-doe",
			id:       "unique-id",
			opts: []awsutil.MockIAMOption{
				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
					AccessKeyMetadata: []*iam.AccessKeyMetadata{},
				}),
				// delete should _not_ be called
				awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")),
				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
					AccessKey: &iam.AccessKey{
						AccessKeyId:     aws.String("key"),
						SecretAccessKey: aws.String("itsasecret"),
					},
				}),
				awsutil.WithGetUserOutput(&iam.GetUserOutput{
					User: &iam.User{
						UserId:   aws.String("unique-id"),
						UserName: aws.String("jane-doe"),
					},
				}),
			},
		},
		{
			name:     "one key",
			username: "jane-doe",
			id:       "unique-id",
			opts: []awsutil.MockIAMOption{
				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
					AccessKeyMetadata: []*iam.AccessKeyMetadata{
						{AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Now())},
					},
				}),
				// delete should _not_ be called
				awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")),
				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
					AccessKey: &iam.AccessKey{
						AccessKeyId:     aws.String("key"),
						SecretAccessKey: aws.String("itsasecret"),
					},
				}),
				awsutil.WithGetUserOutput(&iam.GetUserOutput{
					User: &iam.User{
						UserId:   aws.String("unique-id"),
						UserName: aws.String("jane-doe"),
					},
				}),
			},
		},
		{
			name:       "two keys",
			username:   "jane-doe",
			id:         "unique-id",
			deletedKey: "foo",
			opts: []awsutil.MockIAMOption{
				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
					AccessKeyMetadata: []*iam.AccessKeyMetadata{
						// "foo" has the zero CreateDate, so it is the oldest.
						{AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Time{})},
						{AccessKeyId: aws.String("bar"), CreateDate: aws.Time(time.Now())},
					},
				}),
				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
					AccessKey: &iam.AccessKey{
						AccessKeyId:     aws.String("key"),
						SecretAccessKey: aws.String("itsasecret"),
					},
				}),
				awsutil.WithGetUserOutput(&iam.GetUserOutput{
					User: &iam.User{
						UserId:   aws.String("unique-id"),
						UserName: aws.String("jane-doe"),
					},
				}),
			},
		},
	}

	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			miam, err := awsutil.NewMockIAM(
				c.opts...,
			)(nil)
			if err != nil {
				t.Fatal(err)
			}
			// Wrap the mock so DeleteAccessKey calls can be observed.
			fiam := &fakeIAM{
				IAMAPI: miam,
			}

			b := Backend(config)
			b.iamClient = fiam

			err = b.createCredential(context.Background(), config.StorageView, staticRoleEntry{Username: c.username, ID: c.id}, true)
			if err != nil {
				t.Fatalf("got an error we didn't expect: %q", err)
			}

			if c.deletedKey != "" {
				if len(fiam.delReqs) != 1 {
					t.Fatalf("called the wrong number of deletes (called %d deletes)", len(fiam.delReqs))
				}
				actualKey := *fiam.delReqs[0].AccessKeyId
				if c.deletedKey != actualKey {
					t.Fatalf("we deleted the wrong key: %q instead of %q", actualKey, c.deletedKey)
				}
			}
		})
	}
}
|
@ -1,525 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/framework"
|
||||
"github.com/hashicorp/vault/sdk/helper/template"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/iam"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
"github.com/hashicorp/errwrap"
|
||||
)
|
||||
|
||||
const (
	// secretAccessKeyType is the framework.Secret type name for AWS access
	// keys issued by this backend.
	secretAccessKeyType = "access_keys"
	// storageKey is the storage path of the backend's root configuration.
	storageKey = "config/root"
)
|
||||
|
||||
// secretAccessKeys returns the framework.Secret definition for AWS access
// keys issued by this backend, wiring up its renew and revoke handlers.
func secretAccessKeys(b *backend) *framework.Secret {
	return &framework.Secret{
		Type: secretAccessKeyType,
		Fields: map[string]*framework.FieldSchema{
			"access_key": {
				Type:        framework.TypeString,
				Description: "Access Key",
			},

			"secret_key": {
				Type:        framework.TypeString,
				Description: "Secret Key",
			},
			"security_token": {
				Type:        framework.TypeString,
				Description: "Security Token",
			},
		},

		Renew:  b.secretAccessKeysRenew,
		Revoke: b.secretAccessKeysRevoke,
	}
}
|
||||
|
||||
func genUsername(displayName, policyName, userType, usernameTemplate string) (ret string, err error) {
|
||||
switch userType {
|
||||
case "iam_user", "assume_role":
|
||||
// IAM users are capped at 64 chars
|
||||
up, err := template.NewTemplate(template.Template(usernameTemplate))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to initialize username template: %w", err)
|
||||
}
|
||||
|
||||
um := UsernameMetadata{
|
||||
Type: "IAM",
|
||||
DisplayName: normalizeDisplayName(displayName),
|
||||
PolicyName: normalizeDisplayName(policyName),
|
||||
}
|
||||
|
||||
ret, err = up.Generate(um)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate username: %w", err)
|
||||
}
|
||||
// To prevent a custom template from exceeding IAM length limits
|
||||
if len(ret) > 64 {
|
||||
return "", fmt.Errorf("the username generated by the template exceeds the IAM username length limits of 64 chars")
|
||||
}
|
||||
case "sts":
|
||||
up, err := template.NewTemplate(template.Template(usernameTemplate))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to initialize username template: %w", err)
|
||||
}
|
||||
|
||||
um := UsernameMetadata{
|
||||
Type: "STS",
|
||||
}
|
||||
ret, err = up.Generate(um)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate username: %w", err)
|
||||
}
|
||||
// To prevent a custom template from exceeding STS length limits
|
||||
if len(ret) > 32 {
|
||||
return "", fmt.Errorf("the username generated by the template exceeds the STS username length limits of 32 chars")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
|
||||
displayName, policyName, policy string, policyARNs []string,
|
||||
iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error,
|
||||
) {
|
||||
groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
if groupPolicies != nil {
|
||||
groupPolicies = append(groupPolicies, policy)
|
||||
policy, err = combinePolicyDocuments(groupPolicies...)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
}
|
||||
if len(groupPolicyARNs) > 0 {
|
||||
policyARNs = append(policyARNs, groupPolicyARNs...)
|
||||
}
|
||||
|
||||
stsClient, err := b.clientSTS(ctx, s)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
|
||||
config, err := readConfig(ctx, s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read configuration: %w", err)
|
||||
}
|
||||
|
||||
// Set as defaultUsernameTemplate if not provided
|
||||
usernameTemplate := config.UsernameTemplate
|
||||
if usernameTemplate == "" {
|
||||
usernameTemplate = defaultUserNameTemplate
|
||||
}
|
||||
|
||||
username, usernameError := genUsername(displayName, policyName, "sts", usernameTemplate)
|
||||
// Send a 400 to Framework.OperationFunc Handler
|
||||
if usernameError != nil {
|
||||
return nil, usernameError
|
||||
}
|
||||
|
||||
getTokenInput := &sts.GetFederationTokenInput{
|
||||
Name: aws.String(username),
|
||||
DurationSeconds: &lifeTimeInSeconds,
|
||||
}
|
||||
if len(policy) > 0 {
|
||||
getTokenInput.Policy = aws.String(policy)
|
||||
}
|
||||
if len(policyARNs) > 0 {
|
||||
getTokenInput.PolicyArns = convertPolicyARNs(policyARNs)
|
||||
}
|
||||
|
||||
// If neither a policy document nor policy ARNs are specified, then GetFederationToken will
|
||||
// return credentials equivalent to that of the Vault server itself. We probably don't want
|
||||
// that by default; the behavior can be explicitly opted in to by associating the Vault role
|
||||
// with a policy ARN or document that allows the appropriate permissions.
|
||||
if policy == "" && len(policyARNs) == 0 {
|
||||
return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil
|
||||
}
|
||||
|
||||
tokenResp, err := stsClient.GetFederationTokenWithContext(ctx, getTokenInput)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err)
|
||||
}
|
||||
|
||||
// While STS credentials cannot be revoked/renewed, we will still create a lease since users are
|
||||
// relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually.
|
||||
//
|
||||
ttl := tokenResp.Credentials.Expiration.Sub(time.Now())
|
||||
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
|
||||
"access_key": *tokenResp.Credentials.AccessKeyId,
|
||||
"secret_key": *tokenResp.Credentials.SecretAccessKey,
|
||||
"security_token": *tokenResp.Credentials.SessionToken,
|
||||
"ttl": uint64(ttl.Seconds()),
|
||||
}, map[string]interface{}{
|
||||
"username": username,
|
||||
"policy": policy,
|
||||
"is_sts": true,
|
||||
})
|
||||
|
||||
// Set the secret TTL to appropriately match the expiration of the token
|
||||
resp.Secret.TTL = ttl
|
||||
|
||||
// STS are purposefully short-lived and aren't renewable
|
||||
resp.Secret.Renewable = false
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
|
||||
displayName, roleName, roleArn, policy string, policyARNs []string,
|
||||
iamGroups []string, lifeTimeInSeconds int64, roleSessionName string) (*logical.Response, error,
|
||||
) {
|
||||
// grab any IAM group policies associated with the vault role, both inline
|
||||
// and managed
|
||||
groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
if len(groupPolicies) > 0 {
|
||||
groupPolicies = append(groupPolicies, policy)
|
||||
policy, err = combinePolicyDocuments(groupPolicies...)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
}
|
||||
if len(groupPolicyARNs) > 0 {
|
||||
policyARNs = append(policyARNs, groupPolicyARNs...)
|
||||
}
|
||||
|
||||
stsClient, err := b.clientSTS(ctx, s)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
|
||||
config, err := readConfig(ctx, s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read configuration: %w", err)
|
||||
}
|
||||
|
||||
// Set as defaultUsernameTemplate if not provided
|
||||
usernameTemplate := config.UsernameTemplate
|
||||
if usernameTemplate == "" {
|
||||
usernameTemplate = defaultUserNameTemplate
|
||||
}
|
||||
|
||||
var roleSessionNameError error
|
||||
if roleSessionName == "" {
|
||||
roleSessionName, roleSessionNameError = genUsername(displayName, roleName, "assume_role", usernameTemplate)
|
||||
// Send a 400 to Framework.OperationFunc Handler
|
||||
if roleSessionNameError != nil {
|
||||
return nil, roleSessionNameError
|
||||
}
|
||||
} else {
|
||||
roleSessionName = normalizeDisplayName(roleSessionName)
|
||||
}
|
||||
|
||||
assumeRoleInput := &sts.AssumeRoleInput{
|
||||
RoleSessionName: aws.String(roleSessionName),
|
||||
RoleArn: aws.String(roleArn),
|
||||
DurationSeconds: &lifeTimeInSeconds,
|
||||
}
|
||||
if policy != "" {
|
||||
assumeRoleInput.SetPolicy(policy)
|
||||
}
|
||||
if len(policyARNs) > 0 {
|
||||
assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs))
|
||||
}
|
||||
tokenResp, err := stsClient.AssumeRoleWithContext(ctx, assumeRoleInput)
|
||||
if err != nil {
|
||||
return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err)
|
||||
}
|
||||
|
||||
// While STS credentials cannot be revoked/renewed, we will still create a lease since users are
|
||||
// relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually.
|
||||
//
|
||||
ttl := tokenResp.Credentials.Expiration.Sub(time.Now())
|
||||
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
|
||||
"access_key": *tokenResp.Credentials.AccessKeyId,
|
||||
"secret_key": *tokenResp.Credentials.SecretAccessKey,
|
||||
"security_token": *tokenResp.Credentials.SessionToken,
|
||||
"arn": *tokenResp.AssumedRoleUser.Arn,
|
||||
"ttl": uint64(ttl.Seconds()),
|
||||
}, map[string]interface{}{
|
||||
"username": roleSessionName,
|
||||
"policy": roleArn,
|
||||
"is_sts": true,
|
||||
})
|
||||
|
||||
// Set the secret TTL to appropriately match the expiration of the token
|
||||
resp.Secret.TTL = ttl
|
||||
|
||||
// STS are purposefully short-lived and aren't renewable
|
||||
resp.Secret.Renewable = false
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func readConfig(ctx context.Context, storage logical.Storage) (rootConfig, error) {
|
||||
entry, err := storage.Get(ctx, storageKey)
|
||||
if err != nil {
|
||||
return rootConfig{}, err
|
||||
}
|
||||
if entry == nil {
|
||||
return rootConfig{}, nil
|
||||
}
|
||||
|
||||
var connConfig rootConfig
|
||||
if err := entry.DecodeJSON(&connConfig); err != nil {
|
||||
return rootConfig{}, err
|
||||
}
|
||||
return connConfig, nil
|
||||
}
|
||||
|
||||
// secretAccessKeysCreate provisions a new IAM user for the given role,
// attaches the role's policies/groups/tags, creates an access key pair, and
// returns it as a leased secret. A WAL entry is written before the user is
// created so a failed run can be rolled back by the framework.
func (b *backend) secretAccessKeysCreate(
	ctx context.Context,
	s logical.Storage,
	displayName, policyName string,
	role *awsRoleEntry,
) (*logical.Response, error) {
	iamClient, err := b.clientIAM(ctx, s)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil
	}

	config, err := readConfig(ctx, s)
	if err != nil {
		return nil, fmt.Errorf("unable to read configuration: %w", err)
	}

	// Set as defaultUsernameTemplate if not provided
	usernameTemplate := config.UsernameTemplate
	if usernameTemplate == "" {
		usernameTemplate = defaultUserNameTemplate
	}

	username, usernameError := genUsername(displayName, policyName, "iam_user", usernameTemplate)
	// Send a 400 to Framework.OperationFunc Handler
	if usernameError != nil {
		return nil, usernameError
	}

	// Write to the WAL that this user will be created. We do this before
	// the user is created because if switch the order then the WAL put
	// can fail, which would put us in an awkward position: we have a user
	// we need to rollback but can't put the WAL entry to do the rollback.
	walID, err := framework.PutWAL(ctx, s, "user", &walUser{
		UserName: username,
	})
	if err != nil {
		return nil, fmt.Errorf("error writing WAL entry: %w", err)
	}

	userPath := role.UserPath
	if userPath == "" {
		userPath = "/"
	}

	createUserRequest := &iam.CreateUserInput{
		UserName: aws.String(username),
		Path:     aws.String(userPath),
	}
	if role.PermissionsBoundaryARN != "" {
		createUserRequest.PermissionsBoundary = aws.String(role.PermissionsBoundaryARN)
	}

	// Create the user
	_, err = iamClient.CreateUserWithContext(ctx, createUserRequest)
	if err != nil {
		// User creation failed, so the WAL entry is no longer needed; if
		// removing it also fails, surface both errors.
		if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil {
			iamErr := fmt.Errorf("error creating IAM user: %w", err)
			return nil, errwrap.Wrap(fmt.Errorf("failed to delete WAL entry: %w", walErr), iamErr)
		}
		return logical.ErrorResponse("Error creating IAM user: %s", err), awsutil.CheckAWSError(err)
	}

	for _, arn := range role.PolicyArns {
		// Attach existing policy against user
		_, err = iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{
			UserName:  aws.String(username),
			PolicyArn: aws.String(arn),
		})
		if err != nil {
			return logical.ErrorResponse("Error attaching user policy: %s", err), awsutil.CheckAWSError(err)
		}

	}
	if role.PolicyDocument != "" {
		// Add new inline user policy against user
		_, err = iamClient.PutUserPolicyWithContext(ctx, &iam.PutUserPolicyInput{
			UserName:       aws.String(username),
			PolicyName:     aws.String(policyName),
			PolicyDocument: aws.String(role.PolicyDocument),
		})
		if err != nil {
			return logical.ErrorResponse("Error putting user policy: %s", err), awsutil.CheckAWSError(err)
		}
	}

	for _, group := range role.IAMGroups {
		// Add user to IAM groups
		_, err = iamClient.AddUserToGroupWithContext(ctx, &iam.AddUserToGroupInput{
			UserName:  aws.String(username),
			GroupName: aws.String(group),
		})
		if err != nil {
			return logical.ErrorResponse("Error adding user to group: %s", err), awsutil.CheckAWSError(err)
		}
	}

	var tags []*iam.Tag
	for key, value := range role.IAMTags {
		// This assignment needs to be done in order to create unique addresses for
		// these variables. Without doing so, all the tags will be copies of the last
		// tag listed in the role.
		k, v := key, value
		tags = append(tags, &iam.Tag{Key: &k, Value: &v})
	}

	if len(tags) > 0 {
		_, err = iamClient.TagUserWithContext(ctx, &iam.TagUserInput{
			Tags:     tags,
			UserName: &username,
		})
		if err != nil {
			return logical.ErrorResponse("Error adding tags to user: %s", err), awsutil.CheckAWSError(err)
		}
	}

	// Create the keys
	keyResp, err := iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{
		UserName: aws.String(username),
	})
	if err != nil {
		return logical.ErrorResponse("Error creating access keys: %s", err), awsutil.CheckAWSError(err)
	}

	// Remove the WAL entry, we succeeded! If we fail, we don't return
	// the secret because it'll get rolled back anyways, so we have to return
	// an error here.
	if err := framework.DeleteWAL(ctx, s, walID); err != nil {
		return nil, fmt.Errorf("failed to commit WAL entry: %w", err)
	}

	// Return the info!
	resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
		"access_key":     *keyResp.AccessKey.AccessKeyId,
		"secret_key":     *keyResp.AccessKey.SecretAccessKey,
		"security_token": nil,
	}, map[string]interface{}{
		"username": username,
		"policy":   role,
		"is_sts":   false,
	})

	// A lease-config read failure falls back to zero-value lease settings.
	lease, err := b.Lease(ctx, s)
	if err != nil || lease == nil {
		lease = &configLease{}
	}

	resp.Secret.TTL = lease.Lease
	resp.Secret.MaxTTL = lease.LeaseMax

	return resp, nil
}
|
||||
|
||||
func (b *backend) secretAccessKeysRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
// STS already has a lifetime, and we don't support renewing it
|
||||
isSTSRaw, ok := req.Secret.InternalData["is_sts"]
|
||||
if ok {
|
||||
isSTS, ok := isSTSRaw.(bool)
|
||||
if ok {
|
||||
if isSTS {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lease, err := b.Lease(ctx, req.Storage)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lease == nil {
|
||||
lease = &configLease{}
|
||||
}
|
||||
|
||||
resp := &logical.Response{Secret: req.Secret}
|
||||
resp.Secret.TTL = lease.Lease
|
||||
resp.Secret.MaxTTL = lease.LeaseMax
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (b *backend) secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
// STS cleans up after itself so we can skip this if is_sts internal data
|
||||
// element set to true. If is_sts is not set, assumes old version
|
||||
// and defaults to the IAM approach.
|
||||
isSTSRaw, ok := req.Secret.InternalData["is_sts"]
|
||||
if ok {
|
||||
isSTS, ok := isSTSRaw.(bool)
|
||||
if ok {
|
||||
if isSTS {
|
||||
return nil, nil
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("secret has is_sts but value could not be understood")
|
||||
}
|
||||
}
|
||||
|
||||
// Get the username from the internal data
|
||||
usernameRaw, ok := req.Secret.InternalData["username"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("secret is missing username internal data")
|
||||
}
|
||||
username, ok := usernameRaw.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("secret is missing username internal data")
|
||||
}
|
||||
|
||||
// Use the user rollback mechanism to delete this user
|
||||
err := b.pathUserRollback(ctx, req, "user", map[string]interface{}{
|
||||
"username": username,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// displayNameNormalizeRe matches every character that AWS IAM rejects in a
// user name (anything outside alphanumerics and +=,.@_-). Compiled once at
// package init instead of on every call, which the original did; this
// function sits on the credential-issuance hot path.
var displayNameNormalizeRe = regexp.MustCompile("[^a-zA-Z0-9+=,.@_-]")

// normalizeDisplayName replaces each character that AWS IAM does not permit
// in a user name with an underscore, so the result can be safely embedded in
// generated IAM user names.
func normalizeDisplayName(displayName string) string {
	return displayNameNormalizeRe.ReplaceAllString(displayName, "_")
}
|
||||
|
||||
func convertPolicyARNs(policyARNs []string) []*sts.PolicyDescriptorType {
|
||||
size := len(policyARNs)
|
||||
retval := make([]*sts.PolicyDescriptorType, size, size)
|
||||
for i, arn := range policyARNs {
|
||||
retval[i] = &sts.PolicyDescriptorType{
|
||||
Arn: aws.String(arn),
|
||||
}
|
||||
}
|
||||
return retval
|
||||
}
|
||||
|
||||
// UsernameMetadata is the data made available to the configured username
// template when rendering a generated AWS user or session name.
type UsernameMetadata struct {
	// Type distinguishes the credential flavor the name is generated for;
	// templates elsewhere in this package compare it against "IAM" and "STS".
	Type string
	// DisplayName is the normalized display name of the requesting entity.
	DisplayName string
	// PolicyName is the name of the Vault role the credentials are issued under.
	PolicyName string
}
|
@ -1,205 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNormalizeDisplayName_NormRequired(t *testing.T) {
|
||||
invalidNames := map[string]string{
|
||||
"^#$test name\nshould be normalized)(*": "___test_name_should_be_normalized___",
|
||||
"^#$test name1 should be normalized)(*": "___test_name1_should_be_normalized___",
|
||||
"^#$test name should be normalized)(*": "___test_name__should_be_normalized___",
|
||||
"^#$test name__should be normalized)(*": "___test_name__should_be_normalized___",
|
||||
}
|
||||
|
||||
for k, v := range invalidNames {
|
||||
normalizedName := normalizeDisplayName(k)
|
||||
if normalizedName != v {
|
||||
t.Fatalf(
|
||||
"normalizeDisplayName does not normalize AWS name correctly: %s should resolve to %s",
|
||||
k,
|
||||
normalizedName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeDisplayName_NormNotRequired(t *testing.T) {
|
||||
validNames := []string{
|
||||
"test_name_should_normalize_to_itself@example.com",
|
||||
"test1_name_should_normalize_to_itself@example.com",
|
||||
"UPPERlower0123456789-_,.@example.com",
|
||||
}
|
||||
|
||||
for _, n := range validNames {
|
||||
normalizedName := normalizeDisplayName(n)
|
||||
if normalizedName != n {
|
||||
t.Fatalf(
|
||||
"normalizeDisplayName erroneously normalizes valid names: expected %s but normalized to %s",
|
||||
n,
|
||||
normalizedName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenUsername(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
policy string
|
||||
userType string
|
||||
UsernameTemplate string
|
||||
expectedError string
|
||||
expectedRegex string
|
||||
expectedLength int
|
||||
}
|
||||
|
||||
tests := map[string]testCase{
|
||||
"Truncated to 64. No warnings expected": {
|
||||
name: "name1",
|
||||
policy: "policy1",
|
||||
userType: "iam_user",
|
||||
UsernameTemplate: defaultUserNameTemplate,
|
||||
expectedError: "",
|
||||
expectedRegex: `^vault-name1-policy1-[0-9]+-[a-zA-Z0-9]+`,
|
||||
expectedLength: 64,
|
||||
},
|
||||
"Truncated to 32. No warnings expected": {
|
||||
name: "name1",
|
||||
policy: "policy1",
|
||||
userType: "sts",
|
||||
UsernameTemplate: defaultUserNameTemplate,
|
||||
expectedError: "",
|
||||
expectedRegex: `^vault-[0-9]+-[a-zA-Z0-9]+`,
|
||||
expectedLength: 32,
|
||||
},
|
||||
"Too long. Error expected — IAM": {
|
||||
name: "this---is---a---very---long---name",
|
||||
policy: "long------policy------name",
|
||||
userType: "assume_role",
|
||||
UsernameTemplate: `{{ if (eq .Type "IAM") }}{{ printf "%s-%s-%s-%s" (.DisplayName) (.PolicyName) (unix_time) (random 20) }}{{ end }}`,
|
||||
expectedError: "the username generated by the template exceeds the IAM username length limits of 64 chars",
|
||||
expectedRegex: "",
|
||||
expectedLength: 64,
|
||||
},
|
||||
"Too long. Error expected — STS": {
|
||||
name: "this---is---a---very---long---name",
|
||||
policy: "long------policy------name",
|
||||
userType: "sts",
|
||||
UsernameTemplate: `{{ if (eq .Type "STS") }}{{ printf "%s-%s-%s-%s" (.DisplayName) (.PolicyName) (unix_time) (random 20) }}{{ end }}`,
|
||||
expectedError: "the username generated by the template exceeds the STS username length limits of 32 chars",
|
||||
expectedRegex: "",
|
||||
expectedLength: 32,
|
||||
},
|
||||
}
|
||||
|
||||
for testDescription, testCase := range tests {
|
||||
t.Run(testDescription, func(t *testing.T) {
|
||||
testUsername, err := genUsername(testCase.name, testCase.policy, testCase.userType, testCase.UsernameTemplate)
|
||||
if err != nil && !strings.Contains(err.Error(), testCase.expectedError) {
|
||||
t.Fatalf("expected an error %s; instead received %s", testCase.expectedError, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
require.Regexp(t, testCase.expectedRegex, testUsername)
|
||||
|
||||
if len(testUsername) > testCase.expectedLength {
|
||||
t.Fatalf("expected username to be of length %d, got %d", testCase.expectedLength, len(testUsername))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestReadConfig_DefaultTemplate writes config/root with an empty
// username_template and verifies readConfig reports the package default
// template (defaultUserNameTemplate) as the stored value.
func TestReadConfig_DefaultTemplate(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b := Backend(config)
	if err := b.Setup(context.Background(), config); err != nil {
		t.Fatal(err)
	}

	// An empty template should trigger the default-template fallback.
	testTemplate := ""
	// NOTE(review): connection_uri/username/password look copied from another
	// engine's config test; presumably the aws config/root path ignores them —
	// confirm against the config/root field schema.
	configData := map[string]interface{}{
		"connection_uri":    "test_uri",
		"username":          "guest",
		"password":          "guest",
		"username_template": testTemplate,
	}
	configReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "config/root",
		Storage:   config.StorageView,
		Data:      configData,
	}
	resp, err := b.HandleRequest(context.Background(), configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
	}
	if resp != nil {
		t.Fatal("expected a nil response")
	}

	// Read the stored config back directly from storage.
	configResult, err := readConfig(context.Background(), config.StorageView)
	if err != nil {
		t.Fatalf("expected err to be nil; got %s", err)
	}

	// No template provided, config set to defaultUsernameTemplate
	if configResult.UsernameTemplate != defaultUserNameTemplate {
		t.Fatalf(
			"expected template %s; got %s",
			defaultUserNameTemplate,
			configResult.UsernameTemplate,
		)
	}
}
|
||||
|
||||
func TestReadConfig_CustomTemplate(t *testing.T) {
|
||||
config := logical.TestBackendConfig()
|
||||
config.StorageView = &logical.InmemStorage{}
|
||||
b := Backend(config)
|
||||
if err := b.Setup(context.Background(), config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testTemplate := "`foo-{{ .DisplayName }}`"
|
||||
configData := map[string]interface{}{
|
||||
"connection_uri": "test_uri",
|
||||
"username": "guest",
|
||||
"password": "guest",
|
||||
"username_template": testTemplate,
|
||||
}
|
||||
configReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "config/root",
|
||||
Storage: config.StorageView,
|
||||
Data: configData,
|
||||
}
|
||||
resp, err := b.HandleRequest(context.Background(), configReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: resp: %#v\nerr:%s", resp, err)
|
||||
}
|
||||
if resp != nil {
|
||||
t.Fatal("expected a nil response")
|
||||
}
|
||||
|
||||
configResult, err := readConfig(context.Background(), config.StorageView)
|
||||
if err != nil {
|
||||
t.Fatalf("expected err to be nil; got %s", err)
|
||||
}
|
||||
|
||||
if configResult.UsernameTemplate != testTemplate {
|
||||
t.Fatalf(
|
||||
"expected template %s; got %s",
|
||||
testTemplate,
|
||||
configResult.UsernameTemplate,
|
||||
)
|
||||
}
|
||||
}
|
@ -1,105 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
stepwise "github.com/hashicorp/vault-testing-stepwise"
|
||||
dockerEnvironment "github.com/hashicorp/vault-testing-stepwise/environments/docker"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
// stepwiseSetup guards the one-time environment preparation shared by the
// stepwise acceptance tests in this file (see testAccStepwisePreCheck).
var stepwiseSetup sync.Once
|
||||
|
||||
func TestAccBackend_Stepwise_basic(t *testing.T) {
|
||||
t.Parallel()
|
||||
envOptions := &stepwise.MountOptions{
|
||||
RegistryName: "aws-sec",
|
||||
PluginType: api.PluginTypeSecrets,
|
||||
PluginName: "aws",
|
||||
MountPathPrefix: "aws-sec",
|
||||
}
|
||||
roleName := "vault-stepwise-role"
|
||||
stepwise.Run(t, stepwise.Case{
|
||||
Precheck: func() { testAccStepwisePreCheck(t) },
|
||||
Environment: dockerEnvironment.NewEnvironment("aws", envOptions),
|
||||
Steps: []stepwise.Step{
|
||||
testAccStepwiseConfig(t),
|
||||
testAccStepwiseWritePolicy(t, roleName, testDynamoPolicy),
|
||||
testAccStepwiseRead(t, "creds", roleName, []credentialTestFunc{listDynamoTablesTest}),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccStepwiseConfig(t *testing.T) stepwise.Step {
|
||||
return stepwise.Step{
|
||||
Operation: stepwise.UpdateOperation,
|
||||
Path: "config/root",
|
||||
Data: map[string]interface{}{
|
||||
"region": os.Getenv("AWS_DEFAULT_REGION"),
|
||||
"access_key": os.Getenv("TEST_AWS_ACCESS_KEY"),
|
||||
"secret_key": os.Getenv("TEST_AWS_SECRET_KEY"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testAccStepwiseWritePolicy(t *testing.T, name string, policy string) stepwise.Step {
|
||||
return stepwise.Step{
|
||||
Operation: stepwise.UpdateOperation,
|
||||
Path: "roles/" + name,
|
||||
Data: map[string]interface{}{
|
||||
"policy_document": policy,
|
||||
"credential_type": "iam_user",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testAccStepwiseRead(t *testing.T, path, name string, credentialTests []credentialTestFunc) stepwise.Step {
|
||||
return stepwise.Step{
|
||||
Operation: stepwise.ReadOperation,
|
||||
Path: path + "/" + name,
|
||||
Assert: func(resp *api.Secret, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var d struct {
|
||||
AccessKey string `mapstructure:"access_key"`
|
||||
SecretKey string `mapstructure:"secret_key"`
|
||||
STSToken string `mapstructure:"security_token"`
|
||||
}
|
||||
if err := mapstructure.Decode(resp.Data, &d); err != nil {
|
||||
return err
|
||||
}
|
||||
t.Logf("[WARN] Generated credentials: %v", d)
|
||||
for _, testFunc := range credentialTests {
|
||||
err := testFunc(d.AccessKey, d.SecretKey, d.STSToken)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testAccStepwisePreCheck(t *testing.T) {
|
||||
stepwiseSetup.Do(func() {
|
||||
if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
|
||||
t.Logf("[INFO] Test: Using us-west-2 as test region")
|
||||
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
|
||||
}
|
||||
|
||||
// Ensure test variables are set
|
||||
if v := os.Getenv("TEST_AWS_ACCESS_KEY"); v == "" {
|
||||
t.Skip("TEST_AWS_ACCESS_KEY not set")
|
||||
}
|
||||
if v := os.Getenv("TEST_AWS_SECRET_KEY"); v == "" {
|
||||
t.Skip("TEST_AWS_SECRET_KEY not set")
|
||||
}
|
||||
})
|
||||
}
|
@ -1,247 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/api"
|
||||
vaultaws "github.com/hashicorp/vault/builtin/credential/aws"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth"
|
||||
agentaws "github.com/hashicorp/vault/command/agentproxyshared/auth/aws"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/sink"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
|
||||
"github.com/hashicorp/vault/helper/testhelpers"
|
||||
vaulthttp "github.com/hashicorp/vault/http"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/logical"
|
||||
"github.com/hashicorp/vault/vault"
|
||||
)
|
||||
|
||||
const (
	// These are the access key and secret that should be used when calling "AssumeRole"
	// for the given AWS_TEST_ROLE_ARN.
	envVarAwsTestAccessKey = "AWS_TEST_ACCESS_KEY"
	envVarAwsTestSecretKey = "AWS_TEST_SECRET_KEY"
	envVarAwsTestRoleArn   = "AWS_TEST_ROLE_ARN"

	// The AWS SDK doesn't export its standard env vars so they're captured here.
	// These are used for the duration of the test to make sure the agent is able to
	// pick up creds from the env. setAwsEnvCreds fills them with temporary
	// AssumeRole credentials and unsetAwsEnvCreds clears them afterwards.
	//
	// To run this test, do not set these. Only the above ones need to be set.
	envVarAwsAccessKey    = "AWS_ACCESS_KEY_ID"
	envVarAwsSecretKey    = "AWS_SECRET_ACCESS_KEY"
	envVarAwsSessionToken = "AWS_SESSION_TOKEN"
)
|
||||
|
||||
// TestAWSEndToEnd is an acceptance test (gated by runAcceptanceTests) that
// spins up a test Vault cluster with the aws credential backend mounted,
// exports temporary AssumeRole credentials into the standard AWS SDK env
// vars, and verifies the agent's aws auto-auth method logs in and writes a
// wrapped token to a file sink.
func TestAWSEndToEnd(t *testing.T) {
	if !runAcceptanceTests {
		t.SkipNow()
	}

	// Ensure each cred is populated.
	credNames := []string{
		envVarAwsTestAccessKey,
		envVarAwsTestSecretKey,
		envVarAwsTestRoleArn,
	}
	testhelpers.SkipUnlessEnvVarsSet(t, credNames)

	logger := logging.NewVaultLogger(hclog.Trace)
	coreConfig := &vault.CoreConfig{
		Logger: logger,
		CredentialBackends: map[string]logical.Factory{
			"aws": vaultaws.Factory,
		},
	}
	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
	})
	cluster.Start()
	defer cluster.Cleanup()

	vault.TestWaitActive(t, cluster.Cores[0].Core)
	client := cluster.Cores[0].Client

	// Setup Vault: enable the aws auth method and create a role bound to the
	// test role's IAM principal.
	if err := client.Sys().EnableAuthWithOptions("aws", &api.EnableAuthOptions{
		Type: "aws",
	}); err != nil {
		t.Fatal(err)
	}

	if _, err := client.Logical().Write("auth/aws/role/test", map[string]interface{}{
		"auth_type": "iam",
		"policies":  "default",
		// Retain thru the account number of the given arn and wildcard the rest.
		"bound_iam_principal_arn": os.Getenv(envVarAwsTestRoleArn)[:25] + "*",
	}); err != nil {
		t.Fatal(err)
	}

	// Everything below runs under a 30s deadline; cancel is deferred last so
	// it fires first (see the comment above "defer cancel()" below).
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)

	// We're going to feed aws auth creds via env variables.
	if err := setAwsEnvCreds(); err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := unsetAwsEnvCreds(); err != nil {
			t.Fatal(err)
		}
	}()

	am, err := agentaws.NewAWSAuthMethod(&auth.AuthConfig{
		Logger:    logger.Named("auth.aws"),
		MountPath: "auth/aws",
		Config: map[string]interface{}{
			"role": "test",
			"type": "iam",
			// Poll every second so rotated creds are picked up within the
			// test's timeout.
			"credential_poll_interval": 1,
		},
	})
	if err != nil {
		t.Fatal(err)
	}

	ahConfig := &auth.AuthHandlerConfig{
		Logger: logger.Named("auth.handler"),
		Client: client,
	}

	ah := auth.NewAuthHandler(ahConfig)
	errCh := make(chan error)
	go func() {
		errCh <- ah.Run(ctx, am)
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// Reserve a unique path for the token sink, then remove the file so the
	// later Lstat check can prove the sink (not this test) created it.
	// NOTE(review): ioutil.TempFile is deprecated in newer Go; os.CreateTemp
	// is the modern equivalent.
	tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.")
	if err != nil {
		t.Fatal(err)
	}
	tokenSinkFileName := tmpFile.Name()
	tmpFile.Close()
	os.Remove(tokenSinkFileName)
	t.Logf("output: %s", tokenSinkFileName)

	config := &sink.SinkConfig{
		Logger: logger.Named("sink.file"),
		Config: map[string]interface{}{
			"path": tokenSinkFileName,
		},
		WrapTTL: 10 * time.Second,
	}

	fs, err := file.NewFileSink(config)
	if err != nil {
		t.Fatal(err)
	}
	config.Sink = fs

	ss := sink.NewSinkServer(&sink.SinkServerConfig{
		Logger: logger.Named("sink.server"),
		Client: client,
	})
	go func() {
		errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// This has to be after the other defers so it happens first. It allows
	// successful test runs to immediately cancel all of the runner goroutines
	// and unblock any of the blocking defer calls by the runner's DoneCh that
	// comes before this and avoid successful tests from taking the entire
	// timeout duration.
	defer cancel()

	if stat, err := os.Lstat(tokenSinkFileName); err == nil {
		t.Fatalf("expected err but got %s", stat)
	} else if !os.IsNotExist(err) {
		t.Fatal("expected notexist err")
	}

	// Wait 2 seconds for the env variables to be detected and an auth to be generated.
	time.Sleep(time.Second * 2)

	token, err := readToken(tokenSinkFileName)
	if err != nil {
		t.Fatal(err)
	}

	if token.Token == "" {
		t.Fatal("expected token but didn't receive it")
	}
}
|
||||
|
||||
func setAwsEnvCreds() error {
|
||||
cfg := &aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials(os.Getenv(envVarAwsTestAccessKey), os.Getenv(envVarAwsTestSecretKey), ""),
|
||||
}
|
||||
sess, err := session.NewSession(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := sts.New(sess)
|
||||
|
||||
roleArn := os.Getenv(envVarAwsTestRoleArn)
|
||||
uid, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
input := &sts.AssumeRoleInput{
|
||||
RoleArn: &roleArn,
|
||||
RoleSessionName: &uid,
|
||||
}
|
||||
output, err := client.AssumeRole(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Setenv(envVarAwsAccessKey, *output.Credentials.AccessKeyId); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Setenv(envVarAwsSecretKey, *output.Credentials.SecretAccessKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Setenv(envVarAwsSessionToken, *output.Credentials.SessionToken)
|
||||
}
|
||||
|
||||
func unsetAwsEnvCreds() error {
|
||||
if err := os.Unsetenv(envVarAwsAccessKey); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Unsetenv(envVarAwsSecretKey); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Unsetenv(envVarAwsSessionToken)
|
||||
}
|
@ -1,301 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth"
|
||||
)
|
||||
|
||||
const (
	// Supported values for the "type" config key of the aws auto-auth method.
	typeEC2 = "ec2"
	typeIAM = "iam"

	/*
		IAM creds can be inferred from instance metadata or the container
		identity service, and those creds expire at varying intervals with
		new creds becoming available at likewise varying intervals. Let's
		default to polling once a minute so all changes can be picked up
		rather quickly. This is configurable, however.
	*/
	defaultCredentialPollInterval = 60 // seconds
)
|
||||
|
||||
// awsMethod implements the agent auto-auth method for the aws auth backend,
// supporting both the ec2 and iam authentication types.
type awsMethod struct {
	logger      hclog.Logger
	authType    string // typeEC2 or typeIAM; validated in NewAWSAuthMethod
	nonce       string // ec2 reauthentication value; generated on first login if not configured
	mountPath   string // mount path of the aws auth method, used to build the login path
	role        string // Vault role to log in against
	headerValue string // optional value forwarded to awsutil.GenerateLoginData ("header_value" config)
	region      string // AWS region for iam login data; defaults to awsutil.DefaultRegion

	// These are used to share the latest creds safely across goroutines.
	credLock  sync.Mutex
	lastCreds *credentials.Credentials

	// Notifies the outer environment that it should call Authenticate again.
	credsFound chan struct{}

	// Detects that the outer environment is closing.
	stopCh chan struct{}
}
|
||||
|
||||
func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
|
||||
if conf == nil {
|
||||
return nil, errors.New("empty config")
|
||||
}
|
||||
if conf.Config == nil {
|
||||
return nil, errors.New("empty config data")
|
||||
}
|
||||
|
||||
a := &awsMethod{
|
||||
logger: conf.Logger,
|
||||
mountPath: conf.MountPath,
|
||||
credsFound: make(chan struct{}),
|
||||
stopCh: make(chan struct{}),
|
||||
region: awsutil.DefaultRegion,
|
||||
}
|
||||
|
||||
typeRaw, ok := conf.Config["type"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing 'type' value")
|
||||
}
|
||||
a.authType, ok = typeRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'type' config value to string")
|
||||
}
|
||||
|
||||
roleRaw, ok := conf.Config["role"]
|
||||
if !ok {
|
||||
return nil, errors.New("missing 'role' value")
|
||||
}
|
||||
a.role, ok = roleRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'role' config value to string")
|
||||
}
|
||||
|
||||
switch {
|
||||
case a.role == "":
|
||||
return nil, errors.New("'role' value is empty")
|
||||
case a.authType == "":
|
||||
return nil, errors.New("'type' value is empty")
|
||||
case a.authType != typeEC2 && a.authType != typeIAM:
|
||||
return nil, errors.New("'type' value is invalid")
|
||||
}
|
||||
|
||||
accessKey := ""
|
||||
accessKeyRaw, ok := conf.Config["access_key"]
|
||||
if ok {
|
||||
accessKey, ok = accessKeyRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'access_key' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
secretKey := ""
|
||||
secretKeyRaw, ok := conf.Config["secret_key"]
|
||||
if ok {
|
||||
secretKey, ok = secretKeyRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'secret_key' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
sessionToken := ""
|
||||
sessionTokenRaw, ok := conf.Config["session_token"]
|
||||
if ok {
|
||||
sessionToken, ok = sessionTokenRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'session_token' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
headerValueRaw, ok := conf.Config["header_value"]
|
||||
if ok {
|
||||
a.headerValue, ok = headerValueRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'header_value' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
nonceRaw, ok := conf.Config["nonce"]
|
||||
if ok {
|
||||
a.nonce, ok = nonceRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'nonce' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
regionRaw, ok := conf.Config["region"]
|
||||
if ok {
|
||||
a.region, ok = regionRaw.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("could not convert 'region' value into string")
|
||||
}
|
||||
}
|
||||
|
||||
if a.authType == typeIAM {
|
||||
|
||||
// Check for an optional custom frequency at which we should poll for creds.
|
||||
credentialPollIntervalSec := defaultCredentialPollInterval
|
||||
if credentialPollIntervalRaw, ok := conf.Config["credential_poll_interval"]; ok {
|
||||
if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok {
|
||||
credentialPollIntervalSec = credentialPollInterval
|
||||
} else {
|
||||
return nil, errors.New("could not convert 'credential_poll_interval' into int")
|
||||
}
|
||||
}
|
||||
|
||||
// Do an initial population of the creds because we want to err right away if we can't
|
||||
// even get a first set.
|
||||
creds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.lastCreds = creds
|
||||
|
||||
go a.pollForCreds(accessKey, secretKey, sessionToken, credentialPollIntervalSec)
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Authenticate builds the login payload for the aws auth backend. For the
// ec2 type it fetches the instance identity document and signature from the
// EC2 metadata service and attaches a reauthentication nonce; for the iam
// type it generates signed login data from the most recently polled
// credentials. The first return value is the backend's login path
// ("<mountPath>/login"); no extra headers are returned.
func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retToken string, header http.Header, retData map[string]interface{}, retErr error) {
	a.logger.Trace("beginning authentication")

	data := make(map[string]interface{})
	sess, err := session.NewSession()
	if err != nil {
		retErr = fmt.Errorf("error creating session: %w", err)
		return
	}
	metadataSvc := ec2metadata.New(sess)

	switch a.authType {
	case typeEC2:
		// Fetch document
		{
			doc, err := metadataSvc.GetDynamicData("/instance-identity/document")
			if err != nil {
				retErr = fmt.Errorf("error requesting doc: %w", err)
				return
			}
			data["identity"] = base64.StdEncoding.EncodeToString([]byte(doc))
		}

		// Fetch signature
		{
			signature, err := metadataSvc.GetDynamicData("/instance-identity/signature")
			if err != nil {
				retErr = fmt.Errorf("error requesting signature: %w", err)
				return
			}
			data["signature"] = signature
		}

		// Add the reauthentication value, if we have one; generate and cache
		// a fresh nonce on first login so subsequent logins reuse it.
		if a.nonce == "" {
			uid, err := uuid.GenerateUUID()
			if err != nil {
				retErr = fmt.Errorf("error generating uuid for reauthentication value: %w", err)
				return
			}
			a.nonce = uid
		}
		data["nonce"] = a.nonce

	default:
		// This is typeIAM (NewAWSAuthMethod rejects any other value). Hold
		// the lock so the poller cannot swap lastCreds mid-generation.
		a.credLock.Lock()
		defer a.credLock.Unlock()

		var err error
		data, err = awsutil.GenerateLoginData(a.lastCreds, a.headerValue, a.region, a.logger)
		if err != nil {
			retErr = fmt.Errorf("error creating login value: %w", err)
			return
		}
	}

	data["role"] = a.role

	return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil
}
|
||||
|
||||
// NewCreds returns the channel the background poller signals whenever a
// fresh set of credentials is detected, telling the caller to call
// Authenticate again.
func (a *awsMethod) NewCreds() chan struct{} {
	return a.credsFound
}
|
||||
|
||||
// CredSuccess is a no-op; this method does not need to react to a
// successful login.
func (a *awsMethod) CredSuccess() {}
|
||||
|
||||
// Shutdown stops the credential poller (via stopCh) and releases any
// NewCreds listeners by closing both channels.
func (a *awsMethod) Shutdown() {
	close(a.credsFound)
	close(a.stopCh)
}
|
||||
|
||||
func (a *awsMethod) pollForCreds(accessKey, secretKey, sessionToken string, frequencySeconds int) {
|
||||
ticker := time.NewTicker(time.Duration(frequencySeconds) * time.Second)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-a.stopCh:
|
||||
a.logger.Trace("shutdown triggered, stopping aws auth handler")
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := a.checkCreds(accessKey, secretKey, sessionToken); err != nil {
|
||||
a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *awsMethod) checkCreds(accessKey, secretKey, sessionToken string) error {
|
||||
a.credLock.Lock()
|
||||
defer a.credLock.Unlock()
|
||||
|
||||
a.logger.Trace("checking for new credentials")
|
||||
currentCreds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
currentVal, err := currentCreds.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lastVal, err := a.lastCreds.Get()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// These will always have different pointers regardless of whether their
|
||||
// values are identical, hence the use of DeepEqual.
|
||||
if !a.lastCreds.IsExpired() && reflect.DeepEqual(currentVal, lastVal) {
|
||||
a.logger.Trace("credentials are unchanged and still valid")
|
||||
return nil
|
||||
}
|
||||
|
||||
a.lastCreds = currentCreds
|
||||
a.logger.Trace("new credentials detected, triggering Authenticate")
|
||||
a.credsFound <- struct{}{}
|
||||
return nil
|
||||
}
|
@ -14,7 +14,6 @@ import (
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/approle"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/aws"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/cert"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/cf"
|
||||
"github.com/hashicorp/vault/command/agentproxyshared/auth/gcp"
|
||||
@ -34,8 +33,6 @@ import (
|
||||
// the method type is invalid.
|
||||
func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) {
|
||||
switch autoAuthMethodType {
|
||||
case "aws":
|
||||
return aws.NewAWSAuthMethod(authConfig)
|
||||
case "cert":
|
||||
return cert.NewCertAuthMethod(authConfig)
|
||||
case "cf":
|
||||
|
@ -85,7 +85,6 @@ func (b *BaseCommand) PredictVaultAvailableMounts() complete.Predictor {
|
||||
// This list does not contain deprecated backends. At present, there is no
|
||||
// API that lists all available secret backends, so this is hard-coded :(.
|
||||
return complete.PredictSet(
|
||||
"aws",
|
||||
"consul",
|
||||
"database",
|
||||
"generic",
|
||||
@ -106,7 +105,6 @@ func (b *BaseCommand) PredictVaultAvailableAuths() complete.Predictor {
|
||||
return complete.PredictSet(
|
||||
"app-id",
|
||||
"approle",
|
||||
"aws",
|
||||
"cert",
|
||||
"gcp",
|
||||
"github",
|
||||
|
@ -32,7 +32,6 @@ import (
|
||||
credOIDC "github.com/hashicorp/vault-plugin-auth-jwt"
|
||||
credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
|
||||
credOCI "github.com/hashicorp/vault-plugin-auth-oci"
|
||||
credAws "github.com/hashicorp/vault/builtin/credential/aws"
|
||||
credCert "github.com/hashicorp/vault/builtin/credential/cert"
|
||||
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
|
||||
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
|
||||
@ -48,7 +47,6 @@ import (
|
||||
physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
|
||||
physConsul "github.com/hashicorp/vault/physical/consul"
|
||||
physCouchDB "github.com/hashicorp/vault/physical/couchdb"
|
||||
physDynamoDB "github.com/hashicorp/vault/physical/dynamodb"
|
||||
physEtcd "github.com/hashicorp/vault/physical/etcd"
|
||||
physFoundationDB "github.com/hashicorp/vault/physical/foundationdb"
|
||||
physGCS "github.com/hashicorp/vault/physical/gcs"
|
||||
@ -57,7 +55,6 @@ import (
|
||||
physOCI "github.com/hashicorp/vault/physical/oci"
|
||||
physPostgreSQL "github.com/hashicorp/vault/physical/postgresql"
|
||||
physRaft "github.com/hashicorp/vault/physical/raft"
|
||||
physS3 "github.com/hashicorp/vault/physical/s3"
|
||||
physSpanner "github.com/hashicorp/vault/physical/spanner"
|
||||
physSwift "github.com/hashicorp/vault/physical/swift"
|
||||
physZooKeeper "github.com/hashicorp/vault/physical/zookeeper"
|
||||
@ -187,7 +184,6 @@ var (
|
||||
"consul": physConsul.NewConsulBackend,
|
||||
"couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend,
|
||||
"couchdb": physCouchDB.NewCouchDBBackend,
|
||||
"dynamodb": physDynamoDB.NewDynamoDBBackend,
|
||||
"etcd": physEtcd.NewEtcdBackend,
|
||||
"file_transactional": physFile.NewTransactionalFileBackend,
|
||||
"file": physFile.NewFileBackend,
|
||||
@ -201,7 +197,6 @@ var (
|
||||
"mysql": physMySQL.NewMySQLBackend,
|
||||
"oci": physOCI.NewBackend,
|
||||
"postgresql": physPostgreSQL.NewPostgreSQLBackend,
|
||||
"s3": physS3.NewS3Backend,
|
||||
"spanner": physSpanner.NewBackend,
|
||||
"swift": physSwift.NewSwiftBackend,
|
||||
"raft": physRaft.NewRaftBackend,
|
||||
@ -218,7 +213,6 @@ var (
|
||||
|
||||
func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory {
|
||||
loginHandlers := map[string]LoginHandler{
|
||||
"aws": &credAws.CLIHandler{},
|
||||
"centrify": &credCentrify.CLIHandler{},
|
||||
"cert": &credCert.CLIHandler{},
|
||||
"cf": &credCF.CLIHandler{},
|
||||
|
16
go.mod
16
go.mod
@ -34,8 +34,6 @@ require (
|
||||
github.com/armon/go-metrics v0.4.1
|
||||
github.com/armon/go-radix v1.0.0
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
|
||||
github.com/aws/aws-sdk-go v1.49.22
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.19
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a
|
||||
github.com/cenkalti/backoff/v3 v3.2.2
|
||||
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
|
||||
@ -84,7 +82,6 @@ require (
|
||||
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2
|
||||
github.com/hashicorp/go-rootcerts v1.0.2
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.2.3
|
||||
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2
|
||||
github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1
|
||||
github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2
|
||||
@ -232,17 +229,7 @@ require (
|
||||
github.com/agext/levenshtein v1.2.1 // indirect
|
||||
github.com/andybalholm/brotli v1.0.5 // indirect
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/aws/aws-sdk-go v1.49.22 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/speakeasy v0.1.0 // indirect
|
||||
github.com/boombuler/barcode v1.0.1 // indirect
|
||||
@ -314,6 +301,7 @@ require (
|
||||
github.com/hashicorp/cronexpr v1.1.1 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/awsutil v0.2.3 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect
|
||||
github.com/hashicorp/go-slug v0.11.1 // indirect
|
||||
github.com/hashicorp/go-tfe v1.25.1 // indirect
|
||||
|
24
go.sum
24
go.sum
@ -1010,30 +1010,6 @@ github.com/aws/aws-sdk-go v1.43.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4o
|
||||
github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU=
|
||||
github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
|
@ -21,14 +21,12 @@ import (
|
||||
logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap"
|
||||
logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform"
|
||||
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
|
||||
credAws "github.com/hashicorp/vault/builtin/credential/aws"
|
||||
credCert "github.com/hashicorp/vault/builtin/credential/cert"
|
||||
credGitHub "github.com/hashicorp/vault/builtin/credential/github"
|
||||
credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
|
||||
credOkta "github.com/hashicorp/vault/builtin/credential/okta"
|
||||
credRadius "github.com/hashicorp/vault/builtin/credential/radius"
|
||||
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
|
||||
logicalAws "github.com/hashicorp/vault/builtin/logical/aws"
|
||||
logicalConsul "github.com/hashicorp/vault/builtin/logical/consul"
|
||||
logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad"
|
||||
logicalPki "github.com/hashicorp/vault/builtin/logical/pki"
|
||||
@ -89,7 +87,6 @@ func newRegistry() *registry {
|
||||
DeprecationStatus: consts.Removed,
|
||||
},
|
||||
"approle": {Factory: credAppRole.Factory},
|
||||
"aws": {Factory: credAws.Factory},
|
||||
"centrify": {Factory: credCentrify.Factory},
|
||||
"cert": {Factory: credCert.Factory},
|
||||
"cf": {Factory: credCF.Factory},
|
||||
@ -126,7 +123,6 @@ func newRegistry() *registry {
|
||||
Factory: logicalAd.Factory,
|
||||
DeprecationStatus: consts.Deprecated,
|
||||
},
|
||||
"aws": {Factory: logicalAws.Factory},
|
||||
"cassandra": {
|
||||
Factory: removedFactory,
|
||||
DeprecationStatus: consts.Removed,
|
||||
|
@ -87,7 +87,6 @@ func NewMockBuiltinRegistry() *mockBuiltinRegistry {
|
||||
PluginType: consts.PluginTypeCredential,
|
||||
DeprecationStatus: consts.PendingRemoval,
|
||||
},
|
||||
"aws": {PluginType: consts.PluginTypeCredential},
|
||||
"consul": {PluginType: consts.PluginTypeSecrets},
|
||||
},
|
||||
}
|
||||
@ -121,13 +120,6 @@ func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (fu
|
||||
switch name {
|
||||
case "approle", "pending-removal-test-plugin":
|
||||
return toFunc(approle.Factory), true
|
||||
case "aws":
|
||||
return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
|
||||
b := new(framework.Backend)
|
||||
b.Setup(ctx, config)
|
||||
b.BackendType = logical.TypeCredential
|
||||
return b, nil
|
||||
}), true
|
||||
case "postgresql-database-plugin":
|
||||
return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
|
||||
b := new(framework.Backend)
|
||||
|
@ -1,114 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package minio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/defaults"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/vault/sdk/helper/docker"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Endpoint string
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Region string
|
||||
}
|
||||
|
||||
const (
|
||||
accessKeyID = "min-access-key"
|
||||
secretKey = "min-secret-key"
|
||||
)
|
||||
|
||||
func PrepareTestContainer(t *testing.T, version string) (func(), *Config) {
|
||||
if version == "" {
|
||||
version = "latest"
|
||||
}
|
||||
runner, err := docker.NewServiceRunner(docker.RunOptions{
|
||||
ContainerName: "minio",
|
||||
ImageRepo: "docker.mirror.hashicorp.services/minio/minio",
|
||||
ImageTag: version,
|
||||
Env: []string{
|
||||
"MINIO_ACCESS_KEY=" + accessKeyID,
|
||||
"MINIO_SECRET_KEY=" + secretKey,
|
||||
},
|
||||
Cmd: []string{"server", "/data"},
|
||||
Ports: []string{"9000/tcp"},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start docker Minio: %s", err)
|
||||
}
|
||||
|
||||
svc, err := runner.StartService(context.Background(), connectMinio)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not start docker Minio: %s", err)
|
||||
}
|
||||
|
||||
return svc.Cleanup, &Config{
|
||||
Endpoint: svc.Config.URL().Host,
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secretKey,
|
||||
Region: "us-east-1",
|
||||
}
|
||||
}
|
||||
|
||||
func connectMinio(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
|
||||
u := url.URL{
|
||||
Scheme: "s3",
|
||||
Host: fmt.Sprintf("%s:%d", host, port),
|
||||
}
|
||||
|
||||
c := &Config{
|
||||
Endpoint: u.Host,
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secretKey,
|
||||
Region: "us-east-1",
|
||||
}
|
||||
s3conn, err := c.Conn()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = s3conn.ListBuckets(&s3.ListBucketsInput{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return docker.NewServiceURL(u), nil
|
||||
}
|
||||
|
||||
func (c *Config) Conn() (*s3.S3, error) {
|
||||
cfg := &aws.Config{
|
||||
DisableSSL: aws.Bool(true),
|
||||
Region: aws.String("us-east-1"),
|
||||
Endpoint: aws.String(c.Endpoint),
|
||||
S3ForcePathStyle: aws.Bool(true),
|
||||
Credentials: credentials.NewChainCredentials(
|
||||
[]credentials.Provider{
|
||||
&credentials.StaticProvider{
|
||||
Value: credentials.Value{
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secretKey,
|
||||
},
|
||||
},
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{},
|
||||
defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()),
|
||||
}),
|
||||
}
|
||||
|
||||
sess, err := session.NewSession(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s3.New(sess), nil
|
||||
}
|
@ -14,7 +14,6 @@ import (
|
||||
"github.com/hashicorp/go-hclog"
|
||||
wrapping "github.com/hashicorp/go-kms-wrapping/v2"
|
||||
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2"
|
||||
"github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2"
|
||||
@ -173,9 +172,6 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin
|
||||
case wrapping.WrapperTypeAead:
|
||||
wrapper, kmsInfo, err = GetAEADKMSFunc(configKMS, opts...)
|
||||
|
||||
case wrapping.WrapperTypeAwsKms:
|
||||
wrapper, kmsInfo, err = GetAWSKMSFunc(configKMS, opts...)
|
||||
|
||||
case wrapping.WrapperTypeGcpCkms:
|
||||
wrapper, kmsInfo, err = GetGCPCKMSKMSFunc(configKMS, opts...)
|
||||
|
||||
@ -225,26 +221,6 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st
|
||||
return wrapper, info, nil
|
||||
}
|
||||
|
||||
var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
|
||||
wrapper := awskms.NewWrapper()
|
||||
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)
|
||||
if err != nil {
|
||||
// If the error is any other than logical.KeyNotFoundError, return the error
|
||||
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
info := make(map[string]string)
|
||||
if wrapperInfo != nil {
|
||||
info["AWS KMS Region"] = wrapperInfo.Metadata["region"]
|
||||
info["AWS KMS KeyID"] = wrapperInfo.Metadata["kms_key_id"]
|
||||
if endpoint, ok := wrapperInfo.Metadata["endpoint"]; ok {
|
||||
info["AWS KMS Endpoint"] = endpoint
|
||||
}
|
||||
}
|
||||
return wrapper, info, nil
|
||||
}
|
||||
|
||||
func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) {
|
||||
wrapper := gcpckms.NewWrapper()
|
||||
wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...)
|
||||
|
@ -1,910 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package dynamodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
pkgPath "path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
|
||||
metrics "github.com/armon/go-metrics"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
uuid "github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
|
||||
"github.com/cenkalti/backoff/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDynamoDBRegion is used when no region is configured
|
||||
// explicitly.
|
||||
DefaultDynamoDBRegion = "us-east-1"
|
||||
// DefaultDynamoDBTableName is used when no table name
|
||||
// is configured explicitly.
|
||||
DefaultDynamoDBTableName = "vault-dynamodb-backend"
|
||||
|
||||
// DefaultDynamoDBReadCapacity is the default read capacity
|
||||
// that is used when none is configured explicitly.
|
||||
DefaultDynamoDBReadCapacity = 5
|
||||
// DefaultDynamoDBWriteCapacity is the default write capacity
|
||||
// that is used when none is configured explicitly.
|
||||
DefaultDynamoDBWriteCapacity = 5
|
||||
|
||||
// DynamoDBEmptyPath is the string that is used instead of
|
||||
// empty strings when stored in DynamoDB.
|
||||
DynamoDBEmptyPath = " "
|
||||
// DynamoDBLockPrefix is the prefix used to mark DynamoDB records
|
||||
// as locks. This prefix causes them not to be returned by
|
||||
// List operations.
|
||||
DynamoDBLockPrefix = "_"
|
||||
|
||||
// The lock TTL matches the default that Consul API uses, 15 seconds.
|
||||
DynamoDBLockTTL = 15 * time.Second
|
||||
|
||||
// The amount of time to wait between the lock renewals
|
||||
DynamoDBLockRenewInterval = 5 * time.Second
|
||||
|
||||
// DynamoDBLockRetryInterval is the amount of time to wait
|
||||
// if a lock fails before trying again.
|
||||
DynamoDBLockRetryInterval = time.Second
|
||||
// DynamoDBWatchRetryMax is the number of times to re-try a
|
||||
// failed watch before signaling that leadership is lost.
|
||||
DynamoDBWatchRetryMax = 5
|
||||
// DynamoDBWatchRetryInterval is the amount of time to wait
|
||||
// if a watch fails before trying again.
|
||||
DynamoDBWatchRetryInterval = 5 * time.Second
|
||||
)
|
||||
|
||||
// Verify DynamoDBBackend satisfies the correct interfaces
|
||||
var (
|
||||
_ physical.Backend = (*DynamoDBBackend)(nil)
|
||||
_ physical.HABackend = (*DynamoDBBackend)(nil)
|
||||
_ physical.Lock = (*DynamoDBLock)(nil)
|
||||
)
|
||||
|
||||
// DynamoDBBackend is a physical backend that stores data in
|
||||
// a DynamoDB table. It can be run in high-availability mode
|
||||
// as DynamoDB has locking capabilities.
|
||||
type DynamoDBBackend struct {
|
||||
table string
|
||||
client *dynamodb.DynamoDB
|
||||
logger log.Logger
|
||||
haEnabled bool
|
||||
permitPool *physical.PermitPool
|
||||
}
|
||||
|
||||
// DynamoDBRecord is the representation of a vault entry in
|
||||
// DynamoDB. The vault key is split up into two components
|
||||
// (Path and Key) in order to allow more efficient listings.
|
||||
type DynamoDBRecord struct {
|
||||
Path string
|
||||
Key string
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// DynamoDBLock implements a lock using an DynamoDB client.
|
||||
type DynamoDBLock struct {
|
||||
backend *DynamoDBBackend
|
||||
value, key string
|
||||
identity string
|
||||
held bool
|
||||
lock sync.Mutex
|
||||
// Allow modifying the Lock durations for ease of unit testing.
|
||||
renewInterval time.Duration
|
||||
ttl time.Duration
|
||||
watchRetryInterval time.Duration
|
||||
}
|
||||
|
||||
type DynamoDBLockRecord struct {
|
||||
Path string
|
||||
Key string
|
||||
Value []byte
|
||||
Identity []byte
|
||||
Expires int64
|
||||
}
|
||||
|
||||
// NewDynamoDBBackend constructs a DynamoDB backend. If the
|
||||
// configured DynamoDB table does not exist, it creates it.
|
||||
func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
table := os.Getenv("AWS_DYNAMODB_TABLE")
|
||||
if table == "" {
|
||||
table = conf["table"]
|
||||
if table == "" {
|
||||
table = DefaultDynamoDBTableName
|
||||
}
|
||||
}
|
||||
readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
|
||||
if readCapacityString == "" {
|
||||
readCapacityString = conf["read_capacity"]
|
||||
if readCapacityString == "" {
|
||||
readCapacityString = "0"
|
||||
}
|
||||
}
|
||||
readCapacity, err := strconv.Atoi(readCapacityString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid read capacity: %q", readCapacityString)
|
||||
}
|
||||
if readCapacity == 0 {
|
||||
readCapacity = DefaultDynamoDBReadCapacity
|
||||
}
|
||||
|
||||
writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
|
||||
if writeCapacityString == "" {
|
||||
writeCapacityString = conf["write_capacity"]
|
||||
if writeCapacityString == "" {
|
||||
writeCapacityString = "0"
|
||||
}
|
||||
}
|
||||
writeCapacity, err := strconv.Atoi(writeCapacityString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid write capacity: %q", writeCapacityString)
|
||||
}
|
||||
if writeCapacity == 0 {
|
||||
writeCapacity = DefaultDynamoDBWriteCapacity
|
||||
}
|
||||
|
||||
endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
|
||||
if endpoint == "" {
|
||||
endpoint = conf["endpoint"]
|
||||
}
|
||||
region := os.Getenv("AWS_DYNAMODB_REGION")
|
||||
if region == "" {
|
||||
region = os.Getenv("AWS_REGION")
|
||||
if region == "" {
|
||||
region = os.Getenv("AWS_DEFAULT_REGION")
|
||||
if region == "" {
|
||||
region = conf["region"]
|
||||
if region == "" {
|
||||
region = DefaultDynamoDBRegion
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dynamodbMaxRetryString := os.Getenv("AWS_DYNAMODB_MAX_RETRIES")
|
||||
if dynamodbMaxRetryString == "" {
|
||||
dynamodbMaxRetryString = conf["dynamodb_max_retries"]
|
||||
}
|
||||
dynamodbMaxRetry := aws.UseServiceDefaultRetries
|
||||
if dynamodbMaxRetryString != "" {
|
||||
var err error
|
||||
dynamodbMaxRetry, err = strconv.Atoi(dynamodbMaxRetryString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid max retry: %q", dynamodbMaxRetryString)
|
||||
}
|
||||
}
|
||||
|
||||
credsConfig := &awsutil.CredentialsConfig{
|
||||
AccessKey: conf["access_key"],
|
||||
SecretKey: conf["secret_key"],
|
||||
SessionToken: conf["session_token"],
|
||||
Logger: logger,
|
||||
}
|
||||
creds, err := credsConfig.GenerateCredentialChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pooledTransport := cleanhttp.DefaultPooledTransport()
|
||||
pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
|
||||
|
||||
awsConf := aws.NewConfig().
|
||||
WithCredentials(creds).
|
||||
WithRegion(region).
|
||||
WithEndpoint(endpoint).
|
||||
WithHTTPClient(&http.Client{
|
||||
Transport: pooledTransport,
|
||||
}).
|
||||
WithMaxRetries(dynamodbMaxRetry)
|
||||
|
||||
awsSession, err := session.NewSession(awsConf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Could not establish AWS session: %w", err)
|
||||
}
|
||||
|
||||
client := dynamodb.New(awsSession)
|
||||
|
||||
if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
haEnabled := os.Getenv("DYNAMODB_HA_ENABLED")
|
||||
if haEnabled == "" {
|
||||
haEnabled = conf["ha_enabled"]
|
||||
}
|
||||
haEnabledBool, _ := strconv.ParseBool(haEnabled)
|
||||
|
||||
maxParStr, ok := conf["max_parallel"]
|
||||
var maxParInt int
|
||||
if ok {
|
||||
maxParInt, err = strconv.Atoi(maxParStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
|
||||
}
|
||||
if logger.IsDebug() {
|
||||
logger.Debug("max_parallel set", "max_parallel", maxParInt)
|
||||
}
|
||||
}
|
||||
|
||||
return &DynamoDBBackend{
|
||||
table: table,
|
||||
client: client,
|
||||
permitPool: physical.NewPermitPool(maxParInt),
|
||||
haEnabled: haEnabledBool,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put is used to insert or update an entry
|
||||
func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
|
||||
defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now())
|
||||
|
||||
record := DynamoDBRecord{
|
||||
Path: recordPathForVaultKey(entry.Key),
|
||||
Key: recordKeyForVaultKey(entry.Key),
|
||||
Value: entry.Value,
|
||||
}
|
||||
item, err := dynamodbattribute.MarshalMap(record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err)
|
||||
}
|
||||
requests := []*dynamodb.WriteRequest{{
|
||||
PutRequest: &dynamodb.PutRequest{
|
||||
Item: item,
|
||||
},
|
||||
}}
|
||||
|
||||
for _, prefix := range physical.Prefixes(entry.Key) {
|
||||
record = DynamoDBRecord{
|
||||
Path: recordPathForVaultKey(prefix),
|
||||
Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)),
|
||||
}
|
||||
item, err := dynamodbattribute.MarshalMap(record)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err)
|
||||
}
|
||||
requests = append(requests, &dynamodb.WriteRequest{
|
||||
PutRequest: &dynamodb.PutRequest{
|
||||
Item: item,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return d.batchWriteRequests(requests)
|
||||
}
|
||||
|
||||
// Get is used to fetch an entry
|
||||
func (d *DynamoDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
|
||||
defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now())
|
||||
|
||||
d.permitPool.Acquire()
|
||||
defer d.permitPool.Release()
|
||||
|
||||
resp, err := d.client.GetItem(&dynamodb.GetItemInput{
|
||||
TableName: aws.String(d.table),
|
||||
ConsistentRead: aws.Bool(true),
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"Path": {S: aws.String(recordPathForVaultKey(key))},
|
||||
"Key": {S: aws.String(recordKeyForVaultKey(key))},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Item == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
record := &DynamoDBRecord{}
|
||||
if err := dynamodbattribute.UnmarshalMap(resp.Item, record); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &physical.Entry{
|
||||
Key: vaultKey(record),
|
||||
Value: record.Value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Delete is used to permanently delete an entry
|
||||
func (d *DynamoDBBackend) Delete(ctx context.Context, key string) error {
|
||||
defer metrics.MeasureSince([]string{"dynamodb", "delete"}, time.Now())
|
||||
|
||||
requests := []*dynamodb.WriteRequest{{
|
||||
DeleteRequest: &dynamodb.DeleteRequest{
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"Path": {S: aws.String(recordPathForVaultKey(key))},
|
||||
"Key": {S: aws.String(recordKeyForVaultKey(key))},
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
// Clean up empty "folders" by looping through all levels of the path to the item being deleted looking for
|
||||
// children. Loop from deepest path to shallowest, and only consider items children if they are not going to be
|
||||
// deleted by our batch delete request. If a path has no valid children, then it should be considered an empty
|
||||
// "folder" and be deleted along with the original item in our batch job. Because we loop from deepest path to
|
||||
// shallowest, once we find a path level that contains valid children we can stop the cleanup operation.
|
||||
prefixes := physical.Prefixes(key)
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(prefixes)))
|
||||
for index, prefix := range prefixes {
|
||||
// Because delete batches its requests, we need to pass keys we know are going to be deleted into
|
||||
// hasChildren so it can exclude those when it determines if there WILL be any children left after
|
||||
// the delete operations have completed.
|
||||
var excluded []string
|
||||
if index == 0 {
|
||||
// This is the value we know for sure is being deleted
|
||||
excluded = append(excluded, recordKeyForVaultKey(key))
|
||||
} else {
|
||||
// The previous path doesn't count as a child, since if we're still looping, we've found no children
|
||||
excluded = append(excluded, recordKeyForVaultKey(prefixes[index-1]))
|
||||
}
|
||||
|
||||
hasChildren, err := d.hasChildren(prefix, excluded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !hasChildren {
|
||||
// If there are no children other than ones we know are being deleted then cleanup empty "folder" pointers
|
||||
requests = append(requests, &dynamodb.WriteRequest{
|
||||
DeleteRequest: &dynamodb.DeleteRequest{
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"Path": {S: aws.String(recordPathForVaultKey(prefix))},
|
||||
"Key": {S: aws.String(fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)))},
|
||||
},
|
||||
},
|
||||
})
|
||||
} else {
|
||||
// This loop starts at the deepest path and works backwards looking for children
|
||||
// once a deeper level of the path has been found to have children there is no
|
||||
// more cleanup that needs to happen, otherwise we might remove folder pointers
|
||||
// to that deeper path making it "undiscoverable" with the list operation
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return d.batchWriteRequests(requests)
|
||||
}
|
||||
|
||||
// List is used to list all the keys under a given
|
||||
// prefix, up to the next prefix.
|
||||
func (d *DynamoDBBackend) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
defer metrics.MeasureSince([]string{"dynamodb", "list"}, time.Now())
|
||||
|
||||
prefix = strings.TrimSuffix(prefix, "/")
|
||||
|
||||
keys := []string{}
|
||||
prefix = escapeEmptyPath(prefix)
|
||||
queryInput := &dynamodb.QueryInput{
|
||||
TableName: aws.String(d.table),
|
||||
ConsistentRead: aws.Bool(true),
|
||||
KeyConditions: map[string]*dynamodb.Condition{
|
||||
"Path": {
|
||||
ComparisonOperator: aws.String("EQ"),
|
||||
AttributeValueList: []*dynamodb.AttributeValue{{
|
||||
S: aws.String(prefix),
|
||||
}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
d.permitPool.Acquire()
|
||||
defer d.permitPool.Release()
|
||||
|
||||
err := d.client.QueryPages(queryInput, func(out *dynamodb.QueryOutput, lastPage bool) bool {
|
||||
var record DynamoDBRecord
|
||||
for _, item := range out.Items {
|
||||
dynamodbattribute.UnmarshalMap(item, &record)
|
||||
if !strings.HasPrefix(record.Key, DynamoDBLockPrefix) {
|
||||
keys = append(keys, record.Key)
|
||||
}
|
||||
}
|
||||
return !lastPage
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// hasChildren returns true if there exist items below a certain path prefix.
|
||||
// To do so, the method fetches such items from DynamoDB. This method is primarily
|
||||
// used by Delete. Because DynamoDB requests are batched this method is being called
|
||||
// before any deletes take place. To account for that hasChildren accepts a slice of
|
||||
// strings representing values we expect to find that should NOT be counted as children
|
||||
// because they are going to be deleted.
|
||||
func (d *DynamoDBBackend) hasChildren(prefix string, exclude []string) (bool, error) {
|
||||
prefix = strings.TrimSuffix(prefix, "/")
|
||||
prefix = escapeEmptyPath(prefix)
|
||||
|
||||
queryInput := &dynamodb.QueryInput{
|
||||
TableName: aws.String(d.table),
|
||||
ConsistentRead: aws.Bool(true),
|
||||
KeyConditions: map[string]*dynamodb.Condition{
|
||||
"Path": {
|
||||
ComparisonOperator: aws.String("EQ"),
|
||||
AttributeValueList: []*dynamodb.AttributeValue{{
|
||||
S: aws.String(prefix),
|
||||
}},
|
||||
},
|
||||
},
|
||||
// Avoid fetching too many items from DynamoDB for performance reasons.
|
||||
// We want to know if there are any children we don't expect to see.
|
||||
// Answering that question requires fetching a minimum of one more item
|
||||
// than the number we expect. In most cases this value will be 2
|
||||
Limit: aws.Int64(int64(len(exclude) + 1)),
|
||||
}
|
||||
|
||||
d.permitPool.Acquire()
|
||||
defer d.permitPool.Release()
|
||||
|
||||
out, err := d.client.Query(queryInput)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var childrenExist bool
|
||||
for _, item := range out.Items {
|
||||
for _, excluded := range exclude {
|
||||
// Check if we've found an item we didn't expect to. Look for "folder" pointer keys (trailing slash)
|
||||
// and regular value keys (no trailing slash)
|
||||
if *item["Key"].S != excluded && *item["Key"].S != fmt.Sprintf("%s/", excluded) {
|
||||
childrenExist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if childrenExist {
|
||||
// We only need to find ONE child we didn't expect to.
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return childrenExist, nil
|
||||
}
|
||||
|
||||
// LockWith is used for mutual exclusion based on the given key.
|
||||
func (d *DynamoDBBackend) LockWith(key, value string) (physical.Lock, error) {
|
||||
identity, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &DynamoDBLock{
|
||||
backend: d,
|
||||
key: pkgPath.Join(pkgPath.Dir(key), DynamoDBLockPrefix+pkgPath.Base(key)),
|
||||
value: value,
|
||||
identity: identity,
|
||||
renewInterval: DynamoDBLockRenewInterval,
|
||||
ttl: DynamoDBLockTTL,
|
||||
watchRetryInterval: DynamoDBWatchRetryInterval,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HAEnabled reports whether this backend instance was configured with
// high-availability (leader-election) support enabled.
func (d *DynamoDBBackend) HAEnabled() bool {
	return d.haEnabled
}
|
||||
|
||||
// batchWriteRequests takes a list of write requests and executes them in badges
|
||||
// with a maximum size of 25 (which is the limit of BatchWriteItem requests).
|
||||
func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest) error {
|
||||
for len(requests) > 0 {
|
||||
batchSize := int(math.Min(float64(len(requests)), 25))
|
||||
batch := map[string][]*dynamodb.WriteRequest{d.table: requests[:batchSize]}
|
||||
requests = requests[batchSize:]
|
||||
|
||||
var err error
|
||||
|
||||
d.permitPool.Acquire()
|
||||
|
||||
boff := backoff.NewExponentialBackOff()
|
||||
boff.MaxElapsedTime = 600 * time.Second
|
||||
|
||||
for len(batch) > 0 {
|
||||
var output *dynamodb.BatchWriteItemOutput
|
||||
output, err = d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{
|
||||
RequestItems: batch,
|
||||
})
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if len(output.UnprocessedItems) == 0 {
|
||||
break
|
||||
} else {
|
||||
duration := boff.NextBackOff()
|
||||
if duration != backoff.Stop {
|
||||
batch = output.UnprocessedItems
|
||||
time.Sleep(duration)
|
||||
} else {
|
||||
err = errors.New("dynamodb: timeout handling UnproccessedItems")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d.permitPool.Release()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock tries to acquire the lock by repeatedly trying to create
// a record in the DynamoDB table. It will block until either the
// stop channel is closed or the lock could be acquired successfully.
// The returned channel will be closed once the lock is deleted or
// changed in the DynamoDB table.
func (l *DynamoDBLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
	l.lock.Lock()
	defer l.lock.Unlock()
	if l.held {
		return nil, fmt.Errorf("lock already held")
	}

	done := make(chan struct{})
	// close done channel even in case of error
	// NOTE(review): `done` is never returned or read anywhere else in this
	// function — it looks vestigial; confirm before removing.
	defer func() {
		if retErr != nil {
			close(done)
		}
	}()

	var (
		stop    = make(chan struct{})
		success = make(chan struct{})
		errors  = make(chan error)
		leader  = make(chan struct{})
	)
	// try to acquire the lock asynchronously
	go l.tryToLock(stop, success, errors)

	select {
	case <-success:
		l.held = true
		// after acquiring it successfully, we must renew the lock periodically,
		// and watch the lock in order to close the leader channel
		// once it is lost.
		go l.periodicallyRenewLock(leader)
		go l.watch(leader)
	case retErr = <-errors:
		// tryToLock reported a fatal error; tell it to stop and fail the call.
		close(stop)
		return nil, retErr
	case <-stopCh:
		// Caller gave up waiting; (nil, nil) signals "not acquired, no error".
		close(stop)
		return nil, nil
	}

	return leader, retErr
}
|
||||
|
||||
// Unlock releases the lock by deleting the lock record from the
|
||||
// DynamoDB table.
|
||||
func (l *DynamoDBLock) Unlock() error {
|
||||
l.lock.Lock()
|
||||
defer l.lock.Unlock()
|
||||
if !l.held {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.held = false
|
||||
|
||||
// Conditionally delete after check that the key is actually this Vault's and
|
||||
// not been already claimed by another leader
|
||||
condition := "#identity = :identity"
|
||||
deleteMyLock := &dynamodb.DeleteItemInput{
|
||||
TableName: &l.backend.table,
|
||||
ConditionExpression: &condition,
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"Path": {S: aws.String(recordPathForVaultKey(l.key))},
|
||||
"Key": {S: aws.String(recordKeyForVaultKey(l.key))},
|
||||
},
|
||||
ExpressionAttributeNames: map[string]*string{
|
||||
"#identity": aws.String("Identity"),
|
||||
},
|
||||
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
|
||||
":identity": {B: []byte(l.identity)},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := l.backend.client.DeleteItem(deleteMyLock)
|
||||
if isConditionCheckFailed(err) {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Value checks whether or not the lock is held by any instance of DynamoDBLock,
|
||||
// including this one, and returns the current value.
|
||||
func (l *DynamoDBLock) Value() (bool, string, error) {
|
||||
entry, err := l.backend.Get(context.Background(), l.key)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
if entry == nil {
|
||||
return false, "", nil
|
||||
}
|
||||
|
||||
return true, string(entry.Value), nil
|
||||
}
|
||||
|
||||
// tryToLock tries to create a new item in DynamoDB
// every `DynamoDBLockRetryInterval`. As long as the item
// cannot be created (because it already exists), it will
// be retried. If the operation fails due to an error, it
// is sent to the errors channel.
// When the lock could be acquired successfully, the success
// channel is closed.
func (l *DynamoDBLock) tryToLock(stop, success chan struct{}, errors chan error) {
	ticker := time.NewTicker(DynamoDBLockRetryInterval)
	defer ticker.Stop()

	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			// updateItem(true) performs a conditional create; it fails with a
			// condition-check error while another node holds the lock.
			err := l.updateItem(true)
			if err != nil {
				if err, ok := err.(awserr.Error); ok {
					// Don't report a condition check failure, this means that the lock
					// is already being held.
					if !isConditionCheckFailed(err) {
						errors <- err
					}
					// NOTE(review): other AWS errors are reported but the loop
					// keeps retrying; the receiver (Lock) closes `stop` after
					// the first error, which ends this goroutine — confirm.
				} else {
					// Its not an AWS error, and is probably not transient, bail out.
					errors <- err
					return
				}
			} else {
				// Conditional create succeeded: we own the lock.
				close(success)
				return
			}
		}
	}
}
|
||||
|
||||
func (l *DynamoDBLock) periodicallyRenewLock(done chan struct{}) {
|
||||
ticker := time.NewTicker(l.renewInterval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// This should not renew the lock if the lock was deleted from under you.
|
||||
err := l.updateItem(false)
|
||||
if err != nil {
|
||||
if !isConditionCheckFailed(err) {
|
||||
l.backend.logger.Error("error renewing leadership lock", "error", err)
|
||||
}
|
||||
}
|
||||
case <-done:
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempts to put/update the dynamodb item using condition expressions to
|
||||
// evaluate the TTL.
|
||||
func (l *DynamoDBLock) updateItem(createIfMissing bool) error {
|
||||
now := time.Now()
|
||||
|
||||
conditionExpression := ""
|
||||
if createIfMissing {
|
||||
conditionExpression += "attribute_not_exists(#path) or " +
|
||||
"attribute_not_exists(#key) or "
|
||||
} else {
|
||||
conditionExpression += "attribute_exists(#path) and " +
|
||||
"attribute_exists(#key) and "
|
||||
}
|
||||
|
||||
// To work when upgrading from older versions that did not include the
|
||||
// Identity attribute, we first check if the attr doesn't exist, and if
|
||||
// it does, then we check if the identity is equal to our own.
|
||||
// We also write if the lock expired.
|
||||
conditionExpression += "(attribute_not_exists(#identity) or #identity = :identity or #expires <= :now)"
|
||||
|
||||
_, err := l.backend.client.UpdateItem(&dynamodb.UpdateItemInput{
|
||||
TableName: aws.String(l.backend.table),
|
||||
Key: map[string]*dynamodb.AttributeValue{
|
||||
"Path": {S: aws.String(recordPathForVaultKey(l.key))},
|
||||
"Key": {S: aws.String(recordKeyForVaultKey(l.key))},
|
||||
},
|
||||
UpdateExpression: aws.String("SET #value=:value, #identity=:identity, #expires=:expires"),
|
||||
// If both key and path already exist, we can only write if
|
||||
// A. identity is equal to our identity (or the identity doesn't exist)
|
||||
// or
|
||||
// B. The ttl on the item is <= to the current time
|
||||
ConditionExpression: aws.String(conditionExpression),
|
||||
ExpressionAttributeNames: map[string]*string{
|
||||
"#path": aws.String("Path"),
|
||||
"#key": aws.String("Key"),
|
||||
"#identity": aws.String("Identity"),
|
||||
"#expires": aws.String("Expires"),
|
||||
"#value": aws.String("Value"),
|
||||
},
|
||||
ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
|
||||
":identity": {B: []byte(l.identity)},
|
||||
":value": {B: []byte(l.value)},
|
||||
":now": {N: aws.String(strconv.FormatInt(now.UnixNano(), 10))},
|
||||
":expires": {N: aws.String(strconv.FormatInt(now.Add(l.ttl).UnixNano(), 10))},
|
||||
},
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// watch checks whether the lock has changed in the
// DynamoDB table and closes the leader channel if so.
// The interval is set by `DynamoDBWatchRetryInterval`.
// If an error occurs during the check, watch will retry
// the operation for `DynamoDBWatchRetryMax` times and
// close the leader channel if it can't succeed.
func (l *DynamoDBLock) watch(lost chan struct{}) {
	retries := DynamoDBWatchRetryMax

	// NOTE(review): the ticker is never stopped; this goroutine only ends
	// when leadership is lost, so the leak is bounded — confirm acceptable.
	ticker := time.NewTicker(l.watchRetryInterval)
WatchLoop:
	for {
		select {
		case <-ticker.C:
			resp, err := l.backend.client.GetItem(&dynamodb.GetItemInput{
				TableName:      aws.String(l.backend.table),
				ConsistentRead: aws.Bool(true),
				Key: map[string]*dynamodb.AttributeValue{
					"Path": {S: aws.String(recordPathForVaultKey(l.key))},
					"Key":  {S: aws.String(recordKeyForVaultKey(l.key))},
				},
			})
			if err != nil {
				retries--
				if retries == 0 {
					break WatchLoop
				}
				// `continue` skips the retry-counter reset below, so only
				// consecutive failures exhaust the retry budget.
				continue
			}

			if resp == nil {
				break WatchLoop
			}
			record := &DynamoDBLockRecord{}
			err = dynamodbattribute.UnmarshalMap(resp.Item, record)
			// A missing record unmarshals to a zero Identity, so both deletion
			// and takeover by another node end up in the mismatch branch.
			if err != nil || string(record.Identity) != l.identity {
				break WatchLoop
			}
		}
		// A successful check resets the retry budget.
		retries = DynamoDBWatchRetryMax
	}

	// Leadership lost (or watch gave up): signal the leader channel.
	close(lost)
}
|
||||
|
||||
// ensureTableExists creates a DynamoDB table with a given
|
||||
// DynamoDB client. If the table already exists, it is not
|
||||
// being reconfigured.
|
||||
func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, writeCapacity int) error {
|
||||
_, err := client.DescribeTable(&dynamodb.DescribeTableInput{
|
||||
TableName: aws.String(table),
|
||||
})
|
||||
if err != nil {
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
if awsError.Code() == "ResourceNotFoundException" {
|
||||
_, err := client.CreateTable(&dynamodb.CreateTableInput{
|
||||
TableName: aws.String(table),
|
||||
ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
|
||||
ReadCapacityUnits: aws.Int64(int64(readCapacity)),
|
||||
WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
|
||||
},
|
||||
KeySchema: []*dynamodb.KeySchemaElement{{
|
||||
AttributeName: aws.String("Path"),
|
||||
KeyType: aws.String("HASH"),
|
||||
}, {
|
||||
AttributeName: aws.String("Key"),
|
||||
KeyType: aws.String("RANGE"),
|
||||
}},
|
||||
AttributeDefinitions: []*dynamodb.AttributeDefinition{{
|
||||
AttributeName: aws.String("Path"),
|
||||
AttributeType: aws.String("S"),
|
||||
}, {
|
||||
AttributeName: aws.String("Key"),
|
||||
AttributeType: aws.String("S"),
|
||||
}},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
|
||||
TableName: aws.String(table),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// table created successfully
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// recordPathForVaultKey transforms a vault key into
|
||||
// a value suitable for the `DynamoDBRecord`'s `Path`
|
||||
// property. This path equals the vault key without
|
||||
// its last component.
|
||||
func recordPathForVaultKey(key string) string {
|
||||
if strings.Contains(key, "/") {
|
||||
return pkgPath.Dir(key)
|
||||
}
|
||||
return DynamoDBEmptyPath
|
||||
}
|
||||
|
||||
// recordKeyForVaultKey transforms a vault key into
// a value suitable for the `DynamoDBRecord`'s `Key`
// property. This path equals the vault key's
// last component.
func recordKeyForVaultKey(key string) string {
	return pkgPath.Base(key)
}
|
||||
|
||||
// vaultKey returns the vault key for a given record
|
||||
// from the DynamoDB table. This is the combination of
|
||||
// the records Path and Key.
|
||||
func vaultKey(record *DynamoDBRecord) string {
|
||||
path := unescapeEmptyPath(record.Path)
|
||||
if path == "" {
|
||||
return record.Key
|
||||
}
|
||||
return pkgPath.Join(record.Path, record.Key)
|
||||
}
|
||||
|
||||
// escapeEmptyPath is used to escape the root key's path
|
||||
// with a value that can be stored in DynamoDB. DynamoDB
|
||||
// does not allow values to be empty strings.
|
||||
func escapeEmptyPath(s string) string {
|
||||
if s == "" {
|
||||
return DynamoDBEmptyPath
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// unescapeEmptyPath is the opposite of `escapeEmptyPath`.
|
||||
func unescapeEmptyPath(s string) string {
|
||||
if s == DynamoDBEmptyPath {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// isConditionCheckFailed tests whether err is an ErrCodeConditionalCheckFailedException
|
||||
// from the AWS SDK.
|
||||
func isConditionCheckFailed(err error) bool {
|
||||
if err != nil {
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
return err.Code() == dynamodb.ErrCodeConditionalCheckFailedException
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
@ -1,421 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package dynamodb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/sdk/helper/docker"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
|
||||
)
|
||||
|
||||
func TestDynamoDBBackend(t *testing.T) {
|
||||
cleanup, svccfg := prepareDynamoDBTestContainer(t)
|
||||
defer cleanup()
|
||||
|
||||
creds, err := svccfg.Credentials.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
region := os.Getenv("AWS_DEFAULT_REGION")
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
awsSession, err := session.NewSession(&aws.Config{
|
||||
Credentials: svccfg.Credentials,
|
||||
Endpoint: aws.String(svccfg.URL().String()),
|
||||
Region: aws.String(region),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
conn := dynamodb.New(awsSession)
|
||||
|
||||
randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
|
||||
table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)
|
||||
|
||||
defer func() {
|
||||
conn.DeleteTable(&dynamodb.DeleteTableInput{
|
||||
TableName: aws.String(table),
|
||||
})
|
||||
}()
|
||||
|
||||
logger := logging.NewVaultLogger(log.Debug)
|
||||
|
||||
b, err := NewDynamoDBBackend(map[string]string{
|
||||
"access_key": creds.AccessKeyID,
|
||||
"secret_key": creds.SecretAccessKey,
|
||||
"session_token": creds.SessionToken,
|
||||
"table": table,
|
||||
"region": region,
|
||||
"endpoint": svccfg.URL().String(),
|
||||
}, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
physical.ExerciseBackend(t, b)
|
||||
physical.ExerciseBackend_ListPrefix(t, b)
|
||||
|
||||
t.Run("Marshalling upgrade", func(t *testing.T) {
|
||||
path := "test_key"
|
||||
|
||||
// Manually write to DynamoDB using the old ConvertTo function
|
||||
// for marshalling data
|
||||
inputEntry := &physical.Entry{
|
||||
Key: path,
|
||||
Value: []byte{0x0f, 0xcf, 0x4a, 0x0f, 0xba, 0x2b, 0x15, 0xf0, 0xaa, 0x75, 0x09},
|
||||
}
|
||||
|
||||
record := DynamoDBRecord{
|
||||
Path: recordPathForVaultKey(inputEntry.Key),
|
||||
Key: recordKeyForVaultKey(inputEntry.Key),
|
||||
Value: inputEntry.Value,
|
||||
}
|
||||
|
||||
item, err := dynamodbattribute.ConvertToMap(record)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
request := &dynamodb.PutItemInput{
|
||||
Item: item,
|
||||
TableName: &table,
|
||||
}
|
||||
conn.PutItem(request)
|
||||
|
||||
// Read back the data using the normal interface which should
|
||||
// handle the old marshalling format gracefully
|
||||
entry, err := b.Get(context.Background(), path)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if diff := deep.Equal(inputEntry, entry); diff != nil {
|
||||
t.Fatal(diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestDynamoDBHABackend exercises the HA (leader-election) contract with two
// backend instances sharing one table, then runs the TTL and renewal
// sub-scenarios.
func TestDynamoDBHABackend(t *testing.T) {
	cleanup, svccfg := prepareDynamoDBTestContainer(t)
	defer cleanup()

	creds, err := svccfg.Credentials.Get()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	region := os.Getenv("AWS_DEFAULT_REGION")
	if region == "" {
		region = "us-east-1"
	}

	awsSession, err := session.NewSession(&aws.Config{
		Credentials: svccfg.Credentials,
		Endpoint:    aws.String(svccfg.URL().String()),
		Region:      aws.String(region),
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	conn := dynamodb.New(awsSession)

	// Randomized table name so concurrent runs don't collide.
	randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
	table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt)

	defer func() {
		// Best-effort cleanup; errors are deliberately ignored.
		conn.DeleteTable(&dynamodb.DeleteTableInput{
			TableName: aws.String(table),
		})
	}()

	logger := logging.NewVaultLogger(log.Debug)
	config := map[string]string{
		"access_key":    creds.AccessKeyID,
		"secret_key":    creds.SecretAccessKey,
		"session_token": creds.SessionToken,
		"table":         table,
		"region":        region,
		"endpoint":      svccfg.URL().String(),
	}

	// Two independent backends over the same table simulate two Vault nodes.
	b, err := NewDynamoDBBackend(config, logger)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	b2, err := NewDynamoDBBackend(config, logger)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
	testDynamoDBLockTTL(t, b.(physical.HABackend))
	testDynamoDBLockRenewal(t, b.(physical.HABackend))
}
|
||||
|
||||
// Similar to testHABackend, but using internal implementation details to
|
||||
// trigger the lock failure scenario by setting the lock renew period for one
|
||||
// of the locks to a higher value than the lock TTL.
|
||||
func testDynamoDBLockTTL(t *testing.T, ha physical.HABackend) {
|
||||
// Set much smaller lock times to speed up the test.
|
||||
lockTTL := time.Second * 3
|
||||
renewInterval := time.Second * 1
|
||||
watchInterval := time.Second * 1
|
||||
|
||||
// Get the lock
|
||||
origLock, err := ha.LockWith("dynamodbttl", "bar")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
// set the first lock renew period to double the expected TTL.
|
||||
lock := origLock.(*DynamoDBLock)
|
||||
lock.renewInterval = lockTTL * 2
|
||||
lock.ttl = lockTTL
|
||||
lock.watchRetryInterval = watchInterval
|
||||
|
||||
// Attempt to lock
|
||||
leaderCh, err := lock.Lock(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if leaderCh == nil {
|
||||
t.Fatalf("failed to get leader ch")
|
||||
}
|
||||
|
||||
// Check the value
|
||||
held, val, err := lock.Value()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !held {
|
||||
t.Fatalf("should be held")
|
||||
}
|
||||
if val != "bar" {
|
||||
t.Fatalf("bad value: %v", err)
|
||||
}
|
||||
|
||||
// Second acquisition should succeed because the first lock should
|
||||
// not renew within the 3 sec TTL.
|
||||
origLock2, err := ha.LockWith("dynamodbttl", "baz")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
lock2 := origLock2.(*DynamoDBLock)
|
||||
lock2.renewInterval = renewInterval
|
||||
lock2.ttl = lockTTL
|
||||
lock2.watchRetryInterval = watchInterval
|
||||
|
||||
// Cancel attempt eventually so as not to block unit tests forever
|
||||
stopCh := make(chan struct{})
|
||||
time.AfterFunc(lockTTL*10, func() {
|
||||
close(stopCh)
|
||||
})
|
||||
|
||||
// Attempt to lock should work
|
||||
leaderCh2, err := lock2.Lock(stopCh)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if leaderCh2 == nil {
|
||||
t.Fatalf("should get leader ch")
|
||||
}
|
||||
|
||||
// Check the value
|
||||
held, val, err = lock2.Value()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if !held {
|
||||
t.Fatalf("should be held")
|
||||
}
|
||||
if val != "baz" {
|
||||
t.Fatalf("bad value: %v", err)
|
||||
}
|
||||
|
||||
// The first lock should have lost the leader channel
|
||||
leaderChClosed := false
|
||||
blocking := make(chan struct{})
|
||||
// Attempt to read from the leader or the blocking channel, which ever one
|
||||
// happens first.
|
||||
go func() {
|
||||
select {
|
||||
case <-time.After(watchInterval * 3):
|
||||
return
|
||||
case <-leaderCh:
|
||||
leaderChClosed = true
|
||||
close(blocking)
|
||||
case <-blocking:
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
<-blocking
|
||||
if !leaderChClosed {
|
||||
t.Fatalf("original lock did not have its leader channel closed.")
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
lock2.Unlock()
|
||||
}
|
||||
|
||||
// Similar to testHABackend, but using internal implementation details to
// trigger a renewal before a "watch" check, which has been a source of
// race conditions.
func testDynamoDBLockRenewal(t *testing.T, ha physical.HABackend) {
	renewInterval := time.Second * 1
	watchInterval := time.Second * 5

	// Get the lock
	origLock, err := ha.LockWith("dynamodbrenewal", "bar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// customize the renewal and watch intervals
	lock := origLock.(*DynamoDBLock)
	lock.renewInterval = renewInterval
	lock.watchRetryInterval = watchInterval

	// Attempt to lock
	leaderCh, err := lock.Lock(nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if leaderCh == nil {
		t.Fatalf("failed to get leader ch")
	}

	// Check the value
	held, val, err := lock.Value()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !held {
		t.Fatalf("should be held")
	}
	if val != "bar" {
		t.Fatalf("bad value: %v", err)
	}

	// Release the lock, which will delete the stored item
	if err := lock.Unlock(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait longer than the renewal time, but less than the watch time
	time.Sleep(1500 * time.Millisecond)

	// Attempt to lock with new lock
	newLock, err := ha.LockWith("dynamodbrenewal", "baz")
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Cancel attempt in 6 sec so as not to block unit tests forever
	stopCh := make(chan struct{})
	time.AfterFunc(6*time.Second, func() {
		close(stopCh)
	})

	// Attempt to lock should work
	leaderCh2, err := newLock.Lock(stopCh)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if leaderCh2 == nil {
		t.Fatalf("should get leader ch")
	}

	// Check the value
	held, val, err = newLock.Value()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !held {
		t.Fatalf("should be held")
	}
	if val != "baz" {
		t.Fatalf("bad value: %v", err)
	}

	// Cleanup
	newLock.Unlock()
}
|
||||
|
||||
// Config describes how the tests reach the DynamoDB endpoint under test:
// the service URL plus the AWS credentials to present.
type Config struct {
	docker.ServiceURL
	Credentials *credentials.Credentials
}

// Compile-time check that *Config satisfies docker.ServiceConfig.
var _ docker.ServiceConfig = &Config{}
|
||||
|
||||
// prepareDynamoDBTestContainer returns a cleanup func and connection config
// for a DynamoDB endpoint: either a caller-supplied one (AWS_DYNAMODB_ENDPOINT)
// or a freshly started DynamoDB Local docker container.
func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) {
	// If environment variable is set, assume caller wants to target a real
	// DynamoDB.
	if endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT"); endpoint != "" {
		s, err := docker.NewServiceURLParse(endpoint)
		if err != nil {
			t.Fatal(err)
		}
		// Nothing to clean up when targeting an external endpoint.
		return func() {}, &Config{*s, credentials.NewEnvCredentials()}
	}

	runner, err := docker.NewServiceRunner(docker.RunOptions{
		ImageRepo:     "docker.mirror.hashicorp.services/cnadiminti/dynamodb-local",
		ImageTag:      "latest",
		ContainerName: "dynamodb",
		Ports:         []string{"8000/tcp"},
	})
	if err != nil {
		t.Fatalf("Could not start local DynamoDB: %s", err)
	}

	// StartService polls connectDynamoDB until the container answers.
	svc, err := runner.StartService(context.Background(), connectDynamoDB)
	if err != nil {
		t.Fatalf("Could not start local DynamoDB: %s", err)
	}

	return svc.Cleanup, svc.Config.(*Config)
}
|
||||
|
||||
func connectDynamoDB(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
|
||||
u := url.URL{
|
||||
Scheme: "http",
|
||||
Host: fmt.Sprintf("%s:%d", host, port),
|
||||
}
|
||||
resp, err := http.Get(u.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != 400 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Config{
|
||||
ServiceURL: *docker.NewServiceURL(u),
|
||||
Credentials: credentials.NewStaticCredentials("fake", "fake", ""),
|
||||
}, nil
|
||||
}
|
@ -1,325 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/go-secure-stdlib/parseutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/consts"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
)
|
||||
|
||||
// Verify S3Backend satisfies the correct interfaces
var _ physical.Backend = (*S3Backend)(nil)

// S3Backend is a physical backend that stores data
// within an S3 bucket.
type S3Backend struct {
	bucket     string // target bucket name
	path       string // key prefix under which all entries are stored
	kmsKeyId   string // optional KMS key id for server-side encryption; "" disables it
	client     *s3.S3
	logger     log.Logger
	permitPool *physical.PermitPool // bounds the number of concurrent S3 calls
}
|
||||
|
||||
// NewS3Backend constructs a S3 backend using a pre-existing
|
||||
// bucket. Credentials can be provided to the backend, sourced
|
||||
// from the environment, AWS credential files or by IAM role.
|
||||
func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
|
||||
bucket := os.Getenv("AWS_S3_BUCKET")
|
||||
if bucket == "" {
|
||||
bucket = conf["bucket"]
|
||||
if bucket == "" {
|
||||
return nil, fmt.Errorf("'bucket' must be set")
|
||||
}
|
||||
}
|
||||
|
||||
path := conf["path"]
|
||||
|
||||
accessKey, ok := conf["access_key"]
|
||||
if !ok {
|
||||
accessKey = ""
|
||||
}
|
||||
secretKey, ok := conf["secret_key"]
|
||||
if !ok {
|
||||
secretKey = ""
|
||||
}
|
||||
sessionToken, ok := conf["session_token"]
|
||||
if !ok {
|
||||
sessionToken = ""
|
||||
}
|
||||
endpoint := os.Getenv("AWS_S3_ENDPOINT")
|
||||
if endpoint == "" {
|
||||
endpoint = conf["endpoint"]
|
||||
}
|
||||
region := os.Getenv("AWS_REGION")
|
||||
if region == "" {
|
||||
region = os.Getenv("AWS_DEFAULT_REGION")
|
||||
if region == "" {
|
||||
region = conf["region"]
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
}
|
||||
}
|
||||
s3ForcePathStyleStr, ok := conf["s3_force_path_style"]
|
||||
if !ok {
|
||||
s3ForcePathStyleStr = "false"
|
||||
}
|
||||
s3ForcePathStyleBool, err := parseutil.ParseBool(s3ForcePathStyleStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid boolean set for s3_force_path_style: %q", s3ForcePathStyleStr)
|
||||
}
|
||||
disableSSLStr, ok := conf["disable_ssl"]
|
||||
if !ok {
|
||||
disableSSLStr = "false"
|
||||
}
|
||||
disableSSLBool, err := parseutil.ParseBool(disableSSLStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid boolean set for disable_ssl: %q", disableSSLStr)
|
||||
}
|
||||
|
||||
credsConfig := &awsutil.CredentialsConfig{
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
SessionToken: sessionToken,
|
||||
Logger: logger,
|
||||
}
|
||||
creds, err := credsConfig.GenerateCredentialChain()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pooledTransport := cleanhttp.DefaultPooledTransport()
|
||||
pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
|
||||
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Credentials: creds,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: pooledTransport,
|
||||
},
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(region),
|
||||
S3ForcePathStyle: aws.Bool(s3ForcePathStyleBool),
|
||||
DisableSSL: aws.Bool(disableSSLBool),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s3conn := s3.New(sess)
|
||||
|
||||
_, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to access bucket %q in region %q: %w", bucket, region, err)
|
||||
}
|
||||
|
||||
maxParStr, ok := conf["max_parallel"]
|
||||
var maxParInt int
|
||||
if ok {
|
||||
maxParInt, err = strconv.Atoi(maxParStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
|
||||
}
|
||||
if logger.IsDebug() {
|
||||
logger.Debug("max_parallel set", "max_parallel", maxParInt)
|
||||
}
|
||||
}
|
||||
|
||||
kmsKeyId, ok := conf["kms_key_id"]
|
||||
if !ok {
|
||||
kmsKeyId = ""
|
||||
}
|
||||
|
||||
s := &S3Backend{
|
||||
client: s3conn,
|
||||
bucket: bucket,
|
||||
path: path,
|
||||
kmsKeyId: kmsKeyId,
|
||||
logger: logger,
|
||||
permitPool: physical.NewPermitPool(maxParInt),
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Put is used to insert or update an entry
|
||||
func (s *S3Backend) Put(ctx context.Context, entry *physical.Entry) error {
|
||||
defer metrics.MeasureSince([]string{"s3", "put"}, time.Now())
|
||||
|
||||
s.permitPool.Acquire()
|
||||
defer s.permitPool.Release()
|
||||
|
||||
// Setup key
|
||||
key := path.Join(s.path, entry.Key)
|
||||
|
||||
putObjectInput := &s3.PutObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(key),
|
||||
Body: bytes.NewReader(entry.Value),
|
||||
}
|
||||
|
||||
if s.kmsKeyId != "" {
|
||||
putObjectInput.ServerSideEncryption = aws.String("aws:kms")
|
||||
putObjectInput.SSEKMSKeyId = aws.String(s.kmsKeyId)
|
||||
}
|
||||
|
||||
_, err := s.client.PutObject(putObjectInput)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get is used to fetch an entry
|
||||
func (s *S3Backend) Get(ctx context.Context, key string) (*physical.Entry, error) {
|
||||
defer metrics.MeasureSince([]string{"s3", "get"}, time.Now())
|
||||
|
||||
s.permitPool.Acquire()
|
||||
defer s.permitPool.Release()
|
||||
|
||||
// Setup key
|
||||
key = path.Join(s.path, key)
|
||||
|
||||
resp, err := s.client.GetObject(&s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if resp != nil && resp.Body != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if awsErr, ok := err.(awserr.RequestFailure); ok {
|
||||
// Return nil on 404s, error on anything else
|
||||
if awsErr.StatusCode() == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, fmt.Errorf("got nil response from S3 but no error")
|
||||
}
|
||||
|
||||
data := bytes.NewBuffer(nil)
|
||||
if resp.ContentLength != nil {
|
||||
data = bytes.NewBuffer(make([]byte, 0, *resp.ContentLength))
|
||||
}
|
||||
_, err = io.Copy(data, resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Strip path prefix
|
||||
if s.path != "" {
|
||||
key = strings.TrimPrefix(key, s.path+"/")
|
||||
}
|
||||
|
||||
ent := &physical.Entry{
|
||||
Key: key,
|
||||
Value: data.Bytes(),
|
||||
}
|
||||
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
// Delete is used to permanently delete an entry
|
||||
func (s *S3Backend) Delete(ctx context.Context, key string) error {
|
||||
defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now())
|
||||
|
||||
s.permitPool.Acquire()
|
||||
defer s.permitPool.Release()
|
||||
|
||||
// Setup key
|
||||
key = path.Join(s.path, key)
|
||||
|
||||
_, err := s.client.DeleteObject(&s3.DeleteObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// List is used to list all the keys under a given
|
||||
// prefix, up to the next prefix.
|
||||
func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) {
|
||||
defer metrics.MeasureSince([]string{"s3", "list"}, time.Now())
|
||||
|
||||
s.permitPool.Acquire()
|
||||
defer s.permitPool.Release()
|
||||
|
||||
// Setup prefix
|
||||
prefix = path.Join(s.path, prefix)
|
||||
|
||||
// Validate prefix (if present) is ending with a "/"
|
||||
if prefix != "" && !strings.HasSuffix(prefix, "/") {
|
||||
prefix += "/"
|
||||
}
|
||||
|
||||
params := &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Prefix: aws.String(prefix),
|
||||
Delimiter: aws.String("/"),
|
||||
}
|
||||
|
||||
keys := []string{}
|
||||
|
||||
err := s.client.ListObjectsV2Pages(params,
|
||||
func(page *s3.ListObjectsV2Output, lastPage bool) bool {
|
||||
if page != nil {
|
||||
// Add truncated 'folder' paths
|
||||
for _, commonPrefix := range page.CommonPrefixes {
|
||||
// Avoid panic
|
||||
if commonPrefix == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix)
|
||||
keys = append(keys, commonPrefix)
|
||||
}
|
||||
// Add objects only from the current 'folder'
|
||||
for _, key := range page.Contents {
|
||||
// Avoid panic
|
||||
if key == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimPrefix(*key.Key, prefix)
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
return keys, nil
|
||||
}
|
@ -1,137 +0,0 @@
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-secure-stdlib/awsutil"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/physical"
|
||||
)
|
||||
|
||||
func TestDefaultS3Backend(t *testing.T) {
|
||||
DoS3BackendTest(t, "")
|
||||
}
|
||||
|
||||
func TestS3BackendSseKms(t *testing.T) {
|
||||
DoS3BackendTest(t, "alias/aws/s3")
|
||||
}
|
||||
|
||||
func DoS3BackendTest(t *testing.T, kmsKeyId string) {
|
||||
if enabled := os.Getenv("VAULT_ACC"); enabled == "" {
|
||||
t.Skip()
|
||||
}
|
||||
|
||||
if !hasAWSCredentials() {
|
||||
t.Skip("Skipping because AWS credentials could not be resolved. See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.")
|
||||
}
|
||||
|
||||
logger := logging.NewVaultLogger(log.Debug)
|
||||
|
||||
credsConfig := &awsutil.CredentialsConfig{Logger: logger}
|
||||
|
||||
credsChain, err := credsConfig.GenerateCredentialChain()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = credsChain.Get()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// If the variable is empty or doesn't exist, the default
|
||||
// AWS endpoints will be used
|
||||
endpoint := os.Getenv("AWS_S3_ENDPOINT")
|
||||
|
||||
region := os.Getenv("AWS_DEFAULT_REGION")
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Credentials: credsChain,
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(region),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s3conn := s3.New(sess)
|
||||
|
||||
randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
|
||||
bucket := fmt.Sprintf("vault-s3-testacc-%d", randInt)
|
||||
|
||||
_, err = s3conn.CreateBucket(&s3.CreateBucketInput{
|
||||
Bucket: aws.String(bucket),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create test bucket: %s", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Gotta list all the objects and delete them
|
||||
// before being able to delete the bucket
|
||||
listResp, _ := s3conn.ListObjects(&s3.ListObjectsInput{
|
||||
Bucket: aws.String(bucket),
|
||||
})
|
||||
|
||||
objects := &s3.Delete{}
|
||||
for _, key := range listResp.Contents {
|
||||
oi := &s3.ObjectIdentifier{Key: key.Key}
|
||||
objects.Objects = append(objects.Objects, oi)
|
||||
}
|
||||
|
||||
s3conn.DeleteObjects(&s3.DeleteObjectsInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Delete: objects,
|
||||
})
|
||||
|
||||
_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucket)})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// This uses the same logic to find the AWS credentials as we did at the beginning of the test
|
||||
b, err := NewS3Backend(map[string]string{
|
||||
"bucket": bucket,
|
||||
"kmsKeyId": kmsKeyId,
|
||||
"path": "test/vault",
|
||||
}, logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
physical.ExerciseBackend(t, b)
|
||||
physical.ExerciseBackend_ListPrefix(t, b)
|
||||
}
|
||||
|
||||
func hasAWSCredentials() bool {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cfg, err := config.LoadDefaultConfig(ctx)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
creds, err := cfg.Credentials.Retrieve(ctx)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return creds.HasKeys()
|
||||
}
|
@ -51,7 +51,6 @@ echo "Mounting all builtin plugins ..."
|
||||
|
||||
# Enable auth plugins
|
||||
vault auth enable "approle"
|
||||
vault auth enable "aws"
|
||||
vault auth enable "centrify"
|
||||
vault auth enable "cert"
|
||||
vault auth enable "cf"
|
||||
@ -67,7 +66,6 @@ vault auth enable "radius"
|
||||
vault auth enable "userpass"
|
||||
|
||||
# Enable secrets plugins
|
||||
vault secrets enable "aws"
|
||||
vault secrets enable "consul"
|
||||
vault secrets enable "database"
|
||||
vault secrets enable "gcp"
|
||||
|
@ -20,10 +20,6 @@ export default ApplicationAdapter.extend({
|
||||
buildURL(modelName, id, snapshot) {
|
||||
const backendId = id ? id : snapshot.belongsTo('backend').id;
|
||||
let url = `${this.namespace}/${backendId}/config`;
|
||||
// aws has a lot more config endpoints
|
||||
if (modelName.includes('aws')) {
|
||||
url = `${url}/${this.pathForType(modelName)}`;
|
||||
}
|
||||
return url;
|
||||
},
|
||||
|
||||
|
@ -1,7 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import AuthConfig from '../_base';
|
||||
export default AuthConfig.extend();
|
@ -1,7 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import AuthConfig from '../_base';
|
||||
export default AuthConfig.extend();
|
@ -1,7 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import AuthConfig from '../_base';
|
||||
export default AuthConfig.extend();
|
@ -1,36 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import ApplicationAdapter from './application';
|
||||
|
||||
export default ApplicationAdapter.extend({
|
||||
createRecord(store, type, snapshot) {
|
||||
const ttl = snapshot.attr('ttl');
|
||||
const roleArn = snapshot.attr('roleArn');
|
||||
const roleType = snapshot.attr('credentialType');
|
||||
let method = 'POST';
|
||||
let options;
|
||||
const data = {};
|
||||
if (roleType === 'iam_user') {
|
||||
method = 'GET';
|
||||
} else {
|
||||
if (ttl !== undefined) {
|
||||
data.ttl = ttl;
|
||||
}
|
||||
if (roleType === 'assumed_role' && roleArn) {
|
||||
data.role_arn = roleArn;
|
||||
}
|
||||
options = data.ttl || data.role_arn ? { data } : {};
|
||||
}
|
||||
const role = snapshot.attr('role');
|
||||
const url = `/v1/${role.backend}/creds/${role.name}`;
|
||||
|
||||
return this.ajax(url, method, options).then((response) => {
|
||||
response.id = snapshot.id;
|
||||
response.modelName = type.modelName;
|
||||
store.pushPayload(type.modelName, response);
|
||||
});
|
||||
},
|
||||
});
|
@ -1,80 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import ApplicationAdapter from './application';
|
||||
import { encodePath } from 'vault/utils/path-encoding-helpers';
|
||||
|
||||
export default ApplicationAdapter.extend({
|
||||
namespace: 'v1',
|
||||
|
||||
createOrUpdate(store, type, snapshot, requestType) {
|
||||
const { name, backend } = snapshot.record;
|
||||
const serializer = store.serializerFor(type.modelName);
|
||||
const data = serializer.serialize(snapshot, requestType);
|
||||
const url = this.urlForRole(backend, name);
|
||||
|
||||
return this.ajax(url, 'POST', { data }).then((resp) => {
|
||||
// Ember data doesn't like 204 responses except for DELETE method
|
||||
const response = resp || { data: {} };
|
||||
response.data.name = name;
|
||||
response.data.backend = name;
|
||||
return response;
|
||||
});
|
||||
},
|
||||
|
||||
createRecord() {
|
||||
return this.createOrUpdate(...arguments);
|
||||
},
|
||||
|
||||
updateRecord() {
|
||||
return this.createOrUpdate(...arguments, 'update');
|
||||
},
|
||||
|
||||
deleteRecord(store, type, snapshot) {
|
||||
const { id } = snapshot;
|
||||
return this.ajax(this.urlForRole(snapshot.record.get('backend'), id), 'DELETE');
|
||||
},
|
||||
|
||||
pathForType() {
|
||||
return 'roles';
|
||||
},
|
||||
|
||||
urlForRole(backend, id) {
|
||||
let url = `${this.buildURL()}/${encodePath(backend)}/roles`;
|
||||
if (id) {
|
||||
url = url + '/' + encodePath(id);
|
||||
}
|
||||
return url;
|
||||
},
|
||||
|
||||
optionsForQuery(id) {
|
||||
const data = {};
|
||||
if (!id) {
|
||||
data['list'] = true;
|
||||
}
|
||||
return { data };
|
||||
},
|
||||
|
||||
fetchByQuery(store, query) {
|
||||
const { id, backend } = query;
|
||||
return this.ajax(this.urlForRole(backend, id), 'GET', this.optionsForQuery(id)).then((resp) => {
|
||||
const data = {
|
||||
id,
|
||||
name: id,
|
||||
backend,
|
||||
};
|
||||
|
||||
return { ...resp, ...data };
|
||||
});
|
||||
},
|
||||
|
||||
query(store, type, query) {
|
||||
return this.fetchByQuery(store, query);
|
||||
},
|
||||
|
||||
queryRecord(store, type, query) {
|
||||
return this.fetchByQuery(store, query);
|
||||
},
|
||||
});
|
@ -98,12 +98,6 @@ export default ApplicationAdapter.extend({
|
||||
},
|
||||
|
||||
queryRecord(store, type, query) {
|
||||
if (query.type === 'aws') {
|
||||
return this.ajax(`/v1/${encodePath(query.backend)}/config/lease`, 'GET').then((resp) => {
|
||||
resp.path = query.backend + '/';
|
||||
return resp;
|
||||
});
|
||||
}
|
||||
return;
|
||||
},
|
||||
|
||||
@ -120,18 +114,6 @@ export default ApplicationAdapter.extend({
|
||||
}
|
||||
},
|
||||
|
||||
saveAWSRoot(store, type, snapshot) {
|
||||
const { data } = snapshot.adapterOptions;
|
||||
const path = encodePath(snapshot.id);
|
||||
return this.ajax(`/v1/${path}/config/root`, 'POST', { data });
|
||||
},
|
||||
|
||||
saveAWSLease(store, type, snapshot) {
|
||||
const { data } = snapshot.adapterOptions;
|
||||
const path = encodePath(snapshot.id);
|
||||
return this.ajax(`/v1/${path}/config/lease`, 'POST', { data });
|
||||
},
|
||||
|
||||
saveZeroAddressConfig(store, type, snapshot) {
|
||||
const path = encodePath(snapshot.id);
|
||||
const roles = store.peekAll('role-ssh').filterBy('zeroAddress').mapBy('id').join(',');
|
||||
|
@ -1,56 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import Component from '@glimmer/component';
|
||||
import { action } from '@ember/object';
|
||||
|
||||
/**
|
||||
* @module ConfigureAwsSecretComponent
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* <ConfigureAwsSecret
|
||||
@model={{model}}
|
||||
@tab={{tab}}
|
||||
@accessKey={{accessKey}}
|
||||
@secretKey={{secretKey}}
|
||||
@region={{region}}
|
||||
@iamEndpoint={{iamEndpoint}}
|
||||
@stsEndpoint={{stsEndpoint}}
|
||||
@saveAWSRoot={{action "save" "saveAWSRoot"}}
|
||||
@saveAWSLease={{action "save" "saveAWSLease"}} />
|
||||
* ```
|
||||
*
|
||||
* @param {object} model - aws secret engine model
|
||||
* @param {string} tab - current tab selection
|
||||
* @param {string} accessKey - AWS access key
|
||||
* @param {string} secretKey - AWS secret key
|
||||
* @param {string} region - AWS region
|
||||
* @param {string} iamEndpoint - IAM endpoint
|
||||
* @param {string} stsEndpoint - Sts endpoint
|
||||
* @param {Function} saveAWSRoot - parent action which saves AWS root credentials
|
||||
* @param {Function} saveAWSLease - parent action which updates AWS lease information
|
||||
*
|
||||
*/
|
||||
export default class ConfigureAwsSecretComponent extends Component {
|
||||
@action
|
||||
saveRootCreds(data, event) {
|
||||
event.preventDefault();
|
||||
this.args.saveAWSRoot(data);
|
||||
}
|
||||
|
||||
@action
|
||||
saveLease(data, event) {
|
||||
event.preventDefault();
|
||||
this.args.saveAWSLease(data);
|
||||
}
|
||||
|
||||
@action
|
||||
handleTtlChange(name, ttlObj) {
|
||||
// lease values cannot be undefined, set to 0 to use default
|
||||
const valueToSet = ttlObj.enabled ? ttlObj.goSafeTimeString : 0;
|
||||
this.args.model.set(name, valueToSet);
|
||||
}
|
||||
}
|
@ -15,11 +15,6 @@ const MODEL_TYPES = {
|
||||
model: 'ssh-otp-credential',
|
||||
title: 'Generate SSH Credentials',
|
||||
},
|
||||
'aws-creds': {
|
||||
model: 'aws-credential',
|
||||
title: 'Generate AWS Credentials',
|
||||
backIsListLink: true,
|
||||
},
|
||||
'pki-issue': {
|
||||
model: 'pki/cert',
|
||||
title: 'Issue Certificate',
|
||||
|
@ -33,7 +33,6 @@ class DistributionData {
|
||||
|
||||
const VALID_TYPES_BY_PROVIDER = {
|
||||
gcpckms: ['aes256-gcm96', 'rsa-2048', 'rsa-3072', 'rsa-4096', 'ecdsa-p256', 'ecdsa-p384', 'ecdsa-p521'],
|
||||
awskms: ['aes256-gcm96'],
|
||||
};
|
||||
export default class KeymgmtDistribute extends Component {
|
||||
@service store;
|
||||
@ -88,9 +87,7 @@ export default class KeymgmtDistribute extends Component {
|
||||
|
||||
get operations() {
|
||||
const pt = this.providerType;
|
||||
if (pt === 'awskms') {
|
||||
return ['encrypt', 'decrypt'];
|
||||
} else if (pt === 'gcpckms') {
|
||||
if (pt === 'gcpckms') {
|
||||
const kt = this.keyModel?.type || '';
|
||||
switch (kt) {
|
||||
case 'aes256-gcm96':
|
||||
|
@ -1,55 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import { isBlank } from '@ember/utils';
|
||||
import { set } from '@ember/object';
|
||||
import RoleEdit from './role-edit';
|
||||
const SHOW_ROUTE = 'vault.cluster.secrets.backend.show';
|
||||
|
||||
export default RoleEdit.extend({
|
||||
actions: {
|
||||
createOrUpdate(type, event) {
|
||||
event.preventDefault();
|
||||
|
||||
// all of the attributes with fieldValue:'id' are called `name`
|
||||
const modelId = this.model.id || this.model.name;
|
||||
// prevent from submitting if there's no key
|
||||
// maybe do something fancier later
|
||||
if (type === 'create' && isBlank(modelId)) {
|
||||
return;
|
||||
}
|
||||
var credential_type = this.model.credential_type;
|
||||
if (credential_type == 'iam_user') {
|
||||
set(this, 'model.role_arns', []);
|
||||
}
|
||||
if (credential_type == 'assumed_role') {
|
||||
set(this, 'model.policy_arns', []);
|
||||
}
|
||||
if (credential_type == 'federation_token') {
|
||||
set(this, 'model.role_arns', []);
|
||||
set(this, 'model.policy_arns', []);
|
||||
}
|
||||
|
||||
var policy_document = this.model.policy_document;
|
||||
if (policy_document == '{}') {
|
||||
set(this, 'model.policy_document', '');
|
||||
}
|
||||
|
||||
this.persist('save', () => {
|
||||
this.hasDataChanges();
|
||||
this.transitionToRoute(SHOW_ROUTE, modelId);
|
||||
});
|
||||
},
|
||||
|
||||
codemirrorUpdated(attr, val, codemirror) {
|
||||
codemirror.performLint();
|
||||
const hasErrors = codemirror.state.lint.marked.length > 0;
|
||||
|
||||
if (!hasErrors) {
|
||||
set(this.model, attr, val);
|
||||
}
|
||||
},
|
||||
},
|
||||
});
|
@ -59,8 +59,6 @@ export default Component.extend({
|
||||
}),
|
||||
actionText: computed('mountSubtype', function () {
|
||||
switch (this.mountSubtype) {
|
||||
case 'aws':
|
||||
return 'Generate credential';
|
||||
case 'ssh':
|
||||
return 'Sign keys';
|
||||
case 'pki':
|
||||
|
@ -8,7 +8,7 @@ import Controller from '@ember/controller';
|
||||
|
||||
export default Controller.extend({
|
||||
isConfigurable: computed('model.type', function () {
|
||||
const configurableEngines = ['aws', 'ssh'];
|
||||
const configurableEngines = ['ssh'];
|
||||
return configurableEngines.includes(this.model.type);
|
||||
}),
|
||||
});
|
||||
|
@ -10,13 +10,6 @@ import Controller from '@ember/controller';
|
||||
const CONFIG_ATTRS = {
|
||||
// ssh
|
||||
configured: false,
|
||||
|
||||
// aws root config
|
||||
iamEndpoint: null,
|
||||
stsEndpoint: null,
|
||||
accessKey: null,
|
||||
secretKey: null,
|
||||
region: '',
|
||||
};
|
||||
|
||||
export default Controller.extend(CONFIG_ATTRS, {
|
||||
|
@ -1,30 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) HashiCorp, Inc.
|
||||
* SPDX-License-Identifier: BUSL-1.1
|
||||
*/
|
||||
|
||||
import { helper as buildHelper } from '@ember/component/helper';
|
||||
|
||||
//list from http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region
|
||||
const REGIONS = [
|
||||
'us-east-1',
|
||||
'us-east-2',
|
||||
'us-west-1',
|
||||
'us-west-2',
|
||||
'ca-central-1',
|
||||
'ap-south-1',
|
||||
'ap-northeast-1',
|
||||
'ap-northeast-2',
|
||||
'ap-southeast-1',
|
||||
'ap-southeast-2',
|
||||
'eu-central-1',
|
||||
'eu-west-1',
|
||||
'eu-west-2',
|
||||
'sa-east-1',
|
||||
];
|
||||
|
||||
export function regions() {
|
||||
return REGIONS.slice(0);
|
||||
}
|
||||
|
||||
export default buildHelper(regions);
|
@ -18,13 +18,6 @@ const MOUNTABLE_AUTH_METHODS = [
|
||||
type: 'approle',
|
||||
category: 'generic',
|
||||
},
|
||||
{
|
||||
displayName: 'AWS',
|
||||
value: 'aws',
|
||||
type: 'aws',
|
||||
category: 'cloud',
|
||||
glyph: 'aws-color',
|
||||
},
|
||||
{
|
||||
displayName: 'Google Cloud',
|
||||
value: 'gcp',
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user