
Compare commits

...

5 Commits

SHA1 Message Date
ba5d1c6734 remove cassandra 2024-07-01 11:03:30 +03:00
4caef23458 remove couchdb 2024-07-01 11:03:30 +03:00
19af08af57 remove etcd 2024-07-01 11:03:30 +03:00
eae51feabb remove swift 2024-07-01 11:03:30 +03:00
42b08e8140 remove zookeeper 2024-07-01 11:03:30 +03:00
34 changed files with 3 additions and 5252 deletions


@@ -10,7 +10,6 @@
/builtin/credential/okta/ @hashicorp/vault-ecosystem
# Secrets engines (pki, ssh, totp and transit omitted)
/builtin/logical/cassandra/ @hashicorp/vault-ecosystem
/builtin/logical/consul/ @hashicorp/vault-ecosystem
/builtin/logical/database/ @hashicorp/vault-ecosystem
/builtin/logical/mysql/ @hashicorp/vault-ecosystem


@@ -307,9 +307,6 @@ mysql-database-plugin:
mysql-legacy-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin
cassandra-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin
influxdb-database-plugin:
@CGO_ENABLED=0 $(GO_CMD) build -o bin/influxdb-database-plugin ./plugins/database/influxdb/influxdb-database-plugin
@@ -366,7 +363,7 @@ ci-copywriteheaders:
cd sdk && $(CURDIR)/scripts/copywrite-exceptions.sh
cd shamir && $(CURDIR)/scripts/copywrite-exceptions.sh
.PHONY: all bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker clean dev
.PHONY: all bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin influxdb-database-plugin postgresql-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker clean dev
.NOTPARALLEL: ember-dist ember-dist-dev


@@ -42,18 +42,13 @@ import (
logicalDb "github.com/hashicorp/vault/builtin/logical/database"
physAerospike "github.com/hashicorp/vault/physical/aerospike"
physCassandra "github.com/hashicorp/vault/physical/cassandra"
physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
physConsul "github.com/hashicorp/vault/physical/consul"
physCouchDB "github.com/hashicorp/vault/physical/couchdb"
physEtcd "github.com/hashicorp/vault/physical/etcd"
physFoundationDB "github.com/hashicorp/vault/physical/foundationdb"
physMySQL "github.com/hashicorp/vault/physical/mysql"
physOCI "github.com/hashicorp/vault/physical/oci"
physPostgreSQL "github.com/hashicorp/vault/physical/postgresql"
physRaft "github.com/hashicorp/vault/physical/raft"
physSwift "github.com/hashicorp/vault/physical/swift"
physZooKeeper "github.com/hashicorp/vault/physical/zookeeper"
physFile "github.com/hashicorp/vault/sdk/physical/file"
physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
@@ -175,12 +170,8 @@ var (
physicalBackends = map[string]physical.Factory{
"aerospike": physAerospike.NewAerospikeBackend,
"cassandra": physCassandra.NewCassandraBackend,
"cockroachdb": physCockroachDB.NewCockroachDBBackend,
"consul": physConsul.NewConsulBackend,
"couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend,
"couchdb": physCouchDB.NewCouchDBBackend,
"etcd": physEtcd.NewEtcdBackend,
"file_transactional": physFile.NewTransactionalFileBackend,
"file": physFile.NewFileBackend,
"foundationdb": physFoundationDB.NewFDBBackend,
@@ -191,9 +182,7 @@ var (
"mysql": physMySQL.NewMySQLBackend,
"oci": physOCI.NewBackend,
"postgresql": physPostgreSQL.NewPostgreSQLBackend,
"swift": physSwift.NewSwiftBackend,
"raft": physRaft.NewRaftBackend,
"zookeeper": physZooKeeper.NewZooKeeperBackend,
}
serviceRegistrations = map[string]sr.Factory{

go.mod (9 changed lines)

@@ -32,7 +32,6 @@ require (
github.com/armon/go-radix v1.0.0
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a
github.com/cenkalti/backoff/v3 v3.2.2
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
@@ -47,8 +46,6 @@ require (
github.com/go-ldap/ldap/v3 v3.4.4
github.com/go-sql-driver/mysql v1.6.0
github.com/go-test/deep v1.1.0
github.com/go-zookeeper/zk v1.0.3
github.com/gocql/gocql v1.0.0
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang/protobuf v1.5.3
github.com/google/go-cmp v0.6.0
@@ -141,7 +138,6 @@ require (
github.com/mitchellh/mapstructure v1.5.0
github.com/mitchellh/reflectwalk v1.0.2
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc
github.com/ncw/swift v1.0.47
github.com/oklog/run v1.1.0
github.com/okta/okta-sdk-golang/v2 v2.12.1
github.com/oracle/oci-go-sdk v24.3.0+incompatible
@@ -161,8 +157,6 @@ require (
github.com/shirou/gopsutil/v3 v3.22.6
github.com/stretchr/testify v1.8.4
go.etcd.io/bbolt v1.3.7
go.etcd.io/etcd/client/pkg/v3 v3.5.7
go.etcd.io/etcd/client/v2 v2.305.5
go.etcd.io/etcd/client/v3 v3.5.7
go.opentelemetry.io/otel v1.22.0
go.opentelemetry.io/otel/sdk v1.22.0
@@ -220,6 +214,7 @@ require (
github.com/bgentry/speakeasy v0.1.0 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -278,7 +273,6 @@ require (
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gophercloud/gophercloud v0.1.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/hashicorp/cronexpr v1.1.1 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect
@@ -366,6 +360,7 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
go.etcd.io/etcd/api/v3 v3.5.7 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
go.mongodb.org/mongo-driver v1.11.6 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect

go.sum (11 changed lines)

@@ -1007,15 +1007,12 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
@@ -1606,8 +1603,6 @@ github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -1641,8 +1636,6 @@ github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c=
github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -1856,8 +1849,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4Zs
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp-forge/bbolt v1.3.8-hc3 h1:iTWR3RDPj0TGChAvJ8QjHFcNFWAUVgNQV73IE6gAX4E=
github.com/hashicorp-forge/bbolt v1.3.8-hc3/go.mod h1:sQBu5UIJ+rcUFU4Fo9rpTHNV935jwmGWS3dQ/MV8810=
github.com/hashicorp/cap v0.3.0 h1:zFzVxuWy78lO6QRLHu/ONkjx/Jh0lpfvPgmpDGri43E=
@@ -2428,7 +2419,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Zjll08WupHyq+Sp0Z/adtyf1cfk3Q8=
github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY=
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
@@ -2910,7 +2900,6 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4
go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI=
go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c=


@@ -31,7 +31,6 @@ import (
logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh"
logicalTotp "github.com/hashicorp/vault/builtin/logical/totp"
logicalTransit "github.com/hashicorp/vault/builtin/logical/transit"
dbCass "github.com/hashicorp/vault/plugins/database/cassandra"
dbInflux "github.com/hashicorp/vault/plugins/database/influxdb"
dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql"
@@ -110,7 +109,6 @@ func newRegistry() *registry {
"mysql-rds-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)},
"mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)},
"cassandra-database-plugin": {Factory: dbCass.New},
"influxdb-database-plugin": {Factory: dbInflux.New},
"postgresql-database-plugin": {Factory: dbPostgres.New},
},
@@ -119,10 +117,6 @@ func newRegistry() *registry {
Factory: logicalAd.Factory,
DeprecationStatus: consts.Deprecated,
},
"cassandra": {
Factory: removedFactory,
DeprecationStatus: consts.Removed,
},
"consul": {Factory: logicalConsul.Factory},
"kubernetes": {Factory: logicalKube.Factory},
"kv": {Factory: logicalKv.Factory},


@@ -1,178 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"testing"
"time"
"github.com/gocql/gocql"
"github.com/hashicorp/vault/sdk/helper/docker"
)
type containerConfig struct {
containerName string
imageName string
version string
copyFromTo map[string]string
env []string
sslOpts *gocql.SslOptions
}
type ContainerOpt func(*containerConfig)
func ContainerName(name string) ContainerOpt {
return func(cfg *containerConfig) {
cfg.containerName = name
}
}
func Image(imageName string, version string) ContainerOpt {
return func(cfg *containerConfig) {
cfg.imageName = imageName
cfg.version = version
// Reset the environment because there's a very good chance the default environment doesn't apply to the
// non-default image being used
cfg.env = nil
}
}
func Version(version string) ContainerOpt {
return func(cfg *containerConfig) {
cfg.version = version
}
}
func CopyFromTo(copyFromTo map[string]string) ContainerOpt {
return func(cfg *containerConfig) {
cfg.copyFromTo = copyFromTo
}
}
func Env(keyValue string) ContainerOpt {
return func(cfg *containerConfig) {
cfg.env = append(cfg.env, keyValue)
}
}
func SslOpts(sslOpts *gocql.SslOptions) ContainerOpt {
return func(cfg *containerConfig) {
cfg.sslOpts = sslOpts
}
}
type Host struct {
Name string
Port string
}
func (h Host) ConnectionURL() string {
return net.JoinHostPort(h.Name, h.Port)
}
func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) {
t.Helper()
if os.Getenv("CASSANDRA_HOSTS") != "" {
host, port, err := net.SplitHostPort(os.Getenv("CASSANDRA_HOSTS"))
if err != nil {
t.Fatalf("Failed to split host & port from CASSANDRA_HOSTS (%s): %s", os.Getenv("CASSANDRA_HOSTS"), err)
}
h := Host{
Name: host,
Port: port,
}
return h, func() {}
}
containerCfg := &containerConfig{
imageName: "docker.mirror.hashicorp.services/library/cassandra",
containerName: "cassandra",
version: "3.11",
env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"},
}
for _, opt := range opts {
opt(containerCfg)
}
copyFromTo := map[string]string{}
for from, to := range containerCfg.copyFromTo {
absFrom, err := filepath.Abs(from)
if err != nil {
t.Fatalf("Unable to get absolute path for file %s", from)
}
copyFromTo[absFrom] = to
}
runOpts := docker.RunOptions{
ContainerName: containerCfg.containerName,
ImageRepo: containerCfg.imageName,
ImageTag: containerCfg.version,
Ports: []string{"9042/tcp"},
CopyFromTo: copyFromTo,
Env: containerCfg.env,
}
runner, err := docker.NewServiceRunner(runOpts)
if err != nil {
t.Fatalf("Could not start docker cassandra: %s", err)
}
svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
cfg := docker.NewServiceHostPort(host, port)
clusterConfig := gocql.NewCluster(cfg.Address())
clusterConfig.Authenticator = gocql.PasswordAuthenticator{
Username: "cassandra",
Password: "cassandra",
}
clusterConfig.Timeout = 30 * time.Second
clusterConfig.ProtoVersion = 4
clusterConfig.Port = port
clusterConfig.SslOpts = containerCfg.sslOpts
session, err := clusterConfig.CreateSession()
if err != nil {
return nil, fmt.Errorf("error creating session: %s", err)
}
defer session.Close()
// Create keyspace
query := session.Query(`CREATE KEYSPACE "vault" WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };`)
if err := query.Exec(); err != nil {
t.Fatalf("could not create cassandra keyspace: %v", err)
}
// Create table
query = session.Query(`CREATE TABLE "vault"."entries" (
bucket text,
key text,
value blob,
PRIMARY KEY (bucket, key)
) WITH CLUSTERING ORDER BY (key ASC);`)
if err := query.Exec(); err != nil {
t.Fatalf("could not create cassandra table: %v", err)
}
return cfg, nil
})
if err != nil {
t.Fatalf("Could not start docker cassandra: %s", err)
}
host, port, err := net.SplitHostPort(svc.Config.Address())
if err != nil {
t.Fatalf("Failed to split host & port from address (%s): %s", svc.Config.Address(), err)
}
h := Host{
Name: host,
Port: port,
}
return h, svc.Cleanup
}


@@ -155,7 +155,6 @@ func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
"mysql-rds-database-plugin",
"mysql-legacy-database-plugin",
"cassandra-database-plugin",
"influxdb-database-plugin",
"postgresql-database-plugin",
}


@@ -1,90 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package etcd
import (
"context"
"fmt"
"net/url"
"os"
"testing"
"time"
"github.com/hashicorp/vault/sdk/helper/docker"
clientv3 "go.etcd.io/etcd/client/v3"
)
type Config struct {
docker.ServiceURL
}
// PrepareTestContainer creates an etcd docker container. If the environment variable
// ETCD_ADDR is set, the tests are executed against the specified address and the etcd
// container is not launched.
func PrepareTestContainer(t *testing.T, version string) (func(), *Config) {
if addr := os.Getenv("ETCD_ADDR"); addr != "" {
url, err := docker.NewServiceURLParse(addr)
if err != nil {
t.Fatal(err)
}
return func() {}, &Config{ServiceURL: *url}
}
// Check https://github.com/etcd-io/etcd/releases for latest releases.
runner, err := docker.NewServiceRunner(docker.RunOptions{
ContainerName: "etcd",
ImageRepo: "gcr.io/etcd-development/etcd",
ImageTag: version,
Cmd: []string{
"/usr/local/bin/etcd",
"--name", "s1",
"--listen-client-urls", "http://0.0.0.0:2379",
"--advertise-client-urls", "http://0.0.0.0:2379",
"--listen-peer-urls", "http://0.0.0.0:2380",
"--initial-advertise-peer-urls", "http://0.0.0.0:2380",
"--initial-cluster", "s1=http://0.0.0.0:2380",
"--initial-cluster-token", "tkn",
"--initial-cluster-state", "new",
"--log-level", "info",
"--logger", "zap",
"--log-outputs", "stderr",
},
Ports: []string{"2379/tcp"},
})
if err != nil {
t.Fatalf("Could not start docker etcd container: %s", err)
}
svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
address := fmt.Sprintf("%s:%d", host, port)
s := docker.NewServiceURL(url.URL{
Scheme: "http",
Host: address,
})
client, err := clientv3.New(clientv3.Config{
Endpoints: []string{address},
DialTimeout: 2 * time.Minute,
})
if err != nil {
return nil, fmt.Errorf("could not connect to etcd container: %w", err)
}
// Enable authentication for the tests.
client.RoleAdd(ctx, "root")
client.UserAdd(ctx, "root", "insecure")
client.UserGrantRole(ctx, "root", "root")
client.AuthEnable(ctx)
client.Close()
return &Config{
ServiceURL: *s,
}, nil
})
if err != nil {
t.Fatalf("Could not start docker etcd container: %s", err)
}
return svc.Cleanup, svc.Config.(*Config)
}


@@ -1,366 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"strconv"
"strings"
"time"
metrics "github.com/armon/go-metrics"
"github.com/gocql/gocql"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/hashicorp/vault/sdk/physical"
)
// CassandraBackend is a physical backend that stores data in Cassandra.
type CassandraBackend struct {
sess *gocql.Session
table string
logger log.Logger
}
// Verify CassandraBackend satisfies the correct interfaces
var _ physical.Backend = (*CassandraBackend)(nil)
// NewCassandraBackend constructs a Cassandra backend using a pre-existing
// keyspace and table.
func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
splitArray := func(v string) []string {
return strings.FieldsFunc(v, func(r rune) bool {
return r == ','
})
}
var (
hosts = splitArray(conf["hosts"])
port = 9042
explicitPort = false
keyspace = conf["keyspace"]
table = conf["table"]
consistency = gocql.LocalQuorum
)
if len(hosts) == 0 {
hosts = []string{"localhost"}
}
for i, hp := range hosts {
h, ps, err := net.SplitHostPort(hp)
if err != nil {
continue
}
p, err := strconv.Atoi(ps)
if err != nil {
return nil, err
}
if explicitPort && p != port {
return nil, fmt.Errorf("all hosts must have the same port")
}
hosts[i], port = h, p
explicitPort = true
}
if keyspace == "" {
keyspace = "vault"
}
if table == "" {
table = "entries"
}
if cs, ok := conf["consistency"]; ok {
switch cs {
case "ANY":
consistency = gocql.Any
case "ONE":
consistency = gocql.One
case "TWO":
consistency = gocql.Two
case "THREE":
consistency = gocql.Three
case "QUORUM":
consistency = gocql.Quorum
case "ALL":
consistency = gocql.All
case "LOCAL_QUORUM":
consistency = gocql.LocalQuorum
case "EACH_QUORUM":
consistency = gocql.EachQuorum
case "LOCAL_ONE":
consistency = gocql.LocalOne
default:
return nil, fmt.Errorf("'consistency' must be one of {ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_QUORUM, EACH_QUORUM, LOCAL_ONE}")
}
}
connectStart := time.Now()
cluster := gocql.NewCluster(hosts...)
cluster.Port = port
cluster.Keyspace = keyspace
if retryCountStr, ok := conf["simple_retry_policy_retries"]; ok {
retryCount, err := strconv.Atoi(retryCountStr)
if err != nil || retryCount <= 0 {
return nil, fmt.Errorf("'simple_retry_policy_retries' must be a positive integer")
}
cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: retryCount}
}
cluster.ProtoVersion = 2
if protoVersionStr, ok := conf["protocol_version"]; ok {
protoVersion, err := strconv.Atoi(protoVersionStr)
if err != nil {
return nil, fmt.Errorf("'protocol_version' must be an integer")
}
cluster.ProtoVersion = protoVersion
}
if username, ok := conf["username"]; ok {
if cluster.ProtoVersion < 2 {
return nil, fmt.Errorf("authentication is not supported with protocol version < 2")
}
authenticator := gocql.PasswordAuthenticator{Username: username}
if password, ok := conf["password"]; ok {
authenticator.Password = password
}
cluster.Authenticator = authenticator
}
if initialConnectionTimeoutStr, ok := conf["initial_connection_timeout"]; ok {
initialConnectionTimeout, err := strconv.Atoi(initialConnectionTimeoutStr)
if err != nil || initialConnectionTimeout <= 0 {
return nil, fmt.Errorf("'initial_connection_timeout' must be a positive integer")
}
cluster.ConnectTimeout = time.Duration(initialConnectionTimeout) * time.Second
}
if connTimeoutStr, ok := conf["connection_timeout"]; ok {
connectionTimeout, err := strconv.Atoi(connTimeoutStr)
if err != nil || connectionTimeout <= 0 {
return nil, fmt.Errorf("'connection_timeout' must be a positive integer")
}
cluster.Timeout = time.Duration(connectionTimeout) * time.Second
}
if err := setupCassandraTLS(conf, cluster); err != nil {
return nil, err
}
sess, err := cluster.CreateSession()
if err != nil {
return nil, err
}
metrics.MeasureSince([]string{"cassandra", "connect"}, connectStart)
sess.SetConsistency(consistency)
impl := &CassandraBackend{
sess: sess,
table: table,
logger: logger,
}
return impl, nil
}
func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) error {
tlsOnStr, ok := conf["tls"]
if !ok {
return nil
}
tlsOn, err := strconv.Atoi(tlsOnStr)
if err != nil {
return fmt.Errorf("'tls' must be an integer (0 or 1)")
}
if tlsOn == 0 {
return nil
}
tlsConfig := &tls.Config{}
if pemBundlePath, ok := conf["pem_bundle_file"]; ok {
pemBundleData, err := ioutil.ReadFile(pemBundlePath)
if err != nil {
return fmt.Errorf("error reading pem bundle from %q: %w", pemBundlePath, err)
}
pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData))
if err != nil {
return fmt.Errorf("error parsing 'pem_bundle': %w", err)
}
tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient)
if err != nil {
return err
}
} else if pemJSONPath, ok := conf["pem_json_file"]; ok {
pemJSONData, err := ioutil.ReadFile(pemJSONPath)
if err != nil {
return fmt.Errorf("error reading json bundle from %q: %w", pemJSONPath, err)
}
pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData))
if err != nil {
return err
}
tlsConfig, err = pemJSON.GetTLSConfig(certutil.TLSClient)
if err != nil {
return err
}
}
if tlsSkipVerifyStr, ok := conf["tls_skip_verify"]; ok {
tlsSkipVerify, err := strconv.Atoi(tlsSkipVerifyStr)
if err != nil {
return fmt.Errorf("'tls_skip_verify' must be an integer (0 or 1)")
}
if tlsSkipVerify == 0 {
tlsConfig.InsecureSkipVerify = false
} else {
tlsConfig.InsecureSkipVerify = true
}
}
if tlsMinVersion, ok := conf["tls_min_version"]; ok {
switch tlsMinVersion {
case "tls10":
tlsConfig.MinVersion = tls.VersionTLS10
case "tls11":
tlsConfig.MinVersion = tls.VersionTLS11
case "tls12":
tlsConfig.MinVersion = tls.VersionTLS12
case "tls13":
tlsConfig.MinVersion = tls.VersionTLS13
default:
return fmt.Errorf("'tls_min_version' must be one of `tls10`, `tls11`, `tls12` or `tls13`")
}
}
cluster.SslOpts = &gocql.SslOptions{
Config: tlsConfig,
EnableHostVerification: !tlsConfig.InsecureSkipVerify,
}
return nil
}
// bucketName sanitises a bucket name for Cassandra
func (c *CassandraBackend) bucketName(name string) string {
if name == "" {
name = "."
}
return strings.TrimRight(name, "/")
}
// buckets returns all the prefix buckets the key should be stored at
func (c *CassandraBackend) buckets(key string) []string {
vals := append([]string{""}, physical.Prefixes(key)...)
for i, v := range vals {
vals[i] = c.bucketName(v)
}
return vals
}
// bucket returns the most specific bucket for the key
func (c *CassandraBackend) bucket(key string) string {
bs := c.buckets(key)
return bs[len(bs)-1]
}
// Put is used to insert or update an entry
func (c *CassandraBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"cassandra", "put"}, time.Now())
// Execute inserts to each key prefix simultaneously
stmt := fmt.Sprintf(`INSERT INTO "%s" (bucket, key, value) VALUES (?, ?, ?)`, c.table)
buckets := c.buckets(entry.Key)
results := make(chan error, len(buckets))
for i, _bucket := range buckets {
go func(i int, bucket string) {
var value []byte
if i == len(buckets)-1 {
// Only store the full value if this is the leaf bucket where the entry will actually be read
// otherwise this write is just to allow for list operations
value = entry.Value
}
results <- c.sess.Query(stmt, bucket, entry.Key, value).Exec()
}(i, _bucket)
}
for i := 0; i < len(buckets); i++ {
if err := <-results; err != nil {
return err
}
}
return nil
}
// Get is used to fetch an entry
func (c *CassandraBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"cassandra", "get"}, time.Now())
v := []byte(nil)
stmt := fmt.Sprintf(`SELECT value FROM "%s" WHERE bucket = ? AND key = ? LIMIT 1`, c.table)
q := c.sess.Query(stmt, c.bucket(key), key)
if err := q.Scan(&v); err != nil {
if err == gocql.ErrNotFound {
return nil, nil
}
return nil, err
}
return &physical.Entry{
Key: key,
Value: v,
}, nil
}
// Delete is used to permanently delete an entry
func (c *CassandraBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"cassandra", "delete"}, time.Now())
stmt := fmt.Sprintf(`DELETE FROM "%s" WHERE bucket = ? AND key = ?`, c.table)
buckets := c.buckets(key)
results := make(chan error, len(buckets))
for _, bucket := range buckets {
go func(bucket string) {
results <- c.sess.Query(stmt, bucket, key).Exec()
}(bucket)
}
for i := 0; i < len(buckets); i++ {
if err := <-results; err != nil {
return err
}
}
return nil
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (c *CassandraBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"cassandra", "list"}, time.Now())
stmt := fmt.Sprintf(`SELECT key FROM "%s" WHERE bucket = ?`, c.table)
q := c.sess.Query(stmt, c.bucketName(prefix))
iter := q.Iter()
k, keys := "", []string{}
for iter.Scan(&k) {
// Only return the next "component" (with a trailing slash if it has children)
k = strings.TrimPrefix(k, prefix)
if parts := strings.SplitN(k, "/", 2); len(parts) > 1 {
k = parts[0] + "/"
} else {
k = parts[0]
}
// Deduplicate; this works because the keys are sorted
if len(keys) > 0 && keys[len(keys)-1] == k {
continue
}
keys = append(keys, k)
}
return keys, iter.Close()
}


@@ -1,60 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"os"
"reflect"
"testing"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/testhelpers/cassandra"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
)
func TestCassandraBackend(t *testing.T) {
if testing.Short() {
t.Skipf("skipping in short mode")
}
if os.Getenv("VAULT_CI_GO_TEST_RACE") != "" {
t.Skip("skipping race test in CI pending https://github.com/gocql/gocql/pull/1474")
}
host, cleanup := cassandra.PrepareTestContainer(t)
defer cleanup()
// Run vault tests
logger := logging.NewVaultLogger(log.Debug)
b, err := NewCassandraBackend(map[string]string{
"hosts": host.ConnectionURL(),
"protocol_version": "3",
"connection_timeout": "5",
"initial_connection_timeout": "5",
"simple_retry_policy_retries": "3",
}, logger)
if err != nil {
t.Fatalf("Failed to create new backend: %v", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func TestCassandraBackendBuckets(t *testing.T) {
expectations := map[string][]string{
"": {"."},
"a": {"."},
"a/b": {".", "a"},
"a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"},
}
b := &CassandraBackend{}
for input, expected := range expectations {
actual := b.buckets(input)
if !reflect.DeepEqual(actual, expected) {
t.Errorf("bad: %v expected: %v", actual, expected)
}
}
}


@@ -1,317 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package couchdb
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
metrics "github.com/armon/go-metrics"
cleanhttp "github.com/hashicorp/go-cleanhttp"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
)
// CouchDBBackend is a physical backend that stores data in CouchDB
type CouchDBBackend struct {
logger log.Logger
client *couchDBClient
permitPool *physical.PermitPool
}
// Verify CouchDBBackend satisfies the correct interfaces
var (
_ physical.Backend = (*CouchDBBackend)(nil)
_ physical.PseudoTransactional = (*CouchDBBackend)(nil)
_ physical.PseudoTransactional = (*TransactionalCouchDBBackend)(nil)
)
type couchDBClient struct {
endpoint string
username string
password string
*http.Client
}
type couchDBListItem struct {
ID string `json:"id"`
Key string `json:"key"`
Value struct {
Revision string
} `json:"value"`
}
type couchDBList struct {
TotalRows int `json:"total_rows"`
Offset int `json:"offset"`
Rows []couchDBListItem `json:"rows"`
}
func (m *couchDBClient) rev(key string) (string, error) {
req, err := http.NewRequest("HEAD", fmt.Sprintf("%s/%s", m.endpoint, key), nil)
if err != nil {
return "", err
}
req.SetBasicAuth(m.username, m.password)
resp, err := m.Client.Do(req)
if err != nil {
return "", err
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", nil
}
etag := resp.Header.Get("Etag")
if len(etag) < 2 {
return "", nil
}
return etag[1 : len(etag)-1], nil
}
func (m *couchDBClient) put(e couchDBEntry) error {
bs, err := json.Marshal(e)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", m.endpoint, e.ID), bytes.NewReader(bs))
if err != nil {
return err
}
req.SetBasicAuth(m.username, m.password)
resp, err := m.Client.Do(req)
if err == nil {
resp.Body.Close()
}
return err
}
func (m *couchDBClient) get(key string) (*physical.Entry, error) {
req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", m.endpoint, url.PathEscape(key)), nil)
if err != nil {
return nil, err
}
req.SetBasicAuth(m.username, m.password)
resp, err := m.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, nil
} else if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("GET returned %q", resp.Status)
}
bs, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
entry := couchDBEntry{}
if err := json.Unmarshal(bs, &entry); err != nil {
return nil, err
}
return entry.Entry, nil
}
func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) {
req, _ := http.NewRequest("GET", fmt.Sprintf("%s/_all_docs", m.endpoint), nil)
req.SetBasicAuth(m.username, m.password)
values := req.URL.Query()
values.Set("skip", "0")
values.Set("include_docs", "false")
if prefix != "" {
values.Set("startkey", fmt.Sprintf("%q", prefix))
values.Set("endkey", fmt.Sprintf("%q", prefix+"{}"))
}
req.URL.RawQuery = values.Encode()
resp, err := m.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
results := couchDBList{}
if err := json.Unmarshal(data, &results); err != nil {
return nil, err
}
return results.Rows, nil
}
func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBackend, error) {
endpoint := os.Getenv("COUCHDB_ENDPOINT")
if endpoint == "" {
endpoint = conf["endpoint"]
}
if endpoint == "" {
return nil, fmt.Errorf("missing endpoint")
}
username := os.Getenv("COUCHDB_USERNAME")
if username == "" {
username = conf["username"]
}
password := os.Getenv("COUCHDB_PASSWORD")
if password == "" {
password = conf["password"]
}
maxParStr, ok := conf["max_parallel"]
var maxParInt int
var err error
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
}
}
return &CouchDBBackend{
client: &couchDBClient{
endpoint: endpoint,
username: username,
password: password,
Client: cleanhttp.DefaultPooledClient(),
},
logger: logger,
permitPool: physical.NewPermitPool(maxParInt),
}, nil
}
func NewCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
return buildCouchDBBackend(conf, logger)
}
type couchDBEntry struct {
Entry *physical.Entry `json:"entry"`
Rev string `json:"_rev,omitempty"`
ID string `json:"_id"`
Deleted *bool `json:"_deleted,omitempty"`
}
// Put is used to insert or update an entry
func (m *CouchDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
m.permitPool.Acquire()
defer m.permitPool.Release()
return m.PutInternal(ctx, entry)
}
// Get is used to fetch an entry
func (m *CouchDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
m.permitPool.Acquire()
defer m.permitPool.Release()
return m.GetInternal(ctx, key)
}
// Delete is used to permanently delete an entry
func (m *CouchDBBackend) Delete(ctx context.Context, key string) error {
m.permitPool.Acquire()
defer m.permitPool.Release()
return m.DeleteInternal(ctx, key)
}
// List is used to list all the keys under a given prefix
func (m *CouchDBBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now())
m.permitPool.Acquire()
defer m.permitPool.Release()
items, err := m.client.list(prefix)
if err != nil {
return nil, err
}
var out []string
seen := make(map[string]interface{})
for _, result := range items {
trimmed := strings.TrimPrefix(result.ID, prefix)
sep := strings.Index(trimmed, "/")
if sep == -1 {
out = append(out, trimmed)
} else {
trimmed = trimmed[:sep+1]
if _, ok := seen[trimmed]; !ok {
out = append(out, trimmed)
seen[trimmed] = struct{}{}
}
}
}
return out, nil
}
// TransactionalCouchDBBackend is a CouchDB backend that forces all operations to happen
// serially
type TransactionalCouchDBBackend struct {
CouchDBBackend
}
func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
backend, err := buildCouchDBBackend(conf, logger)
if err != nil {
return nil, err
}
backend.permitPool = physical.NewPermitPool(1)
return &TransactionalCouchDBBackend{
CouchDBBackend: *backend,
}, nil
}
// GetInternal is used to fetch an entry
func (m *CouchDBBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now())
return m.client.get(key)
}
// PutInternal is used to insert or update an entry
func (m *CouchDBBackend) PutInternal(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now())
revision, _ := m.client.rev(url.PathEscape(entry.Key))
return m.client.put(couchDBEntry{
Entry: entry,
Rev: revision,
ID: url.PathEscape(entry.Key),
})
}
// DeleteInternal is used to permanently delete an entry
func (m *CouchDBBackend) DeleteInternal(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now())
revision, _ := m.client.rev(url.PathEscape(key))
deleted := true
return m.client.put(couchDBEntry{
ID: url.PathEscape(key),
Rev: revision,
Deleted: &deleted,
})
}


@@ -1,165 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package couchdb
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/docker"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
)
func TestCouchDBBackend(t *testing.T) {
cleanup, config := prepareCouchdbDBTestContainer(t)
defer cleanup()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewCouchDBBackend(map[string]string{
"endpoint": config.URL().String(),
"username": config.username,
"password": config.password,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func TestTransactionalCouchDBBackend(t *testing.T) {
cleanup, config := prepareCouchdbDBTestContainer(t)
defer cleanup()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewTransactionalCouchDBBackend(map[string]string{
"endpoint": config.URL().String(),
"username": config.username,
"password": config.password,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
type couchDB struct {
baseURL url.URL
dbname string
username string
password string
}
func (c couchDB) Address() string {
return c.baseURL.Host
}
func (c couchDB) URL() *url.URL {
u := c.baseURL
u.Path = c.dbname
return &u
}
var _ docker.ServiceConfig = &couchDB{}
func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) {
// If the environment variable is set, assume the caller wants to target a real
// CouchDB.
if os.Getenv("COUCHDB_ENDPOINT") != "" {
return func() {}, &couchDB{
baseURL: url.URL{Host: os.Getenv("COUCHDB_ENDPOINT")},
username: os.Getenv("COUCHDB_USERNAME"),
password: os.Getenv("COUCHDB_PASSWORD"),
}
}
runner, err := docker.NewServiceRunner(docker.RunOptions{
ContainerName: "couchdb",
ImageRepo: "docker.mirror.hashicorp.services/library/couchdb",
ImageTag: "1.6",
Ports: []string{"5984/tcp"},
DoNotAutoRemove: true,
})
if err != nil {
t.Fatalf("Could not start local CouchDB: %s", err)
}
svc, err := runner.StartService(context.Background(), setupCouchDB)
if err != nil {
t.Fatalf("Could not start local CouchDB: %s", err)
}
return svc.Cleanup, svc.Config.(*couchDB)
}
func setupCouchDB(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
c := &couchDB{
baseURL: url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)},
dbname: fmt.Sprintf("vault-test-%d", time.Now().Unix()),
username: "admin",
password: "admin",
}
{
resp, err := http.Get(c.baseURL.String())
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("expected couchdb to return status code 200, got (%s) instead", resp.Status)
}
}
{
req, err := http.NewRequest("PUT", c.URL().String(), nil)
if err != nil {
return nil, fmt.Errorf("could not create create database request: %q", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("could not create database: %q", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
bs, _ := ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf("failed to create database: %s %s\n", resp.Status, string(bs))
}
}
{
u := c.baseURL
u.Path = fmt.Sprintf("_config/admins/%s", c.username)
req, err := http.NewRequest("PUT", u.String(), strings.NewReader(fmt.Sprintf(`"%s"`, c.password)))
if err != nil {
return nil, fmt.Errorf("Could not create admin user request: %q", err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("Could not create admin user: %q", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
bs, _ := ioutil.ReadAll(resp.Body)
return nil, fmt.Errorf("Failed to create admin user: %s %s\n", resp.Status, string(bs))
}
}
return c, nil
}


@@ -1,92 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package etcd
import (
"errors"
"fmt"
"net/url"
"os"
"strings"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/physical"
"go.etcd.io/etcd/client/v2"
)
var (
EtcdMultipleBootstrapError = errors.New("client setup failed: multiple discovery or bootstrap flags specified, use either \"address\" or \"discovery_srv\"")
EtcdAddressError = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
EtcdLockHeldError = errors.New("lock already held")
EtcdLockNotHeldError = errors.New("lock not held")
EtcdVersionUnknown = errors.New("etcd: unknown API version")
)
// NewEtcdBackend constructs an etcd backend using a given machine address.
func NewEtcdBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
var (
apiVersion string
ok bool
)
if apiVersion, ok = conf["etcd_api"]; !ok {
apiVersion = os.Getenv("ETCD_API")
}
if apiVersion == "" {
apiVersion = "v3"
}
switch apiVersion {
case "3", "etcd3", "v3":
return newEtcd3Backend(conf, logger)
default:
return nil, EtcdVersionUnknown
}
}
// Retrieves the config option in order of priority:
// 1. The named environment variable if it exists
// 2. The key in the config map
func getEtcdOption(conf map[string]string, confKey, envVar string) (string, bool) {
confVal, inConf := conf[confKey]
envVal, inEnv := os.LookupEnv(envVar)
if inEnv {
return envVal, true
}
return confVal, inConf
}
func getEtcdEndpoints(conf map[string]string) ([]string, error) {
address, staticBootstrap := getEtcdOption(conf, "address", "ETCD_ADDR")
domain, useSrv := getEtcdOption(conf, "discovery_srv", "ETCD_DISCOVERY_SRV")
if useSrv && staticBootstrap {
return nil, EtcdMultipleBootstrapError
}
if staticBootstrap {
endpoints := strings.Split(address, ",")
// Verify that the machines are valid URLs
for _, e := range endpoints {
u, urlErr := url.Parse(e)
if urlErr != nil || u.Scheme == "" {
return nil, EtcdAddressError
}
}
return endpoints, nil
}
if useSrv {
srvName, _ := getEtcdOption(conf, "discovery_srv_name", "ETCD_DISCOVERY_SRV_NAME")
discoverer := client.NewSRVDiscover()
endpoints, err := discoverer.Discover(domain, srvName)
if err != nil {
return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %w", err)
}
return endpoints, nil
}
// Set a default endpoints list if no option was set
return []string{"http://127.0.0.1:2379"}, nil
}


@@ -1,383 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package etcd
import (
"context"
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/sdk/physical"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/client/v3/concurrency"
)
// EtcdBackend is a physical backend that stores data at a specific
// prefix within etcd. It is used for most production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type EtcdBackend struct {
logger log.Logger
path string
haEnabled bool
lockTimeout time.Duration
requestTimeout time.Duration
permitPool *physical.PermitPool
etcd *clientv3.Client
}
// Verify EtcdBackend satisfies the correct interfaces
var (
_ physical.Backend = (*EtcdBackend)(nil)
_ physical.HABackend = (*EtcdBackend)(nil)
_ physical.Lock = (*EtcdLock)(nil)
)
// newEtcd3Backend constructs an etcd3 backend.
func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the etcd path from the configuration.
path, ok := conf["path"]
if !ok {
path = "/vault"
}
// Ensure path is prefixed.
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
endpoints, err := getEtcdEndpoints(conf)
if err != nil {
return nil, err
}
cfg := clientv3.Config{
Endpoints: endpoints,
}
haEnabled := os.Getenv("ETCD_HA_ENABLED")
if haEnabled == "" {
haEnabled = conf["ha_enabled"]
}
if haEnabled == "" {
haEnabled = "false"
}
haEnabledBool, err := strconv.ParseBool(haEnabled)
if err != nil {
return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled)
}
cert, hasCert := conf["tls_cert_file"]
key, hasKey := conf["tls_key_file"]
ca, hasCa := conf["tls_ca_file"]
if (hasCert && hasKey) || hasCa {
tls := transport.TLSInfo{
TrustedCAFile: ca,
CertFile: cert,
KeyFile: key,
}
tlscfg, err := tls.ClientConfig()
if err != nil {
return nil, err
}
cfg.TLS = tlscfg
}
// Set credentials.
username := os.Getenv("ETCD_USERNAME")
if username == "" {
username, _ = conf["username"]
}
password := os.Getenv("ETCD_PASSWORD")
if password == "" {
password, _ = conf["password"]
}
if username != "" && password != "" {
cfg.Username = username
cfg.Password = password
}
if maxReceive, ok := conf["max_receive_size"]; ok {
// grpc converts this to uint32 internally, so parse as that to avoid passing invalid values
val, err := strconv.ParseUint(maxReceive, 10, 32)
if err != nil {
return nil, fmt.Errorf("value of 'max_receive_size' (%v) could not be understood: %w", maxReceive, err)
}
cfg.MaxCallRecvMsgSize = int(val)
}
etcd, err := clientv3.New(cfg)
if err != nil {
return nil, err
}
sReqTimeout := conf["request_timeout"]
if sReqTimeout == "" {
// etcd3 default request timeout is set to 5s. It should be long enough
// for most cases, even with internal retry.
sReqTimeout = "5s"
}
reqTimeout, err := parseutil.ParseDurationSecond(sReqTimeout)
if err != nil {
return nil, fmt.Errorf("value [%v] of 'request_timeout' could not be understood: %w", sReqTimeout, err)
}
ssync, ok := conf["sync"]
if !ok {
ssync = "true"
}
sync, err := strconv.ParseBool(ssync)
if err != nil {
return nil, fmt.Errorf("value of 'sync' (%v) could not be understood: %w", ssync, err)
}
if sync {
ctx, cancel := context.WithTimeout(context.Background(), reqTimeout)
err := etcd.Sync(ctx)
cancel()
if err != nil {
return nil, err
}
}
sLock := conf["lock_timeout"]
if sLock == "" {
// etcd3 default lease duration is 60s. set to 15s for faster recovery.
sLock = "15s"
}
lock, err := parseutil.ParseDurationSecond(sLock)
if err != nil {
return nil, fmt.Errorf("value [%v] of 'lock_timeout' could not be understood: %w", sLock, err)
}
return &EtcdBackend{
path: path,
etcd: etcd,
permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
logger: logger,
haEnabled: haEnabledBool,
lockTimeout: lock,
requestTimeout: reqTimeout,
}, nil
}
func (c *EtcdBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
_, err := c.etcd.Put(ctx, path.Join(c.path, entry.Key), string(entry.Value))
return err
}
func (c *EtcdBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
resp, err := c.etcd.Get(ctx, path.Join(c.path, key))
if err != nil {
return nil, err
}
if len(resp.Kvs) == 0 {
return nil, nil
}
if len(resp.Kvs) > 1 {
return nil, errors.New("unexpected number of keys from a get request")
}
return &physical.Entry{
Key: key,
Value: resp.Kvs[0].Value,
}, nil
}
func (c *EtcdBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
_, err := c.etcd.Delete(ctx, path.Join(c.path, key))
if err != nil {
return err
}
return nil
}
func (c *EtcdBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
c.permitPool.Acquire()
defer c.permitPool.Release()
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
prefix = path.Join(c.path, prefix) + "/"
resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly())
if err != nil {
return nil, err
}
keys := []string{}
for _, kv := range resp.Kvs {
key := strings.TrimPrefix(string(kv.Key), prefix)
key = strings.TrimPrefix(key, "/")
if len(key) == 0 {
continue
}
if i := strings.Index(key, "/"); i == -1 {
keys = append(keys, key)
} else if i != -1 {
keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
return keys, nil
}
func (e *EtcdBackend) HAEnabled() bool {
return e.haEnabled
}
// EtcdLock implements a lock using an etcd backend.
type EtcdLock struct {
lock sync.Mutex
held bool
timeout time.Duration
requestTimeout time.Duration
etcdSession *concurrency.Session
etcdMu *concurrency.Mutex
prefix string
value string
etcd *clientv3.Client
}
// LockWith is used for mutual exclusion based on the given key.
func (c *EtcdBackend) LockWith(key, value string) (physical.Lock, error) {
p := path.Join(c.path, key)
return &EtcdLock{
prefix: p,
value: value,
etcd: c.etcd,
timeout: c.lockTimeout,
requestTimeout: c.requestTimeout,
}, nil
}
func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.etcdMu == nil {
if err := c.initMu(); err != nil {
return nil, err
}
}
if c.held {
return nil, EtcdLockHeldError
}
select {
case _, ok := <-c.etcdSession.Done():
if !ok {
// The session's done channel is closed, so the session is over,
// and we need a new lock with a new session.
if err := c.initMu(); err != nil {
return nil, err
}
}
default:
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-stopCh
cancel()
}()
if err := c.etcdMu.Lock(ctx); err != nil {
if err == context.Canceled {
return nil, nil
}
return nil, err
}
pctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
if _, err := c.etcd.Put(pctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil {
return nil, err
}
c.held = true
return c.etcdSession.Done(), nil
}
func (c *EtcdLock) Unlock() error {
c.lock.Lock()
defer c.lock.Unlock()
if !c.held {
return EtcdLockNotHeldError
}
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
return c.etcdMu.Unlock(ctx)
}
func (c *EtcdLock) Value() (bool, string, error) {
ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
defer cancel()
resp, err := c.etcd.Get(ctx,
c.prefix, clientv3.WithPrefix(),
clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
if err != nil {
return false, "", err
}
if len(resp.Kvs) == 0 {
return false, "", nil
}
return true, string(resp.Kvs[0].Value), nil
}
func (c *EtcdLock) initMu() error {
session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(int(c.timeout.Seconds())))
if err != nil {
return err
}
c.etcdSession = session
c.etcdMu = concurrency.NewMutex(session, c.prefix)
return nil
}


@@ -1,46 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package etcd
import (
"fmt"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/testhelpers/etcd"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
)
func TestEtcd3Backend(t *testing.T) {
cleanup, config := etcd.PrepareTestContainer(t, "v3.5.0")
defer cleanup()
logger := logging.NewVaultLogger(log.Debug)
configMap := map[string]string{
"address": config.URL().String(),
"path": fmt.Sprintf("/vault-%d", time.Now().Unix()),
"etcd_api": "3",
"username": "root",
"password": "insecure",
// Syncing advertised client urls should be disabled since docker port mapping confuses the client.
"sync": "false",
}
b, err := NewEtcdBackend(configMap, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
b2, err := NewEtcdBackend(configMap, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
}


@@ -1,249 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package swift
import (
"context"
"fmt"
"os"
"sort"
"strconv"
"strings"
"time"
log "github.com/hashicorp/go-hclog"
metrics "github.com/armon/go-metrics"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/sdk/physical"
"github.com/ncw/swift"
)
// Verify SwiftBackend satisfies the correct interfaces
var _ physical.Backend = (*SwiftBackend)(nil)
// SwiftBackend is a physical backend that stores data
// within an OpenStack Swift container.
type SwiftBackend struct {
container string
client *swift.Connection
logger log.Logger
permitPool *physical.PermitPool
}
// NewSwiftBackend constructs a Swift backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment.
func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
var ok bool
username := os.Getenv("OS_USERNAME")
if username == "" {
username = conf["username"]
if username == "" {
return nil, fmt.Errorf("missing username")
}
}
password := os.Getenv("OS_PASSWORD")
if password == "" {
password = conf["password"]
if password == "" {
return nil, fmt.Errorf("missing password")
}
}
authUrl := os.Getenv("OS_AUTH_URL")
if authUrl == "" {
authUrl = conf["auth_url"]
if authUrl == "" {
return nil, fmt.Errorf("missing auth_url")
}
}
container := os.Getenv("OS_CONTAINER")
if container == "" {
container = conf["container"]
if container == "" {
return nil, fmt.Errorf("missing container")
}
}
project := os.Getenv("OS_PROJECT_NAME")
if project == "" {
if project, ok = conf["project"]; !ok {
// Check for KeyStone naming prior to V3
project = os.Getenv("OS_TENANT_NAME")
if project == "" {
project = conf["tenant"]
}
}
}
domain := os.Getenv("OS_USER_DOMAIN_NAME")
if domain == "" {
domain = conf["domain"]
}
projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
if projectDomain == "" {
projectDomain = conf["project-domain"]
}
region := os.Getenv("OS_REGION_NAME")
if region == "" {
region = conf["region"]
}
tenantID := os.Getenv("OS_TENANT_ID")
if tenantID == "" {
tenantID = conf["tenant_id"]
}
trustID := os.Getenv("OS_TRUST_ID")
if trustID == "" {
trustID = conf["trust_id"]
}
storageUrl := os.Getenv("OS_STORAGE_URL")
if storageUrl == "" {
storageUrl = conf["storage_url"]
}
authToken := os.Getenv("OS_AUTH_TOKEN")
if authToken == "" {
authToken = conf["auth_token"]
}
c := swift.Connection{
Domain: domain,
UserName: username,
ApiKey: password,
AuthUrl: authUrl,
Tenant: project,
TenantDomain: projectDomain,
Region: region,
TenantId: tenantID,
TrustId: trustID,
StorageUrl: storageUrl,
AuthToken: authToken,
Transport: cleanhttp.DefaultPooledTransport(),
}
err := c.Authenticate()
if err != nil {
return nil, err
}
_, _, err = c.Container(container)
if err != nil {
return nil, fmt.Errorf("Unable to access container %q: %w", container, err)
}
maxParStr, ok := conf["max_parallel"]
var maxParInt int
if ok {
maxParInt, err = strconv.Atoi(maxParStr)
if err != nil {
return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
}
if logger.IsDebug() {
logger.Debug("max_parallel set", "max_parallel", maxParInt)
}
}
s := &SwiftBackend{
client: &c,
container: container,
logger: logger,
permitPool: physical.NewPermitPool(maxParInt),
}
return s, nil
}
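// For illustration only -- a minimal sketch of wiring up the backend directly with a
// hand-written configuration map. Every value below is a placeholder, and "logger" is
// assumed to be any hclog.Logger (the tests use logging.NewVaultLogger):
//
//	conf := map[string]string{
//		"username":  "svc-vault",                       // or OS_USERNAME
//		"password":  "not-a-real-password",             // or OS_PASSWORD
//		"auth_url":  "https://keystone.example.com/v3", // or OS_AUTH_URL
//		"container": "vault-storage",                   // or OS_CONTAINER
//	}
//	b, err := NewSwiftBackend(conf, logger)
//	if err != nil {
//		// authentication or container access failed
//	}
//	_ = b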
// Put is used to insert or update an entry
func (s *SwiftBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"swift", "put"}, time.Now())
s.permitPool.Acquire()
defer s.permitPool.Release()
err := s.client.ObjectPutBytes(s.container, entry.Key, entry.Value, "")
if err != nil {
return err
}
return nil
}
// Get is used to fetch an entry
func (s *SwiftBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"swift", "get"}, time.Now())
s.permitPool.Acquire()
defer s.permitPool.Release()
// Do a list of names with the key first since eventual consistency means
// it might be deleted, but a node might return a read of bytes which fails
// the physical test
list, err := s.client.ObjectNames(s.container, &swift.ObjectsOpts{Prefix: key})
if err != nil {
return nil, err
}
if 0 == len(list) {
return nil, nil
}
data, err := s.client.ObjectGetBytes(s.container, key)
if err == swift.ObjectNotFound {
return nil, nil
}
if err != nil {
return nil, err
}
ent := &physical.Entry{
Key: key,
Value: data,
}
return ent, nil
}
// Delete is used to permanently delete an entry
func (s *SwiftBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"swift", "delete"}, time.Now())
s.permitPool.Acquire()
defer s.permitPool.Release()
err := s.client.ObjectDelete(s.container, key)
if err != nil && err != swift.ObjectNotFound {
return err
}
return nil
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (s *SwiftBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"swift", "list"}, time.Now())
s.permitPool.Acquire()
defer s.permitPool.Release()
list, err := s.client.ObjectNamesAll(s.container, &swift.ObjectsOpts{Prefix: prefix})
if nil != err {
return nil, err
}
keys := []string{}
for _, key := range list {
key := strings.TrimPrefix(key, prefix)
if i := strings.Index(key, "/"); i == -1 {
// Add objects only from the current 'folder'
keys = append(keys, key)
} else if i != -1 {
// Add truncated 'folder' paths
keys = strutil.AppendIfMissing(keys, key[:i+1])
}
}
sort.Strings(keys)
return keys, nil
}

View File

@ -1,94 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package swift
import (
"fmt"
"os"
"testing"
"time"
log "github.com/hashicorp/go-hclog"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
"github.com/ncw/swift"
)
func TestSwiftBackend(t *testing.T) {
if os.Getenv("OS_USERNAME") == "" || os.Getenv("OS_PASSWORD") == "" ||
os.Getenv("OS_AUTH_URL") == "" {
t.SkipNow()
}
username := os.Getenv("OS_USERNAME")
password := os.Getenv("OS_PASSWORD")
authUrl := os.Getenv("OS_AUTH_URL")
project := os.Getenv("OS_PROJECT_NAME")
domain := os.Getenv("OS_USER_DOMAIN_NAME")
projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
region := os.Getenv("OS_REGION_NAME")
tenantID := os.Getenv("OS_TENANT_ID")
ts := time.Now().UnixNano()
container := fmt.Sprintf("vault-test-%d", ts)
cleaner := swift.Connection{
Domain: domain,
UserName: username,
ApiKey: password,
AuthUrl: authUrl,
Tenant: project,
TenantDomain: projectDomain,
Region: region,
TenantId: tenantID,
Transport: cleanhttp.DefaultPooledTransport(),
}
err := cleaner.Authenticate()
if err != nil {
t.Fatalf("err: %s", err)
}
err = cleaner.ContainerCreate(container, nil)
if nil != err {
t.Fatalf("Unable to create test container %q: %v", container, err)
}
defer func() {
newObjects, err := cleaner.ObjectNamesAll(container, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
for _, o := range newObjects {
err := cleaner.ObjectDelete(container, o)
if err != nil {
t.Fatalf("err: %s", err)
}
}
err = cleaner.ContainerDelete(container)
if err != nil {
t.Fatalf("err: %s", err)
}
}()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewSwiftBackend(map[string]string{
"username": username,
"password": password,
"container": container,
"auth_url": authUrl,
"project": project,
"domain": domain,
"project-domain": projectDomain,
"tenant_id": tenantID,
"region": region,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}

View File

@ -1,671 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package zookeeper
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"path/filepath"
"sort"
"strings"
"sync"
"time"
metrics "github.com/armon/go-metrics"
"github.com/go-zookeeper/zk"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-secure-stdlib/tlsutil"
"github.com/hashicorp/vault/sdk/physical"
)
const (
// ZKNodeFilePrefix is prefixed to any "files" in ZooKeeper,
// so that they do not collide with directory entries. Otherwise,
// we cannot delete a file if the path is a full-prefix of another
// key.
ZKNodeFilePrefix = "_"
)
// Verify ZooKeeperBackend satisfies the correct interfaces
var (
_ physical.Backend = (*ZooKeeperBackend)(nil)
_ physical.HABackend = (*ZooKeeperBackend)(nil)
_ physical.Lock = (*ZooKeeperHALock)(nil)
)
// ZooKeeperBackend is a physical backend that stores data at a specific
// prefix within ZooKeeper. It is used in production situations as
// it allows Vault to run on multiple machines in a highly-available manner.
type ZooKeeperBackend struct {
path string
client *zk.Conn
acl []zk.ACL
logger log.Logger
}
// NewZooKeeperBackend constructs a ZooKeeper backend from the given configuration,
// storing all data under the configured path prefix.
func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the path in ZooKeeper
path, ok := conf["path"]
if !ok {
path = "vault/"
}
// Ensure path is suffixed and prefixed (zk requires prefix /)
if !strings.HasSuffix(path, "/") {
path += "/"
}
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
// Configure the client, default to localhost instance
var machines string
machines, ok = conf["address"]
if !ok {
machines = "localhost:2181"
}
// zNode owner and schema.
var owner string
var schema string
var schemaAndOwner string
schemaAndOwner, ok = conf["znode_owner"]
if !ok {
owner = "anyone"
schema = "world"
} else {
parsedSchemaAndOwner := strings.SplitN(schemaAndOwner, ":", 2)
if len(parsedSchemaAndOwner) != 2 {
return nil, fmt.Errorf("znode_owner expected format is 'schema:owner'")
} else {
schema = parsedSchemaAndOwner[0]
owner = parsedSchemaAndOwner[1]
// znode_owner is in config and structured correctly - but does it make any sense?
// Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
// (e.g. ':MyUser' which omits the schema, or ':' which omits both)
if owner == "" || schema == "" {
return nil, fmt.Errorf("znode_owner expected format is 'schema:owner'")
}
}
}
acl := []zk.ACL{
{
Perms: zk.PermAll,
Scheme: schema,
ID: owner,
},
}
// Authentication info
var schemaAndUser string
var useAddAuth bool
schemaAndUser, useAddAuth = conf["auth_info"]
if useAddAuth {
parsedSchemaAndUser := strings.SplitN(schemaAndUser, ":", 2)
if len(parsedSchemaAndUser) != 2 {
return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
} else {
schema = parsedSchemaAndUser[0]
owner = parsedSchemaAndUser[1]
// auth_info is in config and structured correctly - but does it make any sense?
// Either 'owner' or 'schema' was set but not both - this seems like a failed attempt
// (e.g. ':MyUser' which omits the schema, or ':' which omits both)
if owner == "" || schema == "" {
return nil, fmt.Errorf("auth_info expected format is 'schema:auth'")
}
}
}
// We have all of the configuration in hand - let's try and connect to ZK
client, _, err := createClient(conf, machines, time.Second)
if err != nil {
return nil, fmt.Errorf("client setup failed: %w", err)
}
// ZK AddAuth API if the user asked for it
if useAddAuth {
err = client.AddAuth(schema, []byte(owner))
if err != nil {
return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %w", err)
}
}
// Setup the backend
c := &ZooKeeperBackend{
path: path,
client: client,
acl: acl,
logger: logger,
}
return c, nil
}
func caseInsensitiveContains(superset, val string) bool {
return strings.Contains(strings.ToUpper(superset), strings.ToUpper(val))
}
// Returns a client for ZK connection. Config value 'tls_enabled' determines if TLS is enabled or not.
func createClient(conf map[string]string, machines string, timeout time.Duration) (*zk.Conn, <-chan zk.Event, error) {
// 'tls_enabled' defaults to false
isTlsEnabled := false
isTlsEnabledStr, ok := conf["tls_enabled"]
if ok && isTlsEnabledStr != "" {
parsedBoolval, err := parseutil.ParseBool(isTlsEnabledStr)
if err != nil {
return nil, nil, fmt.Errorf("failed parsing tls_enabled parameter: %w", err)
}
isTlsEnabled = parsedBoolval
}
if isTlsEnabled {
// Create a custom Dialer with cert configuration for TLS handshake.
tlsDialer := customTLSDial(conf, machines)
options := zk.WithDialer(tlsDialer)
return zk.Connect(strings.Split(machines, ","), timeout, options)
} else {
return zk.Connect(strings.Split(machines, ","), timeout)
}
}
// Vault config file properties:
// 1. tls_skip_verify: skip host name verification.
// 2. tls_min_version: minimum supported/acceptable tls version
// 3. tls_cert_file: Cert file Absolute path
// 4. tls_key_file: Key file Absolute path
// 5. tls_ca_file: ca file absolute path
// 6. tls_verify_ip: If set to true, server's IP is verified in certificate if tls_skip_verify is false.
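//
// For illustration only, a hypothetical configuration map wiring these properties
// together (all host names and file paths below are placeholders):
//
//	conf := map[string]string{
//		"address":         "zk1.example.com:2181,zk2.example.com:2181",
//		"tls_enabled":     "true",
//		"tls_ca_file":     "/etc/vault/zk-ca.pem",
//		"tls_cert_file":   "/etc/vault/zk-client.pem",
//		"tls_key_file":    "/etc/vault/zk-client-key.pem",
//		"tls_min_version": "tls12",
//		"tls_skip_verify": "false",
//		"tls_verify_ip":   "false",
//	}
//
// createClient(conf, conf["address"], time.Second) would then dial each server over TLS
// via the dialer returned by customTLSDial.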
func customTLSDial(conf map[string]string, machines string) zk.Dialer {
return func(network, addr string, timeout time.Duration) (net.Conn, error) {
// Sets the serverName. *Note* the addr field comes in as an IP address
serverName, _, sParseErr := net.SplitHostPort(addr)
if sParseErr != nil {
// If the address is only missing port, assign the full address anyway
if strings.Contains(sParseErr.Error(), "missing port") {
serverName = addr
} else {
return nil, fmt.Errorf("failed parsing the server address for 'serverName' setting %w", sParseErr)
}
}
insecureSkipVerify := false
tlsSkipVerify, ok := conf["tls_skip_verify"]
if ok && tlsSkipVerify != "" {
b, err := parseutil.ParseBool(tlsSkipVerify)
if err != nil {
return nil, fmt.Errorf("failed parsing tls_skip_verify parameter: %w", err)
}
insecureSkipVerify = b
}
if !insecureSkipVerify {
// If tls_verify_ip is set to false, Server's DNS name is verified in the CN/SAN of the certificate.
// if tls_verify_ip is true, Server's IP is verified in the CN/SAN of the certificate.
// These checks happen only when tls_skip_verify is set to false.
// This value defaults to false
ipSanCheck := false
configVal, lookupOk := conf["tls_verify_ip"]
if lookupOk && configVal != "" {
parsedIpSanCheck, ipSanErr := parseutil.ParseBool(configVal)
if ipSanErr != nil {
return nil, fmt.Errorf("failed parsing tls_verify_ip parameter: %w", ipSanErr)
}
ipSanCheck = parsedIpSanCheck
}
// The addr/serverName parameter to this method comes in as an IP address.
// Here we lookup the DNS name and assign it to serverName if ipSanCheck is set to false
if !ipSanCheck {
lookupAddressMany, lookupErr := net.LookupAddr(serverName)
if lookupErr == nil {
for _, lookupAddress := range lookupAddressMany {
// strip the trailing '.' from lookupAddr
if lookupAddress[len(lookupAddress)-1] == '.' {
lookupAddress = lookupAddress[:len(lookupAddress)-1]
}
// Allow serverName to be replaced only if the lookupname is part of the
// supplied machine names
// If there is no match, the serverName will continue to be an IP value.
if caseInsensitiveContains(machines, lookupAddress) {
serverName = lookupAddress
break
}
}
}
}
}
tlsMinVersionStr, ok := conf["tls_min_version"]
if !ok {
// Set the default value
tlsMinVersionStr = "tls12"
}
tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr]
if !ok {
return nil, fmt.Errorf("invalid 'tls_min_version'")
}
tlsClientConfig := &tls.Config{
MinVersion: tlsMinVersion,
InsecureSkipVerify: insecureSkipVerify,
ServerName: serverName,
}
_, okCert := conf["tls_cert_file"]
_, okKey := conf["tls_key_file"]
if okCert && okKey {
tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"])
if err != nil {
return nil, fmt.Errorf("client tls setup failed for ZK: %w", err)
}
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
}
if tlsCaFile, ok := conf["tls_ca_file"]; ok {
caPool := x509.NewCertPool()
data, err := ioutil.ReadFile(tlsCaFile)
if err != nil {
return nil, fmt.Errorf("failed to read ZK CA file: %w", err)
}
if !caPool.AppendCertsFromPEM(data) {
return nil, fmt.Errorf("failed to parse ZK CA certificate")
}
tlsClientConfig.RootCAs = caPool
}
if network != "tcp" {
return nil, fmt.Errorf("unsupported network %q", network)
}
tcpConn, err := net.DialTimeout("tcp", addr, timeout)
if err != nil {
return nil, err
}
conn := tls.Client(tcpConn, tlsClientConfig)
if err := conn.Handshake(); err != nil {
return nil, fmt.Errorf("Handshake failed with Zookeeper : %v", err)
}
return conn, nil
}
}
// ensurePath is used to create each node in the path hierarchy.
// We avoid calling this optimistically, and invoke it when we get
// an error during an operation
func (c *ZooKeeperBackend) ensurePath(path string, value []byte) error {
nodes := strings.Split(path, "/")
fullPath := ""
for index, node := range nodes {
if strings.TrimSpace(node) != "" {
fullPath += "/" + node
isLastNode := index+1 == len(nodes)
// set parent nodes to nil, leaf to value
// this block reduces round trips by being smart on the leaf create/set
if exists, _, _ := c.client.Exists(fullPath); !isLastNode && !exists {
if _, err := c.client.Create(fullPath, nil, int32(0), c.acl); err != nil {
return err
}
} else if isLastNode && !exists {
if _, err := c.client.Create(fullPath, value, int32(0), c.acl); err != nil {
return err
}
} else if isLastNode && exists {
if _, err := c.client.Set(fullPath, value, int32(-1)); err != nil {
return err
}
}
}
}
return nil
}
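// As a concrete illustration: ensurePath("/vault/core/_lock", value) creates "/vault" and
// "/vault/core" with nil data if they are missing, then creates (or sets) "/vault/core/_lock"
// with the supplied value, so only the leaf node carries data.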
// cleanupLogicalPath is used to remove all empty nodes, beginning with deepest one,
// aborting on first non-empty one, up to top-level node.
func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error {
nodes := strings.Split(path, "/")
for i := len(nodes) - 1; i > 0; i-- {
fullPath := c.path + strings.Join(nodes[:i], "/")
_, stat, err := c.client.Exists(fullPath)
if err != nil {
return fmt.Errorf("failed to acquire node data: %w", err)
}
if stat.DataLength > 0 && stat.NumChildren > 0 {
panic(fmt.Sprintf("node %q is both of data and leaf type", fullPath))
} else if stat.DataLength > 0 {
panic(fmt.Sprintf("node %q is a data node, this is either a bug or backend data is corrupted", fullPath))
} else if stat.NumChildren > 0 {
return nil
} else {
// Empty node, let's clean it up!
if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode {
return fmt.Errorf("removal of node %q failed: %w", fullPath, err)
}
}
}
return nil
}
// nodePath returns a zk path based on the given key.
func (c *ZooKeeperBackend) nodePath(key string) string {
return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key))
}
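// For example, with the default path "/vault/", nodePath("core/lock") yields
// "/vault/core/_lock": the ZKNodeFilePrefix on the final element keeps the "lock" file
// from colliding with a directory of child nodes of the same name.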
// Put is used to insert or update an entry
func (c *ZooKeeperBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now())
// Attempt to set the full path
fullPath := c.nodePath(entry.Key)
_, err := c.client.Set(fullPath, entry.Value, -1)
// If we get ErrNoNode, we need to construct the path hierarchy
if err == zk.ErrNoNode {
return c.ensurePath(fullPath, entry.Value)
}
return err
}
// Get is used to fetch an entry
func (c *ZooKeeperBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now())
// Attempt to read the full path
fullPath := c.nodePath(key)
value, _, err := c.client.Get(fullPath)
// Ignore if the node does not exist
if err == zk.ErrNoNode {
err = nil
}
if err != nil {
return nil, err
}
// Handle a non-existing value
if value == nil {
return nil, nil
}
ent := &physical.Entry{
Key: key,
Value: value,
}
return ent, nil
}
// Delete is used to permanently delete an entry
func (c *ZooKeeperBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now())
if key == "" {
return nil
}
// Delete the full path
fullPath := c.nodePath(key)
err := c.client.Delete(fullPath, -1)
// Mask if the node does not exist
if err != nil && err != zk.ErrNoNode {
return fmt.Errorf("failed to remove %q: %w", fullPath, err)
}
err = c.cleanupLogicalPath(key)
return err
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
func (c *ZooKeeperBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now())
// Query the children at the full path
fullPath := strings.TrimSuffix(c.path+prefix, "/")
result, _, err := c.client.Children(fullPath)
// If the path nodes are missing, no children!
if err == zk.ErrNoNode {
return []string{}, nil
} else if err != nil {
return []string{}, err
}
children := []string{}
for _, key := range result {
childPath := fullPath + "/" + key
_, stat, err := c.client.Exists(childPath)
if err != nil {
// The node ought to exist, so the error must be something else
return []string{}, err
}
// Check if this entry is a leaf of a node,
// and append the slash which is what Vault depends on
// for iteration
if stat.DataLength > 0 && stat.NumChildren > 0 {
if childPath == c.nodePath("core/lock") {
// go-zookeeper Lock() breaks Vault semantics and creates a directory
// under the lock file; just treat it like the file Vault expects
children = append(children, key[1:])
} else {
panic(fmt.Sprintf("node %q is both of data and leaf type", childPath))
}
} else if stat.DataLength == 0 {
// We cannot differentiate on the number of children here, since a node
// can have all of its leaves removed and still remain a node.
children = append(children, key+"/")
} else {
children = append(children, key[1:])
}
}
sort.Strings(children)
return children, nil
}
// LockWith is used for mutual exclusion based on the given key.
func (c *ZooKeeperBackend) LockWith(key, value string) (physical.Lock, error) {
l := &ZooKeeperHALock{
in: c,
key: key,
value: value,
logger: c.logger,
}
return l, nil
}
// HAEnabled indicates whether the HA functionality should be exposed.
// Currently always returns true.
func (c *ZooKeeperBackend) HAEnabled() bool {
return true
}
// ZooKeeperHALock is a ZooKeeper Lock implementation for the HABackend
type ZooKeeperHALock struct {
in *ZooKeeperBackend
key string
value string
logger log.Logger
held bool
localLock sync.Mutex
leaderCh chan struct{}
stopCh <-chan struct{}
zkLock *zk.Lock
}
func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
i.localLock.Lock()
defer i.localLock.Unlock()
if i.held {
return nil, fmt.Errorf("lock already held")
}
// Attempt an async acquisition
didLock := make(chan struct{})
failLock := make(chan error, 1)
releaseCh := make(chan bool, 1)
lockpath := i.in.nodePath(i.key)
go i.attemptLock(lockpath, didLock, failLock, releaseCh)
// Wait for lock acquisition, failure, or shutdown
select {
case <-didLock:
releaseCh <- false
case err := <-failLock:
return nil, err
case <-stopCh:
releaseCh <- true
return nil, nil
}
// Create the leader channel
i.held = true
i.leaderCh = make(chan struct{})
// Watch for Events which could result in loss of our zkLock and close(i.leaderCh)
currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath)
if err != nil {
return nil, fmt.Errorf("unable to watch HA lock: %w", err)
}
if i.value != string(currentVal) {
return nil, fmt.Errorf("lost HA lock immediately before watch")
}
go i.monitorLock(lockeventCh, i.leaderCh)
i.stopCh = stopCh
return i.leaderCh, nil
}
func (i *ZooKeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) {
// Wait to acquire the lock in ZK
lock := zk.NewLock(i.in.client, lockpath, i.in.acl)
err := lock.Lock()
if err != nil {
failLock <- err
return
}
// Set node value
data := []byte(i.value)
err = i.in.ensurePath(lockpath, data)
if err != nil {
failLock <- err
lock.Unlock()
return
}
i.zkLock = lock
// Signal that lock is held
close(didLock)
// Handle an early abort
release := <-releaseCh
if release {
lock.Unlock()
}
}
func (i *ZooKeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) {
for {
select {
case event := <-lockeventCh:
// Lost connection?
switch event.State {
case zk.StateConnected:
case zk.StateHasSession:
default:
close(leaderCh)
return
}
// Lost lock?
switch event.Type {
case zk.EventNodeChildrenChanged:
case zk.EventSession:
default:
close(leaderCh)
return
}
}
}
}
func (i *ZooKeeperHALock) unlockInternal() error {
i.localLock.Lock()
defer i.localLock.Unlock()
if !i.held {
return nil
}
err := i.zkLock.Unlock()
if err == nil {
i.held = false
return nil
}
return err
}
func (i *ZooKeeperHALock) Unlock() error {
var err error
if err = i.unlockInternal(); err != nil {
i.logger.Error("failed to release distributed lock", "error", err)
go func(i *ZooKeeperHALock) {
attempts := 0
i.logger.Info("launching automated distributed lock release")
for {
if err := i.unlockInternal(); err == nil {
i.logger.Info("distributed lock released")
return
}
timer := time.NewTimer(time.Second)
select {
case <-timer.C:
attempts++
if attempts >= 10 {
i.logger.Error("release lock max attempts reached. Lock may not be released", "error", err)
return
}
continue
case <-i.stopCh:
timer.Stop()
return
}
}
}(i)
}
return err
}
func (i *ZooKeeperHALock) Value() (bool, string, error) {
lockpath := i.in.nodePath(i.key)
value, _, err := i.in.client.Get(lockpath)
return (value != nil), string(value), err
}

View File

@ -1,102 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package zookeeper
import (
"fmt"
"os"
"testing"
"time"
"github.com/go-zookeeper/zk"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/physical"
)
func TestZooKeeperBackend(t *testing.T) {
addr := os.Getenv("ZOOKEEPER_ADDR")
if addr == "" {
t.SkipNow()
}
client, _, err := zk.Connect([]string{addr}, time.Second)
if err != nil {
t.Fatalf("err: %v", err)
}
randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
acl := zk.WorldACL(zk.PermAll)
_, err = client.Create(randPath, []byte("hi"), int32(0), acl)
if err != nil {
t.Fatalf("err: %v", err)
}
defer func() {
client.Delete(randPath+"/foo/nested1/nested2/nested3", -1)
client.Delete(randPath+"/foo/nested1/nested2", -1)
client.Delete(randPath+"/foo/nested1", -1)
client.Delete(randPath+"/foo/bar/baz", -1)
client.Delete(randPath+"/foo/bar", -1)
client.Delete(randPath+"/foo", -1)
client.Delete(randPath, -1)
client.Close()
}()
logger := logging.NewVaultLogger(log.Debug)
b, err := NewZooKeeperBackend(map[string]string{
"address": addr + "," + addr,
"path": randPath,
}, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
}
func TestZooKeeperHABackend(t *testing.T) {
addr := os.Getenv("ZOOKEEPER_ADDR")
if addr == "" {
t.SkipNow()
}
client, _, err := zk.Connect([]string{addr}, time.Second)
if err != nil {
t.Fatalf("err: %v", err)
}
randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix())
acl := zk.WorldACL(zk.PermAll)
_, err = client.Create(randPath, []byte("hi"), int32(0), acl)
if err != nil {
t.Fatalf("err: %v", err)
}
defer func() {
client.Delete(randPath+"/foo", -1)
client.Delete(randPath, -1)
client.Close()
}()
logger := logging.NewVaultLogger(log.Debug)
config := map[string]string{
"address": addr + "," + addr,
"path": randPath,
}
b, err := NewZooKeeperBackend(config, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
b2, err := NewZooKeeperBackend(config, logger)
if err != nil {
t.Fatalf("err: %s", err)
}
physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
}

View File

@ -1,27 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package main
import (
"log"
"os"
"github.com/hashicorp/vault/plugins/database/cassandra"
"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)
func main() {
err := Run()
if err != nil {
log.Println(err)
os.Exit(1)
}
}
// Run instantiates a Cassandra object, and runs the RPC server for the plugin
func Run() error {
dbplugin.ServeMultiplex(cassandra.New)
return nil
}

View File

@ -1,263 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/vault/sdk/helper/template"
"github.com/gocql/gocql"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-secure-stdlib/strutil"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
)
const (
defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;`
defaultUserDeletionCQL = `DROP USER '{{username}}';`
defaultChangePasswordCQL = `ALTER USER '{{username}}' WITH PASSWORD '{{password}}';`
cassandraTypeName = "cassandra"
defaultUserNameTemplate = `{{ printf "v_%s_%s_%s_%s" (.DisplayName | truncate 15) (.RoleName | truncate 15) (random 20) (unix_time) | truncate 100 | replace "-" "_" | lowercase }}`
)
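// As an illustration of the default template above: UsernameMetadata with DisplayName
// "token" and RoleName "readonly" produces something like
// "v_token_readonly_<20 random alphanumerics>_<unix timestamp>", truncated to 100
// characters, lowercased, and with any "-" replaced by "_".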
var _ dbplugin.Database = &Cassandra{}
// Cassandra is an implementation of Database interface
type Cassandra struct {
*cassandraConnectionProducer
usernameProducer template.StringTemplate
}
// New returns a new Cassandra instance
func New() (interface{}, error) {
db := new()
dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues)
return dbType, nil
}
func new() *Cassandra {
connProducer := &cassandraConnectionProducer{}
connProducer.Type = cassandraTypeName
return &Cassandra{
cassandraConnectionProducer: connProducer,
}
}
// Type returns the TypeName for this backend
func (c *Cassandra) Type() (string, error) {
return cassandraTypeName, nil
}
func (c *Cassandra) getConnection(ctx context.Context) (*gocql.Session, error) {
session, err := c.Connection(ctx)
if err != nil {
return nil, err
}
return session.(*gocql.Session), nil
}
func (c *Cassandra) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) {
usernameTemplate, err := strutil.GetString(req.Config, "username_template")
if err != nil {
return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err)
}
if usernameTemplate == "" {
usernameTemplate = defaultUserNameTemplate
}
up, err := template.NewTemplate(template.Template(usernameTemplate))
if err != nil {
return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err)
}
c.usernameProducer = up
_, err = c.usernameProducer.Generate(dbplugin.UsernameMetadata{})
if err != nil {
return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err)
}
err = c.cassandraConnectionProducer.Initialize(ctx, req)
if err != nil {
return dbplugin.InitializeResponse{}, fmt.Errorf("failed to initialize: %w", err)
}
resp := dbplugin.InitializeResponse{
Config: req.Config,
}
return resp, nil
}
// NewUser generates the username/password on the underlying Cassandra secret backend as instructed by
// the statements provided.
func (c *Cassandra) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) {
c.Lock()
defer c.Unlock()
session, err := c.getConnection(ctx)
if err != nil {
return dbplugin.NewUserResponse{}, err
}
creationCQL := req.Statements.Commands
if len(creationCQL) == 0 {
creationCQL = []string{defaultUserCreationCQL}
}
rollbackCQL := req.RollbackStatements.Commands
if len(rollbackCQL) == 0 {
rollbackCQL = []string{defaultUserDeletionCQL}
}
username, err := c.usernameProducer.Generate(req.UsernameConfig)
if err != nil {
return dbplugin.NewUserResponse{}, err
}
for _, stmt := range creationCQL {
for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
query = strings.TrimSpace(query)
if len(query) == 0 {
continue
}
m := map[string]string{
"username": username,
"password": req.Password,
}
err = session.
Query(dbutil.QueryHelper(query, m)).
WithContext(ctx).
Exec()
if err != nil {
rollbackErr := rollbackUser(ctx, session, username, rollbackCQL)
if rollbackErr != nil {
err = multierror.Append(err, rollbackErr)
}
return dbplugin.NewUserResponse{}, err
}
}
}
resp := dbplugin.NewUserResponse{
Username: username,
}
return resp, nil
}
func rollbackUser(ctx context.Context, session *gocql.Session, username string, rollbackCQL []string) error {
for _, stmt := range rollbackCQL {
for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
query = strings.TrimSpace(query)
if len(query) == 0 {
continue
}
m := map[string]string{
"username": username,
}
err := session.
Query(dbutil.QueryHelper(query, m)).
WithContext(ctx).
Exec()
if err != nil {
return fmt.Errorf("failed to roll back user %s: %w", username, err)
}
}
}
return nil
}
func (c *Cassandra) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) {
if req.Password == nil && req.Expiration == nil {
return dbplugin.UpdateUserResponse{}, fmt.Errorf("no changes requested")
}
if req.Password != nil {
err := c.changeUserPassword(ctx, req.Username, req.Password)
return dbplugin.UpdateUserResponse{}, err
}
// Expiration is no-op
return dbplugin.UpdateUserResponse{}, nil
}
func (c *Cassandra) changeUserPassword(ctx context.Context, username string, changePass *dbplugin.ChangePassword) error {
session, err := c.getConnection(ctx)
if err != nil {
return err
}
rotateCQL := changePass.Statements.Commands
if len(rotateCQL) == 0 {
rotateCQL = []string{defaultChangePasswordCQL}
}
var result *multierror.Error
for _, stmt := range rotateCQL {
for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
query = strings.TrimSpace(query)
if len(query) == 0 {
continue
}
m := map[string]string{
"username": username,
"password": changePass.NewPassword,
}
err := session.
Query(dbutil.QueryHelper(query, m)).
WithContext(ctx).
Exec()
result = multierror.Append(result, err)
}
}
return result.ErrorOrNil()
}
// DeleteUser attempts to drop the specified user.
func (c *Cassandra) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) {
c.Lock()
defer c.Unlock()
session, err := c.getConnection(ctx)
if err != nil {
return dbplugin.DeleteUserResponse{}, err
}
revocationCQL := req.Statements.Commands
if len(revocationCQL) == 0 {
revocationCQL = []string{defaultUserDeletionCQL}
}
var result *multierror.Error
for _, stmt := range revocationCQL {
for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
query = strings.TrimSpace(query)
if len(query) == 0 {
continue
}
m := map[string]string{
"username": req.Username,
}
err := session.
Query(dbutil.QueryHelper(query, m)).
WithContext(ctx).
Exec()
result = multierror.Append(result, err)
}
}
return dbplugin.DeleteUserResponse{}, result.ErrorOrNil()
}

View File

@ -1,308 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
backoff "github.com/cenkalti/backoff/v3"
"github.com/gocql/gocql"
"github.com/hashicorp/vault/helper/testhelpers/cassandra"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
)
func getCassandra(t *testing.T, protocolVersion interface{}) (*Cassandra, func()) {
host, cleanup := cassandra.PrepareTestContainer(t,
cassandra.Version("3.11"),
cassandra.CopyFromTo(insecureFileMounts),
)
db := new()
initReq := dbplugin.InitializeRequest{
Config: map[string]interface{}{
"hosts": host.ConnectionURL(),
"port": host.Port,
"username": "cassandra",
"password": "cassandra",
"protocol_version": protocolVersion,
"connect_timeout": "20s",
},
VerifyConnection: true,
}
expectedConfig := map[string]interface{}{
"hosts": host.ConnectionURL(),
"port": host.Port,
"username": "cassandra",
"password": "cassandra",
"protocol_version": protocolVersion,
"connect_timeout": "20s",
}
initResp := dbtesting.AssertInitialize(t, db, initReq)
if !reflect.DeepEqual(initResp.Config, expectedConfig) {
t.Fatalf("Initialize response config actual: %#v\nExpected: %#v", initResp.Config, expectedConfig)
}
if !db.Initialized {
t.Fatal("Database should be initialized")
}
return db, cleanup
}
func TestInitialize(t *testing.T) {
t.Run("integer protocol version", func(t *testing.T) {
// getCassandra performs an Initialize call
db, cleanup := getCassandra(t, 4)
t.Cleanup(cleanup)
err := db.Close()
if err != nil {
t.Fatalf("err: %s", err)
}
})
t.Run("string protocol version", func(t *testing.T) {
// getCassandra performs an Initialize call
db, cleanup := getCassandra(t, "4")
t.Cleanup(cleanup)
err := db.Close()
if err != nil {
t.Fatalf("err: %s", err)
}
})
}
func TestCreateUser(t *testing.T) {
type testCase struct {
// Config will have the hosts & port added to it during the test
config map[string]interface{}
newUserReq dbplugin.NewUserRequest
expectErr bool
expectedUsernameRegex string
assertCreds func(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration)
}
tests := map[string]testCase{
"default username_template": {
config: map[string]interface{}{
"username": "cassandra",
"password": "cassandra",
"protocol_version": "4",
"connect_timeout": "20s",
},
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "mylongrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{createUserStatements},
},
Password: "bfn985wjAHIh6t",
Expiration: time.Now().Add(1 * time.Minute),
},
expectErr: false,
expectedUsernameRegex: `^v_token_mylongrolenamew_[a-z0-9]{20}_[0-9]{10}$`,
assertCreds: assertCreds,
},
"custom username_template": {
config: map[string]interface{}{
"username": "cassandra",
"password": "cassandra",
"protocol_version": "4",
"connect_timeout": "20s",
"username_template": `foo_{{random 20}}_{{.RoleName | replace "e" "3"}}_{{unix_time}}`,
},
newUserReq: dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "token",
RoleName: "mylongrolenamewithmanycharacters",
},
Statements: dbplugin.Statements{
Commands: []string{createUserStatements},
},
Password: "bfn985wjAHIh6t",
Expiration: time.Now().Add(1 * time.Minute),
},
expectErr: false,
expectedUsernameRegex: `^foo_[a-zA-Z0-9]{20}_mylongrol3nam3withmanycharact3rs_[0-9]{10}$`,
assertCreds: assertCreds,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
host, cleanup := cassandra.PrepareTestContainer(t,
cassandra.Version("3.11"),
cassandra.CopyFromTo(insecureFileMounts),
)
defer cleanup()
db := new()
config := test.config
config["hosts"] = host.ConnectionURL()
config["port"] = host.Port
initReq := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
dbtesting.AssertInitialize(t, db, initReq)
require.True(t, db.Initialized, "Database is not initialized")
ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
newUserResp, err := db.NewUser(ctx, test.newUserReq)
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
test.assertCreds(t, db.Hosts, db.Port, newUserResp.Username, test.newUserReq.Password, nil, 5*time.Second)
})
}
}
func TestUpdateUserPassword(t *testing.T) {
db, cleanup := getCassandra(t, 4)
defer cleanup()
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{createUserStatements},
},
Password: password,
Expiration: time.Now().Add(1 * time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
newPassword := "somenewpassword"
updateReq := dbplugin.UpdateUserRequest{
Username: createResp.Username,
Password: &dbplugin.ChangePassword{
NewPassword: newPassword,
Statements: dbplugin.Statements{},
},
Expiration: nil,
}
dbtesting.AssertUpdateUser(t, db, updateReq)
assertCreds(t, db.Hosts, db.Port, createResp.Username, newPassword, nil, 5*time.Second)
}
func TestDeleteUser(t *testing.T) {
db, cleanup := getCassandra(t, 4)
defer cleanup()
password := "myreallysecurepassword"
createReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "test",
RoleName: "test",
},
Statements: dbplugin.Statements{
Commands: []string{createUserStatements},
},
Password: password,
Expiration: time.Now().Add(1 * time.Minute),
}
createResp := dbtesting.AssertNewUser(t, db, createReq)
assertCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
deleteReq := dbplugin.DeleteUserRequest{
Username: createResp.Username,
}
dbtesting.AssertDeleteUser(t, db, deleteReq)
assertNoCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
}
func assertCreds(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration) {
t.Helper()
op := func() error {
return connect(t, address, port, username, password, sslOpts)
}
bo := backoff.NewExponentialBackOff()
bo.MaxElapsedTime = timeout
bo.InitialInterval = 500 * time.Millisecond
bo.MaxInterval = bo.InitialInterval
bo.RandomizationFactor = 0.0
err := backoff.Retry(op, bo)
if err != nil {
t.Fatalf("failed to connect after %s: %s", timeout, err)
}
}
func connect(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions) error {
t.Helper()
clusterConfig := gocql.NewCluster(address)
clusterConfig.Authenticator = gocql.PasswordAuthenticator{
Username: username,
Password: password,
}
clusterConfig.ProtoVersion = 4
clusterConfig.Port = port
clusterConfig.SslOpts = sslOpts
session, err := clusterConfig.CreateSession()
if err != nil {
return err
}
defer session.Close()
return nil
}
func assertNoCreds(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration) {
t.Helper()
op := func() error {
// "Invert" the error so the backoff logic sees a failure to connect as a success
err := connect(t, address, port, username, password, sslOpts)
if err != nil {
return nil
}
return fmt.Errorf("still able to connect with credentials that should have been revoked")
}
bo := backoff.NewExponentialBackOff()
bo.MaxElapsedTime = timeout
bo.InitialInterval = 500 * time.Millisecond
bo.MaxInterval = bo.InitialInterval
bo.RandomizationFactor = 0.0
err := backoff.Retry(op, bo)
if err != nil {
t.Fatalf("successfully connected after %s when it shouldn't", timeout)
}
}
const createUserStatements = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
GRANT ALL PERMISSIONS ON ALL KEYSPACES TO '{{username}}';`

View File

@ -1,244 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"crypto/tls"
"fmt"
"strings"
"sync"
"time"
"github.com/gocql/gocql"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-secure-stdlib/tlsutil"
dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
"github.com/hashicorp/vault/sdk/database/helper/connutil"
"github.com/hashicorp/vault/sdk/database/helper/dbutil"
"github.com/mitchellh/mapstructure"
)
// cassandraConnectionProducer implements ConnectionProducer and provides an
// interface for cassandra databases to make connections.
type cassandraConnectionProducer struct {
Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"`
Port int `json:"port" structs:"port" mapstructure:"port"`
Username string `json:"username" structs:"username" mapstructure:"username"`
Password string `json:"password" structs:"password" mapstructure:"password"`
TLS bool `json:"tls" structs:"tls" mapstructure:"tls"`
InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
TLSServerName string `json:"tls_server_name" structs:"tls_server_name" mapstructure:"tls_server_name"`
ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
SocketKeepAliveRaw interface{} `json:"socket_keep_alive" structs:"socket_keep_alive" mapstructure:"socket_keep_alive"`
TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
Consistency string `json:"consistency" structs:"consistency" mapstructure:"consistency"`
LocalDatacenter string `json:"local_datacenter" structs:"local_datacenter" mapstructure:"local_datacenter"`
PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"`
PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"`
SkipVerification bool `json:"skip_verification" structs:"skip_verification" mapstructure:"skip_verification"`
connectTimeout time.Duration
socketKeepAlive time.Duration
sslOpts *gocql.SslOptions
rawConfig map[string]interface{}
Initialized bool
Type string
session *gocql.Session
sync.Mutex
}
func (c *cassandraConnectionProducer) Initialize(ctx context.Context, req dbplugin.InitializeRequest) error {
c.Lock()
defer c.Unlock()
c.rawConfig = req.Config
err := mapstructure.WeakDecode(req.Config, c)
if err != nil {
return err
}
if c.ConnectTimeoutRaw == nil {
c.ConnectTimeoutRaw = "5s"
}
c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw)
if err != nil {
return fmt.Errorf("invalid connect_timeout: %w", err)
}
if c.SocketKeepAliveRaw == nil {
c.SocketKeepAliveRaw = "0s"
}
c.socketKeepAlive, err = parseutil.ParseDurationSecond(c.SocketKeepAliveRaw)
if err != nil {
return fmt.Errorf("invalid socket_keep_alive: %w", err)
}
switch {
case len(c.Hosts) == 0:
return fmt.Errorf("hosts cannot be empty")
case len(c.Username) == 0:
return fmt.Errorf("username cannot be empty")
case len(c.Password) == 0:
return fmt.Errorf("password cannot be empty")
case len(c.PemJSON) > 0 && len(c.PemBundle) > 0:
return fmt.Errorf("cannot specify both pem_json and pem_bundle")
}
var tlsMinVersion uint16 = tls.VersionTLS12
if c.TLSMinVersion != "" {
ver, exists := tlsutil.TLSLookup[c.TLSMinVersion]
if !exists {
return fmt.Errorf("unrecognized TLS version [%s]", c.TLSMinVersion)
}
tlsMinVersion = ver
}
switch {
case len(c.PemJSON) != 0:
cfg, err := jsonBundleToTLSConfig(c.PemJSON, tlsMinVersion, c.TLSServerName, c.InsecureTLS)
if err != nil {
return fmt.Errorf("failed to parse pem_json: %w", err)
}
c.sslOpts = &gocql.SslOptions{
Config: cfg,
EnableHostVerification: !cfg.InsecureSkipVerify,
}
c.TLS = true
case len(c.PemBundle) != 0:
cfg, err := pemBundleToTLSConfig(c.PemBundle, tlsMinVersion, c.TLSServerName, c.InsecureTLS)
if err != nil {
return fmt.Errorf("failed to parse pem_bundle: %w", err)
}
c.sslOpts = &gocql.SslOptions{
Config: cfg,
EnableHostVerification: !cfg.InsecureSkipVerify,
}
c.TLS = true
case c.InsecureTLS:
c.sslOpts = &gocql.SslOptions{
EnableHostVerification: !c.InsecureTLS,
}
}
// Set initialized to true at this point since all fields are set,
// and the connection can be established at a later time.
c.Initialized = true
if req.VerifyConnection {
if _, err := c.Connection(ctx); err != nil {
return fmt.Errorf("error verifying connection: %w", err)
}
}
return nil
}
func (c *cassandraConnectionProducer) Connection(ctx context.Context) (interface{}, error) {
if !c.Initialized {
return nil, connutil.ErrNotInitialized
}
// If we already have a DB, return it
if c.session != nil && !c.session.Closed() {
return c.session, nil
}
session, err := c.createSession(ctx)
if err != nil {
return nil, err
}
// Store the session in backend for reuse
c.session = session
return session, nil
}
func (c *cassandraConnectionProducer) Close() error {
c.Lock()
defer c.Unlock()
if c.session != nil {
c.session.Close()
}
c.session = nil
return nil
}
func (c *cassandraConnectionProducer) createSession(ctx context.Context) (*gocql.Session, error) {
hosts := strings.Split(c.Hosts, ",")
clusterConfig := gocql.NewCluster(hosts...)
clusterConfig.Authenticator = gocql.PasswordAuthenticator{
Username: c.Username,
Password: c.Password,
}
if c.Port != 0 {
clusterConfig.Port = c.Port
}
clusterConfig.ProtoVersion = c.ProtocolVersion
if clusterConfig.ProtoVersion == 0 {
clusterConfig.ProtoVersion = 2
}
clusterConfig.Timeout = c.connectTimeout
clusterConfig.ConnectTimeout = c.connectTimeout
clusterConfig.SocketKeepalive = c.socketKeepAlive
clusterConfig.SslOpts = c.sslOpts
if c.LocalDatacenter != "" {
clusterConfig.PoolConfig.HostSelectionPolicy = gocql.DCAwareRoundRobinPolicy(c.LocalDatacenter)
}
session, err := clusterConfig.CreateSession()
if err != nil {
return nil, fmt.Errorf("error creating session: %w", err)
}
if c.Consistency != "" {
consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency)
if err != nil {
session.Close()
return nil, err
}
session.SetConsistency(consistencyValue)
}
if !c.SkipVerification {
err = session.Query(`LIST ALL`).WithContext(ctx).Exec()
if err != nil && len(c.Username) != 0 && strings.Contains(err.Error(), "not authorized") {
rowNum := session.Query(dbutil.QueryHelper(`LIST CREATE ON ALL ROLES OF '{{username}}';`, map[string]string{
"username": c.Username,
})).Iter().NumRows()
if rowNum < 1 {
session.Close()
return nil, fmt.Errorf("error validating connection info: No role create permissions found, previous error: %w", err)
}
} else if err != nil {
session.Close()
return nil, fmt.Errorf("error validating connection info: %w", err)
}
}
return session, nil
}
func (c *cassandraConnectionProducer) secretValues() map[string]string {
return map[string]string{
c.Password: "[password]",
c.PemBundle: "[pem_bundle]",
c.PemJSON: "[pem_json]",
}
}

View File

@ -1,233 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cassandra
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"io/ioutil"
"testing"
"time"
"github.com/gocql/gocql"
"github.com/hashicorp/vault/helper/testhelpers/cassandra"
"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/stretchr/testify/require"
)
var insecureFileMounts = map[string]string{
"test-fixtures/no_tls/cassandra.yaml": "/etc/cassandra/cassandra.yaml",
}
func TestSelfSignedCA(t *testing.T) {
copyFromTo := map[string]string{
"test-fixtures/with_tls/stores": "/bitnami/cassandra/secrets/",
"test-fixtures/with_tls/cqlshrc": "/.cassandra/cqlshrc",
}
tlsConfig := loadServerCA(t, "test-fixtures/with_tls/ca.pem")
// Note about CI behavior: when running these tests locally, they seem to pass without issue. However, if the
// ServerName is not set, the tests fail within CI. It's not entirely clear to me why they fail in CI;
// however, by manually setting the ServerName we can get around the hostname/DNS issue and get them passing.
// Setting the ServerName isn't the ideal solution, but it was the only reliable one I was able to find
tlsConfig.ServerName = "cassandra"
sslOpts := &gocql.SslOptions{
Config: tlsConfig,
EnableHostVerification: true,
}
host, cleanup := cassandra.PrepareTestContainer(t,
cassandra.ContainerName("cassandra"),
cassandra.Image("bitnami/cassandra", "3.11.11"),
cassandra.CopyFromTo(copyFromTo),
cassandra.SslOpts(sslOpts),
cassandra.Env("CASSANDRA_KEYSTORE_PASSWORD=cassandra"),
cassandra.Env("CASSANDRA_TRUSTSTORE_PASSWORD=cassandra"),
cassandra.Env("CASSANDRA_INTERNODE_ENCRYPTION=none"),
cassandra.Env("CASSANDRA_CLIENT_ENCRYPTION=true"),
)
t.Cleanup(cleanup)
type testCase struct {
config map[string]interface{}
expectErr bool
}
caPEM := loadFile(t, "test-fixtures/with_tls/ca.pem")
badCAPEM := loadFile(t, "test-fixtures/with_tls/bad_ca.pem")
tests := map[string]testCase{
// ///////////////////////
// pem_json tests
"pem_json/ca only": {
config: map[string]interface{}{
"pem_json": toJSON(t, certutil.CertBundle{
CAChain: []string{caPEM},
}),
},
expectErr: false,
},
"pem_json/bad ca": {
config: map[string]interface{}{
"pem_json": toJSON(t, certutil.CertBundle{
CAChain: []string{badCAPEM},
}),
},
expectErr: true,
},
"pem_json/missing ca": {
config: map[string]interface{}{
"pem_json": "",
},
expectErr: true,
},
// ///////////////////////
// pem_bundle tests
"pem_bundle/ca only": {
config: map[string]interface{}{
"pem_bundle": caPEM,
},
expectErr: false,
},
"pem_bundle/unrecognized CA": {
config: map[string]interface{}{
"pem_bundle": badCAPEM,
},
expectErr: true,
},
"pem_bundle/missing ca": {
config: map[string]interface{}{
"pem_bundle": "",
},
expectErr: true,
},
// ///////////////////////
// no cert data provided
"no cert data/tls=true": {
config: map[string]interface{}{
"tls": "true",
},
expectErr: true,
},
"no cert data/tls=false": {
config: map[string]interface{}{
"tls": "false",
},
expectErr: true,
},
"no cert data/insecure_tls": {
config: map[string]interface{}{
"insecure_tls": "true",
},
expectErr: false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
// Set values that we don't know until the cassandra container is started
config := map[string]interface{}{
"hosts": host.Name,
"port": host.Port,
"username": "cassandra",
"password": "cassandra",
"protocol_version": "4",
"connect_timeout": "30s",
"tls": "true",
// Note about CI behavior: when running these tests locally, they seem to pass without issue. However, if the
// tls_server_name is not set, the tests fail within CI. It's not entirely clear to me why they fail in CI;
// however, by manually setting the tls_server_name we can get around the hostname/DNS issue and get them passing.
// Setting the tls_server_name isn't the ideal solution, but it was the only reliable one I was able to find
"tls_server_name": "cassandra",
}
// Apply the generated & common fields to the config to be sent to the DB
for k, v := range test.config {
config[k] = v
}
db := new()
initReq := dbplugin.InitializeRequest{
Config: config,
VerifyConnection: true,
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, err := db.Initialize(ctx, initReq)
if test.expectErr && err == nil {
t.Fatalf("err expected, got nil")
}
if !test.expectErr && err != nil {
t.Fatalf("no error expected, got: %s", err)
}
// If no error expected, run a NewUser query to make sure the connection
// actually works in case Initialize doesn't catch it
if !test.expectErr {
assertNewUser(t, db, sslOpts)
}
})
}
}
func assertNewUser(t *testing.T, db *Cassandra, sslOpts *gocql.SslOptions) {
newUserReq := dbplugin.NewUserRequest{
UsernameConfig: dbplugin.UsernameMetadata{
DisplayName: "dispname",
RoleName: "rolename",
},
Statements: dbplugin.Statements{
Commands: []string{
"create user '{{username}}' with password '{{password}}'",
},
},
RollbackStatements: dbplugin.Statements{},
Password: "gh8eruajASDFAsgy89svn",
Expiration: time.Now().Add(5 * time.Second),
}
newUserResp := dbtesting.AssertNewUser(t, db, newUserReq)
t.Logf("Username: %s", newUserResp.Username)
assertCreds(t, db.Hosts, db.Port, newUserResp.Username, newUserReq.Password, sslOpts, 5*time.Second)
}
func loadServerCA(t *testing.T, file string) *tls.Config {
t.Helper()
pemData, err := ioutil.ReadFile(file)
require.NoError(t, err)
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(pemData)
config := &tls.Config{
RootCAs: pool,
}
return config
}
func loadFile(t *testing.T, filename string) string {
t.Helper()
contents, err := ioutil.ReadFile(filename)
require.NoError(t, err)
return string(contents)
}
func toJSON(t *testing.T, val interface{}) string {
t.Helper()
b, err := json.Marshal(val)
require.NoError(t, err)
return string(b)
}

File diff suppressed because it is too large

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEFjCCAv6gAwIBAgIUHNknw0iUWaMC5UCpiribG8DQhZYwDQYJKoZIhvcNAQEL
BQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl
c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz
dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjE0MjAyNDAwWhcNMjYwNjEz
MjAyNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU
BgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE
CxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw
ZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBANc0MEZOJ7xm4JrCceerX0kWcdPIczXFIIZTJYdTB7YPHTiL
PFSZ9ugu8W6R7wOMLUazcD7Ugw0hjt+JkiRIY1AOvuZRX7DR3Q0sGy9qFb1y2kOk
lTSAFOV96FxxAg9Fn23mcvjV1TDO1dlxvOuAo0NMjk82TzHk7LVuYOKuJ/Sc9i8a
Ba4vndbiwkSGpytymCu0X4T4ZEARLUZ4feGhr5RbYRehq2Nb8kw/KNLZZyzlzJbr
8OkVizW796bkVJwRfCFubZPl8EvRslxZ2+sMFSozoofoFlB1FsGAvlnEfkxqTJJo
WafmsYnOVnbNfwOogDP0+bp8WAZrAxJqTAWm/LMCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHyfBUnvAULGlcFSljTI
DegUVLB5MA0GCSqGSIb3DQEBCwUAA4IBAQBOdVqZpMCKq+X2TBi3nJmz6kjePVBh
ocHUG02nRkL533x+PUxRpDG3AMzWF3niPxtMuVIZDfpi27zlm2QCh9b3sQi83w+9
UX1/j3dUoUyiVi/U0iZeZmuDY3ne59DNFdOgGY9p3FvJ+b9WfPg8+v2w26rGoSMz
21XKNZcRFcjOJ5LJ3i9+liaCkpXLfErA+AtqNeraHOorJ5UO4mA7OlFowV8adOQq
SinFIoXCExBTxqMv0lVzEhGN6Wd261CmKY5e4QLqASCO+s7zwGhHyzwjdA0pCNtI
PmHIk13m0p56G8hpz+M/5hBQFb0MIIR3Je6QVzfRty2ipUO91E9Ydm7C
-----END CERTIFICATE-----

View File

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEFjCCAv6gAwIBAgIUWd8FZSev3ygjhWE7O8orqHPQ4IEwDQYJKoZIhvcNAQEL
BQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl
c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz
dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjEwMjAwNDAwWhcNMjYwNjA5
MjAwNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU
BgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE
CxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw
ZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD
ggEPADCCAQoCggEBAMXTnIDpOXXiHuKyI9EZxv7qg81DmelOB+iAzhvRsigMSuka
qZH29Aaf4PBvKLlSVN6sVP16cXRvk48qa0C78tP0kTPKWdEyE1xQUZb270SZ6Tm3
T7sNRTRwWTsgeC1n6SHlBUn3MviQgA1dZM1CbZIXQpBxtuPg+p9eu3YP/CZJFJjT
LYVKT6kRumBQEX/UUesNfUnUpVIOxxOwbVeF6a/wGxeLY6/fOQ+TJhVUjSy/pvaI
6NnycrwD/4ck6gusV5HKakidCID9MwV610Vc7AFi070VGYCjKfiv6EYMMnjycYqi
KHz623Ca4rO4qtWWvT1K/+GkryDKXeI3KHuEsdsCAwEAAaNCMEAwDgYDVR0PAQH/
BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIy8cvyabFclVWwcZ4rl
ADoLEdyAMA0GCSqGSIb3DQEBCwUAA4IBAQCzn9QbsOpBuvhhgdH/Jk0q7H0kmpVS
rbLhcQyWv9xiyopYbbUfh0Hud15rnqAkyT9nd2Kvo8T/X9rc1OXa6oDO6aoXjIm1
aKOFikET8fc/81rT81E7TVPO7TZW5s9Cej30zCOJQWZ+ibHNyequuyihtImNacXF
+1pAAldj/JMu+Ky1YFrs2iccGOpGCGbsWfLQt+wYKwya7dpSz1ceqigKavIJSOMV
CNsyC59UtFbvdk139FyEvCmecsCbWuo0JVg3do5n6upwqrgvLRNP8EHzm17DWu5T
aNtsBbv85uUgMmF7kzxr+t6VdtG9u+q0HCmW1/1VVK3ZsA+UTB7UBddD
-----END CERTIFICATE-----

View File

@ -1,3 +0,0 @@
{
"ca_chain": ["-----BEGIN CERTIFICATE-----\nMIIEFjCCAv6gAwIBAgIUWd8FZSev3ygjhWE7O8orqHPQ4IEwDQYJKoZIhvcNAQEL\nBQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\nEw1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl\nc3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz\ndCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjEwMjAwNDAwWhcNMjYwNjA5\nMjAwNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU\nBgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE\nCxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw\nZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAMXTnIDpOXXiHuKyI9EZxv7qg81DmelOB+iAzhvRsigMSuka\nqZH29Aaf4PBvKLlSVN6sVP16cXRvk48qa0C78tP0kTPKWdEyE1xQUZb270SZ6Tm3\nT7sNRTRwWTsgeC1n6SHlBUn3MviQgA1dZM1CbZIXQpBxtuPg+p9eu3YP/CZJFJjT\nLYVKT6kRumBQEX/UUesNfUnUpVIOxxOwbVeF6a/wGxeLY6/fOQ+TJhVUjSy/pvaI\n6NnycrwD/4ck6gusV5HKakidCID9MwV610Vc7AFi070VGYCjKfiv6EYMMnjycYqi\nKHz623Ca4rO4qtWWvT1K/+GkryDKXeI3KHuEsdsCAwEAAaNCMEAwDgYDVR0PAQH/\nBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIy8cvyabFclVWwcZ4rl\nADoLEdyAMA0GCSqGSIb3DQEBCwUAA4IBAQCzn9QbsOpBuvhhgdH/Jk0q7H0kmpVS\nrbLhcQyWv9xiyopYbbUfh0Hud15rnqAkyT9nd2Kvo8T/X9rc1OXa6oDO6aoXjIm1\naKOFikET8fc/81rT81E7TVPO7TZW5s9Cej30zCOJQWZ+ibHNyequuyihtImNacXF\n+1pAAldj/JMu+Ky1YFrs2iccGOpGCGbsWfLQt+wYKwya7dpSz1ceqigKavIJSOMV\nCNsyC59UtFbvdk139FyEvCmecsCbWuo0JVg3do5n6upwqrgvLRNP8EHzm17DWu5T\naNtsBbv85uUgMmF7kzxr+t6VdtG9u+q0HCmW1/1VVK3ZsA+UTB7UBddD\n-----END CERTIFICATE-----\n"]
}
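For context, this deleted fixture has the JSON shape that certutil.CertBundle unmarshals (ca_chain as a list of PEM strings), which is what the removed jsonBundleToTLSConfig helper further down in this diff consumes. A minimal sketch of reading such a file into that type; the file name ca_bundle.json is a placeholder assumption, not the fixture's real path:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/sdk/helper/certutil"
)

func main() {
	// Hypothetical path to a file shaped like the ca_chain fixture above.
	raw, err := os.ReadFile("ca_bundle.json")
	if err != nil {
		log.Fatal(err)
	}

	// CertBundle maps "ca_chain" onto a []string of PEM-encoded CA certificates.
	var bundle certutil.CertBundle
	if err := json.Unmarshal(raw, &bundle); err != nil {
		log.Fatalf("failed to parse JSON: %v", err)
	}

	fmt.Printf("bundle contains %d CA certificate(s) in ca_chain\n", len(bundle.CAChain))
}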

View File

@ -1,3 +0,0 @@
[ssl]
validate = false
version = SSLv23

View File

@ -1,120 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package cassandra

import (
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"encoding/pem"
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/certutil"
	"github.com/hashicorp/vault/sdk/helper/errutil"
)

func jsonBundleToTLSConfig(rawJSON string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) {
	var certBundle certutil.CertBundle
	err := json.Unmarshal([]byte(rawJSON), &certBundle)
	if err != nil {
		return nil, fmt.Errorf("failed to parse JSON: %w", err)
	}
	if certBundle.IssuingCA != "" && len(certBundle.CAChain) > 0 {
		return nil, fmt.Errorf("issuing_ca and ca_chain cannot both be specified")
	}
	if certBundle.IssuingCA != "" {
		certBundle.CAChain = []string{certBundle.IssuingCA}
		certBundle.IssuingCA = ""
	}

	return toClientTLSConfig(certBundle.Certificate, certBundle.PrivateKey, certBundle.CAChain, tlsMinVersion, serverName, insecureSkipVerify)
}

func pemBundleToTLSConfig(pemBundle string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) {
	if len(pemBundle) == 0 {
		return nil, errutil.UserError{Err: "empty pem bundle"}
	}

	pemBytes := []byte(pemBundle)
	var pemBlock *pem.Block

	certificate := ""
	privateKey := ""
	caChain := []string{}

	for len(pemBytes) > 0 {
		pemBlock, pemBytes = pem.Decode(pemBytes)
		if pemBlock == nil {
			return nil, errutil.UserError{Err: "no data found in PEM block"}
		}
		blockBytes := pem.EncodeToMemory(pemBlock)

		switch pemBlock.Type {
		case "CERTIFICATE":
			// Parse the cert so we know if it's a CA or not
			cert, err := x509.ParseCertificate(pemBlock.Bytes)
			if err != nil {
				return nil, fmt.Errorf("failed to parse certificate: %w", err)
			}
			if cert.IsCA {
				caChain = append(caChain, string(blockBytes))
				continue
			}

			// Only one leaf certificate supported
			if certificate != "" {
				return nil, errutil.UserError{Err: "multiple leaf certificates not supported"}
			}
			certificate = string(blockBytes)
		case "RSA PRIVATE KEY", "EC PRIVATE KEY", "PRIVATE KEY":
			if privateKey != "" {
				return nil, errutil.UserError{Err: "multiple private keys not supported"}
			}
			privateKey = string(blockBytes)
		default:
			return nil, fmt.Errorf("unsupported PEM block type [%s]", pemBlock.Type)
		}
	}

	return toClientTLSConfig(certificate, privateKey, caChain, tlsMinVersion, serverName, insecureSkipVerify)
}

func toClientTLSConfig(certificatePEM string, privateKeyPEM string, caChainPEMs []string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) {
	if certificatePEM != "" && privateKeyPEM == "" {
		return nil, fmt.Errorf("found certificate for client-side TLS authentication but no private key")
	} else if certificatePEM == "" && privateKeyPEM != "" {
		return nil, fmt.Errorf("found private key for client-side TLS authentication but no certificate")
	}

	var certificates []tls.Certificate
	if certificatePEM != "" {
		certificate, err := tls.X509KeyPair([]byte(certificatePEM), []byte(privateKeyPEM))
		if err != nil {
			return nil, fmt.Errorf("failed to parse certificate and private key pair: %w", err)
		}
		certificates = append(certificates, certificate)
	}

	var rootCAs *x509.CertPool
	if len(caChainPEMs) > 0 {
		rootCAs = x509.NewCertPool()
		for _, caBlock := range caChainPEMs {
			ok := rootCAs.AppendCertsFromPEM([]byte(caBlock))
			if !ok {
				return nil, fmt.Errorf("failed to add CA certificate to certificate pool: it may be malformed or empty")
			}
		}
	}

	config := &tls.Config{
		Certificates:       certificates,
		RootCAs:            rootCAs,
		ServerName:         serverName,
		InsecureSkipVerify: insecureSkipVerify,
		MinVersion:         tlsMinVersion,
	}
	return config, nil
}
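The deleted helpers above wrap standard-library TLS plumbing: collect CA certificates into an x509.CertPool, optionally attach a client keypair, and hand back a *tls.Config. A minimal, self-contained sketch of that same pattern (not code from this repository; the CA path, server name, and port are placeholder assumptions):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"log"
	"os"
)

func main() {
	// Hypothetical CA bundle path; any PEM-encoded CA certificate works here.
	caPEM, err := os.ReadFile("ca.pem")
	if err != nil {
		log.Fatalf("failed to read CA bundle: %v", err)
	}

	// Same pool-building step toClientTLSConfig performs for ca_chain entries.
	rootCAs := x509.NewCertPool()
	if ok := rootCAs.AppendCertsFromPEM(caPEM); !ok {
		log.Fatal("failed to add CA certificate to pool: it may be malformed or empty")
	}

	cfg := &tls.Config{
		RootCAs:    rootCAs,
		ServerName: "db.example.internal", // placeholder; must match the server certificate
		MinVersion: tls.VersionTLS12,
	}

	// Placeholder address; 9042 is the conventional CQL port.
	conn, err := tls.Dial("tcp", "db.example.internal:9042", cfg)
	if err != nil {
		log.Fatalf("TLS dial failed: %v", err)
	}
	defer conn.Close()

	fmt.Printf("negotiated TLS version 0x%04x\n", conn.ConnectionState().Version)
}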