Break DB integration tests out into their own package (#16133)

Making all of our integration tests run entirely in parallel requires
a large engineering effort to enforce test isolation and remove all race
conditions between tests.

A lower-effort alternative may be to split apart the various test suites
into their own Go packages, and test those packages in parallel, even if
the tests inside are still executed serially. Auditing the test suites
for races on system-level resources (e.g. files, ports) is much easier
than chasing down every possible race in the testing system.
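As a sketch of the mechanics (the command below is illustrative, not part
of this change): the go tool already builds and runs distinct test packages
in parallel, bounded by the -p flag, while the tests inside each package
stay serial unless they opt in via t.Parallel(). So once a suite lives in
its own package, an invocation like

    go test -p 4 ./integration/...

runs it concurrently with its sibling suites without touching the tests
themselves.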

This patch acts as a trial run, breaking a fairly well-defined and
self-contained test suite out into its own package. Note that the goal of
this change is not necessarily to shave minutes off the build (although
that would be nice), but to act as an illustration of how other, less
well-formed test suites might be broken apart.

See-Also: #12421
See-Also: #14408
Trent Clarke 2022-09-07 11:04:35 +10:00 committed by GitHub
parent dc371d91b6
commit 9514a313c3
20 changed files with 1703 additions and 1630 deletions


@ -61,7 +61,7 @@ func TestClientWithExpiredCredentialsAndDetailedErrorMessage(t *testing.T) {
defer rc.StopAll()
// Create an expired identity file: ttl is 1 second in the past
identityFilePath := MustCreateUserIdentityFile(t, rc, username, -time.Second)
identityFilePath := helpers.MustCreateUserIdentityFile(t, rc, username, -time.Second)
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second)
defer cancelFunc()


@ -0,0 +1,860 @@
/*
Copyright 2020-2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package db
import (
"context"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/go-mysql-org/go-mysql/client"
"github.com/gravitational/trace"
"github.com/jackc/pgconn"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/srv/db"
"github.com/gravitational/teleport/lib/srv/db/common"
"github.com/gravitational/teleport/lib/srv/db/mongodb"
"github.com/gravitational/teleport/lib/srv/db/mysql"
"github.com/gravitational/teleport/lib/srv/db/postgres"
"github.com/gravitational/teleport/lib/tlsca"
)
// TestDatabaseAccess runs the database access integration test suite.
//
// It allows the entire cluster to be set up once, instead of once per
// test, which speeds things up significantly.
func TestDatabaseAccess(t *testing.T) {
pack := SetupDatabaseTest(t,
// set tighter rotation intervals
WithLeafConfig(func(config *service.Config) {
config.PollingPeriod = 5 * time.Second
config.RotationConnectionInterval = 2 * time.Second
}),
WithRootConfig(func(config *service.Config) {
config.PollingPeriod = 5 * time.Second
config.RotationConnectionInterval = 2 * time.Second
}),
)
pack.WaitForLeaf(t)
t.Run("PostgresRootCluster", pack.testPostgresRootCluster)
t.Run("PostgresLeafCluster", pack.testPostgresLeafCluster)
t.Run("MySQLRootCluster", pack.testMySQLRootCluster)
t.Run("MySQLLeafCluster", pack.testMySQLLeafCluster)
t.Run("MongoRootCluster", pack.testMongoRootCluster)
t.Run("MongoLeafCluster", pack.testMongoLeafCluster)
t.Run("MongoConnectionCount", pack.testMongoConnectionCount)
t.Run("HARootCluster", pack.testHARootCluster)
t.Run("HALeafCluster", pack.testHALeafCluster)
t.Run("LargeQuery", pack.testLargeQuery)
t.Run("AgentState", pack.testAgentState)
// This test should go last because it rotates the Database CA.
t.Run("RotateTrustedCluster", pack.testRotateTrustedCluster)
}
// TestDatabaseAccessSeparateListeners tests the Mongo and Postgres separate port setup.
func TestDatabaseAccessSeparateListeners(t *testing.T) {
pack := SetupDatabaseTest(t,
WithListenerSetupDatabaseTest(helpers.SeparateMongoAndPostgresPortSetup),
)
t.Run("PostgresSeparateListener", pack.testPostgresSeparateListener)
t.Run("MongoSeparateListener", pack.testMongoSeparateListener)
}
// testPostgresRootCluster tests a scenario where a user connects
// to a Postgres database running in a root cluster.
func (p *DatabasePack) testPostgresRootCluster(t *testing.T) {
// Connect to the database service in root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.PostgresService.Name,
Protocol: p.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.postgres.QueryCount() + 1
wantLeafQueryCount := p.Leaf.postgres.QueryCount()
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
// testPostgresLeafCluster tests a scenario where a user connects
// to a Postgres database running in a leaf cluster via a root cluster.
func (p *DatabasePack) testPostgresLeafCluster(t *testing.T) {
// Connect to the database service in leaf cluster via root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web, // Connecting via root cluster.
Cluster: p.Leaf.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Leaf.PostgresService.Name,
Protocol: p.Leaf.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.postgres.QueryCount()
wantLeafQueryCount := p.Leaf.postgres.QueryCount() + 1
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
func (p *DatabasePack) testRotateTrustedCluster(t *testing.T) {
// TODO(jakule): Fix flaky test
t.Skip("flaky test, skip for now")
var (
ctx = context.Background()
rootCluster = p.Root.Cluster
authServer = rootCluster.Process.GetAuthServer()
clusterRootName = rootCluster.Secrets.SiteName
clusterLeafName = p.Leaf.Cluster.Secrets.SiteName
)
pw := phaseWatcher{
clusterRootName: clusterRootName,
pollingPeriod: rootCluster.Process.Config.PollingPeriod,
clock: p.clock,
siteAPI: rootCluster.GetSiteAPI(clusterLeafName),
certType: types.DatabaseCA,
}
currentDbCA, err := p.Root.dbAuthClient.GetCertAuthority(ctx, types.CertAuthID{
Type: types.DatabaseCA,
DomainName: clusterRootName,
}, false)
require.NoError(t, err)
rotationPhases := []string{
types.RotationPhaseInit, types.RotationPhaseUpdateClients,
types.RotationPhaseUpdateServers, types.RotationPhaseStandby,
}
waitForEvent := func(process *service.TeleportProcess, event string) {
_, err := process.WaitForEventTimeout(20*time.Second, event)
require.NoError(t, err, "timeout waiting for service to broadcast event %s", event)
}
for _, phase := range rotationPhases {
errChan := make(chan error, 1)
go func() {
errChan <- pw.waitForPhase(phase, func() error {
return authServer.RotateCertAuthority(ctx, auth.RotateRequest{
Type: types.DatabaseCA,
TargetPhase: phase,
Mode: types.RotationModeManual,
})
})
}()
err = <-errChan
if err != nil && strings.Contains(err.Error(), "context deadline exceeded") {
// TODO(jakule): Workaround for CertAuthorityWatcher failing to get the correct rotation status.
// Query auth server directly to see if the incorrect rotation status is a rotation or watcher problem.
dbCA, err := p.Leaf.Cluster.Process.GetAuthServer().GetCertAuthority(ctx, types.CertAuthID{
Type: types.DatabaseCA,
DomainName: clusterRootName,
}, false)
require.NoError(t, err)
require.Equal(t, dbCA.GetRotation().Phase, phase)
} else {
require.NoError(t, err)
}
// Reload doesn't happen on Init
if phase == types.RotationPhaseInit {
continue
}
waitForEvent(p.Root.Cluster.Process, service.TeleportReloadEvent)
waitForEvent(p.Leaf.Cluster.Process, service.TeleportReadyEvent)
p.WaitForLeaf(t)
}
rotatedDbCA, err := authServer.GetCertAuthority(ctx, types.CertAuthID{
Type: types.DatabaseCA,
DomainName: clusterRootName,
}, false)
require.NoError(t, err)
// Sanity check: make sure the CA was actually rotated.
require.NotEqual(t, currentDbCA.GetActiveKeys(), rotatedDbCA.GetActiveKeys())
// Connect to the database service in leaf cluster via root cluster.
dbClient, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web, // Connecting via root cluster.
Cluster: p.Leaf.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Leaf.PostgresService.Name,
Protocol: p.Leaf.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantLeafQueryCount := p.Leaf.postgres.QueryCount() + 1
wantRootQueryCount := p.Root.postgres.QueryCount()
result, err := dbClient.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
// Disconnect.
err = dbClient.Close(context.Background())
require.NoError(t, err)
}
// phaseWatcher holds all arguments required by the rotation watcher.
type phaseWatcher struct {
clusterRootName string
pollingPeriod time.Duration
clock clockwork.Clock
siteAPI types.Events
certType types.CertAuthType
}
// waitForPhase waits until the root cluster detects the rotation. fn is a rotation function that is called after
// the watcher is created.
func (p *phaseWatcher) waitForPhase(phase string, fn func() error) error {
ctx, cancel := context.WithTimeout(context.Background(), p.pollingPeriod*10)
defer cancel()
watcher, err := services.NewCertAuthorityWatcher(ctx, services.CertAuthorityWatcherConfig{
ResourceWatcherConfig: services.ResourceWatcherConfig{
Component: teleport.ComponentProxy,
Clock: p.clock,
Client: p.siteAPI,
},
Types: []types.CertAuthType{p.certType},
})
if err != nil {
return err
}
defer watcher.Close()
if err := fn(); err != nil {
return trace.Wrap(err)
}
sub, err := watcher.Subscribe(ctx, types.CertAuthorityFilter{
p.certType: p.clusterRootName,
})
if err != nil {
return trace.Wrap(err)
}
defer sub.Close()
var lastPhase string
for i := 0; i < 10; i++ {
select {
case <-ctx.Done():
return trace.CompareFailed("failed to converge to phase %q, last phase %q certType: %v err: %v", phase, lastPhase, p.certType, ctx.Err())
case <-sub.Done():
return trace.CompareFailed("failed to converge to phase %q, last phase %q certType: %v err: %v", phase, lastPhase, p.certType, sub.Error())
case evt := <-sub.Events():
switch evt.Type {
case types.OpPut:
ca, ok := evt.Resource.(types.CertAuthority)
if !ok {
return trace.BadParameter("expected a ca got type %T", evt.Resource)
}
if ca.GetRotation().Phase == phase {
return nil
}
lastPhase = ca.GetRotation().Phase
}
}
}
return trace.CompareFailed("failed to converge to phase %q, last phase %q", phase, lastPhase)
}
// testMySQLRootCluster tests a scenario where a user connects
// to a MySQL database running in a root cluster.
func (p *DatabasePack) testMySQLRootCluster(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.MySQL,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.MysqlService.Name,
Protocol: p.Root.MysqlService.Protocol,
Username: "root",
// With MySQL, the database name doesn't matter as it's not subject to RBAC at the moment.
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.mysql.QueryCount() + 1
wantLeafQueryCount := p.Leaf.mysql.QueryCount()
// Execute a query.
result, err := client.Execute("select 1")
require.NoError(t, err)
require.Equal(t, mysql.TestQueryResponse, result)
require.Equal(t, wantRootQueryCount, p.Root.mysql.QueryCount())
require.Equal(t, wantLeafQueryCount, p.Leaf.mysql.QueryCount())
// Disconnect.
err = client.Close()
require.NoError(t, err)
}
// testMySQLLeafCluster tests a scenario where a user connects
// to a MySQL database running in a leaf cluster via a root cluster.
func (p *DatabasePack) testMySQLLeafCluster(t *testing.T) {
// Connect to the database service in leaf cluster via root cluster.
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.MySQL, // Connecting via root cluster.
Cluster: p.Leaf.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Leaf.MysqlService.Name,
Protocol: p.Leaf.MysqlService.Protocol,
Username: "root",
// With MySQL, the database name doesn't matter as it's not subject to RBAC at the moment.
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.mysql.QueryCount()
wantLeafQueryCount := p.Leaf.mysql.QueryCount() + 1
// Execute a query.
result, err := client.Execute("select 1")
require.NoError(t, err)
require.Equal(t, mysql.TestQueryResponse, result)
require.Equal(t, wantLeafQueryCount, p.Leaf.mysql.QueryCount())
require.Equal(t, wantRootQueryCount, p.Root.mysql.QueryCount())
// Disconnect.
err = client.Close()
require.NoError(t, err)
}
// testMongoRootCluster tests a scenario where a user connects
// to a Mongo database running in a root cluster.
func (p *DatabasePack) testMongoRootCluster(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.MongoService.Name,
Protocol: p.Root.MongoService.Protocol,
Username: "admin",
},
})
require.NoError(t, err)
// Execute a query.
_, err = client.Database("test").Collection("test").Find(context.Background(), bson.M{})
require.NoError(t, err)
// Disconnect.
err = client.Disconnect(context.Background())
require.NoError(t, err)
}
// testMongoConnectionCount tests that the mongo service releases
// resources after a mongo client disconnects.
func (p *DatabasePack) testMongoConnectionCount(t *testing.T) {
connectMongoClient := func(t *testing.T) (serverConnectionCount int32) {
// Connect to the database service in root cluster.
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.MongoService.Name,
Protocol: p.Root.MongoService.Protocol,
Username: "admin",
},
})
require.NoError(t, err)
// Execute a query.
_, err = client.Database("test").Collection("test").Find(context.Background(), bson.M{})
require.NoError(t, err)
// Get a server connection count before disconnect.
serverConnectionCount = p.Root.mongo.GetActiveConnectionsCount()
// Disconnect.
err = client.Disconnect(context.Background())
require.NoError(t, err)
return serverConnectionCount
}
// Get connection count while the first client is connected.
initialConnectionCount := connectMongoClient(t)
// Check if active connections count is not growing over time when new
// clients connect to the mongo server.
clientCount := 8
for i := 0; i < clientCount; i++ {
// Note that connection count per client fluctuates between 6 and 9.
// Use InDelta to avoid flaky test.
require.InDelta(t, initialConnectionCount, connectMongoClient(t), 3)
}
// Wait until the server reports no more connections. This usually happens
// quickly, but wait a little longer just in case.
waitUntilNoConnections := func() bool {
return p.Root.mongo.GetActiveConnectionsCount() == 0
}
require.Eventually(t, waitUntilNoConnections, 5*time.Second, 100*time.Millisecond)
}
// testMongoLeafCluster tests a scenario where a user connects
// to a Mongo database running in a leaf cluster.
func (p *DatabasePack) testMongoLeafCluster(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web, // Connecting via root cluster.
Cluster: p.Leaf.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Leaf.MongoService.Name,
Protocol: p.Leaf.MongoService.Protocol,
Username: "admin",
},
})
require.NoError(t, err)
// Execute a query.
_, err = client.Database("test").Collection("test").Find(context.Background(), bson.M{})
require.NoError(t, err)
// Disconnect.
err = client.Disconnect(context.Background())
require.NoError(t, err)
}
// TestDatabaseRootLeafIdleTimeout tests idle client connection termination by proxy and DB services in
// a trusted cluster setup.
func TestDatabaseRootLeafIdleTimeout(t *testing.T) {
clock := clockwork.NewFakeClockAt(time.Now())
pack := SetupDatabaseTest(t, WithClock(clock))
pack.WaitForLeaf(t)
var (
rootAuthServer = pack.Root.Cluster.Process.GetAuthServer()
rootRole = pack.Root.role
leafAuthServer = pack.Leaf.Cluster.Process.GetAuthServer()
leafRole = pack.Leaf.role
idleTimeout = time.Minute
)
mkMySQLLeafDBClient := func(t *testing.T) *client.Conn {
// Connect to the database service in leaf cluster via root cluster.
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: pack.Root.Cluster.MySQL, // Connecting via root cluster.
Cluster: pack.Leaf.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.Leaf.MysqlService.Name,
Protocol: pack.Leaf.MysqlService.Protocol,
Username: "root",
},
})
require.NoError(t, err)
return client
}
t.Run("root role without idle timeout", func(t *testing.T) {
client := mkMySQLLeafDBClient(t)
_, err := client.Execute("select 1")
require.NoError(t, err)
clock.Advance(idleTimeout)
_, err = client.Execute("select 1")
require.NoError(t, err)
err = client.Close()
require.NoError(t, err)
})
t.Run("root role with idle timeout", func(t *testing.T) {
setRoleIdleTimeout(t, rootAuthServer, rootRole, idleTimeout)
client := mkMySQLLeafDBClient(t)
_, err := client.Execute("select 1")
require.NoError(t, err)
now := clock.Now()
clock.Advance(idleTimeout)
helpers.WaitForAuditEventTypeWithBackoff(t, pack.Root.Cluster.Process.GetAuthServer(), now, events.ClientDisconnectEvent)
_, err = client.Execute("select 1")
require.Error(t, err)
setRoleIdleTimeout(t, rootAuthServer, rootRole, time.Hour)
})
t.Run("leaf role with idle timeout", func(t *testing.T) {
setRoleIdleTimeout(t, leafAuthServer, leafRole, idleTimeout)
client := mkMySQLLeafDBClient(t)
_, err := client.Execute("select 1")
require.NoError(t, err)
now := clock.Now()
clock.Advance(idleTimeout)
helpers.WaitForAuditEventTypeWithBackoff(t, pack.Leaf.Cluster.Process.GetAuthServer(), now, events.ClientDisconnectEvent)
_, err = client.Execute("select 1")
require.Error(t, err)
setRoleIdleTimeout(t, leafAuthServer, leafRole, time.Hour)
})
}
// TestDatabaseAccessUnspecifiedHostname tests the DB agent's reverse tunnel connection in the case where the host
// address is unspecified and thus not present in the valid principal list. The DB agent should replace the
// unspecified address (0.0.0.0) with localhost and successfully establish the reverse tunnel connection.
func TestDatabaseAccessUnspecifiedHostname(t *testing.T) {
pack := SetupDatabaseTest(t,
WithNodeName("0.0.0.0"),
)
// Connect to the database service in root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: pack.Root.Cluster.Web,
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.Root.PostgresService.Name,
Protocol: pack.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, uint32(1), pack.Root.postgres.QueryCount())
require.Equal(t, uint32(0), pack.Leaf.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
func (p *DatabasePack) testPostgresSeparateListener(t *testing.T) {
// Connect to the database service in root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Postgres,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.PostgresService.Name,
Protocol: p.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.postgres.QueryCount() + 1
wantLeafQueryCount := p.Leaf.postgres.QueryCount()
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
// TestDatabaseAccessPostgresSeparateListenerTLSDisabled tests the postgres proxy listener running on a separate
// port with TLS disabled.
func TestDatabaseAccessPostgresSeparateListenerTLSDisabled(t *testing.T) {
pack := SetupDatabaseTest(t,
WithListenerSetupDatabaseTest(helpers.SeparatePostgresPortSetup),
WithRootConfig(func(config *service.Config) {
config.Proxy.DisableTLS = true
}),
)
pack.testPostgresSeparateListener(t)
}
func init() {
// Override the database agents' shuffle behavior to ensure they're always
// tried in the same order during tests. Used for the HA tests.
db.SetShuffleFunc(db.ShuffleSort)
}
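// A note on the mechanics this relies on (inferred from the ShuffleSort
// name and the host ID comments in the HA tests below, not verified
// against the db package): with a sorted, deterministic order, a fake
// server registered with HostID "0000" compares lower than real agents
// (whose host IDs are random UUIDs) and is dialed first, forcing the
// proxy's fallback path to be exercised.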
// testHARootCluster verifies that the proxy falls back to a healthy
// database agent when multiple agents are serving the same database in a
// root cluster and one of them is down.
func (p *DatabasePack) testHARootCluster(t *testing.T) {
// Insert a database server entry not backed by an actual running agent
// to simulate a scenario when an agent is down but the resource hasn't
// expired from the backend yet.
dbServer, err := types.NewDatabaseServerV3(types.Metadata{
Name: p.Root.PostgresService.Name,
}, types.DatabaseServerSpecV3{
Protocol: defaults.ProtocolPostgres,
URI: p.Root.postgresAddr,
// To make sure the unhealthy server is always picked first in tests,
// make sure its host ID always compares as "smaller", since the tests
// sort agents.
HostID: "0000",
Hostname: "test",
})
require.NoError(t, err)
_, err = p.Root.Cluster.Process.GetAuthServer().UpsertDatabaseServer(
context.Background(), dbServer)
require.NoError(t, err)
// Connect to the database service in root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.PostgresService.Name,
Protocol: p.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.postgres.QueryCount() + 1
wantLeafQueryCount := p.Leaf.postgres.QueryCount()
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
// testHALeafCluster verifies that the proxy falls back to a healthy
// database agent when multiple agents are serving the same database in a
// leaf cluster and one of them is down.
func (p *DatabasePack) testHALeafCluster(t *testing.T) {
// Insert a database server entry not backed by an actual running agent
// to simulate a scenario when an agent is down but the resource hasn't
// expired from the backend yet.
dbServer, err := types.NewDatabaseServerV3(types.Metadata{
Name: p.Leaf.PostgresService.Name,
}, types.DatabaseServerSpecV3{
Protocol: defaults.ProtocolPostgres,
URI: p.Leaf.postgresAddr,
// To make sure the unhealthy server is always picked first in tests,
// make sure its host ID always compares as "smaller", since the tests
// sort agents.
HostID: "0000",
Hostname: "test",
})
require.NoError(t, err)
_, err = p.Leaf.Cluster.Process.GetAuthServer().UpsertDatabaseServer(
context.Background(), dbServer)
require.NoError(t, err)
// Connect to the database service in leaf cluster via root cluster.
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Web, // Connecting via root cluster.
Cluster: p.Leaf.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Leaf.PostgresService.Name,
Protocol: p.Leaf.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
})
require.NoError(t, err)
wantRootQueryCount := p.Root.postgres.QueryCount()
wantLeafQueryCount := p.Leaf.postgres.QueryCount() + 1
// Execute a query.
result, err := client.Exec(context.Background(), "select 1").ReadAll()
require.NoError(t, err)
require.Equal(t, []*pgconn.Result{postgres.TestQueryResponse}, result)
require.Equal(t, wantLeafQueryCount, p.Leaf.postgres.QueryCount())
require.Equal(t, wantRootQueryCount, p.Root.postgres.QueryCount())
// Disconnect.
err = client.Close(context.Background())
require.NoError(t, err)
}
// testMongoSeparateListener tests the mongo proxy listener running on a separate port.
func (p *DatabasePack) testMongoSeparateListener(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.Mongo,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.MongoService.Name,
Protocol: p.Root.MongoService.Protocol,
Username: "admin",
},
})
require.NoError(t, err)
// Execute a query.
_, err = client.Database("test").Collection("test").Find(context.Background(), bson.M{})
require.NoError(t, err)
// Disconnect.
err = client.Disconnect(context.Background())
require.NoError(t, err)
}
func (p *DatabasePack) testAgentState(t *testing.T) {
tests := map[string]struct {
agentParams databaseAgentStartParams
}{
"WithStaticDatabases": {
agentParams: databaseAgentStartParams{
databases: []service.Database{
{Name: "mysql", Protocol: defaults.ProtocolMySQL, URI: "localhost:3306"},
{Name: "pg", Protocol: defaults.ProtocolPostgres, URI: "localhost:5432"},
},
},
},
"WithResourceMatchers": {
agentParams: databaseAgentStartParams{
resourceMatchers: []services.ResourceMatcher{
{Labels: types.Labels{"*": []string{"*"}}},
},
},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
// Start also ensures that the database agent has the "ready" state.
// If the agent can't make it, this function will fail the test.
agent, _ := p.startRootDatabaseAgent(t, test.agentParams)
// In addition to the checks performed during the agent start,
// we'll request the diagnostic server to ensure the readyz route
// is returning the proper state.
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%v/readyz", agent.Config.DiagnosticAddr.Addr), nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
})
}
}
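// setRoleIdleTimeout sets the client idle timeout option on the given role
// and upserts the updated role.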
func setRoleIdleTimeout(t *testing.T, authServer *auth.Server, role types.Role, idleTimeout time.Duration) {
opts := role.GetOptions()
opts.ClientIdleTimeout = types.Duration(idleTimeout)
role.SetOptions(opts)
err := authServer.UpsertRole(context.Background(), role)
require.NoError(t, err)
}

integration/db/fixture.go

@ -0,0 +1,479 @@
// Copyright 2022 Gravitational, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package db
import (
"context"
"fmt"
"net"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/uuid"
"github.com/gravitational/teleport/api/breaker"
apidefaults "github.com/gravitational/teleport/api/defaults"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/testauthority"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/srv/db/common"
"github.com/gravitational/teleport/lib/srv/db/mongodb"
"github.com/gravitational/teleport/lib/srv/db/mysql"
"github.com/gravitational/teleport/lib/srv/db/postgres"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/require"
)
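// DatabasePack groups the root and leaf cluster fixtures shared by the
// database access tests.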
type DatabasePack struct {
Root databaseClusterPack
Leaf databaseClusterPack
clock clockwork.Clock
}
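// databaseClusterPack holds the per-cluster test state: the Teleport
// instance, test user and role, and the test Postgres, MySQL and Mongo
// servers with their registered database services.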
type databaseClusterPack struct {
Cluster *helpers.TeleInstance
User types.User
role types.Role
dbProcess *service.TeleportProcess
dbAuthClient *auth.Client
PostgresService service.Database
postgresAddr string
postgres *postgres.TestServer
MysqlService service.Database
mysqlAddr string
mysql *mysql.TestServer
MongoService service.Database
mongoAddr string
mongo *mongodb.TestServer
name string
}
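// mustListen opens a listener on a random localhost port and returns it
// together with its address, failing the test on error. The listener is
// closed automatically via t.Cleanup.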
func mustListen(t *testing.T) (net.Listener, string) {
t.Helper()
listener, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
t.Cleanup(func() { listener.Close() })
_, port, err := net.SplitHostPort(listener.Addr().String())
require.NoError(t, err)
return listener, net.JoinHostPort("localhost", port)
}
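// StartDatabaseServices registers the test Postgres, MySQL and Mongo
// databases with the cluster, starts a Teleport database agent serving
// them, and launches the backing test servers.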
func (pack *databaseClusterPack) StartDatabaseServices(t *testing.T, clock clockwork.Clock) {
var err error
var postgresListener, mysqlListener, mongoListener net.Listener
postgresListener, pack.postgresAddr = mustListen(t)
pack.PostgresService = service.Database{
Name: fmt.Sprintf("%s-postgres", pack.name),
Protocol: defaults.ProtocolPostgres,
URI: pack.postgresAddr,
}
mysqlListener, pack.mysqlAddr = mustListen(t)
pack.MysqlService = service.Database{
Name: fmt.Sprintf("%s-mysql", pack.name),
Protocol: defaults.ProtocolMySQL,
URI: pack.mysqlAddr,
}
mongoListener, pack.mongoAddr = mustListen(t)
pack.MongoService = service.Database{
Name: fmt.Sprintf("%s-mongo", pack.name),
Protocol: defaults.ProtocolMongoDB,
URI: pack.mongoAddr,
}
conf := service.MakeDefaultConfig()
conf.DataDir = filepath.Join(t.TempDir(), pack.name)
conf.SetToken("static-token-value")
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: pack.Cluster.Web,
},
}
conf.Databases.Enabled = true
conf.Databases.Databases = []service.Database{
pack.PostgresService,
pack.MysqlService,
pack.MongoService,
}
conf.Clock = clock
conf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
pack.dbProcess, pack.dbAuthClient, err = pack.Cluster.StartDatabase(conf)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, pack.dbProcess.Close()) })
// Create and start a test Postgres server for this cluster.
pack.postgres, err = postgres.NewTestServer(common.TestServerConfig{
AuthClient: pack.dbAuthClient,
Name: pack.PostgresService.Name,
Listener: postgresListener,
})
require.NoError(t, err)
go pack.postgres.Serve()
t.Cleanup(func() { pack.postgres.Close() })
// Create and start a test MySQL server for this cluster.
pack.mysql, err = mysql.NewTestServer(common.TestServerConfig{
AuthClient: pack.dbAuthClient,
Name: pack.MysqlService.Name,
Listener: mysqlListener,
})
require.NoError(t, err)
go pack.mysql.Serve()
t.Cleanup(func() { pack.mysql.Close() })
// Create and start a test Mongo server for this cluster.
pack.mongo, err = mongodb.NewTestServer(common.TestServerConfig{
AuthClient: pack.dbAuthClient,
Name: pack.MongoService.Name,
Listener: mongoListener,
})
require.NoError(t, err)
go pack.mongo.Serve()
t.Cleanup(func() { pack.mongo.Close() })
}
type testOptions struct {
clock clockwork.Clock
listenerSetup helpers.InstanceListenerSetupFunc
rootConfig func(config *service.Config)
leafConfig func(config *service.Config)
nodeName string
}
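// TestOptionFunc is a functional option for configuring SetupDatabaseTest.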
type TestOptionFunc func(*testOptions)
func (o *testOptions) setDefaultIfNotSet() {
if o.clock == nil {
o.clock = clockwork.NewRealClock()
}
if o.listenerSetup == nil {
o.listenerSetup = helpers.StandardListenerSetup
}
if o.nodeName == "" {
o.nodeName = helpers.Host
}
}
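// WithClock overrides the clock used by both test clusters.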
func WithClock(clock clockwork.Clock) TestOptionFunc {
return func(o *testOptions) {
o.clock = clock
}
}
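// WithNodeName overrides the node name used by both test clusters.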
func WithNodeName(nodeName string) TestOptionFunc {
return func(o *testOptions) {
o.nodeName = nodeName
}
}
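// WithListenerSetupDatabaseTest overrides how the cluster listeners are set
// up, e.g. to put the Postgres or Mongo listeners on separate ports.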
func WithListenerSetupDatabaseTest(fn helpers.InstanceListenerSetupFunc) TestOptionFunc {
return func(o *testOptions) {
o.listenerSetup = fn
}
}
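// WithRootConfig applies an extra configuration function to the root cluster config.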
func WithRootConfig(fn func(*service.Config)) TestOptionFunc {
return func(o *testOptions) {
o.rootConfig = fn
}
}
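// WithLeafConfig applies an extra configuration function to the leaf cluster config.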
func WithLeafConfig(fn func(*service.Config)) TestOptionFunc {
return func(o *testOptions) {
o.leafConfig = fn
}
}
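// SetupDatabaseTest creates root and leaf clusters with trust established
// between them, sets up users and roles with database access on both, and
// starts the database services and test servers in each cluster.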
func SetupDatabaseTest(t *testing.T, options ...TestOptionFunc) *DatabasePack {
var opts testOptions
for _, opt := range options {
opt(&opts)
}
opts.setDefaultIfNotSet()
// Some global setup.
tracer := utils.NewTracer(utils.ThisFunction()).Start()
t.Cleanup(func() { tracer.Stop() })
lib.SetInsecureDevMode(true)
log := utils.NewLoggerForTests()
// Generate keypair.
privateKey, publicKey, err := testauthority.New().GenerateKeyPair()
require.NoError(t, err)
p := &DatabasePack{
clock: opts.clock,
Root: databaseClusterPack{name: "root"},
Leaf: databaseClusterPack{name: "leaf"},
}
// Create root cluster.
rootCfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: opts.nodeName,
Priv: privateKey,
Pub: publicKey,
Log: log,
}
rootCfg.Listeners = opts.listenerSetup(t, &rootCfg.Fds)
p.Root.Cluster = helpers.NewInstance(t, rootCfg)
// Create leaf cluster.
leafCfg := helpers.InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New().String(),
NodeName: opts.nodeName,
Priv: privateKey,
Pub: publicKey,
Log: log,
}
leafCfg.Listeners = opts.listenerSetup(t, &leafCfg.Fds)
p.Leaf.Cluster = helpers.NewInstance(t, leafCfg)
// Make root cluster config.
rcConf := service.MakeDefaultConfig()
rcConf.DataDir = t.TempDir()
rcConf.Auth.Enabled = true
rcConf.Auth.Preference.SetSecondFactor("off")
rcConf.Proxy.Enabled = true
rcConf.Proxy.DisableWebInterface = true
rcConf.Clock = p.clock
rcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
if opts.rootConfig != nil {
opts.rootConfig(rcConf)
}
// Make leaf cluster config.
lcConf := service.MakeDefaultConfig()
lcConf.DataDir = t.TempDir()
lcConf.Auth.Enabled = true
lcConf.Auth.Preference.SetSecondFactor("off")
lcConf.Proxy.Enabled = true
lcConf.Proxy.DisableWebInterface = true
lcConf.Clock = p.clock
lcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
if opts.leafConfig != nil {
opts.leafConfig(lcConf)
}
// Establish trust between root and leaf.
err = p.Root.Cluster.CreateEx(t, p.Leaf.Cluster.Secrets.AsSlice(), rcConf)
require.NoError(t, err)
err = p.Leaf.Cluster.CreateEx(t, p.Root.Cluster.Secrets.AsSlice(), lcConf)
require.NoError(t, err)
// Start both clusters.
err = p.Leaf.Cluster.Start()
require.NoError(t, err)
t.Cleanup(func() {
p.Leaf.Cluster.StopAll()
})
err = p.Root.Cluster.Start()
require.NoError(t, err)
t.Cleanup(func() {
p.Root.Cluster.StopAll()
})
// Setup users and roles on both clusters.
p.setupUsersAndRoles(t)
// Update root's certificate authority on leaf to configure role mapping.
ca, err := p.Leaf.Cluster.Process.GetAuthServer().GetCertAuthority(context.Background(), types.CertAuthID{
Type: types.UserCA,
DomainName: p.Root.Cluster.Secrets.SiteName,
}, false)
require.NoError(t, err)
ca.SetRoles(nil) // Reset roles, otherwise they will take precedence.
ca.SetRoleMap(types.RoleMap{
{Remote: p.Root.role.GetName(), Local: []string{p.Leaf.role.GetName()}},
})
err = p.Leaf.Cluster.Process.GetAuthServer().UpsertCertAuthority(ca)
require.NoError(t, err)
// Start database services and test servers in both clusters.
p.StartDatabases(t)
return p
}
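// setupUsersAndRoles creates the root and leaf test users and roles and
// grants them wildcard database users and names.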
func (p *DatabasePack) setupUsersAndRoles(t *testing.T) {
var err error
p.Root.User, p.Root.role, err = auth.CreateUserAndRole(p.Root.Cluster.Process.GetAuthServer(), "root-user", nil)
require.NoError(t, err)
p.Root.role.SetDatabaseUsers(types.Allow, []string{types.Wildcard})
p.Root.role.SetDatabaseNames(types.Allow, []string{types.Wildcard})
err = p.Root.Cluster.Process.GetAuthServer().UpsertRole(context.Background(), p.Root.role)
require.NoError(t, err)
p.Leaf.User, p.Leaf.role, err = auth.CreateUserAndRole(p.Root.Cluster.Process.GetAuthServer(), "leaf-user", nil)
require.NoError(t, err)
p.Leaf.role.SetDatabaseUsers(types.Allow, []string{types.Wildcard})
p.Leaf.role.SetDatabaseNames(types.Allow, []string{types.Wildcard})
err = p.Leaf.Cluster.Process.GetAuthServer().UpsertRole(context.Background(), p.Leaf.role)
require.NoError(t, err)
}
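// WaitForLeaf waits until the leaf cluster's Postgres and MySQL database
// servers become visible through the root cluster's tunnel, failing the
// test after a timeout.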
func (p *DatabasePack) WaitForLeaf(t *testing.T) {
helpers.WaitForProxyCount(p.Leaf.Cluster, p.Root.Cluster.Secrets.SiteName, 1)
site, err := p.Root.Cluster.Tunnel.GetSite(p.Leaf.Cluster.Secrets.SiteName)
require.NoError(t, err)
accessPoint, err := site.CachingAccessPoint()
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
servers, err := accessPoint.GetDatabaseServers(ctx, apidefaults.Namespace)
if err != nil {
// Use the root logger as we need a configured logger instance and the root cluster has one.
p.Root.Cluster.Log.WithError(err).Debugf("Leaf cluster access point is unavailable.")
continue
}
if !containsDB(servers, p.Leaf.MysqlService.Name) {
p.Root.Cluster.Log.WithError(err).Debugf("Leaf db service %q is unavailable.", p.Leaf.MysqlService.Name)
continue
}
if !containsDB(servers, p.Leaf.PostgresService.Name) {
p.Root.Cluster.Log.WithError(err).Debugf("Leaf db service %q is unavailable.", p.Leaf.PostgresService.Name)
continue
}
return
case <-ctx.Done():
t.Fatal("Leaf cluster access point is unavailable.")
}
}
}
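// StartDatabases starts the database services and test servers in both clusters.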
func (p *DatabasePack) StartDatabases(t *testing.T) {
p.Root.StartDatabaseServices(t, p.clock)
p.Leaf.StartDatabaseServices(t, p.clock)
}
// databaseAgentStartParams holds the parameters used to configure a database agent.
type databaseAgentStartParams struct {
databases []service.Database
resourceMatchers []services.ResourceMatcher
}
// startRootDatabaseAgent starts a database agent with the provided
// configuration on the root cluster.
func (p *DatabasePack) startRootDatabaseAgent(t *testing.T, params databaseAgentStartParams) (*service.TeleportProcess, *auth.Client) {
conf := service.MakeDefaultConfig()
conf.DataDir = t.TempDir()
conf.SetToken("static-token-value")
conf.DiagnosticAddr = *utils.MustParseAddr(helpers.NewListener(t, service.ListenerDiagnostic, &conf.FileDescriptors))
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: p.Root.Cluster.Web,
},
}
conf.Clock = p.clock
conf.Databases.Enabled = true
conf.Databases.Databases = params.databases
conf.Databases.ResourceMatchers = params.resourceMatchers
conf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
server, authClient, err := p.Root.Cluster.StartDatabase(conf)
require.NoError(t, err)
t.Cleanup(func() {
server.Close()
})
return server, authClient
}
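// containsDB reports whether servers contains a database server whose
// database has the given name.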
func containsDB(servers []types.DatabaseServer, name string) bool {
for _, server := range servers {
if server.GetDatabase().GetName() == name {
return true
}
}
return false
}
// testLargeQuery tests a scenario where a user sends a large query to
// a MySQL database running in a root cluster.
func (p *DatabasePack) testLargeQuery(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.Root.Cluster.GetSiteAPI(p.Root.Cluster.Secrets.SiteName),
AuthServer: p.Root.Cluster.Process.GetAuthServer(),
Address: p.Root.Cluster.MySQL,
Cluster: p.Root.Cluster.Secrets.SiteName,
Username: p.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: p.Root.MysqlService.Name,
Protocol: p.Root.MysqlService.Protocol,
Username: "root",
},
})
require.NoError(t, err)
now := time.Now()
query := fmt.Sprintf("select %s", strings.Repeat("A", 100*1024))
result, err := client.Execute(query)
require.NoError(t, err)
require.Equal(t, mysql.TestQueryResponse, result)
result.Close()
ee := helpers.WaitForAuditEventTypeWithBackoff(t, p.Root.Cluster.Process.GetAuthServer(), now, events.DatabaseSessionQueryEvent)
require.Len(t, ee, 1)
query = "select 1"
result, err = client.Execute(query)
require.NoError(t, err)
require.Equal(t, mysql.TestQueryResponse, result)
result.Close()
require.Eventually(t, func() bool {
ee := helpers.WaitForAuditEventTypeWithBackoff(t, p.Root.Cluster.Process.GetAuthServer(), now, events.DatabaseSessionQueryEvent)
return len(ee) == 2
}, time.Second*3, time.Millisecond*500)
// Disconnect.
err = client.Close()
require.NoError(t, err)
}


@ -0,0 +1,28 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package db
import (
"testing"
"github.com/gravitational/teleport/integration/helpers"
)
// TestMain will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise, it will run tests as normal.
func TestMain(m *testing.M) {
helpers.TestMainImplementation(m)
}

File diff suppressed because it is too large


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
package helpers
import (
"context"
@ -29,38 +29,35 @@ import (
"time"
"github.com/gravitational/teleport/api/constants"
apidefaults "github.com/gravitational/teleport/api/defaults"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/integration/helpers"
apievents "github.com/gravitational/teleport/api/types/events"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/client"
libclient "github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/client/identityfile"
"github.com/gravitational/teleport/lib/teleagent"
"github.com/gravitational/teleport/lib/utils"
"github.com/stretchr/testify/require"
"github.com/gravitational/trace"
"golang.org/x/crypto/ssh/agent"
)
const (
Loopback = "127.0.0.1"
Host = "localhost"
)
// commandOptions controls how the SSH command is built.
type commandOptions struct {
forwardAgent bool
forcePTY bool
controlPath string
socketPath string
proxyPort string
nodePort string
command string
type CommandOptions struct {
ForwardAgent bool
ForcePTY bool
ControlPath string
SocketPath string
ProxyPort string
NodePort string
Command string
}
// externalSSHCommand runs an external SSH command (if an external ssh binary
// ExternalSSHCommand runs an external SSH command (if an external ssh binary
// exists) with the passed in parameters.
func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
func ExternalSSHCommand(o CommandOptions) (*exec.Cmd, error) {
var execArgs []string
// Don't check the host certificate as part of testing an external SSH
@ -69,30 +66,30 @@ func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
execArgs = append(execArgs, "-oUserKnownHostsFile=/dev/null")
// ControlMaster is often used by applications like Ansible.
if o.controlPath != "" {
if o.ControlPath != "" {
execArgs = append(execArgs, "-oControlMaster=auto")
execArgs = append(execArgs, "-oControlPersist=1s")
execArgs = append(execArgs, "-oConnectTimeout=2")
execArgs = append(execArgs, fmt.Sprintf("-oControlPath=%v", o.controlPath))
execArgs = append(execArgs, fmt.Sprintf("-oControlPath=%v", o.ControlPath))
}
// The -tt flag is used to force PTY allocation. It's often used by
// applications like Ansible.
if o.forcePTY {
if o.ForcePTY {
execArgs = append(execArgs, "-tt")
}
// Connect to node on the passed in port.
execArgs = append(execArgs, fmt.Sprintf("-p %v", o.nodePort))
execArgs = append(execArgs, fmt.Sprintf("-p %v", o.NodePort))
// Build proxy command.
proxyCommand := []string{"ssh"}
proxyCommand = append(proxyCommand, "-oStrictHostKeyChecking=no")
proxyCommand = append(proxyCommand, "-oUserKnownHostsFile=/dev/null")
if o.forwardAgent {
if o.ForwardAgent {
proxyCommand = append(proxyCommand, "-oForwardAgent=yes")
}
proxyCommand = append(proxyCommand, fmt.Sprintf("-p %v", o.proxyPort))
proxyCommand = append(proxyCommand, fmt.Sprintf("-p %v", o.ProxyPort))
proxyCommand = append(proxyCommand, `%r@localhost -s proxy:%h:%p`)
// Add in ProxyCommand option, needed for all Teleport connections.
@ -100,7 +97,7 @@ func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
// Add in the host to connect to and the command to run when connected.
execArgs = append(execArgs, Host)
execArgs = append(execArgs, o.command)
execArgs = append(execArgs, o.Command)
// Find the OpenSSH binary.
sshpath, err := exec.LookPath("ssh")
@ -113,15 +110,15 @@ func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
if err != nil {
return nil, trace.Wrap(err)
}
cmd.Env = []string{fmt.Sprintf("SSH_AUTH_SOCK=%v", o.socketPath)}
cmd.Env = []string{fmt.Sprintf("SSH_AUTH_SOCK=%v", o.SocketPath)}
return cmd, nil
}
// createAgent creates a SSH agent with the passed in private key and
// CreateAgent creates a SSH agent with the passed in private key and
// certificate that can be used in tests. This is useful so tests don't
// clobber your system agent.
func createAgent(me *user.User, key *client.Key) (*teleagent.AgentServer, string, string, error) {
func CreateAgent(me *user.User, key *client.Key) (*teleagent.AgentServer, string, string, error) {
// create a path to the unix socket
sockDirName := "int-test"
sockName := "agent.sock"
@ -151,7 +148,7 @@ func createAgent(me *user.User, key *client.Key) (*teleagent.AgentServer, string
return teleAgent, teleAgent.Dir, teleAgent.Path, nil
}
func closeAgent(teleAgent *teleagent.AgentServer, socketDirPath string) error {
func CloseAgent(teleAgent *teleagent.AgentServer, socketDirPath string) error {
err := teleAgent.Close()
if err != nil {
return trace.Wrap(err)
@ -165,8 +162,8 @@ func closeAgent(teleAgent *teleagent.AgentServer, socketDirPath string) error {
return nil
}
// getLocalIP gets the non-loopback IP address of this host.
func getLocalIP() (string, error) {
// GetLocalIP gets the non-loopback IP address of this host.
func GetLocalIP() (string, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", trace.Wrap(err)
@ -188,7 +185,7 @@ func getLocalIP() (string, error) {
return "", trace.NotFound("No non-loopback local IP address found")
}
func MustCreateUserIdentityFile(t *testing.T, tc *helpers.TeleInstance, username string, ttl time.Duration) string {
func MustCreateUserIdentityFile(t *testing.T, tc *TeleInstance, username string, ttl time.Duration) string {
key, err := libclient.GenerateRSAKey()
require.NoError(t, err)
key.ClusterName = tc.Secrets.SiteName
@ -216,3 +213,47 @@ func MustCreateUserIdentityFile(t *testing.T, tc *helpers.TeleInstance, username
require.NoError(t, err)
return idPath
}
// WaitForProxyCount waits up to a set time for the proxy count in clusterName to
// reach some value.
func WaitForProxyCount(t *TeleInstance, clusterName string, count int) error {
var counts map[string]int
start := time.Now()
for time.Since(start) < 17*time.Second {
counts = t.RemoteClusterWatcher.Counts()
if counts[clusterName] == count {
return nil
}
time.Sleep(500 * time.Millisecond)
}
return trace.BadParameter("proxy count on %v: %v (wanted %v)", clusterName, counts[clusterName], count)
}
func WaitForAuditEventTypeWithBackoff(t *testing.T, cli *auth.Server, startTime time.Time, eventType string) []apievents.AuditEvent {
max := time.Second
timeout := time.After(max)
bf, err := utils.NewLinear(utils.LinearConfig{
Step: max / 10,
Max: max,
})
if err != nil {
t.Fatalf("failed to create linear backoff: %v", err)
}
for {
events, _, err := cli.SearchEvents(startTime, time.Now().Add(time.Hour), apidefaults.Namespace, []string{eventType}, 100, types.EventOrderAscending, "")
if err != nil {
t.Fatalf("failed to call SearchEvents: %v", err)
}
if len(events) != 0 {
return events
}
select {
case <-bf.After():
bf.Inc()
case <-timeout:
t.Fatalf("event type %q not found after %v", eventType, max)
}
}
}


@ -0,0 +1,43 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helpers
import (
"os"
"testing"
"time"
"github.com/gravitational/teleport/lib/srv"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/teleport/common"
)
// TestMainImplementation will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise, it will run tests as normal.
func TestMainImplementation(m *testing.M) {
utils.InitLoggerForTests()
SetTestTimeouts(100 * time.Millisecond)
// If the test is re-executing itself, execute the command that comes over
// the pipe.
if srv.IsReexec() {
common.Run(common.Options{Args: os.Args[1:]})
return
}
// Otherwise run tests as normal.
code := m.Run()
os.Exit(code)
}
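This keeps the per-package boilerplate small: any future split-out suite
gets re-exec support with a three-line TestMain, along the lines of the
sketch below (the package name is hypothetical):

package kube

import (
    "testing"

    "github.com/gravitational/teleport/integration/helpers"
)

// TestMain delegates to the shared implementation so that re-executed
// Teleport commands work inside this package's tests too.
func TestMain(m *testing.M) {
    helpers.TestMainImplementation(m)
}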


@ -17,3 +17,10 @@ limitations under the License.
// Package integration tests Teleport at a high level, creating clusters
// of servers in memory, connecting them together and connecting to them.
package integration
import "github.com/gravitational/teleport/integration/helpers"
const (
Host = helpers.Host
Loopback = helpers.Loopback
)


@ -1975,7 +1975,7 @@ func testTwoClustersProxy(t *testing.T, suite *integrationTestSuite) {
// httpproxy doesn't allow proxying when the target is localhost, so use
// this address instead.
addr, err := getLocalIP()
addr, err := helpers.GetLocalIP()
require.NoError(t, err)
a := suite.newNamedTeleportInstance(t, "site-A",
WithNodeName(addr),
@ -2928,12 +2928,12 @@ func testDiscoveryRecovers(t *testing.T, suite *integrationTestSuite) {
// create first numbered proxy
_, c0 := addNewMainProxy(pname(0))
// check that we now have two tunnel connections
require.NoError(t, waitForProxyCount(remote, "cluster-main", 2))
require.NoError(t, helpers.WaitForProxyCount(remote, "cluster-main", 2))
// check that first numbered proxy is OK.
testProxyConn(&c0, false)
// remove the initial proxy.
require.NoError(t, lb.RemoveBackend(mainProxyAddr))
require.NoError(t, waitForProxyCount(remote, "cluster-main", 1))
require.NoError(t, helpers.WaitForProxyCount(remote, "cluster-main", 1))
// force bad state by iteratively removing previous proxy before
// adding next proxy; this ensures that discovery protocol's list of
@ -2941,9 +2941,9 @@ func testDiscoveryRecovers(t *testing.T, suite *integrationTestSuite) {
for i := 0; i < 6; i++ {
prev, next := pname(i), pname(i+1)
killMainProxy(prev)
require.NoError(t, waitForProxyCount(remote, "cluster-main", 0))
require.NoError(t, helpers.WaitForProxyCount(remote, "cluster-main", 0))
_, cn := addNewMainProxy(next)
require.NoError(t, waitForProxyCount(remote, "cluster-main", 1))
require.NoError(t, helpers.WaitForProxyCount(remote, "cluster-main", 1))
testProxyConn(&cn, false)
}
@ -3071,7 +3071,7 @@ func testDiscovery(t *testing.T, suite *integrationTestSuite) {
require.NoError(t, err)
// Wait for the remote cluster to detect the outbound connection is gone.
require.NoError(t, waitForProxyCount(remote, "cluster-main", 1))
require.NoError(t, helpers.WaitForProxyCount(remote, "cluster-main", 1))
// Stop both clusters and remaining nodes.
require.NoError(t, remote.StopAll())
@ -3386,23 +3386,6 @@ func waitForActivePeerProxyConnections(t *testing.T, tunnel reversetunnel.Server
)
}
// waitForProxyCount waits a set time for the proxy count in clusterName to
// reach some value.
func waitForProxyCount(t *helpers.TeleInstance, clusterName string, count int) error {
var counts map[string]int
start := time.Now()
for time.Since(start) < 17*time.Second {
counts = t.RemoteClusterWatcher.Counts()
if counts[clusterName] == count {
return nil
}
time.Sleep(500 * time.Millisecond)
}
return trace.BadParameter("proxy count on %v: %v (wanted %v)", clusterName, counts[clusterName], count)
}
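The deleted helper presumably lands in the helpers package essentially verbatim, renamed to the exported form used at the call sites above. A sketch of the assumed new version (the moved code is not shown in this diff):

    // WaitForProxyCount waits a set time for the proxy count in clusterName to
    // reach some value.
    func WaitForProxyCount(t *TeleInstance, clusterName string, count int) error {
        var counts map[string]int
        start := time.Now()
        for time.Since(start) < 17*time.Second {
            counts = t.RemoteClusterWatcher.Counts()
            if counts[clusterName] == count {
                return nil
            }
            time.Sleep(500 * time.Millisecond)
        }
        return trace.BadParameter("proxy count on %v: %v (wanted %v)", clusterName, counts[clusterName], count)
    }

Inside the helpers package the *TeleInstance parameter no longer needs qualification; everything else can move unchanged.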
// waitForNodeCount waits for a certain number of nodes to show up in the remote site.
func waitForNodeCount(ctx context.Context, t *helpers.TeleInstance, clusterName string, count int) error {
const (
@ -3566,17 +3549,17 @@ func testExternalClient(t *testing.T, suite *integrationTestSuite) {
require.NoError(t, err)
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(suite.Me, &creds.Key)
teleAgent, socketDirPath, socketPath, err := helpers.CreateAgent(suite.Me, &creds.Key)
require.NoError(t, err)
defer closeAgent(teleAgent, socketDirPath)
defer helpers.CloseAgent(teleAgent, socketDirPath)
// Create a *exec.Cmd that will execute the external SSH command.
execCmd, err := externalSSHCommand(commandOptions{
forwardAgent: tt.inForwardAgent,
socketPath: socketPath,
proxyPort: helpers.PortStr(t, teleport.SSHProxy),
nodePort: helpers.PortStr(t, teleport.SSH),
command: tt.inCommand,
execCmd, err := helpers.ExternalSSHCommand(helpers.CommandOptions{
ForwardAgent: tt.inForwardAgent,
SocketPath: socketPath,
ProxyPort: helpers.PortStr(t, teleport.SSHProxy),
NodePort: helpers.PortStr(t, teleport.SSH),
Command: tt.inCommand,
})
require.NoError(t, err)
@ -3662,22 +3645,22 @@ func testControlMaster(t *testing.T, suite *integrationTestSuite) {
require.NoError(t, err)
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(suite.Me, &creds.Key)
teleAgent, socketDirPath, socketPath, err := helpers.CreateAgent(suite.Me, &creds.Key)
require.NoError(t, err)
defer closeAgent(teleAgent, socketDirPath)
defer helpers.CloseAgent(teleAgent, socketDirPath)
// Create and run an exec command twice with the passed in ControlPath. This
// will cause re-use of the connection and creation of two sessions within
// the connection.
for i := 0; i < 2; i++ {
execCmd, err := externalSSHCommand(commandOptions{
forcePTY: true,
forwardAgent: true,
controlPath: controlPath,
socketPath: socketPath,
proxyPort: helpers.PortStr(t, teleport.SSHProxy),
nodePort: helpers.PortStr(t, teleport.SSH),
command: "echo hello",
execCmd, err := helpers.ExternalSSHCommand(helpers.CommandOptions{
ForcePTY: true,
ForwardAgent: true,
ControlPath: controlPath,
SocketPath: socketPath,
ProxyPort: helpers.PortStr(t, teleport.SSHProxy),
NodePort: helpers.PortStr(t, teleport.SSH),
Command: "echo hello",
})
require.NoError(t, err)

View file

@ -16,29 +16,13 @@ limitations under the License.
package integration
import (
"os"
"testing"
"time"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/srv"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/teleport/common"
)
// TestMain will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise, it will run tests as normal.
func TestMain(m *testing.M) {
utils.InitLoggerForTests()
helpers.SetTestTimeouts(100 * time.Millisecond)
// If the test is re-executing itself, execute the command that comes over
// the pipe.
if srv.IsReexec() {
common.Run(common.Options{Args: os.Args[1:]})
return
}
// Otherwise run tests as normal.
code := m.Run()
os.Exit(code)
helpers.TestMainImplementation(m)
}

View file

@ -49,6 +49,8 @@ import (
"github.com/gravitational/teleport/lib/srv/db/postgres"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
dbhelpers "github.com/gravitational/teleport/integration/db"
)
// TestALPNSNIProxyMultiCluster tests SSH connection in multi-cluster setup with.
@ -222,7 +224,7 @@ func TestALPNSNIHTTPSProxy(t *testing.T) {
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
addr, err := helpers.GetLocalIP()
require.NoError(t, err)
suite := newProxySuite(t,
@ -263,7 +265,7 @@ func TestMultiPortHTTPSProxy(t *testing.T) {
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
addr, err := helpers.GetLocalIP()
require.NoError(t, err)
suite := newProxySuite(t,
@ -287,7 +289,7 @@ func TestMultiPortHTTPSProxy(t *testing.T) {
}
// TestALPNSNIProxyKube tests Kubernetes access with a custom Kube API mock where traffic is forwarded via
//SNI ALPN proxy service to Kubernetes service based on TLS SNI value.
// SNI ALPN proxy service to Kubernetes service based on TLS SNI value.
func TestALPNSNIProxyKube(t *testing.T) {
const (
localK8SNI = "kube.teleport.cluster.local"
@ -399,29 +401,29 @@ func TestALPNSNIProxyKubeV2Leaf(t *testing.T) {
// TestALPNSNIProxyDatabaseAccess tests a DB connection forwarded through a local SNI ALPN proxy where the
// DB protocol is wrapped in TLS, forwarded to the proxy ALPN SNI service, and routed to the appropriate db service.
func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
pack := setupDatabaseTest(t,
withListenerSetupDatabaseTest(helpers.SingleProxyPortSetup),
withLeafConfig(func(config *service.Config) {
pack := dbhelpers.SetupDatabaseTest(t,
dbhelpers.WithListenerSetupDatabaseTest(helpers.SingleProxyPortSetup),
dbhelpers.WithLeafConfig(func(config *service.Config) {
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
}),
withRootConfig(func(config *service.Config) {
dbhelpers.WithRootConfig(func(config *service.Config) {
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
}),
)
pack.waitForLeaf(t)
pack.WaitForLeaf(t)
t.Run("mysql", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolMySQL)
lp := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolMySQL)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mysqlService.Name,
Protocol: pack.root.mysqlService.Protocol,
ServiceName: pack.Root.MysqlService.Name,
Protocol: pack.Root.MysqlService.Protocol,
Username: "root",
},
})
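The rename from pack.root.cluster to pack.Root.Cluster follows from the suite moving into its own package: fields consumed by tests in other packages must be exported. Inferring only from the call sites in this diff, the exported surface looks roughly like this (the struct and field types are hypothetical reconstructions, not taken from the source):

    // Hypothetical shape, inferred from pack.Root.* / pack.Leaf.* usage below.
    type databaseClusterPack struct {
        Cluster         *helpers.TeleInstance // .SSHProxy, .Secrets, .Process, ...
        User            types.User            // .GetName() for the test username
        MysqlService    service.Database      // .Name and .Protocol route the connection
        PostgresService service.Database
        MongoService    service.Database
    }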
@ -439,14 +441,14 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
t.Run("connect to leaf cluster via proxy", func(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.leaf.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Leaf.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.leaf.mysqlService.Name,
Protocol: pack.leaf.mysqlService.Protocol,
ServiceName: pack.Leaf.MysqlService.Name,
Protocol: pack.Leaf.MysqlService.Protocol,
Username: "root",
},
})
@ -462,16 +464,16 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
require.NoError(t, err)
})
t.Run("connect to main cluster via proxy using ping protocol", func(t *testing.T) {
pingProxy := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolMySQL))
pingProxy := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolMySQL))
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: pingProxy.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mysqlService.Name,
Protocol: pack.root.mysqlService.Protocol,
ServiceName: pack.Root.MysqlService.Name,
Protocol: pack.Root.MysqlService.Protocol,
Username: "root",
},
})
@ -490,17 +492,17 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("postgres", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolPostgres)
lp := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolPostgres)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.postgresService.Name,
Protocol: pack.root.postgresService.Protocol,
ServiceName: pack.Root.PostgresService.Name,
Protocol: pack.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
@ -511,14 +513,14 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("connect to leaf cluster via proxy", func(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.leaf.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Leaf.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.leaf.postgresService.Name,
Protocol: pack.leaf.postgresService.Protocol,
ServiceName: pack.Leaf.PostgresService.Name,
Protocol: pack.Leaf.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
@ -528,16 +530,16 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
mustClosePostgresClient(t, client)
})
t.Run("connect to main cluster via proxy with ping protocol", func(t *testing.T) {
pingProxy := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolPostgres))
pingProxy := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolPostgres))
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: pingProxy.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.postgresService.Name,
Protocol: pack.root.postgresService.Protocol,
ServiceName: pack.Root.PostgresService.Name,
Protocol: pack.Root.PostgresService.Protocol,
Username: "postgres",
Database: "test",
},
@ -549,17 +551,17 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("mongo", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolMongoDB)
lp := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolMongoDB)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mongoService.Name,
Protocol: pack.root.mongoService.Protocol,
ServiceName: pack.Root.MongoService.Name,
Protocol: pack.Root.MongoService.Protocol,
Username: "admin",
},
})
@ -575,14 +577,14 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("connect to leaf cluster via proxy", func(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.leaf.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Leaf.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.leaf.mongoService.Name,
Protocol: pack.leaf.mongoService.Protocol,
ServiceName: pack.Leaf.MongoService.Name,
Protocol: pack.Leaf.MongoService.Protocol,
Username: "admin",
},
})
@ -597,16 +599,16 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
require.NoError(t, err)
})
t.Run("connect to main cluster via proxy with ping protocol", func(t *testing.T) {
pingProxy := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolMongoDB))
pingProxy := mustStartALPNLocalProxy(t, pack.Root.Cluster.SSHProxy, alpncommon.ProtocolWithPing(alpncommon.ProtocolMongoDB))
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: pingProxy.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mongoService.Name,
Protocol: pack.root.mongoService.Protocol,
ServiceName: pack.Root.MongoService.Name,
Protocol: pack.Root.MongoService.Protocol,
Username: "admin",
},
})
@ -630,7 +632,7 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
t.Run("ALPN connection upgrade", func(t *testing.T) {
// Make a mock ALB which points to the Teleport Proxy Service. Then
// ALPN local proxies will point to this ALB instead.
albProxy := mustStartMockALBProxy(t, pack.root.cluster.Web)
albProxy := mustStartMockALBProxy(t, pack.Root.Cluster.Web)
// Test a protocol in the alpncommon.IsDBTLSProtocol list where
// the database client will perform a native TLS handshake.
@ -648,14 +650,14 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
InsecureSkipVerify: true,
})
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mongoService.Name,
Protocol: pack.root.mongoService.Protocol,
ServiceName: pack.Root.MongoService.Name,
Protocol: pack.Root.MongoService.Protocol,
Username: "admin",
},
})
@ -685,14 +687,14 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
InsecureSkipVerify: true,
})
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Address: lp.GetAddr(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
ServiceName: pack.root.mysqlService.Name,
Protocol: pack.root.mysqlService.Protocol,
ServiceName: pack.Root.MysqlService.Name,
Protocol: pack.Root.MysqlService.Protocol,
Username: "root",
},
})
@ -716,15 +718,15 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
// - MySQL protocol
t.Run("authenticated tunnel", func(t *testing.T) {
routeToDatabase := tlsca.RouteToDatabase{
ServiceName: pack.root.mysqlService.Name,
Protocol: pack.root.mysqlService.Protocol,
ServiceName: pack.Root.MysqlService.Name,
Protocol: pack.Root.MysqlService.Protocol,
Username: "root",
}
clientTLSConfig, err := common.MakeTestClientTLSConfig(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
AuthClient: pack.Root.Cluster.GetSiteAPI(pack.Root.Cluster.Secrets.SiteName),
AuthServer: pack.Root.Cluster.Process.GetAuthServer(),
Cluster: pack.Root.Cluster.Secrets.SiteName,
Username: pack.Root.User.GetName(),
RouteToDatabase: routeToDatabase,
})
require.NoError(t, err)
@ -858,7 +860,7 @@ func TestALPNProxyAuthClientConnectWithUserIdentity(t *testing.T) {
require.NoError(t, err)
defer rc.StopAll()
identityFilePath := MustCreateUserIdentityFile(t, rc, username, time.Hour)
identityFilePath := helpers.MustCreateUserIdentityFile(t, rc, username, time.Hour)
identity := client.LoadIdentityFile(identityFilePath)
require.NoError(t, err)
@ -953,7 +955,7 @@ func TestALPNProxyHTTPProxyNoProxyDial(t *testing.T) {
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
addr, err := helpers.GetLocalIP()
require.NoError(t, err)
instanceCfg := helpers.InstanceConfig{
@ -1032,7 +1034,7 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) {
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
rcAddr, err := getLocalIP()
rcAddr, err := helpers.GetLocalIP()
require.NoError(t, err)
log.Info("Creating Teleport instance...")

View file

@ -397,7 +397,13 @@ func (p *proxyTunnelStrategy) makeDatabase(t *testing.T) {
require.Fail(t, "database already initialized")
}
dbAddr := net.JoinHostPort(Host, helpers.NewPortStr())
dbListener, err := net.Listen("tcp", net.JoinHostPort(Host, "0"))
require.NoError(t, err)
_, portStr, err := net.SplitHostPort(dbListener.Addr().String())
require.NoError(t, err)
dbAddr := net.JoinHostPort(Host, portStr)
// setup database service
db := helpers.NewInstance(t, helpers.InstanceConfig{
@ -460,7 +466,7 @@ func (p *proxyTunnelStrategy) makeDatabase(t *testing.T) {
postgresDB, err := postgres.NewTestServer(common.TestServerConfig{
AuthClient: client,
Name: p.cluster + "-postgres",
Address: dbAddr,
Listener: dbListener,
})
require.NoError(t, err)
go postgresDB.Serve()
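The old code chose a free port with helpers.NewPortStr and bound it later, leaving a window in which another test could claim the port; binding first with port 0 and reading the kernel-assigned port back closes that race. The idiom in isolation (a sketch, separate from the harness):

    // freeListener binds to an OS-assigned port and reports the resulting
    // address; the port choice and the bind happen atomically in the kernel.
    func freeListener(host string) (net.Listener, string, error) {
        l, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
        if err != nil {
            return nil, "", err
        }
        _, port, err := net.SplitHostPort(l.Addr().String())
        if err != nil {
            l.Close()
            return nil, "", err
        }
        return l, net.JoinHostPort(host, port), nil
    }

Handing the listener itself to the test server (the new Listener field in the next file) goes one step further: the server never has to re-bind the address at all.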

View file

@ -21,6 +21,7 @@ import (
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"net"
"time"
"github.com/gravitational/teleport/api/client/proto"
@ -40,8 +41,6 @@ type TestServerConfig struct {
AuthClient auth.ClientI
// Name is the server name for identification purposes.
Name string
// Address is an optional server listen address.
Address string
// AuthUser is used in tests simulating IAM token authentication.
AuthUser string
// AuthToken is used in tests simulating IAM token authentication.
@ -57,6 +56,40 @@ type TestServerConfig struct {
// ClientAuth sets tls.ClientAuth in server's tls.Config. It can be used to force client
// certificate validation in tests.
ClientAuth tls.ClientAuthType
Listener net.Listener
}
func (cfg *TestServerConfig) CheckAndSetDefaults() error {
if cfg.Listener == nil {
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
return trace.Wrap(err)
}
cfg.Listener = listener
}
return nil
}
func (cfg *TestServerConfig) CloseOnError(err *error) error {
if *err != nil {
return cfg.Close()
}
return nil
}
func (cfg *TestServerConfig) Close() error {
return cfg.Listener.Close()
}
func (cfg *TestServerConfig) Port() (string, error) {
_, port, err := net.SplitHostPort(cfg.Listener.Addr().String())
if err != nil {
return "", trace.Wrap(err)
}
return port, nil
}
// MakeTestServerTLSConfig returns TLS config suitable for configuring test
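The named error returns introduced in the constructors below exist for CloseOnError's benefit: a deferred call captures the pointer, not the value, so it observes whatever error the function ultimately returns and releases the listener on any failure path. The intended pattern, condensed (it mirrors the constructors later in this diff):

    func NewTestServer(config common.TestServerConfig) (svr *TestServer, err error) {
        if err = config.CheckAndSetDefaults(); err != nil {
            return nil, trace.Wrap(err)
        }
        // Deferred with a pointer to the named return: if any later step
        // sets err, the listener owned by config is closed before returning.
        defer config.CloseOnError(&err)
        // ... build and return the server ...
        return &TestServer{cfg: config}, nil
    }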

View file

@ -46,29 +46,28 @@ type TestServer struct {
}
// NewTestServer returns a new instance of a test Elasticsearch server.
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (svr *TestServer, err error) {
err = config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
defer config.CloseOnError(&err)
tlsConfig, err := common.MakeTestServerTLSConfig(config)
if err != nil {
return nil, trace.Wrap(err)
}
tlsConfig.InsecureSkipVerify = true
listener, err := net.Listen("tcp", address)
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
testServer := &TestServer{
cfg: config,
listener: listener,
listener: config.Listener,
port: port,
tlsConfig: tlsConfig,
log: logrus.WithFields(logrus.Fields{

View file

@ -90,16 +90,14 @@ func TestServerWireVersion(wireVersion int) TestServerOption {
}
// NewTestServer returns a new instance of a test MongoDB server.
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
}
listener, err := net.Listen("tcp", address)
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (svr *TestServer, err error) {
err = config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
defer config.CloseOnError(&err)
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
@ -114,7 +112,7 @@ func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*T
server := &TestServer{
cfg: config,
// MongoDB uses regular TLS handshake so standard TLS listener will work.
listener: tls.NewListener(listener, tlsConfig),
listener: tls.NewListener(config.Listener, tlsConfig),
port: port,
log: log,
}

View file

@ -97,28 +97,28 @@ func WithServerVersion(serverVersion string) TestServerOption {
}
// NewTestServer returns a new instance of a test MySQL server.
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (svr *TestServer, err error) {
err = config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
defer config.CloseOnError(&err)
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
tlsConfig, err := common.MakeTestServerTLSConfig(config)
if err != nil {
return nil, trace.Wrap(err)
}
var listener net.Listener
listener := config.Listener
if config.ListenTLS {
listener, err = tls.Listen("tcp", address, tlsConfig)
} else {
listener, err = net.Listen("tcp", address)
}
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
if err != nil {
return nil, trace.Wrap(err)
listener = tls.NewListener(listener, tlsConfig)
}
log := logrus.WithFields(logrus.Fields{
trace.Component: defaults.ProtocolMySQL,
"name": config.Name,
@ -130,9 +130,11 @@ func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*T
log: log,
handler: &testHandler{log: log},
}
if !config.ListenTLS {
server.tlsConfig = tlsConfig
}
for _, o := range opts {
o(server)
}
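Wrapping the pre-bound listener with tls.NewListener is equivalent to the old tls.Listen call, which is itself just a bind followed by the same wrap; the change only hoists the bind out of the constructor. Roughly:

    // tlsListen shows what tls.Listen("tcp", addr, cfg) amounts to: a plain
    // bind followed by the same wrapping used in the hunk above.
    func tlsListen(addr string, cfg *tls.Config) (net.Listener, error) {
        inner, err := net.Listen("tcp", addr)
        if err != nil {
            return nil, err
        }
        return tls.NewListener(inner, cfg), nil
    }

The added guard (server.tlsConfig is only set when ListenTLS is false) presumably keeps the handler from attempting a second TLS handshake on connections the listener has already terminated.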

View file

@ -76,16 +76,14 @@ type TestServer struct {
}
// NewTestServer returns a new instance of a test Postgres server.
func NewTestServer(config common.TestServerConfig) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
}
listener, err := net.Listen("tcp", address)
func NewTestServer(config common.TestServerConfig) (svr *TestServer, err error) {
err = config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
defer config.CloseOnError(&err)
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
@ -93,9 +91,10 @@ func NewTestServer(config common.TestServerConfig) (*TestServer, error) {
if err != nil {
return nil, trace.Wrap(err)
}
return &TestServer{
cfg: config,
listener: listener,
listener: config.Listener,
port: port,
tlsConfig: tlsConfig,
log: logrus.WithFields(logrus.Fields{

View file

@ -66,28 +66,25 @@ type TestServer struct {
// NewTestServer returns a new instance of a test Snowflake server.
func NewTestServer(config common.TestServerConfig, opts ...TestServerOption) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
err := config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
tlsConfig, err := common.MakeTestServerTLSConfig(config)
if err != nil {
return nil, trace.Wrap(err)
}
tlsConfig.InsecureSkipVerify = true
listener, err := net.Listen("tcp", address)
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
testServer := &TestServer{
cfg: config,
listener: listener,
listener: config.Listener,
port: port,
tlsConfig: tlsConfig,
log: logrus.WithFields(logrus.Fields{

View file

@ -118,17 +118,14 @@ type TestServer struct {
}
// NewTestServer returns a new instance of a test MSServer.
func NewTestServer(config common.TestServerConfig) (*TestServer, error) {
address := "localhost:0"
if config.Address != "" {
address = config.Address
}
listener, err := net.Listen("tcp", address)
func NewTestServer(config common.TestServerConfig) (svr *TestServer, err error) {
err = config.CheckAndSetDefaults()
if err != nil {
return nil, trace.Wrap(err)
}
_, port, err := net.SplitHostPort(listener.Addr().String())
defer config.CloseOnError(&err)
port, err := config.Port()
if err != nil {
return nil, trace.Wrap(err)
}
@ -138,7 +135,7 @@ func NewTestServer(config common.TestServerConfig) (*TestServer, error) {
})
server := &TestServer{
cfg: config,
listener: listener,
listener: config.Listener,
port: port,
log: log,
}

View file

@ -41,7 +41,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/envtest"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/integration"
"github.com/gravitational/teleport/integration/helpers"
resourcesv2 "github.com/gravitational/teleport/operator/apis/resources/v2"
resourcesv5 "github.com/gravitational/teleport/operator/apis/resources/v5"
@ -53,7 +52,7 @@ func fastEventually(t *testing.T, condition func() bool) {
}
func clientForTeleport(t *testing.T, teleportServer *helpers.TeleInstance, userName string) auth.ClientI {
identityFilePath := integration.MustCreateUserIdentityFile(t, teleportServer, userName, time.Hour)
identityFilePath := helpers.MustCreateUserIdentityFile(t, teleportServer, userName, time.Hour)
id, err := identityfile.ReadFile(identityFilePath)
require.NoError(t, err)
addr, err := utils.ParseAddr(teleportServer.Auth)
@ -80,7 +79,7 @@ func defaultTeleportServiceConfig(t *testing.T) (*helpers.TeleInstance, string)
teleportServer := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: integration.Loopback,
NodeName: helpers.Loopback,
Log: logrus.StandardLogger(),
})