Remove centralised port allocation for tests (#13658)

Ports used by the unit tests have been allocated by pulling them out of a static list, with no guarantee that a port is not already in use. This central allocation point also means that the tests cannot be split into separate packages and run in parallel, as the same ports would be handed out in multiple packages and end up intermittently clashing.

There is also no guarantee, even when the tests are run serially, that the ports will not clash with services already running on the machine.

This patch (largely) replaces the use of this centralised port allocation with pre-created listeners injected into the test via the file descriptor import mechanism used by Teleport to pass open ports to child processes.

There are still some cases where the old port allocation system is in use. This change was already getting beyond the bounds of what is sensibly reviewable, so I have left those cases for a follow-up PR.
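The core of the mechanism, as a minimal sketch (simplified from the NewListenerOn helper added in this patch; host, listenerType and fds stand in for the helper's parameters):

    // Bind a listener on an OS-assigned port, then stash a dup of its file
    // descriptor so the Teleport process under test can import it instead
    // of binding the port itself.
    l, err := net.Listen("tcp", net.JoinHostPort(host, "0"))
    require.NoError(t, err)
    defer l.Close()
    lf, err := l.(*net.TCPListener).File() // dup of the listener's fd
    require.NoError(t, err)
    *fds = append(*fds, service.FileDescriptor{
        Type:    string(listenerType),
        Address: l.Addr().String(),
        File:    lf,
    })
    // The returned address goes into the service config; the fd slice is
    // passed to the TeleportProcess on startup.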

See-Also: #12421
See-Also: #14408
Trent Clarke 2022-07-20 12:04:54 +10:00 committed by GitHub
parent 1e23103804
commit 1686a71c8a
23 changed files with 771 additions and 571 deletions


@ -132,7 +132,7 @@ readloop:
if args.report == byTest {
switch event.Action {
case actionPass, actionFail, actionSkip:
fmt.Printf("%s (in %.2fs): %s\n", event.Action, event.ElapsedSeconds, event.FullName())
fmt.Printf("%s (in %6.2fs): %s\n", event.Action, event.ElapsedSeconds, event.FullName())
}
}
@ -162,7 +162,7 @@ readloop:
}
// only display package results as progress messages
fmt.Printf("%s %s (in %.2fs): %s\n", covText, event.Action, event.ElapsedSeconds, event.Package)
fmt.Printf("%s %s (in %6.2fs): %s\n", covText, event.Action, event.ElapsedSeconds, event.Package)
}
// Don't need this anymore


@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"net/http/httptest"
"net/url"
@ -750,10 +749,10 @@ type pack struct {
}
type appTestOptions struct {
extraRootApps []service.App
extraLeafApps []service.App
rootClusterPorts *helpers.InstancePorts
leafClusterPorts *helpers.InstancePorts
extraRootApps []service.App
extraLeafApps []service.App
rootClusterListeners helpers.InstanceListenerSetupFunc
leafClusterListeners helpers.InstanceListenerSetupFunc
rootConfig func(config *service.Config)
leafConfig func(config *service.Config)
@ -911,26 +910,32 @@ func setupWithOptions(t *testing.T, opts appTestOptions) *pack {
require.NoError(t, err)
// Create a new Teleport instance with passed in configuration.
p.rootCluster = helpers.NewInstance(helpers.InstanceConfig{
rootCfg := helpers.InstanceConfig{
ClusterName: "example.com",
HostID: uuid.New().String(),
NodeName: Host,
Priv: privateKey,
Pub: publicKey,
Log: log,
Ports: opts.rootClusterPorts,
})
}
if opts.rootClusterListeners != nil {
rootCfg.Listeners = opts.rootClusterListeners(t, &rootCfg.Fds)
}
p.rootCluster = helpers.NewInstance(t, rootCfg)
// Create a new Teleport instance with passed in configuration.
p.leafCluster = helpers.NewInstance(helpers.InstanceConfig{
leafCfg := helpers.InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New().String(),
NodeName: Host,
Priv: privateKey,
Pub: publicKey,
Log: log,
Ports: opts.leafClusterPorts,
})
}
if opts.leafClusterListeners != nil {
leafCfg.Listeners = opts.leafClusterListeners(t, &leafCfg.Fds)
}
p.leafCluster = helpers.NewInstance(t, leafCfg)
rcConf := service.MakeDefaultConfig()
rcConf.Console = nil
@ -1034,7 +1039,7 @@ func (p *pack) initWebSession(t *testing.T) {
// Create POST request to create session.
u := url.URL{
Scheme: "https",
Host: net.JoinHostPort(Loopback, p.rootCluster.GetPortWeb()),
Host: p.rootCluster.Web,
Path: "/v1/webapi/sessions/web",
}
req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewBuffer(csReq))
@ -1090,7 +1095,7 @@ func (p *pack) initTeleportClient(t *testing.T) {
Login: p.user.GetName(),
Cluster: p.rootCluster.Secrets.SiteName,
Host: Loopback,
Port: p.rootCluster.GetPortSSHInt(),
Port: helpers.Port(t, p.rootCluster.SSH),
}, *creds)
require.NoError(t, err)
@ -1124,7 +1129,7 @@ func (p *pack) createAppSession(t *testing.T, publicAddr, clusterName string) st
func (p *pack) makeWebapiRequest(method, endpoint string, payload []byte) (int, []byte, error) {
u := url.URL{
Scheme: "https",
Host: net.JoinHostPort(Loopback, p.rootCluster.GetPortWeb()),
Host: p.rootCluster.Web,
Path: fmt.Sprintf("/v1/webapi/%s", endpoint),
}
@ -1287,7 +1292,7 @@ func (p *pack) makeWebsocketRequest(sessionCookie, endpoint string) (string, err
dialer.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
conn, resp, err := dialer.Dial(fmt.Sprintf("wss://%s%s", net.JoinHostPort(Loopback, p.rootCluster.GetPortWeb()), endpoint), header)
conn, resp, err := dialer.Dial(fmt.Sprintf("wss://%s%s", p.rootCluster.Web, endpoint), header)
if err != nil {
return "", err
}
@ -1306,7 +1311,7 @@ func (p *pack) makeWebsocketRequest(sessionCookie, endpoint string) (string, err
func (p *pack) assembleRootProxyURL(endpoint string) string {
u := url.URL{
Scheme: "https",
Host: net.JoinHostPort(Loopback, p.rootCluster.GetPortWeb()),
Host: p.rootCluster.Web,
Path: endpoint,
}
return u.String()
@ -1381,7 +1386,7 @@ func (p *pack) startRootAppServers(t *testing.T, count int, extraApps []service.
raConf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.rootCluster.GetPortWeb()),
Addr: p.rootCluster.Web,
},
}
raConf.Auth.Enabled = false
@ -1510,7 +1515,7 @@ func (p *pack) startLeafAppServers(t *testing.T, count int, extraApps []service.
laConf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.leafCluster.GetPortWeb()),
Addr: p.leafCluster.Web,
},
}
laConf.Auth.Enabled = false


@ -34,13 +34,14 @@ import (
// using an expired user identity
// We should receive an error message which contains the real cause (ssh: handshake)
func TestClientWithExpiredCredentialsAndDetailedErrorMessage(t *testing.T) {
rc := helpers.NewInstance(helpers.InstanceConfig{
cfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: Loopback,
Log: utils.NewLoggerForTests(),
Ports: helpers.SingleProxyPortSetup(),
})
}
cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds)
rc := helpers.NewInstance(t, cfg)
rcConf := service.MakeDefaultConfig()
rcConf.DataDir = t.TempDir()
@ -65,7 +66,7 @@ func TestClientWithExpiredCredentialsAndDetailedErrorMessage(t *testing.T) {
ctx, cancelFunc := context.WithTimeout(context.Background(), time.Second)
defer cancelFunc()
_, err = client.New(ctx, client.Config{
Addrs: []string{rc.GetAuthAddr()},
Addrs: []string{rc.Auth},
Credentials: []client.Credentials{client.LoadIdentityFile(identityFilePath)},
DialOpts: []grpc.DialOption{
// ask for underlying errors


@ -92,7 +92,7 @@ func TestDatabaseAccess(t *testing.T) {
// TestDatabaseAccessSeparateListeners tests the Mongo and Postgres separate port setup.
func TestDatabaseAccessSeparateListeners(t *testing.T) {
pack := setupDatabaseTest(t,
withPortSetupDatabaseTest(helpers.SeparateMongoAndPostgresPortSetup),
withListenerSetupDatabaseTest(helpers.SeparateMongoAndPostgresPortSetup),
)
t.Run("PostgresSeparateListener", pack.testPostgresSeparateListener)
@ -106,7 +106,7 @@ func (p *databasePack) testPostgresRootCluster(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Address: p.root.cluster.Web,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -140,7 +140,7 @@ func (p *databasePack) testPostgresLeafCluster(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()), // Connecting via root cluster.
Address: p.root.cluster.Web, // Connecting via root cluster.
Cluster: p.leaf.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -259,7 +259,7 @@ func (p *databasePack) testRotateTrustedCluster(t *testing.T) {
dbClient, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()), // Connecting via root cluster.
Address: p.root.cluster.Web, // Connecting via root cluster.
Cluster: p.leaf.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -356,7 +356,7 @@ func (p *databasePack) testMySQLRootCluster(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortMySQL()),
Address: p.root.cluster.MySQL,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -391,7 +391,7 @@ func (p *databasePack) testMySQLLeafCluster(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortMySQL()), // Connecting via root cluster.
Address: p.root.cluster.MySQL, // Connecting via root cluster.
Cluster: p.leaf.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -425,7 +425,7 @@ func (p *databasePack) testMongoRootCluster(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Address: p.root.cluster.Web,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -453,7 +453,7 @@ func (p *databasePack) testMongoConnectionCount(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Address: p.root.cluster.Web,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -505,7 +505,7 @@ func (p *databasePack) testMongoLeafCluster(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()), // Connecting via root cluster.
Address: p.root.cluster.Web, // Connecting via root cluster.
Cluster: p.leaf.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -547,7 +547,7 @@ func TestDatabaseRootLeafIdleTimeout(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, pack.root.cluster.GetPortMySQL()), // Connecting via root cluster.
Address: pack.root.cluster.MySQL, // Connecting via root cluster.
Cluster: pack.leaf.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -615,7 +615,7 @@ func TestDatabaseAccessUnspecifiedHostname(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
AuthServer: pack.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, pack.root.cluster.GetPortWeb()),
Address: pack.root.cluster.Web,
Cluster: pack.root.cluster.Secrets.SiteName,
Username: pack.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -644,7 +644,7 @@ func (p *databasePack) testPostgresSeparateListener(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortPostgres()),
Address: p.root.cluster.Postgres,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -675,7 +675,7 @@ func (p *databasePack) testPostgresSeparateListener(t *testing.T) {
// with DisableTLS.
func TestDatabaseAccessPostgresSeparateListenerTLSDisabled(t *testing.T) {
pack := setupDatabaseTest(t,
withPortSetupDatabaseTest(helpers.SeparatePostgresPortSetup),
withListenerSetupDatabaseTest(helpers.SeparatePostgresPortSetup),
withRootConfig(func(config *service.Config) {
config.Proxy.DisableTLS = true
}),
@ -717,7 +717,7 @@ func (p *databasePack) testHARootCluster(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Address: p.root.cluster.Web,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -771,7 +771,7 @@ func (p *databasePack) testHALeafCluster(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()), // Connecting via root cluster.
Address: p.root.cluster.Web, // Connecting via root cluster.
Cluster: p.leaf.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -798,12 +798,13 @@ func (p *databasePack) testHALeafCluster(t *testing.T) {
require.NoError(t, err)
}
// testMongoSeparateListener tests the mongo proxy listener running on a separate port.
func (p *databasePack) testMongoSeparateListener(t *testing.T) {
// Connect to the database service in root cluster.
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortMongo()),
Address: p.root.cluster.Mongo,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{
@ -923,11 +924,11 @@ type databaseClusterPack struct {
}
type testOptions struct {
clock clockwork.Clock
instancePortsFunc func() *helpers.InstancePorts
rootConfig func(config *service.Config)
leafConfig func(config *service.Config)
nodeName string
clock clockwork.Clock
listenerSetup helpers.InstanceListenerSetupFunc
rootConfig func(config *service.Config)
leafConfig func(config *service.Config)
nodeName string
}
type testOptionFunc func(*testOptions)
@ -936,8 +937,8 @@ func (o *testOptions) setDefaultIfNotSet() {
if o.clock == nil {
o.clock = clockwork.NewRealClock()
}
if o.instancePortsFunc == nil {
o.instancePortsFunc = helpers.StandardPortSetup
if o.listenerSetup == nil {
o.listenerSetup = helpers.StandardListenerSetup
}
if o.nodeName == "" {
o.nodeName = Host
@ -956,9 +957,9 @@ func withNodeName(nodeName string) testOptionFunc {
}
}
func withPortSetupDatabaseTest(portFn func() *helpers.InstancePorts) testOptionFunc {
func withListenerSetupDatabaseTest(fn helpers.InstanceListenerSetupFunc) testOptionFunc {
return func(o *testOptions) {
o.instancePortsFunc = portFn
o.listenerSetup = fn
}
}
@ -991,6 +992,8 @@ func setupDatabaseTest(t *testing.T, options ...testOptionFunc) *databasePack {
privateKey, publicKey, err := testauthority.New().GenerateKeyPair()
require.NoError(t, err)
// TODO(tcsc): Refactor the test database setup such that it does
// not use NewPortStr().
p := &databasePack{
clock: opts.clock,
root: databaseClusterPack{
@ -1006,26 +1009,28 @@ func setupDatabaseTest(t *testing.T, options ...testOptionFunc) *databasePack {
}
// Create root cluster.
p.root.cluster = helpers.NewInstance(helpers.InstanceConfig{
rootCfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: opts.nodeName,
Priv: privateKey,
Pub: publicKey,
Log: log,
Ports: opts.instancePortsFunc(),
})
}
rootCfg.Listeners = opts.listenerSetup(t, &rootCfg.Fds)
p.root.cluster = helpers.NewInstance(t, rootCfg)
// Create leaf cluster.
p.leaf.cluster = helpers.NewInstance(helpers.InstanceConfig{
leafCfg := helpers.InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New().String(),
NodeName: opts.nodeName,
Ports: opts.instancePortsFunc(),
Priv: privateKey,
Pub: publicKey,
Log: log,
})
}
leafCfg.Listeners = opts.listenerSetup(t, &leafCfg.Fds)
p.leaf.cluster = helpers.NewInstance(t, leafCfg)
// Make root cluster config.
rcConf := service.MakeDefaultConfig()
@ -1109,7 +1114,7 @@ func setupDatabaseTest(t *testing.T, options ...testOptionFunc) *databasePack {
rdConf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Addr: p.root.cluster.Web,
},
}
rdConf.Databases.Enabled = true
@ -1147,7 +1152,7 @@ func setupDatabaseTest(t *testing.T, options ...testOptionFunc) *databasePack {
ldConf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.leaf.cluster.GetPortWeb()),
Addr: p.leaf.cluster.Web,
},
}
ldConf.Databases.Enabled = true
@ -1306,11 +1311,11 @@ func (p *databasePack) startRootDatabaseAgent(t *testing.T, params databaseAgent
conf := service.MakeDefaultConfig()
conf.DataDir = t.TempDir()
conf.Token = "static-token-value"
conf.DiagnosticAddr = utils.NetAddr{AddrNetwork: "tcp", Addr: net.JoinHostPort("localhost", helpers.NewPortStr())}
conf.DiagnosticAddr = *utils.MustParseAddr(helpers.NewListener(t, service.ListenerDiagnostic, &conf.FileDescriptors))
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.root.cluster.GetPortWeb()),
Addr: p.root.cluster.Web,
},
}
conf.Clock = p.clock
@ -1344,7 +1349,7 @@ func (p *databasePack) testLargeQuery(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: p.root.cluster.GetSiteAPI(p.root.cluster.Secrets.SiteName),
AuthServer: p.root.cluster.Process.GetAuthServer(),
Address: net.JoinHostPort(Loopback, p.root.cluster.GetPortMySQL()),
Address: p.root.cluster.MySQL,
Cluster: p.root.cluster.Secrets.SiteName,
Username: p.root.user.GetName(),
RouteToDatabase: tlsca.RouteToDatabase{


@ -20,7 +20,6 @@ import (
"context"
"fmt"
"io"
"net"
"os"
"testing"
"time"
@ -60,7 +59,7 @@ func newNodeConfig(t *testing.T, authAddr utils.NetAddr, tokenName string, joinM
config.Token = tokenName
config.JoinMethod = joinMethod
config.SSH.Enabled = true
config.SSH.Addr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
config.SSH.Addr.Addr = helpers.NewListener(t, service.ListenerNodeSSH, &config.FileDescriptors)
config.Auth.Enabled = false
config.Proxy.Enabled = false
config.DataDir = t.TempDir()
@ -78,7 +77,7 @@ func newProxyConfig(t *testing.T, authAddr utils.NetAddr, tokenName string, join
config.SSH.Enabled = false
config.Auth.Enabled = false
proxyAddr := net.JoinHostPort(Host, helpers.NewPortStr())
proxyAddr := helpers.NewListener(t, service.ListenerProxyWeb, &config.FileDescriptors)
config.Proxy.Enabled = true
config.Proxy.DisableWebInterface = true
config.Proxy.WebAddr.Addr = proxyAddr
@ -103,7 +102,7 @@ func newAuthConfig(t *testing.T, clock clockwork.Clock) *service.Config {
config := service.MakeDefaultConfig()
config.DataDir = t.TempDir()
config.Auth.ListenAddr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
config.Auth.ListenAddr.Addr = helpers.NewListener(t, service.ListenerAuth, &config.FileDescriptors)
config.Auth.ClusterName, err = services.NewClusterNameWithRandomID(types.ClusterNameSpecV2{
ClusterName: "testcluster",
})
@ -337,13 +336,16 @@ func TestEC2Labels(t *testing.T) {
tconf.DataDir = t.TempDir()
tconf.Auth.Enabled = true
tconf.Proxy.Enabled = true
tconf.Proxy.SSHAddr.Addr = helpers.NewListener(t, service.ListenerProxySSH, &tconf.FileDescriptors)
tconf.Proxy.WebAddr.Addr = helpers.NewListener(t, service.ListenerProxyWeb, &tconf.FileDescriptors)
tconf.Proxy.ReverseTunnelListenAddr.Addr = helpers.NewListener(t, service.ListenerProxyTunnel, &tconf.FileDescriptors)
tconf.Proxy.DisableWebInterface = true
tconf.Auth.StorageConfig = storageConfig
tconf.Auth.ListenAddr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
tconf.Auth.ListenAddr.Addr = helpers.NewListener(t, service.ListenerAuth, &tconf.FileDescriptors)
tconf.AuthServers = append(tconf.AuthServers, tconf.Auth.ListenAddr)
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
tconf.SSH.Addr.Addr = helpers.NewListener(t, service.ListenerNodeSSH, &tconf.FileDescriptors)
appConf := service.App{
Name: "test-app",
@ -441,12 +443,14 @@ func TestEC2Hostname(t *testing.T) {
tconf.Auth.Enabled = true
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebInterface = true
tconf.Proxy.SSHAddr.Addr = helpers.NewListener(t, service.ListenerProxySSH, &tconf.FileDescriptors)
tconf.Proxy.WebAddr.Addr = helpers.NewListener(t, service.ListenerProxyWeb, &tconf.FileDescriptors)
tconf.Auth.StorageConfig = storageConfig
tconf.Auth.ListenAddr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
tconf.Auth.ListenAddr.Addr = helpers.NewListener(t, service.ListenerAuth, &tconf.FileDescriptors)
tconf.AuthServers = append(tconf.AuthServers, tconf.Auth.ListenAddr)
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(Host, helpers.NewPortStr())
tconf.SSH.Addr.Addr = helpers.NewListener(t, service.ListenerNodeSSH, &tconf.FileDescriptors)
imClient := &mockIMDSClient{
tags: map[string]string{


@ -73,7 +73,7 @@ func NewFixture(t *testing.T) *Fixture {
// Teleport instance with the passed in user, instance secrets, and Teleport
// configuration.
func (s *Fixture) NewTeleportWithConfig(t *testing.T, logins []string, instanceSecrets []*InstanceSecrets, teleportConfig *service.Config) *TeleInstance {
teleport := s.NewTeleportInstance()
teleport := s.NewTeleportInstance(t)
// use passed logins, but use suite's default login if nothing was passed
if len(logins) == 0 {
@ -95,18 +95,19 @@ func (s *Fixture) NewTeleportWithConfig(t *testing.T, logins []string, instanceS
return teleport
}
func (s *Fixture) NewTeleportInstance() *TeleInstance {
return NewInstance(s.DefaultInstanceConfig())
func (s *Fixture) NewTeleportInstance(t *testing.T) *TeleInstance {
return NewInstance(t, s.DefaultInstanceConfig(t))
}
func (s *Fixture) DefaultInstanceConfig() InstanceConfig {
return InstanceConfig{
func (s *Fixture) DefaultInstanceConfig(t *testing.T) InstanceConfig {
cfg := InstanceConfig{
ClusterName: Site,
HostID: HostID,
NodeName: Host,
Priv: s.Priv,
Pub: s.Pub,
Log: s.Log,
Ports: StandardPortSetup(),
}
cfg.Listeners = StandardListenerSetup(t, &cfg.Fds)
return cfg
}


@ -197,8 +197,8 @@ func (i *TeleInstance) AsTrustedCluster(token string, roleMap types.RoleMap) typ
Spec: types.TrustedClusterSpecV2{
Token: token,
Enabled: true,
ProxyAddress: i.GetWebAddr(),
ReverseTunnelAddress: i.GetReverseTunnelAddr(),
ProxyAddress: i.Web,
ReverseTunnelAddress: i.ReverseTunnel,
RoleMap: roleMap,
},
}
@ -246,7 +246,8 @@ type TeleInstance struct {
// Log specifies the instance logger
Log utils.Logger
InstancePorts
InstanceListeners
Fds []service.FileDescriptor
}
// InstanceConfig is an instance configuration
@ -264,25 +265,24 @@ type InstanceConfig struct {
// Log specifies the logger
Log utils.Logger
// Ports is a collection of instance ports.
Ports *InstancePorts
Listeners *InstanceListeners
Fds []service.FileDescriptor
}
// NewInstance creates a new Teleport process instance.
//
// The caller is responsible for calling StopAll on the returned instance to
// clean up spawned processes.
func NewInstance(cfg InstanceConfig) *TeleInstance {
func NewInstance(t *testing.T, cfg InstanceConfig) *TeleInstance {
var err error
if cfg.NodeName == "" {
cfg.NodeName, err = os.Hostname()
fatalIf(err)
}
if cfg.Ports == nil {
cfg.Ports = StandardPortSetup()
}
if cfg.Ports.Host == "" {
cfg.Ports.Host = cfg.NodeName
if cfg.Listeners == nil {
cfg.Listeners = StandardListenerSetup(t, &cfg.Fds)
}
// generate instance secrets (keys):
@ -332,10 +332,11 @@ func NewInstance(cfg InstanceConfig) *TeleInstance {
fatalIf(err)
i := &TeleInstance{
Hostname: cfg.NodeName,
UploadEventsC: make(chan events.UploadEvent, 100),
Log: cfg.Log,
InstancePorts: *cfg.Ports,
Hostname: cfg.NodeName,
UploadEventsC: make(chan events.UploadEvent, 100),
Log: cfg.Log,
InstanceListeners: *cfg.Listeners,
Fds: cfg.Fds,
}
secrets := InstanceSecrets{
@ -345,7 +346,7 @@ func NewInstance(cfg InstanceConfig) *TeleInstance {
Cert: cert,
TLSCACert: tlsCACert,
TLSCert: tlsCert,
TunnelAddr: net.JoinHostPort(cfg.NodeName, i.GetPortReverseTunnel()),
TunnelAddr: i.ReverseTunnel,
Users: make(map[string]*User),
}
@ -450,7 +451,7 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe
}
}
tconf.HostUUID = i.Secrets.GetIdentity().ID.HostUUID
tconf.SSH.Addr.Addr = net.JoinHostPort(i.Hostname, i.GetPortSSH())
tconf.SSH.Addr.Addr = i.SSH
tconf.SSH.PublicAddrs = []utils.NetAddr{
{
AddrNetwork: "tcp",
@ -461,7 +462,7 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe
Addr: Host,
},
}
tconf.Auth.ListenAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortAuth())
tconf.Auth.ListenAddr.Addr = i.Auth
tconf.Auth.PublicAddrs = []utils.NetAddr{
{
AddrNetwork: "tcp",
@ -483,8 +484,8 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe
},
}
if i.isSinglePortSetup {
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortWeb())
if i.IsSinglePortSetup {
tconf.Proxy.WebAddr.Addr = i.Web
// Reset other addresses to ensure that the teleport instance exposes only the web port listener.
tconf.Proxy.ReverseTunnelListenAddr = utils.NetAddr{}
tconf.Proxy.MySQLAddr = utils.NetAddr{}
@ -495,16 +496,16 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe
return nil, trace.Wrap(err)
}
tconf.Proxy.ReverseTunnelListenAddr = *tunAddr
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortProxy())
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortWeb())
tconf.Proxy.MySQLAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortMySQL())
if i.Postgres != nil {
tconf.Proxy.SSHAddr.Addr = i.SSHProxy
tconf.Proxy.WebAddr.Addr = i.Web
tconf.Proxy.MySQLAddr.Addr = i.MySQL
if i.Postgres != "" {
// Postgres proxy port was configured on a separate listener.
tconf.Proxy.PostgresAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortPostgres())
tconf.Proxy.PostgresAddr.Addr = i.Postgres
}
if i.Mongo != nil {
if i.Mongo != "" {
// Mongo proxy port was configured on a separate listener.
tconf.Proxy.MongoAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortMongo())
tconf.Proxy.MongoAddr.Addr = i.Mongo
}
}
tconf.AuthServers = append(tconf.AuthServers, tconf.Auth.ListenAddr)
@ -518,6 +519,7 @@ func (i *TeleInstance) GenerateConfig(t *testing.T, trustedSecrets []*InstanceSe
tconf.Keygen = testauthority.New()
tconf.MaxRetryPeriod = defaults.HighResPollingPeriod
tconf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
tconf.FileDescriptors = i.Fds
i.Config = tconf
return tconf, nil
@ -590,12 +592,20 @@ func (i *TeleInstance) CreateEx(t *testing.T, trustedSecrets []*InstanceSecrets,
// StartNode starts a SSH node and connects it to the cluster.
func (i *TeleInstance) StartNode(tconf *service.Config) (*service.TeleportProcess, error) {
return i.startNode(tconf, i.GetPortAuth())
_, port, err := net.SplitHostPort(i.Auth)
if err != nil {
return nil, trace.Wrap(err)
}
return i.startNode(tconf, port)
}
// StartReverseTunnelNode starts a SSH node and connects it to the cluster via reverse tunnel.
func (i *TeleInstance) StartReverseTunnelNode(tconf *service.Config) (*service.TeleportProcess, error) {
return i.startNode(tconf, i.GetPortWeb())
_, port, err := net.SplitHostPort(i.Web)
if err != nil {
return nil, trace.Wrap(err)
}
return i.startNode(tconf, port)
}
// startNode starts a node and connects it to the cluster.
@ -664,7 +674,7 @@ func (i *TeleInstance) StartApp(conf *service.Config) (*service.TeleportProcess,
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, i.GetPortWeb()),
Addr: i.Web,
},
}
conf.Token = "token"
@ -716,7 +726,7 @@ func (i *TeleInstance) StartApps(configs []*service.Config) ([]*service.Teleport
cfg.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, i.GetPortWeb()),
Addr: i.Web,
},
}
cfg.Token = "token"
@ -780,7 +790,7 @@ func (i *TeleInstance) StartDatabase(conf *service.Config) (*service.TeleportPro
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, i.GetPortWeb()),
Addr: i.Web,
},
}
conf.Token = "token"
@ -832,7 +842,7 @@ func (i *TeleInstance) StartDatabase(conf *service.Config) (*service.TeleportPro
return process, client, nil
}
func (i *TeleInstance) StartKube(conf *service.Config, clusterName string) (*service.TeleportProcess, error) {
func (i *TeleInstance) StartKube(t *testing.T, conf *service.Config, clusterName string) (*service.TeleportProcess, error) {
dataDir, err := os.MkdirTemp("", "cluster-"+i.Secrets.SiteName)
if err != nil {
return nil, trace.Wrap(err)
@ -843,7 +853,7 @@ func (i *TeleInstance) StartKube(conf *service.Config, clusterName string) (*ser
conf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, i.GetPortWeb()),
Addr: i.Web,
},
}
conf.Token = "token"
@ -855,7 +865,7 @@ func (i *TeleInstance) StartKube(conf *service.Config, clusterName string) (*ser
conf.Databases.Enabled = false
conf.Kube.KubeconfigPath = filepath.Join(dataDir, "kube_config")
if err := EnableKube(conf, clusterName); err != nil {
if err := EnableKube(t, conf, clusterName); err != nil {
return nil, trace.Wrap(err)
}
conf.Kube.ListenAddr = nil
@ -883,17 +893,16 @@ func (i *TeleInstance) StartKube(conf *service.Config, clusterName string) (*ser
// StartNodeAndProxy starts a SSH node and a Proxy Server and connects it to
// the cluster.
func (i *TeleInstance) StartNodeAndProxy(name string, sshPort, proxyWebPort, proxySSHPort int) error {
func (i *TeleInstance) StartNodeAndProxy(t *testing.T, name string) (sshPort, webProxyPort, sshProxyPort int) {
dataDir, err := os.MkdirTemp("", "cluster-"+i.Secrets.SiteName)
if err != nil {
return trace.Wrap(err)
}
require.NoError(t, err)
i.tempDirs = append(i.tempDirs, dataDir)
tconf := service.MakeDefaultConfig()
tconf.Log = i.Log
authServer := utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortAuth()))
authServer := utils.MustParseAddr(i.Auth)
tconf.AuthServers = append(tconf.AuthServers, *authServer)
tconf.Token = "token"
tconf.HostUUID = name
@ -907,13 +916,16 @@ func (i *TeleInstance) StartNodeAndProxy(name string, sshPort, proxyWebPort, pro
tconf.Auth.Enabled = false
tconf.Proxy.Enabled = true
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", proxySSHPort))
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", proxyWebPort))
tconf.Proxy.SSHAddr.Addr = NewListenerOn(t, i.Hostname, service.ListenerProxySSH, &tconf.FileDescriptors)
sshProxyPort = Port(t, tconf.Proxy.SSHAddr.Addr)
tconf.Proxy.WebAddr.Addr = NewListenerOn(t, i.Hostname, service.ListenerProxyWeb, &tconf.FileDescriptors)
webProxyPort = Port(t, tconf.Proxy.WebAddr.Addr)
tconf.Proxy.DisableReverseTunnel = true
tconf.Proxy.DisableWebService = true
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", sshPort))
tconf.SSH.Addr.Addr = NewListenerOn(t, i.Hostname, service.ListenerNodeSSH, &tconf.FileDescriptors)
sshPort = Port(t, tconf.SSH.Addr.Addr)
tconf.SSH.PublicAddrs = []utils.NetAddr{
{
AddrNetwork: "tcp",
@ -929,9 +941,7 @@ func (i *TeleInstance) StartNodeAndProxy(name string, sshPort, proxyWebPort, pro
// Create a new Teleport process and add it to the list of nodes that
// compose this "cluster".
process, err := service.NewTeleport(tconf, service.WithIMDSClient(&DisabledIMDSClient{}))
if err != nil {
return trace.Wrap(err)
}
require.NoError(t, err)
i.Nodes = append(i.Nodes, process)
// Build a list of expected events to wait for before unblocking based off
@ -943,13 +953,12 @@ func (i *TeleInstance) StartNodeAndProxy(name string, sshPort, proxyWebPort, pro
// Start the process and block until the expected events have arrived.
receivedEvents, err := StartAndWait(process, expectedEvents)
if err != nil {
return trace.Wrap(err)
}
require.NoError(t, err)
log.Debugf("Teleport node and proxy (in instance %v) started: %v/%v events received.",
i.Secrets.SiteName, len(expectedEvents), len(receivedEvents))
return nil
return
}
// ProxyConfig is a set of configuration parameters for Proxy
@ -981,7 +990,7 @@ func (i *TeleInstance) StartProxy(cfg ProxyConfig) (reversetunnel.Server, error)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Log = i.Log
authServer := utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortAuth()))
authServer := utils.MustParseAddr(i.Auth)
tconf.AuthServers = append(tconf.AuthServers, *authServer)
tconf.CachePolicy = service.CachePolicy{Enabled: true}
tconf.DataDir = dataDir
@ -1217,8 +1226,8 @@ func (i *TeleInstance) NewUnauthenticatedClient(cfg ClientConfig) (tc *client.Te
var sshProxyAddr string
if cfg.Proxy == nil {
webProxyAddr = i.GetWebAddr()
sshProxyAddr = i.GetProxyAddr()
webProxyAddr = i.Web
sshProxyAddr = i.SSHProxy
} else {
webProxyAddr = net.JoinHostPort(proxyHost, strconv.Itoa(cfg.Proxy.WebPort))
sshProxyAddr = net.JoinHostPort(proxyHost, strconv.Itoa(cfg.Proxy.SSHPort))
@ -1242,7 +1251,7 @@ func (i *TeleInstance) NewUnauthenticatedClient(cfg ClientConfig) (tc *client.Te
WebProxyAddr: webProxyAddr,
SSHProxyAddr: sshProxyAddr,
Interactive: cfg.Interactive,
TLSRoutingEnabled: i.isSinglePortSetup,
TLSRoutingEnabled: i.IsSinglePortSetup,
Tracer: tracing.NoopProvider().Tracer("test"),
EnableEscapeSequences: cfg.EnableEscapeSequences,
}


@ -17,7 +17,6 @@ package helpers
import (
"context"
"crypto/x509/pkix"
"net"
"path/filepath"
"testing"
"time"
@ -41,10 +40,10 @@ import (
func EnableKubernetesService(t *testing.T, config *service.Config) {
config.Kube.KubeconfigPath = filepath.Join(t.TempDir(), "kube_config")
require.NoError(t, EnableKube(config, "teleport-cluster"))
require.NoError(t, EnableKube(t, config, "teleport-cluster"))
}
func EnableKube(config *service.Config, clusterName string) error {
func EnableKube(t *testing.T, config *service.Config, clusterName string) error {
kubeConfigPath := config.Kube.KubeconfigPath
if kubeConfigPath == "" {
return trace.BadParameter("missing kubeconfig path")
@ -54,15 +53,18 @@ func EnableKube(config *service.Config, clusterName string) error {
return trace.Wrap(err)
}
err = kubeconfig.Update(kubeConfigPath, kubeconfig.Values{
// By default this needs to be an arbitrary address guaranteed not to
// be in use, so we're using port 0 for now.
ClusterAddr: "https://localhost:0",
TeleportClusterName: clusterName,
ClusterAddr: "https://" + net.JoinHostPort(Host, ports.Pop()),
Credentials: key,
})
if err != nil {
return trace.Wrap(err)
}
config.Kube.Enabled = true
config.Kube.ListenAddr = utils.MustParseAddr(net.JoinHostPort(Host, ports.Pop()))
config.Kube.ListenAddr = utils.MustParseAddr(NewListener(t, service.ListenerKube, &config.FileDescriptors))
return nil
}


@ -20,8 +20,11 @@ import (
"fmt"
"net"
"strconv"
"testing"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/utils"
"github.com/stretchr/testify/require"
)
// ports contains tcp ports allocated for all integration tests.
@ -38,168 +41,218 @@ func init() {
}
}
// NewPortValue fetches a port from the pool.
// Deprecated: Use NewListener() and friends instead.
func NewPortValue() int {
return ports.PopInt()
}
// NewPortStr fetches a port from the pool as a string.
// Deprecated: Use NewListener() and friends instead.
func NewPortStr() string {
return ports.Pop()
}
// NewPortSlice fetches several ports from the pool at once.
// Deprecated: Use NewListener() and friends instead.
func NewPortSlice(n int) []int {
return ports.PopIntSlice(n)
}
func NewInstancePort() *InstancePort {
i := ports.PopInt()
p := InstancePort(i)
return &p
// InstanceListeners represents the listener configuration for a test cluster.
// Each address field is expected to be a full host:port pair.
type InstanceListeners struct {
Web string
SSH string
SSHProxy string
Auth string
ReverseTunnel string
MySQL string
Postgres string
Mongo string
IsSinglePortSetup bool
}
type InstancePort int
// InstanceListenerSetupFunc defines a function type used for specifying the
// listener setup for a given test. InstanceListenerSetupFuncs are useful when
// you need to have some distance between the test configuration and actually
// executing the listener setup.
type InstanceListenerSetupFunc func(*testing.T, *[]service.FileDescriptor) *InstanceListeners
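// An illustrative sketch of how a setup func is consumed (this mirrors the
// app test options updated elsewhere in this patch): the test carries the
// func in its options and invokes it only once the instance config and fd
// slice exist, e.g.
//
//	opts := appTestOptions{rootClusterListeners: helpers.SingleProxyPortSetup}
//	// ... later, during setup ...
//	if opts.rootClusterListeners != nil {
//		rootCfg.Listeners = opts.rootClusterListeners(t, &rootCfg.Fds)
//	}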
func (p *InstancePort) String() string {
if p == nil {
return ""
}
return strconv.Itoa(int(*p))
}
func SingleProxyPortSetup() *InstancePorts {
v := NewInstancePort()
return &InstancePorts{
Web: v,
SSHProxy: v,
ReverseTunnel: v,
MySQL: v,
SSH: NewInstancePort(),
Auth: NewInstancePort(),
isSinglePortSetup: true,
}
}
func StandardPortSetup() *InstancePorts {
return &InstancePorts{
Web: NewInstancePort(),
SSH: NewInstancePort(),
Auth: NewInstancePort(),
SSHProxy: NewInstancePort(),
ReverseTunnel: NewInstancePort(),
MySQL: NewInstancePort(),
// StandardListenerSetupOn returns an InstanceListenerSetupFunc that will create
// a new InstanceListeners configured with each service listening on its own
// port, all bound to the supplied address.
func StandardListenerSetupOn(addr string) func(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return func(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return &InstanceListeners{
Web: NewListenerOn(t, addr, service.ListenerProxyWeb, fds),
SSH: NewListenerOn(t, addr, service.ListenerNodeSSH, fds),
Auth: NewListenerOn(t, addr, service.ListenerAuth, fds),
SSHProxy: NewListenerOn(t, addr, service.ListenerProxySSH, fds),
ReverseTunnel: NewListenerOn(t, addr, service.ListenerProxyTunnel, fds),
MySQL: NewListenerOn(t, addr, service.ListenerProxyMySQL, fds),
}
}
}
func WebReverseTunnelMuxPortSetup() *InstancePorts {
v := NewInstancePort()
return &InstancePorts{
Web: v,
ReverseTunnel: v,
SSH: NewInstancePort(),
SSHProxy: NewInstancePort(),
MySQL: NewInstancePort(),
Auth: NewInstancePort(),
// StandardListenerSetup creates an InstanceListeners configured with each service
// listening on its own port, all bound to the loopback address.
func StandardListenerSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return StandardListenerSetupOn(Loopback)(t, fds)
}
// SingleProxyPortSetupOn returns a setup function that, when invoked, generates
// an InstanceListeners that allows proxying of multiple protocols over a single
// port.
func SingleProxyPortSetupOn(addr string) func(*testing.T, *[]service.FileDescriptor) *InstanceListeners {
return func(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
web := NewListenerOn(t, addr, service.ListenerProxyWeb, fds)
return &InstanceListeners{
Web: web,
SSH: NewListenerOn(t, addr, service.ListenerNodeSSH, fds),
Auth: NewListenerOn(t, addr, service.ListenerAuth, fds),
SSHProxy: web,
ReverseTunnel: web,
MySQL: web,
IsSinglePortSetup: true,
}
}
}
func SeparatePostgresPortSetup() *InstancePorts {
return &InstancePorts{
Web: NewInstancePort(),
SSH: NewInstancePort(),
Auth: NewInstancePort(),
SSHProxy: NewInstancePort(),
ReverseTunnel: NewInstancePort(),
MySQL: NewInstancePort(),
Postgres: NewInstancePort(),
// SingleProxyPortSetup generates an InstanceListeners that allows proxying of multiple protocols
// over a single port.
func SingleProxyPortSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return SingleProxyPortSetupOn(Loopback)(t, fds)
}
// WebReverseTunnelMuxPortSetup generates a listener config using the same port for web and
// tunnel, and independent ports for all other services.
func WebReverseTunnelMuxPortSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
web := NewListener(t, service.ListenerProxyTunnelAndWeb, fds)
return &InstanceListeners{
Web: web,
ReverseTunnel: web,
SSH: NewListener(t, service.ListenerNodeSSH, fds),
SSHProxy: NewListener(t, service.ListenerProxySSH, fds),
MySQL: NewListener(t, service.ListenerProxyMySQL, fds),
Auth: NewListener(t, service.ListenerAuth, fds),
}
}
func SeparateMongoPortSetup() *InstancePorts {
return &InstancePorts{
Web: NewInstancePort(),
SSH: NewInstancePort(),
Auth: NewInstancePort(),
SSHProxy: NewInstancePort(),
ReverseTunnel: NewInstancePort(),
MySQL: NewInstancePort(),
Mongo: NewInstancePort(),
}
}
func SeparateMongoAndPostgresPortSetup() *InstancePorts {
return &InstancePorts{
Web: NewInstancePort(),
SSH: NewInstancePort(),
Auth: NewInstancePort(),
SSHProxy: NewInstancePort(),
ReverseTunnel: NewInstancePort(),
MySQL: NewInstancePort(),
Mongo: NewInstancePort(),
Postgres: NewInstancePort(),
// SeparatePostgresPortSetup generates a listener config with a dedicated port for Postgres
func SeparatePostgresPortSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return &InstanceListeners{
Web: NewListener(t, service.ListenerProxyWeb, fds),
SSH: NewListener(t, service.ListenerNodeSSH, fds),
Auth: NewListener(t, service.ListenerAuth, fds),
SSHProxy: NewListener(t, service.ListenerProxySSH, fds),
ReverseTunnel: NewListener(t, service.ListenerProxyTunnel, fds),
MySQL: NewListener(t, service.ListenerProxyMySQL, fds),
Postgres: NewListener(t, service.ListenerProxyPostgres, fds),
}
}
type InstancePorts struct {
Host string
Web *InstancePort
// SSH is an instance of SSH server Port.
SSH *InstancePort
// SSHProxy is Teleport SSH Proxy Port.
SSHProxy *InstancePort
Auth *InstancePort
ReverseTunnel *InstancePort
MySQL *InstancePort
Postgres *InstancePort
Mongo *InstancePort
isSinglePortSetup bool
}
func (i *InstancePorts) GetPortSSHInt() int { return int(*i.SSH) }
func (i *InstancePorts) GetPortSSH() string { return i.SSH.String() }
func (i *InstancePorts) GetPortAuth() string { return i.Auth.String() }
func (i *InstancePorts) GetPortProxy() string { return i.SSHProxy.String() }
func (i *InstancePorts) GetPortWeb() string { return i.Web.String() }
func (i *InstancePorts) GetPortMySQL() string { return i.MySQL.String() }
func (i *InstancePorts) GetPortPostgres() string { return i.Postgres.String() }
func (i *InstancePorts) GetPortMongo() string { return i.Mongo.String() }
func (i *InstancePorts) GetPortReverseTunnel() string { return i.ReverseTunnel.String() }
func (i *InstancePorts) GetSSHAddr() string {
if i.SSH == nil {
return ""
// SeparateMongoPortSetup generates a listener config with a dedicated port for MongoDB
func SeparateMongoPortSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return &InstanceListeners{
Web: NewListener(t, service.ListenerProxyWeb, fds),
SSH: NewListener(t, service.ListenerNodeSSH, fds),
Auth: NewListener(t, service.ListenerAuth, fds),
SSHProxy: NewListener(t, service.ListenerProxySSH, fds),
ReverseTunnel: NewListener(t, service.ListenerProxyTunnel, fds),
MySQL: NewListener(t, service.ListenerProxyMySQL, fds),
Mongo: NewListener(t, service.ListenerProxyMongo, fds),
}
return net.JoinHostPort(i.Host, i.GetPortSSH())
}
func (i *InstancePorts) GetAuthAddr() string {
if i.Auth == nil {
return ""
// SeparateMongoAndPostgresPortSetup generates a listener config with dedicated ports for Postgres and Mongo
func SeparateMongoAndPostgresPortSetup(t *testing.T, fds *[]service.FileDescriptor) *InstanceListeners {
return &InstanceListeners{
Web: NewListener(t, service.ListenerProxyWeb, fds),
SSH: NewListener(t, service.ListenerNodeSSH, fds),
Auth: NewListener(t, service.ListenerAuth, fds),
SSHProxy: NewListener(t, service.ListenerProxySSH, fds),
ReverseTunnel: NewListener(t, service.ListenerProxyTunnel, fds),
MySQL: NewListener(t, service.ListenerProxyMySQL, fds),
Mongo: NewListener(t, service.ListenerProxyMongo, fds),
Postgres: NewListener(t, service.ListenerProxyPostgres, fds),
}
return net.JoinHostPort(i.Host, i.GetPortAuth())
}
func (i *InstancePorts) GetProxyAddr() string {
if i.SSHProxy == nil {
return ""
}
return net.JoinHostPort(i.Host, i.GetPortProxy())
// PortStr extracts the port number from the supplied string, which is assumed
// to be a host:port pair. The port is returned as a string. Any errors result
// in an immediately failed test.
func PortStr(t *testing.T, addr string) string {
t.Helper()
_, portStr, err := net.SplitHostPort(addr)
require.NoError(t, err)
return portStr
}
func (i *InstancePorts) GetWebAddr() string {
if i.Web == nil {
return ""
}
return net.JoinHostPort(i.Host, i.GetPortWeb())
// Port extracts the port number from the supplied string, which is assumed
// to be a host:port pair. The port value is returned as an integer. Any errors
// result in an immediately failed test.
func Port(t *testing.T, addr string) int {
t.Helper()
portStr := PortStr(t, addr)
port, err := strconv.Atoi(portStr)
require.NoError(t, err)
return port
}
func (i *InstancePorts) GetMySQLAddr() string {
if i.MySQL == nil {
return ""
}
return net.JoinHostPort(i.Host, i.GetPortMySQL())
// NewListenerOn creates a new TCP listener on `hostAddr`:0, adds it to the
// FileDescriptor slice (with the specified type) and returns its actual local
// address as a string (for use in configuration). The idea is to subvert
// Teleport's file-descriptor injection mechanism (used to share ports between
// parent and child processes) to inject preconfigured listeners to Teleport
// instances under test. The ports are allocated and bound at runtime, so there
// should be no issues with port clashes on parallel tests.
//
// The resulting file descriptor is added to the `fds` slice, which can then be
// given to a teleport instance on startup in order to supply it with the
// preconfigured listeners.
func NewListenerOn(t *testing.T, hostAddr string, ty service.ListenerType, fds *[]service.FileDescriptor) string {
t.Helper()
l, err := net.Listen("tcp", net.JoinHostPort(hostAddr, "0"))
require.NoError(t, err)
defer l.Close()
addr := net.JoinHostPort(hostAddr, PortStr(t, l.Addr().String()))
// File() returns a dup of the listener's file descriptor as an *os.File, so
// the original net.Listener still needs to be closed.
lf, err := l.(*net.TCPListener).File()
require.NoError(t, err)
// If the file descriptor slice ends up being passed to a TeleportProcess
// that successfully starts, listeners will either get "imported" and used
// or discarded and closed; this is just an extra safety measure that closes
// the listener at the end of the test anyway (the finalizer would do that
// eventually, in principle).
t.Cleanup(func() { lf.Close() })
*fds = append(*fds, service.FileDescriptor{
Type: string(ty),
Address: addr,
File: lf,
})
return addr
}
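// Example usage (an illustrative sketch mirroring the call sites updated in
// this patch, here configuring a node's SSH listener):
//
//	conf := service.MakeDefaultConfig()
//	conf.SSH.Addr.Addr = NewListenerOn(t, "127.0.0.1", service.ListenerNodeSSH, &conf.FileDescriptors)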
func (i *InstancePorts) GetReverseTunnelAddr() string {
if i.ReverseTunnel == nil {
return ""
}
return net.JoinHostPort(i.Host, i.GetPortReverseTunnel())
// NewListener creates a new TCP listener on 127.0.0.1:0, adds it to the
// FileDescriptor slice (with the specified type) and returns its actual local
// address as a string (for use in configuration). The idea is to subvert
// Teleport's file-descriptor injection mechanism (used to share ports between
// parent and child processes) to inject preconfigured listeners to Teleport
// instances under test. The ports are allocated and bound at runtime, so there
// should be no issues with port clashes on parallel tests.
//
// The resulting file descriptor is added to the `fds` slice, which can then be
// given to a teleport instance on startup in order to supply it with the
// preconfigured listeners.
func NewListener(t *testing.T, ty service.ListenerType, fds *[]service.FileDescriptor) string {
return NewListenerOn(t, Loopback, ty, fds)
}


@ -1,3 +1,4 @@
// go:build linux
//go:build linux
// +build linux


@ -68,9 +68,7 @@ import (
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/srv"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/teleport/common"
"github.com/google/uuid"
"github.com/gravitational/trace"
@ -104,6 +102,9 @@ func (s *integrationTestSuite) bind(test integrationTest) func(t *testing.T) {
// TestIntegrations acts as the master test suite for all integration tests
// requiring standardized setup and teardown.
func TestIntegrations(t *testing.T) {
// TODO: break all of these subtests out into individual tests so that we get
// better progress reporting, rather than have to wait for the entire
// suite to complete
suite := newSuite(t)
t.Run("AuditOff", suite.bind(testAuditOff))
@ -174,7 +175,7 @@ func testDifferentPinnedIP(t *testing.T, suite *integrationTestSuite) {
tconf.SSH.Enabled = true
tconf.SSH.DisableCreateHostUser = true
teleport := suite.NewTeleportInstance()
teleport := suite.NewTeleportInstance(t)
role := services.NewImplicitRole()
ro := role.GetOptions()
@ -276,19 +277,12 @@ func testAuditOn(t *testing.T, suite *integrationTestSuite) {
defer teleport.StopAll()
// Start a node.
nodeSSHPort := helpers.NewPortValue()
nodeConfig := func() *service.Config {
tconf := suite.defaultServiceConfig()
tconf.HostUUID = "node"
tconf.Hostname = "node"
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(teleport.Hostname, fmt.Sprintf("%v", nodeSSHPort))
return tconf
}
nodeProcess, err := teleport.StartNode(nodeConfig())
nodeConf := suite.defaultServiceConfig()
nodeConf.HostUUID = "node"
nodeConf.Hostname = "node"
nodeConf.SSH.Enabled = true
nodeConf.SSH.Addr.Addr = helpers.NewListener(t, service.ListenerNodeSSH, &nodeConf.FileDescriptors)
nodeProcess, err := teleport.StartNode(nodeConf)
require.NoError(t, err)
// get access to a authClient for the cluster
@ -331,7 +325,7 @@ func testAuditOn(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: nodeSSHPort,
Port: helpers.Port(t, nodeConf.SSH.Addr.Addr),
ForwardAgent: tt.inForwardAgent,
})
require.NoError(t, err)
@ -582,7 +576,7 @@ func testInteroperability(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -617,26 +611,10 @@ func testInteroperability(t *testing.T, suite *integrationTestSuite) {
}
}
// TestMain will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise, it will run tests as normal.
func TestMain(m *testing.M) {
utils.InitLoggerForTests()
helpers.SetTestTimeouts(100 * time.Millisecond)
// If the test is re-executing itself, handle the appropriate sub-command.
if srv.IsReexec() {
common.Run(common.Options{Args: os.Args[1:]})
return
}
// Otherwise run tests as normal.
code := m.Run()
os.Exit(code)
}
// newUnstartedTeleport helper returns a created but not started Teleport instance pre-configured
// with the current user os.user.Current().
func (s *integrationTestSuite) newUnstartedTeleport(t *testing.T, logins []string, enableSSH bool) *helpers.TeleInstance {
teleport := s.NewTeleportInstance()
teleport := s.NewTeleportInstance(t)
// use passed logins, but use suite's default login if nothing was passed
if len(logins) == 0 {
logins = []string{s.Me.Username}
@ -682,7 +660,7 @@ func (s *integrationTestSuite) newTeleportIoT(t *testing.T, logins []string) *he
tconf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, main.GetPortWeb()),
Addr: main.Web,
},
}
@ -720,12 +698,11 @@ func testUUIDBasedProxy(t *testing.T, suite *integrationTestSuite) {
// addNode adds a node to the teleport instance, returning its uuid.
// All nodes added this way have the same hostname.
addNode := func() (string, error) {
nodeSSHPort := helpers.NewPortValue()
tconf := suite.defaultServiceConfig()
tconf.Hostname = Host
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(teleportSvr.Hostname, fmt.Sprintf("%v", nodeSSHPort))
tconf.SSH.Addr.Addr = helpers.NewListenerOn(t, teleportSvr.Hostname, service.ListenerNodeSSH, &tconf.FileDescriptors)
node, err := teleportSvr.StartNode(tconf)
if err != nil {
@ -1044,9 +1021,9 @@ func testCustomReverseTunnel(t *testing.T, suite *integrationTestSuite) {
}
conf.SSH.Enabled = false
instanceConfig := suite.DefaultInstanceConfig()
instanceConfig.Ports = helpers.WebReverseTunnelMuxPortSetup()
main := helpers.NewInstance(instanceConfig)
instanceConfig := suite.DefaultInstanceConfig(t)
instanceConfig.Listeners = helpers.WebReverseTunnelMuxPortSetup(t, &instanceConfig.Fds)
main := helpers.NewInstance(t, instanceConfig)
require.NoError(t, main.CreateEx(t, nil, conf))
require.NoError(t, main.Start())
@ -1059,13 +1036,15 @@ func testCustomReverseTunnel(t *testing.T, suite *integrationTestSuite) {
nodeConf.Auth.Enabled = false
nodeConf.Proxy.Enabled = false
nodeConf.SSH.Enabled = true
t.Setenv(apidefaults.TunnelPublicAddrEnvar, main.GetWebAddr())
t.Setenv(apidefaults.TunnelPublicAddrEnvar, main.Web)
// verify the node is able to join the cluster
_, err = main.StartReverseTunnelNode(nodeConf)
require.NoError(t, err)
}
// testEscapeSequenceTriggers asserts both that escape sequence handling works,
// and that it can be reliably switched off via config.
func testEscapeSequenceTriggers(t *testing.T, suite *integrationTestSuite) {
type testCase struct {
name string
@ -1130,7 +1109,14 @@ func testEscapeSequenceTriggers(t *testing.T, suite *integrationTestSuite) {
}
func testEscapeSequenceYesTrigger(t *testing.T, terminal *Terminal, sess <-chan error) {
// Given a running terminal connected to a remote shell via an active
// Teleport SSH session, where Teleport has escape sequence processing
// ENABLED...
// When I enter some text containing the SSH disconnect escape string
terminal.Type("\a~.\n\r")
// Expect that the session will terminate shortly and without error
select {
case err := <-sess:
require.NoError(t, err)
@ -1140,15 +1126,33 @@ func testEscapeSequenceYesTrigger(t *testing.T, terminal *Terminal, sess <-chan
}
func testEscapeSequenceNoTrigger(t *testing.T, terminal *Terminal, sess <-chan error) {
terminal.Type("\a~.\n\r")
terminal.Type("\aecho hi\n\r")
// Given a running terminal connected to a remote shell via an active
// Teleport SSH session, where Teleport has escape sequence processing
// DISABLED...
// When I enter some text containing SSH escape string, followed by some
// arbitrary text....
terminal.Type("\a~.\n\r")
terminal.Type("\aecho made it to here!\n\r")
// Expect that the session will NOT be disconnected by the escape sequence,
// and so the arbitrary text will eventually end up in the terminal buffer.
require.Eventually(t, func() bool {
// if the session didn't end, we should see the output of the last write
return strings.Contains(terminal.Output(1000), "hi")
select {
case err := <-sess:
require.FailNow(t, "Session ended unexpectedly with %v", err)
return false
default:
// if the session didn't end, we should see the output of the last write
return strings.Contains(terminal.AllOutput(), "made it to here!")
}
}, time.Second*15, time.Millisecond*100)
// When I issue an explicit `exit` command to clean up the remote shell
terminal.Type("\aexit 0\n\r")
// Expect that the session will terminate shortly and without error
select {
case err := <-sess:
require.NoError(t, err)
@ -1262,7 +1266,7 @@ func testShutdown(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
cl.Stdout = person
@ -1306,7 +1310,7 @@ func testShutdown(t *testing.T, suite *integrationTestSuite) {
require.Eventually(t, func() bool {
// TODO: check that we either get a connection that fully works or a connection refused error
c, err := net.DialTimeout("tcp", teleport.GetReverseTunnelAddr(), 250*time.Millisecond)
c, err := net.DialTimeout("tcp", teleport.ReverseTunnel, 250*time.Millisecond)
if err != nil {
require.True(t, utils.IsConnectionRefused(trace.Unwrap(err)))
return true
@ -1468,7 +1472,7 @@ func testDisconnectScenarios(t *testing.T, suite *integrationTestSuite) {
}
func runDisconnectTest(t *testing.T, suite *integrationTestSuite, tc disconnectTestCase) {
teleport := suite.NewTeleportInstance()
teleport := suite.NewTeleportInstance(t)
username := suite.Me.Username
role, err := types.NewRoleV3("devs", types.RoleSpecV5{
@ -1522,7 +1526,7 @@ func runDisconnectTest(t *testing.T, suite *integrationTestSuite, tc disconnectT
Login: username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
cl.Stdout = person
@ -1633,7 +1637,7 @@ func testEnvironmentVariables(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -1663,7 +1667,7 @@ func testInvalidLogins(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: "wrong-site",
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
err = tc.SSH(context.TODO(), cmd, false)
@ -1758,12 +1762,24 @@ func twoClustersTunnel(t *testing.T, suite *integrationTestSuite, now time.Time,
require.NoError(t, b.CreateEx(t, a.Secrets.AsSlice(), bcfg))
t.Cleanup(func() { require.NoError(t, b.StopAll()) })
require.NoError(t, a.CreateEx(t, b.Secrets.AsSlice(), acfg))
t.Cleanup(func() { require.NoError(t, a.StopAll()) })
require.NoError(t, b.Start())
require.NoError(t, a.Start())
// The Listener FDs injected into SiteA will be closed when SiteA restarts
// later in the test, rendering them all invalid. This will make SiteA
// fail when it attempts to start back up again. We can't just inject a
// totally new listener config into SiteA when it restarts, or SiteB won't
// be able to find it.
//
// The least bad option is to duplicate all of SiteA's Listener FDs and
// inject those duplicates prior to restarting the SiteA cluster.
aFdCache, err := a.Process.ExportFileDescriptors()
require.NoError(t, err)
// Wait for both clusters to see each other via reverse tunnels.
require.Eventually(t, waitForClusters(a.Tunnel, 2), 10*time.Second, 1*time.Second,
"Two clusters do not see each other: tunnels are not working.")
@ -1779,7 +1795,7 @@ func twoClustersTunnel(t *testing.T, suite *integrationTestSuite, now time.Time,
require.Zero(t, ph.Count())
// if we got here, it means two sites are cross-connected. Let's execute SSH commands
sshPort := a.GetPortSSHInt()
sshPort := helpers.Port(t, a.SSH)
cmd := []string{"echo", "hello world"}
// directly:
@ -1846,6 +1862,7 @@ func twoClustersTunnel(t *testing.T, suite *integrationTestSuite, now time.Time,
require.IsType(t, err, trace.ConnectionProblem(nil, ""))
// Reset and start "Site-A" again
a.Config.FileDescriptors = aFdCache
require.NoError(t, a.Reset())
require.NoError(t, a.Start())
@ -1902,8 +1919,14 @@ func testTwoClustersProxy(t *testing.T, suite *integrationTestSuite) {
// this address instead.
addr, err := getLocalIP()
require.NoError(t, err)
a := suite.newNamedTeleportInstance(t, "site-A", WithNodeName(addr))
b := suite.newNamedTeleportInstance(t, "site-B", WithNodeName(addr))
a := suite.newNamedTeleportInstance(t, "site-A",
WithNodeName(addr),
WithListeners(helpers.StandardListenerSetupOn(addr)),
)
b := suite.newNamedTeleportInstance(t, "site-B",
WithNodeName(addr),
WithListeners(helpers.StandardListenerSetupOn(addr)),
)
a.AddUser(username, []string{username})
b.AddUser(username, []string{username})
@ -1950,9 +1973,18 @@ func testHA(t *testing.T, suite *integrationTestSuite) {
require.NoError(t, b.Start())
require.NoError(t, a.Start())
nodePorts := helpers.NewPortSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
require.NoError(t, a.StartNodeAndProxy("cluster-a-node", sshPort, proxyWebPort, proxySSHPort))
// The Listener FDs injected into SiteA will be closed when SiteA restarts
// later in the test, rendering them all invalid. This will make SiteA
// fail when it attempts to start back up again. We can't just inject a
// totally new listener config into SiteA when it restarts, or SiteB won't
// be able to find it.
//
// The least bad option is to duplicate all of SiteA's Listener FDs and
// inject those duplicates prior to restarting the SiteA cluster.
aFdCache, err := a.Process.ExportFileDescriptors()
require.NoError(t, err)
sshPort, _, _ := a.StartNodeAndProxy(t, "cluster-a-node")
// Wait for both clusters to see each other via reverse tunnels.
require.Eventually(t, waitForClusters(a.Tunnel, 1), 10*time.Second, 1*time.Second,
@ -1994,6 +2026,7 @@ func testHA(t *testing.T, suite *integrationTestSuite) {
a.Config.Proxy.KeyPairs = nil
// Restart cluster "a".
a.Config.FileDescriptors = aFdCache
require.NoError(t, a.Reset())
require.NoError(t, a.Start())
@ -2094,9 +2127,7 @@ func testMapRoles(t *testing.T, suite *integrationTestSuite) {
tryCreateTrustedCluster(t, aux.Process.GetAuthServer(), trustedCluster)
waitForTunnelConnections(t, main.Process.GetAuthServer(), clusterAux, 1)
nodePorts := helpers.NewPortSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
require.NoError(t, aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort))
sshPort, _, _ := aux.StartNodeAndProxy(t, "aux-node")
// Wait for both clusters to see each other via reverse tunnels.
require.Eventually(t, waitForClusters(main.Tunnel, 1), 10*time.Second, 1*time.Second,
@ -2320,11 +2351,11 @@ func testMultiplexingTrustedClusters(t *testing.T, suite *integrationTestSuite)
trustedClusters(t, suite, trustedClusterTest{multiplex: true})
}
func standardPortsOrMuxSetup(mux bool) *helpers.InstancePorts {
func standardPortsOrMuxSetup(t *testing.T, mux bool, fds *[]service.FileDescriptor) *helpers.InstanceListeners {
if mux {
return helpers.WebReverseTunnelMuxPortSetup()
return helpers.WebReverseTunnelMuxPortSetup(t, fds)
}
return helpers.StandardPortSetup()
return helpers.StandardListenerSetup(t, fds)
}
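
Any function with this shape can be supplied wherever a helpers.InstanceListenerSetupFunc is expected. A minimal sketch of a custom setup (the wrapper name is illustrative; it merely delegates to StandardListenerSetupOn, introduced elsewhere in this patch):

// pinnedHostSetup returns a listener setup bound to a fixed host instead
// of the instance default, threading the FD slice through unchanged.
func pinnedHostSetup(host string) helpers.InstanceListenerSetupFunc {
	return func(t *testing.T, fds *[]service.FileDescriptor) *helpers.InstanceListeners {
		return helpers.StandardListenerSetupOn(host)(t, fds)
	}
}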
func trustedClusters(t *testing.T, suite *integrationTestSuite, test trustedClusterTest) {
@ -2333,15 +2364,16 @@ func trustedClusters(t *testing.T, suite *integrationTestSuite, test trustedClus
clusterMain := "cluster-main"
clusterAux := "cluster-aux"
main := helpers.NewInstance(helpers.InstanceConfig{
mainCfg := helpers.InstanceConfig{
ClusterName: clusterMain,
HostID: helpers.HostID,
NodeName: Host,
Priv: suite.Priv,
Pub: suite.Pub,
Log: suite.Log,
Ports: standardPortsOrMuxSetup(test.multiplex),
})
}
mainCfg.Listeners = standardPortsOrMuxSetup(t, test.multiplex, &mainCfg.Fds)
main := helpers.NewInstance(t, mainCfg)
aux := suite.newNamedTeleportInstance(t, clusterAux)
// main cluster has a local user and belongs to role "main-devs" and "main-admins"
@ -2441,9 +2473,7 @@ func trustedClusters(t *testing.T, suite *integrationTestSuite, test trustedClus
tryCreateTrustedCluster(t, aux.Process.GetAuthServer(), trustedCluster)
waitForTunnelConnections(t, main.Process.GetAuthServer(), clusterAux, 1)
nodePorts := helpers.NewPortSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
require.NoError(t, aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort))
sshPort, _, _ := aux.StartNodeAndProxy(t, "aux-node")
// Wait for both clusters to see each other via reverse tunnels.
require.Eventually(t, waitForClusters(main.Tunnel, 1), 10*time.Second, 1*time.Second,
@ -2660,7 +2690,7 @@ func testTrustedTunnelNode(t *testing.T, suite *integrationTestSuite) {
tconf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, aux.GetPortWeb()),
Addr: aux.Web,
},
}
tconf.Auth.Enabled = false
@ -2689,7 +2719,7 @@ func testTrustedTunnelNode(t *testing.T, suite *integrationTestSuite) {
Login: username,
Cluster: clusterAux,
Host: Loopback,
Port: aux.GetPortSSHInt(),
Port: helpers.Port(t, aux.SSH),
})
require.NoError(t, err)
output := &bytes.Buffer{}
@ -2815,7 +2845,7 @@ func testDiscoveryRecovers(t *testing.T, suite *integrationTestSuite) {
Login: username,
Cluster: "cluster-remote",
Host: Loopback,
Port: remote.GetPortSSHInt(),
Port: helpers.Port(t, remote.SSH),
Proxy: conf,
}
output, err := runCommand(t, main, []string{"echo", "hello world"}, clientConf, 10)
@ -2929,7 +2959,7 @@ func testDiscovery(t *testing.T, suite *integrationTestSuite) {
Login: username,
Cluster: "cluster-remote",
Host: Loopback,
Port: remote.GetPortSSHInt(),
Port: helpers.Port(t, remote.SSH),
}
output, err := runCommand(t, main, []string{"echo", "hello world"}, cfg, 1)
require.NoError(t, err)
@ -2943,7 +2973,7 @@ func testDiscovery(t *testing.T, suite *integrationTestSuite) {
Login: username,
Cluster: "cluster-remote",
Host: Loopback,
Port: remote.GetPortSSHInt(),
Port: helpers.Port(t, remote.SSH),
Proxy: &proxyConfig,
}
output, err = runCommand(t, main, []string{"echo", "hello world"}, cfgProxy, 10)
@ -3047,7 +3077,7 @@ func testReverseTunnelCollapse(t *testing.T, suite *integrationTestSuite) {
proxyTunnel, err := main.StartProxy(proxyConfig)
require.NoError(t, err)
proxyOneBackend := utils.MustParseAddr(net.JoinHostPort(Loopback, main.GetPortReverseTunnel()))
proxyOneBackend := utils.MustParseAddr(main.ReverseTunnel)
lb.AddBackend(*proxyOneBackend)
proxyTwoBackend := utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(proxyReverseTunnelPort)))
lb.AddBackend(*proxyTwoBackend)
@ -3190,7 +3220,7 @@ func testDiscoveryNode(t *testing.T, suite *integrationTestSuite) {
proxyTunnel, err := main.StartProxy(proxyConfig)
require.NoError(t, err)
proxyOneBackend := utils.MustParseAddr(net.JoinHostPort(Loopback, main.GetPortReverseTunnel()))
proxyOneBackend := utils.MustParseAddr(main.ReverseTunnel)
lb.AddBackend(*proxyOneBackend)
proxyTwoBackend := utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(proxyReverseTunnelPort)))
lb.AddBackend(*proxyTwoBackend)
@ -3203,7 +3233,7 @@ func testDiscoveryNode(t *testing.T, suite *integrationTestSuite) {
tconf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, main.GetPortWeb()),
Addr: main.Web,
},
}
@ -3492,8 +3522,8 @@ func testExternalClient(t *testing.T, suite *integrationTestSuite) {
execCmd, err := externalSSHCommand(commandOptions{
forwardAgent: tt.inForwardAgent,
socketPath: socketPath,
proxyPort: teleport.GetPortProxy(),
nodePort: teleport.GetPortSSH(),
proxyPort: helpers.PortStr(t, teleport.SSHProxy),
nodePort: helpers.PortStr(t, teleport.SSH),
command: tt.inCommand,
})
require.NoError(t, err)
@ -3593,8 +3623,8 @@ func testControlMaster(t *testing.T, suite *integrationTestSuite) {
forwardAgent: true,
controlPath: controlPath,
socketPath: socketPath,
proxyPort: teleport.GetPortProxy(),
nodePort: teleport.GetPortSSH(),
proxyPort: helpers.PortStr(t, teleport.SSHProxy),
nodePort: helpers.PortStr(t, teleport.SSH),
command: "echo hello",
})
require.NoError(t, err)
@ -3742,7 +3772,7 @@ func testAuditOff(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
cl.Stdout = myTerm
@ -3915,7 +3945,7 @@ func testPAM(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -3959,7 +3989,7 @@ func testRotateSuccess(t *testing.T, suite *integrationTestSuite) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
teleport := suite.NewTeleportInstance()
teleport := suite.NewTeleportInstance(t)
defer teleport.StopAll()
logins := []string{suite.Me.Username}
@ -4045,7 +4075,7 @@ func testRotateSuccess(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Loopback,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
}
clt, err := teleport.NewClientWithCreds(cfg, *initialCreds)
require.NoError(t, err)
@ -4130,7 +4160,7 @@ func testRotateRollback(t *testing.T, s *integrationTestSuite) {
defer cancel()
tconf := s.rotationConfig(true)
teleport := s.NewTeleportInstance()
teleport := s.NewTeleportInstance(t)
defer teleport.StopAll()
logins := []string{s.Me.Username}
for _, login := range logins {
@ -4206,7 +4236,7 @@ func testRotateRollback(t *testing.T, s *integrationTestSuite) {
Login: s.Me.Username,
Cluster: helpers.Site,
Host: Loopback,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
}
clt, err := teleport.NewClientWithCreds(cfg, *initialCreds)
require.NoError(t, err)
@ -4355,7 +4385,7 @@ func testRotateTrustedClusters(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Host: Loopback,
Cluster: clusterAux,
Port: aux.GetPortSSHInt(),
Port: helpers.Port(t, aux.SSH),
}
clt, err := main.NewClientWithCreds(cfg, *initialCreds)
require.NoError(t, err)
@ -4604,7 +4634,7 @@ func testWindowChange(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -4630,7 +4660,7 @@ func testWindowChange(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -4826,7 +4856,7 @@ func testList(t *testing.T, suite *integrationTestSuite) {
cfg := helpers.ClientConfig{
Login: tt.inLogin,
Cluster: helpers.Site,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
}
userClt, err := teleport.NewClientWithCreds(cfg, *initialCreds)
require.NoError(t, err)
@ -4956,7 +4986,7 @@ func testDataTransfer(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
}
// Write 1 MB to stdout.
@ -5067,7 +5097,7 @@ func testBPFInteractive(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
})
require.NoError(t, err)
@ -5190,7 +5220,7 @@ func testBPFExec(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
}
// Run exec command.
@ -5309,7 +5339,7 @@ func testSSHExitCode(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
Interactive: tt.interactive,
})
require.NoError(t, err)
@ -5398,7 +5428,7 @@ func testBPFSessionDifferentiation(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
})
if err != nil {
t.Errorf("Failed to create client: %v.", err)
@ -5506,7 +5536,7 @@ func testExecEvents(t *testing.T, suite *integrationTestSuite) {
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
Interactive: tt.isInteractive,
}
_, err := runCommand(t, main, []string{lsPath}, clientConfig, 1)
@ -5618,7 +5648,7 @@ func testSessionStartContainsAccessRequest(t *testing.T, suite *integrationTestS
Login: suite.Me.Username,
Cluster: helpers.Site,
Host: Host,
Port: main.GetPortSSHInt(),
Port: helpers.Port(t, main.SSH),
Interactive: false,
}
clientReissueParams := client.ReissueParams{
@ -5794,7 +5824,7 @@ func runCommand(t *testing.T, instance *helpers.TeleInstance, cmd []string, cfg
return output.String(), nil
}
type InstanceConfigOption func(config *helpers.InstanceConfig)
type InstanceConfigOption func(t *testing.T, config *helpers.InstanceConfig)
func (s *integrationTestSuite) newNamedTeleportInstance(t *testing.T, clusterName string, opts ...InstanceConfigOption) *helpers.TeleInstance {
cfg := helpers.InstanceConfig{
@ -5804,20 +5834,31 @@ func (s *integrationTestSuite) newNamedTeleportInstance(t *testing.T, clusterNam
Priv: s.Priv,
Pub: s.Pub,
Log: utils.WrapLogger(s.Log.WithField("cluster", clusterName)),
Ports: helpers.StandardPortSetup(),
}
for _, opt := range opts {
opt(&cfg)
opt(t, &cfg)
}
return helpers.NewInstance(cfg)
if cfg.Listeners == nil {
cfg.Listeners = helpers.StandardListenerSetupOn(cfg.NodeName)(t, &cfg.Fds)
}
return helpers.NewInstance(t, cfg)
}
func WithNodeName(nodeName string) InstanceConfigOption {
return func(config *helpers.InstanceConfig) {
return func(_ *testing.T, config *helpers.InstanceConfig) {
config.NodeName = nodeName
}
}
func WithListeners(setupFn helpers.InstanceListenerSetupFunc) InstanceConfigOption {
return func(t *testing.T, config *helpers.InstanceConfig) {
config.Listeners = setupFn(t, &config.Fds)
}
}
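
These options compose at the call site; for example, mirroring the pattern used in testTwoClustersProxy above (suite and addr assumed in scope, and the cluster name is illustrative):

// A cluster pinned to a specific address, with its listeners bound on
// that same address.
inst := suite.newNamedTeleportInstance(t, "example-cluster",
	WithNodeName(addr),
	WithListeners(helpers.StandardListenerSetupOn(addr)),
)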
func (s *integrationTestSuite) defaultServiceConfig() *service.Config {
cfg := service.MakeDefaultConfig()
cfg.Console = nil
@ -5897,7 +5938,7 @@ func TestWebProxyInsecure(t *testing.T) {
privateKey, publicKey, err := testauthority.New().GenerateKeyPair()
require.NoError(t, err)
rc := helpers.NewInstance(helpers.InstanceConfig{
rc := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: "example.com",
HostID: uuid.New().String(),
NodeName: Host,
@ -5927,7 +5968,7 @@ func TestWebProxyInsecure(t *testing.T) {
// Web proxy endpoint should just respond with 200 when called over http://,
// content doesn't matter.
resp, err := http.Get(fmt.Sprintf("http://%v/webapi/ping", net.JoinHostPort(Loopback, rc.GetPortWeb())))
resp, err := http.Get(fmt.Sprintf("http://%v/webapi/ping", rc.Web))
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
require.NoError(t, resp.Body.Close())
@ -5942,7 +5983,7 @@ func TestTraitsPropagation(t *testing.T) {
require.NoError(t, err)
// Create root cluster.
rc := helpers.NewInstance(helpers.InstanceConfig{
rc := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: Host,
@ -5952,7 +5993,7 @@ func TestTraitsPropagation(t *testing.T) {
})
// Create leaf cluster.
lc := helpers.NewInstance(helpers.InstanceConfig{
lc := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New().String(),
NodeName: Host,
@ -5970,7 +6011,7 @@ func TestTraitsPropagation(t *testing.T) {
rcConf.Proxy.DisableWebService = true
rcConf.Proxy.DisableWebInterface = true
rcConf.SSH.Enabled = true
rcConf.SSH.Addr.Addr = net.JoinHostPort(rc.Hostname, rc.GetPortSSH())
rcConf.SSH.Addr.Addr = rc.SSH
rcConf.SSH.Labels = map[string]string{"env": "integration"}
rcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
@ -5982,7 +6023,7 @@ func TestTraitsPropagation(t *testing.T) {
lcConf.Proxy.Enabled = true
lcConf.Proxy.DisableWebInterface = true
lcConf.SSH.Enabled = true
lcConf.SSH.Addr.Addr = net.JoinHostPort(lc.Hostname, lc.GetPortSSH())
lcConf.SSH.Addr.Addr = lc.SSH
lcConf.SSH.Labels = map[string]string{"env": "integration"}
lcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
@ -6031,7 +6072,7 @@ func TestTraitsPropagation(t *testing.T) {
Login: me.Username,
Cluster: "root.example.com",
Host: Loopback,
Port: rc.GetPortSSHInt(),
Port: helpers.Port(t, rc.SSH),
}, 1)
require.NoError(t, err)
require.Equal(t, "hello root", strings.TrimSpace(outputRoot))
@ -6041,7 +6082,7 @@ func TestTraitsPropagation(t *testing.T) {
Login: me.Username,
Cluster: "leaf.example.com",
Host: Loopback,
Port: lc.GetPortSSHInt(),
Port: helpers.Port(t, lc.SSH),
}, 1)
require.NoError(t, err)
require.Equal(t, "hello leaf", strings.TrimSpace(outputLeaf))
@ -6208,7 +6249,7 @@ func testKubeAgentFiltering(t *testing.T, suite *integrationTestSuite) {
Login: testCase.user.GetName(),
Cluster: helpers.Site,
Host: Host,
Port: teleport.GetPortSSHInt(),
Port: helpers.Port(t, teleport.SSH),
})
require.NoError(t, err)
@ -6233,24 +6274,27 @@ func createTrustedClusterPair(t *testing.T, suite *integrationTestSuite, extraSe
leafName := fmt.Sprintf("leaf-%s", name)
// Create root and leaf clusters.
root := helpers.NewInstance(helpers.InstanceConfig{
rootCfg := helpers.InstanceConfig{
ClusterName: rootName,
HostID: helpers.HostID,
NodeName: Host,
Priv: suite.Priv,
Pub: suite.Pub,
Log: suite.Log,
Ports: standardPortsOrMuxSetup(false),
})
leaf := helpers.NewInstance(helpers.InstanceConfig{
}
rootCfg.Listeners = standardPortsOrMuxSetup(t, false, &rootCfg.Fds)
root := helpers.NewInstance(t, rootCfg)
leafCfg := helpers.InstanceConfig{
ClusterName: leafName,
HostID: helpers.HostID,
NodeName: Host,
Priv: suite.Priv,
Pub: suite.Pub,
Log: suite.Log,
Ports: standardPortsOrMuxSetup(false),
})
}
leafCfg.Listeners = standardPortsOrMuxSetup(t, false, &leafCfg.Fds)
leaf := helpers.NewInstance(t, leafCfg)
role, err := types.NewRoleV3("dev", types.RoleSpecV5{
Allow: types.RoleConditions{
@ -6286,23 +6330,17 @@ func createTrustedClusterPair(t *testing.T, suite *integrationTestSuite, extraSe
})
require.NoError(t, root.Start())
require.NoError(t, leaf.Start())
t.Cleanup(func() { root.StopAll() })
require.NoError(t, leaf.Start())
t.Cleanup(func() { leaf.StopAll() })
require.NoError(t, trustedCluster.CheckAndSetDefaults())
tryCreateTrustedCluster(t, leaf.Process.GetAuthServer(), trustedCluster)
waitForTunnelConnections(t, root.Process.GetAuthServer(), leafName, 1)
rootSSHPort := helpers.NewPortValue()
rootProxySSHPort := helpers.NewPortValue()
rootProxyWebPort := helpers.NewPortValue()
require.NoError(t, root.StartNodeAndProxy("root-zero", rootSSHPort, rootProxyWebPort, rootProxySSHPort))
leafSSHPort := helpers.NewPortValue()
leafProxySSHPort := helpers.NewPortValue()
leafProxyWebPort := helpers.NewPortValue()
require.NoError(t, leaf.StartNodeAndProxy("leaf-zero", leafSSHPort, leafProxyWebPort, leafProxySSHPort))
_, _, rootProxySSHPort := root.StartNodeAndProxy(t, "root-zero")
_, _, _ = leaf.StartNodeAndProxy(t, "leaf-zero")
// Add any extra services.
if extraServices != nil {
@ -6352,7 +6390,7 @@ func testListResourcesAcrossClusters(t *testing.T, suite *integrationTestSuite)
conf.Token = "token"
conf.UploadEventsC = i.UploadEventsC
conf.AuthServers = []utils.NetAddr{
*utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortWeb())),
*utils.MustParseAddr(net.JoinHostPort(i.Hostname, helpers.PortStr(t, i.Web))),
}
conf.HostUUID = name
conf.Hostname = name
@ -6383,7 +6421,7 @@ func testListResourcesAcrossClusters(t *testing.T, suite *integrationTestSuite)
}
conf.Kube.KubeconfigPath = filepath.Join(conf.DataDir, "kube_config")
require.NoError(t, helpers.EnableKube(conf, name))
require.NoError(t, helpers.EnableKube(t, conf, name))
conf.Kube.ListenAddr = nil
process, err := service.NewTeleport(conf)
require.NoError(t, err)


@ -169,7 +169,7 @@ func TestKube(t *testing.T) {
func testKubeExec(t *testing.T, suite *KubeSuite) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,
@ -338,7 +338,7 @@ loop:
func testKubeDeny(t *testing.T, suite *KubeSuite) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,
@ -390,7 +390,7 @@ func testKubeDeny(t *testing.T, suite *KubeSuite) {
func testKubePortForward(t *testing.T, suite *KubeSuite) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,
@ -485,7 +485,7 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
// Main cluster doesn't need a kubeconfig to forward requests to auxiliary
// cluster.
mainConf.Proxy.Kube.KubeconfigPath = ""
main := helpers.NewInstance(helpers.InstanceConfig{
main := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: clusterMain,
HostID: helpers.HostID,
NodeName: Host,
@ -508,7 +508,7 @@ func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
clusterAux := "cluster-aux"
auxConf := suite.teleKubeConfig(Host)
aux := helpers.NewInstance(helpers.InstanceConfig{
aux := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: clusterAux,
HostID: helpers.HostID,
NodeName: Host,
@ -736,7 +736,7 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
clusterMain := "cluster-main"
mainConf := suite.teleKubeConfig(Host)
main := helpers.NewInstance(helpers.InstanceConfig{
main := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: clusterMain,
HostID: helpers.HostID,
NodeName: Host,
@ -759,7 +759,7 @@ func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
clusterAux := "cluster-aux"
auxConf := suite.teleKubeConfig(Host)
aux := helpers.NewInstance(helpers.InstanceConfig{
aux := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: clusterAux,
HostID: helpers.HostID,
NodeName: Host,
@ -1011,7 +1011,7 @@ func testKubeDisconnect(t *testing.T, suite *KubeSuite) {
func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,
@ -1083,7 +1083,7 @@ func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase
}()
// let's type something followed by "enter" and then hang the session
require.NoError(t, enterInput(sessionCtx, term, "echo boring platapus\r\n", ".*boring platapus.*"))
require.NoError(t, enterInput(sessionCtx, term, "echo boring platypus\r\n", ".*boring platypus.*"))
time.Sleep(tc.disconnectTimeout)
select {
case <-time.After(tc.disconnectTimeout):
@ -1097,7 +1097,7 @@ func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase
func testKubeTransportProtocol(t *testing.T, suite *KubeSuite) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,
@ -1506,7 +1506,7 @@ func kubeJoin(kubeConfig kubeProxyConfig, tc *client.TeleportClient, sessionID s
func testKubeJoin(t *testing.T, suite *KubeSuite) {
tconf := suite.teleKubeConfig(Host)
teleport := helpers.NewInstance(helpers.InstanceConfig{
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: helpers.Site,
HostID: helpers.HostID,
NodeName: Host,

integration/main_test.go (new file, 44 lines)

@ -0,0 +1,44 @@
/*
Copyright 2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"os"
"testing"
"time"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/srv"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/teleport/common"
)
// TestMain will re-execute Teleport to run a command if "exec" is passed to
// it as an argument. Otherwise, it will run tests as normal.
func TestMain(m *testing.M) {
utils.InitLoggerForTests()
helpers.SetTestTimeouts(100 * time.Millisecond)
// If the test is re-executing itself, execute the command that comes over
// the pipe.
if srv.IsReexec() {
common.Run(common.Options{Args: os.Args[1:]})
return
}
// Otherwise run tests as normal.
code := m.Run()
os.Exit(code)
}
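
The file-descriptor import mechanism that makes this re-exec scheme work is ordinary POSIX FD inheritance: the parent binds the socket, and the child rebuilds a listener from the inherited descriptor rather than binding the port again. A self-contained sketch using only the standard library (not Teleport's actual implementation):

package main

import (
	"fmt"
	"net"
	"os"
	"os/exec"
)

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		child()
		return
	}
	parent()
}

func parent() {
	// Bind the port in the parent, where we can guarantee it is free.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	f, err := ln.(*net.TCPListener).File()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command(os.Args[0], "child")
	cmd.Stdout = os.Stdout
	// The first ExtraFiles entry becomes fd 3 in the child.
	cmd.ExtraFiles = []*os.File{f}
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}

func child() {
	// Rebuild the listener from the inherited fd instead of binding anew.
	ln, err := net.FileListener(os.NewFile(3, "inherited-listener"))
	if err != nil {
		panic(err)
	}
	fmt.Println("child listening on", ln.Addr())
}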


@ -31,9 +31,10 @@ import (
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/client"
"github.com/stretchr/testify/require"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/trace"
"github.com/stretchr/testify/require"
)
func extractPort(svr *httptest.Server) (int, error) {
@ -125,7 +126,7 @@ func testPortForwarding(t *testing.T, suite *integrationTestSuite) {
remotePort, err := extractPort(remoteSvr)
require.NoError(t, err)
nodeSSHPort := teleport.GetPortSSHInt()
nodeSSHPort := helpers.Port(t, teleport.SSH)
cl, err := teleport.NewClient(helpers.ClientConfig{
Login: suite.Me.Username,
Cluster: helpers.Site,


@ -28,17 +28,12 @@ import (
"testing"
"time"
"github.com/google/uuid"
"github.com/gravitational/teleport/api/breaker"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/trace"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/gravitational/teleport/api/constants"
"github.com/gravitational/teleport/api/types"
apiutils "github.com/gravitational/teleport/api/utils"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/auth"
libclient "github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/client/identityfile"
@ -48,10 +43,13 @@ import (
alpncommon "github.com/gravitational/teleport/lib/srv/alpnproxy/common"
"github.com/gravitational/teleport/lib/srv/db/postgres"
"github.com/gravitational/teleport/lib/utils"
"github.com/google/uuid"
"github.com/gravitational/trace"
"github.com/jackc/pgconn"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
type ProxySuite struct {
@ -69,8 +67,8 @@ type proxySuiteOptions struct {
rootClusterNodeName string
leafClusterNodeName string
rootClusterPorts *helpers.InstancePorts
leafClusterPorts *helpers.InstancePorts
rootClusterListeners helpers.InstanceListenerSetupFunc
leafClusterListeners helpers.InstanceListenerSetupFunc
rootTrustedSecretFunc func(suite *ProxySuite) []*helpers.InstanceSecrets
leafTrustedFunc func(suite *ProxySuite) []*helpers.InstanceSecrets
@ -84,33 +82,35 @@ type proxySuiteOptions struct {
func newProxySuite(t *testing.T, opts ...proxySuiteOptionsFunc) *ProxySuite {
options := proxySuiteOptions{
rootClusterNodeName: Host,
leafClusterNodeName: Host,
rootClusterPorts: helpers.SingleProxyPortSetup(),
leafClusterPorts: helpers.SingleProxyPortSetup(),
rootClusterNodeName: Host,
leafClusterNodeName: Host,
rootClusterListeners: helpers.SingleProxyPortSetupOn(Host),
leafClusterListeners: helpers.SingleProxyPortSetupOn(Host),
}
for _, opt := range opts {
opt(&options)
}
rc := helpers.NewInstance(helpers.InstanceConfig{
rCfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: options.rootClusterNodeName,
Log: utils.NewLoggerForTests(),
Ports: options.rootClusterPorts,
})
}
rCfg.Listeners = options.rootClusterListeners(t, &rCfg.Fds)
rc := helpers.NewInstance(t, rCfg)
// Create leaf cluster.
lc := helpers.NewInstance(helpers.InstanceConfig{
lCfg := helpers.InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New().String(),
NodeName: options.leafClusterNodeName,
Priv: rc.Secrets.PrivKey,
Pub: rc.Secrets.PubKey,
Log: utils.NewLoggerForTests(),
Ports: options.leafClusterPorts,
})
}
lCfg.Listeners = options.leafClusterListeners(t, &lCfg.Fds)
lc := helpers.NewInstance(t, lCfg)
suite := &ProxySuite{
root: rc,
leaf: lc,
@ -179,7 +179,7 @@ func (p *ProxySuite) addNodeToLeafCluster(t *testing.T, tunnelNodeHostname strin
tconf.AuthServers = []utils.NetAddr{
{
AddrNetwork: "tcp",
Addr: net.JoinHostPort(Loopback, p.leaf.GetPortWeb()),
Addr: p.leaf.Web,
},
}
tconf.Auth.Enabled = false
@ -284,15 +284,15 @@ func withLeafClusterNodeName(nodeName string) proxySuiteOptionsFunc {
}
}
func withRootClusterPorts(ports *helpers.InstancePorts) proxySuiteOptionsFunc {
func withRootClusterListeners(fn helpers.InstanceListenerSetupFunc) proxySuiteOptionsFunc {
return func(options *proxySuiteOptions) {
options.rootClusterPorts = ports
options.rootClusterListeners = fn
}
}
func withLeafClusterPorts(ports *helpers.InstancePorts) proxySuiteOptionsFunc {
func withLeafClusterListeners(fn helpers.InstanceListenerSetupFunc) proxySuiteOptionsFunc {
return func(options *proxySuiteOptions) {
options.leafClusterPorts = ports
options.leafClusterListeners = fn
}
}
@ -315,11 +315,11 @@ func rootClusterStandardConfig(t *testing.T) func(suite *ProxySuite) *service.Co
config.Auth.Preference.SetSecondFactor("off")
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
config.Proxy.Enabled = true
config.Proxy.WebAddr.Addr = net.JoinHostPort(rc.Hostname, rc.GetPortWeb())
config.Proxy.WebAddr.Addr = rc.Web
config.Proxy.DisableWebService = false
config.Proxy.DisableWebInterface = true
config.SSH.Enabled = true
config.SSH.Addr.Addr = net.JoinHostPort(rc.Hostname, rc.GetPortSSH())
config.SSH.Addr.Addr = rc.SSH
config.SSH.Labels = map[string]string{"env": "integration"}
config.CircuitBreakerConfig = breaker.NoopBreakerConfig()
return config
@ -335,11 +335,11 @@ func leafClusterStandardConfig(t *testing.T) func(suite *ProxySuite) *service.Co
config.Auth.Preference.SetSecondFactor("off")
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
config.Proxy.Enabled = true
config.Proxy.WebAddr.Addr = net.JoinHostPort(lc.Hostname, lc.GetPortWeb())
config.Proxy.WebAddr.Addr = lc.Web
config.Proxy.DisableWebService = false
config.Proxy.DisableWebInterface = true
config.SSH.Enabled = true
config.SSH.Addr.Addr = net.JoinHostPort(lc.Hostname, lc.GetPortSSH())
config.SSH.Addr.Addr = lc.SSH
config.SSH.Labels = map[string]string{"env": "integration"}
config.CircuitBreakerConfig = breaker.NoopBreakerConfig()
return config


@ -54,37 +54,37 @@ import (
func TestALPNSNIProxyMultiCluster(t *testing.T) {
testCase := []struct {
name string
mainClusterPortSetup *helpers.InstancePorts
secondClusterPortSetup *helpers.InstancePorts
mainClusterPortSetup helpers.InstanceListenerSetupFunc
secondClusterPortSetup helpers.InstanceListenerSetupFunc
disableALPNListenerOnRoot bool
disableALPNListenerOnLeaf bool
}{
{
name: "StandardAndOnePortSetupMasterALPNDisabled",
mainClusterPortSetup: helpers.StandardPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
mainClusterPortSetup: helpers.StandardListenerSetup,
secondClusterPortSetup: helpers.SingleProxyPortSetup,
disableALPNListenerOnRoot: true,
},
{
name: "StandardAndOnePortSetup",
mainClusterPortSetup: helpers.StandardPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
mainClusterPortSetup: helpers.StandardListenerSetup,
secondClusterPortSetup: helpers.SingleProxyPortSetup,
},
{
name: "TwoClusterOnePortSetup",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
mainClusterPortSetup: helpers.SingleProxyPortSetup,
secondClusterPortSetup: helpers.SingleProxyPortSetup,
},
{
name: "OnePortAndStandardPortSetupLeafALPNDisabled",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.StandardPortSetup(),
name: "OnePortAndStandardListenerSetupLeafALPNDisabled",
mainClusterPortSetup: helpers.SingleProxyPortSetup,
secondClusterPortSetup: helpers.StandardListenerSetup,
disableALPNListenerOnLeaf: true,
},
{
name: "OnePortAndStandardPortSetup",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.StandardPortSetup(),
name: "OnePortAndStandardListenerSetup",
mainClusterPortSetup: helpers.SingleProxyPortSetup,
secondClusterPortSetup: helpers.StandardListenerSetup,
},
}
@ -102,8 +102,8 @@ func TestALPNSNIProxyMultiCluster(t *testing.T) {
withLeafClusterConfig(leafClusterStandardConfig(t), func(config *service.Config) {
config.Proxy.DisableALPNSNIListener = tc.disableALPNListenerOnLeaf
}),
withRootClusterPorts(tc.mainClusterPortSetup),
withLeafClusterPorts(tc.secondClusterPortSetup),
withRootClusterListeners(tc.mainClusterPortSetup),
withLeafClusterListeners(tc.secondClusterPortSetup),
withRootAndLeafClusterRoles(createTestRole(username)),
withStandardRoleMapping(),
)
@ -112,14 +112,14 @@ func TestALPNSNIProxyMultiCluster(t *testing.T) {
Login: username,
Cluster: suite.root.Secrets.SiteName,
Host: Loopback,
Port: suite.root.GetPortSSHInt(),
Port: helpers.Port(t, suite.root.SSH),
})
// Run command in leaf.
suite.mustConnectToClusterAndRunSSHCommand(t, helpers.ClientConfig{
Login: username,
Cluster: suite.leaf.Secrets.SiteName,
Host: Loopback,
Port: suite.leaf.GetPortSSHInt(),
Port: helpers.Port(t, suite.leaf.SSH),
})
})
}
@ -128,38 +128,38 @@ func TestALPNSNIProxyMultiCluster(t *testing.T) {
// TestALPNSNIProxyTrustedClusterNode tests ssh connection to a trusted cluster node.
func TestALPNSNIProxyTrustedClusterNode(t *testing.T) {
testCase := []struct {
name string
mainClusterPortSetup *helpers.InstancePorts
secondClusterPortSetup *helpers.InstancePorts
disableALPNListenerOnRoot bool
disableALPNListenerOnLeaf bool
name string
mainClusterListenerSetup helpers.InstanceListenerSetupFunc
secondClusterListenerSetup helpers.InstanceListenerSetupFunc
disableALPNListenerOnRoot bool
disableALPNListenerOnLeaf bool
}{
{
name: "StandardAndOnePortSetupMasterALPNDisabled",
mainClusterPortSetup: helpers.StandardPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
disableALPNListenerOnRoot: true,
name: "StandardAndOnePortSetupMasterALPNDisabled",
mainClusterListenerSetup: helpers.StandardListenerSetup,
secondClusterListenerSetup: helpers.SingleProxyPortSetup,
disableALPNListenerOnRoot: true,
},
{
name: "StandardAndOnePortSetup",
mainClusterPortSetup: helpers.StandardPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
name: "StandardAndOnePortSetup",
mainClusterListenerSetup: helpers.StandardListenerSetup,
secondClusterListenerSetup: helpers.SingleProxyPortSetup,
},
{
name: "TwoClusterOnePortSetup",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.SingleProxyPortSetup(),
name: "TwoClusterOnePortSetup",
mainClusterListenerSetup: helpers.SingleProxyPortSetup,
secondClusterListenerSetup: helpers.SingleProxyPortSetup,
},
{
name: "OnePortAndStandardPortSetupLeafALPNDisabled",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.StandardPortSetup(),
disableALPNListenerOnLeaf: true,
name: "OnePortAndStandardListenerSetupLeafALPNDisabled",
mainClusterListenerSetup: helpers.SingleProxyPortSetup,
secondClusterListenerSetup: helpers.StandardListenerSetup,
disableALPNListenerOnLeaf: true,
},
{
name: "OnePortAndStandardPortSetup",
mainClusterPortSetup: helpers.SingleProxyPortSetup(),
secondClusterPortSetup: helpers.StandardPortSetup(),
name: "OnePortAndStandardListenerSetup",
mainClusterListenerSetup: helpers.SingleProxyPortSetup,
secondClusterListenerSetup: helpers.StandardListenerSetup,
},
}
for _, tc := range testCase {
@ -172,8 +172,8 @@ func TestALPNSNIProxyTrustedClusterNode(t *testing.T) {
suite := newProxySuite(t,
withRootClusterConfig(rootClusterStandardConfig(t)),
withLeafClusterConfig(leafClusterStandardConfig(t)),
withRootClusterPorts(tc.mainClusterPortSetup),
withLeafClusterPorts(tc.secondClusterPortSetup),
withRootClusterListeners(tc.mainClusterListenerSetup),
withLeafClusterListeners(tc.secondClusterListenerSetup),
withRootClusterRoles(newRole(t, "maindevs", username)),
withLeafClusterRoles(newRole(t, "auxdevs", username)),
withRootAndLeafTrustedClusterReset(),
@ -189,7 +189,7 @@ func TestALPNSNIProxyTrustedClusterNode(t *testing.T) {
Login: username,
Cluster: suite.leaf.Secrets.SiteName,
Host: Loopback,
Port: suite.leaf.GetPortSSHInt(),
Port: helpers.Port(t, suite.leaf.SSH),
})
// Try and connect to a node in the Aux cluster from the Root cluster using
@ -217,7 +217,10 @@ func TestALPNSNIHTTPSProxy(t *testing.T) {
t.Setenv("http_proxy", u.Host)
username := mustGetCurrentUser(t).Username
// httpproxy won't proxy when target address is localhost, so use this instead.
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
require.NoError(t, err)
@ -226,8 +229,8 @@ func TestALPNSNIHTTPSProxy(t *testing.T) {
withLeafClusterConfig(leafClusterStandardConfig(t)),
withRootClusterNodeName(addr),
withLeafClusterNodeName(addr),
withRootClusterPorts(helpers.SingleProxyPortSetup()),
withLeafClusterPorts(helpers.SingleProxyPortSetup()),
withRootClusterListeners(helpers.SingleProxyPortSetupOn(addr)),
withLeafClusterListeners(helpers.SingleProxyPortSetupOn(addr)),
withRootAndLeafClusterRoles(createTestRole(username)),
withStandardRoleMapping(),
)
@ -255,7 +258,10 @@ func TestMultiPortHTTPSProxy(t *testing.T) {
t.Setenv("http_proxy", u.Host)
username := mustGetCurrentUser(t).Username
// httpproxy won't proxy when target address is localhost, so use this instead.
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
require.NoError(t, err)
@ -264,8 +270,8 @@ func TestMultiPortHTTPSProxy(t *testing.T) {
withLeafClusterConfig(leafClusterStandardConfig(t)),
withRootClusterNodeName(addr),
withLeafClusterNodeName(addr),
withRootClusterPorts(helpers.StandardPortSetup()),
withLeafClusterPorts(helpers.StandardPortSetup()),
withRootClusterListeners(helpers.SingleProxyPortSetupOn(addr)),
withLeafClusterListeners(helpers.SingleProxyPortSetupOn(addr)),
withRootAndLeafClusterRoles(createTestRole(username)),
withStandardRoleMapping(),
)
@ -393,7 +399,7 @@ func TestALPNSNIProxyKubeV2Leaf(t *testing.T) {
// DB protocol is wrapped into TLS and forwarded to proxy ALPN SNI service and routed to appropriate db service.
func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
pack := setupDatabaseTest(t,
withPortSetupDatabaseTest(helpers.SingleProxyPortSetup),
withListenerSetupDatabaseTest(helpers.SingleProxyPortSetup),
withLeafConfig(func(config *service.Config) {
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
}),
@ -404,7 +410,7 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
pack.waitForLeaf(t)
t.Run("mysql", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.GetProxyAddr(), alpncommon.ProtocolMySQL)
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolMySQL)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := mysql.MakeTestClient(common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
@ -457,7 +463,7 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("postgres", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.GetProxyAddr(), alpncommon.ProtocolPostgres)
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolPostgres)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
@ -497,7 +503,7 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
})
t.Run("mongo", func(t *testing.T) {
lp := mustStartALPNLocalProxy(t, pack.root.cluster.GetProxyAddr(), alpncommon.ProtocolMongoDB)
lp := mustStartALPNLocalProxy(t, pack.root.cluster.SSHProxy, alpncommon.ProtocolMongoDB)
t.Run("connect to main cluster via proxy", func(t *testing.T) {
client, err := mongodb.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: pack.root.cluster.GetSiteAPI(pack.root.cluster.Secrets.SiteName),
@ -550,8 +556,8 @@ func TestALPNSNIProxyDatabaseAccess(t *testing.T) {
// TestALPNSNIProxyAppAccess tests application access via ALPN SNI proxy service.
func TestALPNSNIProxyAppAccess(t *testing.T) {
pack := setupWithOptions(t, appTestOptions{
rootClusterPorts: helpers.SingleProxyPortSetup(),
leafClusterPorts: helpers.SingleProxyPortSetup(),
rootClusterListeners: helpers.SingleProxyPortSetup,
leafClusterListeners: helpers.SingleProxyPortSetup,
rootConfig: func(config *service.Config) {
config.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
},
@ -582,8 +588,8 @@ func TestALPNProxyRootLeafAuthDial(t *testing.T) {
suite := newProxySuite(t,
withRootClusterConfig(rootClusterStandardConfig(t)),
withLeafClusterConfig(leafClusterStandardConfig(t)),
withRootClusterPorts(helpers.SingleProxyPortSetup()),
withLeafClusterPorts(helpers.SingleProxyPortSetup()),
withRootClusterListeners(helpers.SingleProxyPortSetup),
withLeafClusterListeners(helpers.SingleProxyPortSetup),
withRootClusterRoles(newRole(t, "rootdevs", username)),
withLeafClusterRoles(newRole(t, "leafdevs", username)),
withRootAndLeafTrustedClusterReset(),
@ -625,13 +631,14 @@ func TestALPNProxyAuthClientConnectWithUserIdentity(t *testing.T) {
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
rc := helpers.NewInstance(helpers.InstanceConfig{
cfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: Loopback,
Log: utils.NewLoggerForTests(),
Ports: helpers.SingleProxyPortSetup(),
})
}
cfg.Listeners = helpers.SingleProxyPortSetup(t, &cfg.Fds)
rc := helpers.NewInstance(t, cfg)
rcConf := service.MakeDefaultConfig()
rcConf.DataDir = t.TempDir()
@ -659,7 +666,7 @@ func TestALPNProxyAuthClientConnectWithUserIdentity(t *testing.T) {
require.NoError(t, err)
tc, err := client.New(context.Background(), client.Config{
Addrs: []string{rc.GetWebAddr()},
Addrs: []string{rc.Web},
Credentials: []client.Credentials{identity},
InsecureAddressDiscovery: true,
})
@ -679,15 +686,16 @@ func TestALPNProxyDialProxySSHWithoutInsecureMode(t *testing.T) {
privateKey, publicKey, err := testauthority.New().GenerateKeyPair()
require.NoError(t, err)
rc := helpers.NewInstance(helpers.InstanceConfig{
rootCfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: Loopback,
Priv: privateKey,
Pub: publicKey,
Log: utils.NewLoggerForTests(),
Ports: helpers.StandardPortSetup(),
})
}
rootCfg.Listeners = helpers.StandardListenerSetup(t, &rootCfg.Fds)
rc := helpers.NewInstance(t, rootCfg)
username := mustGetCurrentUser(t).Username
rc.AddUser(username, []string{username})
@ -744,16 +752,20 @@ func TestALPNProxyHTTPProxyNoProxyDial(t *testing.T) {
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
addr, err := getLocalIP()
require.NoError(t, err)
rc := helpers.NewInstance(helpers.InstanceConfig{
instanceCfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: addr,
Log: utils.NewLoggerForTests(),
Ports: helpers.SingleProxyPortSetup(),
})
}
instanceCfg.Listeners = helpers.SingleProxyPortSetupOn(addr)(t, &instanceCfg.Fds)
rc := helpers.NewInstance(t, instanceCfg)
username := mustGetCurrentUser(t).Username
rc.AddUser(username, []string{username})
@ -785,7 +797,7 @@ func TestALPNProxyHTTPProxyNoProxyDial(t *testing.T) {
t.Setenv("http_proxy", u.Host)
t.Setenv("no_proxy", addr)
rcProxyAddr := net.JoinHostPort(addr, rc.GetPortWeb())
rcProxyAddr := rc.Web
// Start the node, due to no_proxy=127.0.0.1 env variable the connection established
// to the proxy should not go through the http_proxy server.
@ -817,16 +829,25 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) {
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
log := utils.NewLoggerForTests()
// We need to use the non-loopback address for our Teleport cluster, as the
// Go HTTP library will recognize requests to the loopback address and
// refuse to use the HTTP proxy, which will invalidate the test.
rcAddr, err := getLocalIP()
require.NoError(t, err)
rc := helpers.NewInstance(helpers.InstanceConfig{
log.Info("Creating Teleport instance...")
cfg := helpers.InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New().String(),
NodeName: rcAddr,
Log: utils.NewLoggerForTests(),
Ports: helpers.SingleProxyPortSetup(),
})
Log: log,
}
cfg.Listeners = helpers.SingleProxyPortSetupOn(rcAddr)(t, &cfg.Fds)
rc := helpers.NewInstance(t, cfg)
log.Info("Teleport root cluster instance created")
username := mustGetCurrentUser(t).Username
rc.AddUser(username, []string{username})
@ -840,9 +861,13 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) {
rcConf.SSH.Enabled = false
rcConf.CircuitBreakerConfig = breaker.NoopBreakerConfig()
log.Infof("Root cluster config: %#v", rcConf)
log.Info("Creating Root cluster...")
err = rc.CreateEx(t, nil, rcConf)
require.NoError(t, err)
log.Info("Starting Root Cluster...")
err = rc.Start()
require.NoError(t, err)
defer rc.StopAll()
@ -854,6 +879,7 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) {
validPass := "open sesame"
// Create and start http_proxy server.
log.Info("Creating HTTP Proxy server...")
ph := &helpers.ProxyHandler{}
authorizer := helpers.NewProxyAuthorizer(ph, map[string]string{validUser: validPass})
ts := httptest.NewServer(authorizer)
@ -861,8 +887,9 @@ func TestALPNProxyHTTPProxyBasicAuthDial(t *testing.T) {
proxyURL, err := url.Parse(ts.URL)
require.NoError(t, err)
log.Infof("HTTP Proxy server running on %s", proxyURL)
rcProxyAddr := net.JoinHostPort(rcAddr, rc.GetPortWeb())
rcProxyAddr := net.JoinHostPort(rcAddr, helpers.PortStr(t, rc.Web))
// proxy url is just the host with no auth credentials
t.Setenv("http_proxy", proxyURL.Host)


@ -237,7 +237,7 @@ func (p *proxyTunnelStrategy) dialDatabase(t *testing.T) {
connClient, err := postgres.MakeTestClient(context.Background(), common.TestClientConfig{
AuthClient: p.dbAuthClient,
AuthServer: p.auth.Process.GetAuthServer(),
Address: proxy.GetWebAddr(),
Address: proxy.Web,
Cluster: p.cluster,
Username: p.username,
RouteToDatabase: tlsca.RouteToDatabase{
@ -290,7 +290,7 @@ func (p *proxyTunnelStrategy) makeAuth(t *testing.T) {
privateKey, publicKey, err := testauthority.New().GenerateKeyPair()
require.NoError(t, err)
auth := helpers.NewInstance(helpers.InstanceConfig{
auth := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: p.cluster,
HostID: uuid.New().String(),
NodeName: Loopback,
@ -318,14 +318,14 @@ func (p *proxyTunnelStrategy) makeAuth(t *testing.T) {
// makeProxy bootstraps a new teleport proxy instance.
// Its public address points to a load balancer.
func (p *proxyTunnelStrategy) makeProxy(t *testing.T) {
proxy := helpers.NewInstance(helpers.InstanceConfig{
proxy := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: p.cluster,
HostID: uuid.New().String(),
NodeName: Loopback,
Log: utils.NewLoggerForTests(),
})
authAddr := utils.MustParseAddr(net.JoinHostPort(p.auth.Hostname, p.auth.GetPortAuth()))
authAddr := utils.MustParseAddr(p.auth.Auth)
conf := service.MakeDefaultConfig()
conf.AuthServers = append(conf.AuthServers, *authAddr)
@ -335,13 +335,15 @@ func (p *proxyTunnelStrategy) makeProxy(t *testing.T) {
conf.Auth.Enabled = false
conf.SSH.Enabled = false
// TODO: Replace old-style NewPortStr() call with preconfigured listener
conf.Proxy.Enabled = true
conf.Proxy.ReverseTunnelListenAddr.Addr = net.JoinHostPort(Loopback, proxy.GetPortReverseTunnel())
conf.Proxy.SSHAddr.Addr = net.JoinHostPort(Loopback, proxy.GetPortProxy())
conf.Proxy.WebAddr.Addr = net.JoinHostPort(Loopback, proxy.GetPortWeb())
conf.Proxy.ReverseTunnelListenAddr.Addr = proxy.ReverseTunnel
conf.Proxy.SSHAddr.Addr = proxy.SSHProxy
conf.Proxy.WebAddr.Addr = proxy.Web
conf.Proxy.PeerAddr.Addr = net.JoinHostPort(Loopback, helpers.NewPortStr())
conf.Proxy.PublicAddrs = append(conf.Proxy.PublicAddrs, utils.FromAddr(p.lb.Addr()))
conf.Proxy.DisableWebInterface = true
conf.FileDescriptors = proxy.Fds
process, err := service.NewTeleport(conf)
require.NoError(t, err)
@ -361,7 +363,7 @@ func (p *proxyTunnelStrategy) makeNode(t *testing.T) {
require.Fail(t, "node already initialized")
}
node := helpers.NewInstance(helpers.InstanceConfig{
node := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: p.cluster,
HostID: uuid.New().String(),
NodeName: Loopback,
@ -397,7 +399,7 @@ func (p *proxyTunnelStrategy) makeDatabase(t *testing.T) {
dbAddr := net.JoinHostPort(Host, helpers.NewPortStr())
// setup database service
db := helpers.NewInstance(helpers.InstanceConfig{
db := helpers.NewInstance(t, helpers.InstanceConfig{
ClusterName: p.cluster,
HostID: uuid.New().String(),
NodeName: Loopback,


@ -56,6 +56,13 @@ func (t *Terminal) Output(limit int) string {
return strings.TrimSpace(string(buff))
}
// AllOutput returns the entire recorded output from the fake terminal
func (t *Terminal) AllOutput() string {
t.mu.Lock()
defer t.mu.Unlock()
return strings.TrimSpace(t.written.String())
}
func (t *Terminal) Write(data []byte) (n int, err error) {
t.mu.Lock()
defer t.mu.Unlock()


@ -106,7 +106,7 @@ func (process *TeleportProcess) initWindowsDesktopServiceRegistered(log *logrus.
// Start a local listener and let proxies dial in.
case !useTunnel && !cfg.WindowsDesktop.ListenAddr.IsEmpty():
log.Info("Using local listener and registering directly with auth server")
listener, err = process.importOrCreateListener(listenerWindowsDesktop, cfg.WindowsDesktop.ListenAddr.Addr)
listener, err = process.importOrCreateListener(ListenerWindowsDesktop, cfg.WindowsDesktop.ListenAddr.Addr)
if err != nil {
return trace.Wrap(err)
}


@ -127,7 +127,7 @@ func (process *TeleportProcess) initKubernetesService(log *logrus.Entry, conn *C
// Start a local listener and let proxies dial in.
case !conn.UseTunnel() && !cfg.Kube.ListenAddr.IsEmpty():
log.Debug("Turning on Kubernetes service listening address.")
listener, err = process.importOrCreateListener(listenerKube, cfg.Kube.ListenAddr.Addr)
listener, err = process.importOrCreateListener(ListenerKube, cfg.Kube.ListenAddr.Addr)
if err != nil {
return trace.Wrap(err)
}
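
For context, the import-or-create behaviour both of these call sites rely on boils down to: reuse a descriptor the parent process registered for this listener type and address, otherwise bind a fresh socket. A sketch of that logic under assumed names (the real bookkeeping type in lib/service differs):

// fdRecord stands in for Teleport's registered-listener bookkeeping.
type fdRecord struct {
	Type    string
	Address string
	File    *os.File
}

func importOrCreate(registered []fdRecord, ty, addr string) (net.Listener, error) {
	for _, fd := range registered {
		if fd.Type == ty && fd.Address == addr {
			// Reuse a descriptor inherited from the parent process.
			return net.FileListener(fd.File)
		}
	}
	// Nothing registered for this type/address; bind a new socket.
	return net.Listen("tcp", addr)
}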


@ -23,82 +23,82 @@ import (
"github.com/gravitational/teleport/lib/utils"
)
// listenerType identifies different registered listeners in
// ListenerType identifies different registered listeners in
// process.registeredListeners.
type listenerType string
type ListenerType string
var (
listenerAuth = listenerType(teleport.ComponentAuth)
listenerNodeSSH = listenerType(teleport.ComponentNode)
listenerProxySSH = listenerType(teleport.Component(teleport.ComponentProxy, "ssh"))
listenerDiagnostic = listenerType(teleport.ComponentDiagnostic)
listenerProxyKube = listenerType(teleport.Component(teleport.ComponentProxy, "kube"))
listenerKube = listenerType(teleport.ComponentKube)
ListenerAuth = ListenerType(teleport.ComponentAuth)
ListenerNodeSSH = ListenerType(teleport.ComponentNode)
ListenerProxySSH = ListenerType(teleport.Component(teleport.ComponentProxy, "ssh"))
ListenerDiagnostic = ListenerType(teleport.ComponentDiagnostic)
ListenerProxyKube = ListenerType(teleport.Component(teleport.ComponentProxy, "kube"))
ListenerKube = ListenerType(teleport.ComponentKube)
// Proxy can use the same listener for tunnels and web interface
// (multiplexing the requests).
listenerProxyTunnelAndWeb = listenerType(teleport.Component(teleport.ComponentProxy, "tunnel", "web"))
listenerProxyWeb = listenerType(teleport.Component(teleport.ComponentProxy, "web"))
listenerProxyTunnel = listenerType(teleport.Component(teleport.ComponentProxy, "tunnel"))
listenerProxyMySQL = listenerType(teleport.Component(teleport.ComponentProxy, "mysql"))
listenerProxyPostgres = listenerType(teleport.Component(teleport.ComponentProxy, "postgres"))
listenerProxyMongo = listenerType(teleport.Component(teleport.ComponentProxy, "mongo"))
listenerProxyPeer = listenerType(teleport.Component(teleport.ComponentProxy, "peer"))
listenerMetrics = listenerType(teleport.ComponentMetrics)
listenerWindowsDesktop = listenerType(teleport.ComponentWindowsDesktop)
ListenerProxyTunnelAndWeb = ListenerType(teleport.Component(teleport.ComponentProxy, "tunnel", "web"))
ListenerProxyWeb = ListenerType(teleport.Component(teleport.ComponentProxy, "web"))
ListenerProxyTunnel = ListenerType(teleport.Component(teleport.ComponentProxy, "tunnel"))
ListenerProxyMySQL = ListenerType(teleport.Component(teleport.ComponentProxy, "mysql"))
ListenerProxyPostgres = ListenerType(teleport.Component(teleport.ComponentProxy, "postgres"))
ListenerProxyMongo = ListenerType(teleport.Component(teleport.ComponentProxy, "mongo"))
ListenerProxyPeer = ListenerType(teleport.Component(teleport.ComponentProxy, "peer"))
ListenerMetrics = ListenerType(teleport.ComponentMetrics)
ListenerWindowsDesktop = ListenerType(teleport.ComponentWindowsDesktop)
)
// AuthAddr returns the auth server endpoint, if configured and started.
func (process *TeleportProcess) AuthAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerAuth)
return process.registeredListenerAddr(ListenerAuth)
}
// NodeSSHAddr returns the node SSH endpoint, if configured and started.
func (process *TeleportProcess) NodeSSHAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerNodeSSH)
return process.registeredListenerAddr(ListenerNodeSSH)
}
// ProxySSHAddr returns the proxy SSH endpoint, if configured and started.
func (process *TeleportProcess) ProxySSHAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerProxySSH)
return process.registeredListenerAddr(ListenerProxySSH)
}
// DiagnosticAddr returns the diagnostic endpoint, if configured and started.
func (process *TeleportProcess) DiagnosticAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerDiagnostic)
return process.registeredListenerAddr(ListenerDiagnostic)
}
// ProxyKubeAddr returns the proxy Kubernetes endpoint, if configured and
// started.
func (process *TeleportProcess) ProxyKubeAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerProxyKube)
return process.registeredListenerAddr(ListenerProxyKube)
}
// ProxyWebAddr returns the proxy web interface endpoint, if configured and
// started.
func (process *TeleportProcess) ProxyWebAddr() (*utils.NetAddr, error) {
addr, err := process.registeredListenerAddr(listenerProxyTunnelAndWeb)
addr, err := process.registeredListenerAddr(ListenerProxyTunnelAndWeb)
if err == nil {
return addr, nil
}
return process.registeredListenerAddr(listenerProxyWeb)
return process.registeredListenerAddr(ListenerProxyWeb)
}
// ProxyTunnelAddr returns the proxy reverse tunnel endpoint, if configured and
// started.
func (process *TeleportProcess) ProxyTunnelAddr() (*utils.NetAddr, error) {
addr, err := process.registeredListenerAddr(listenerProxyTunnelAndWeb)
addr, err := process.registeredListenerAddr(ListenerProxyTunnelAndWeb)
if err == nil {
return addr, nil
}
return process.registeredListenerAddr(listenerProxyTunnel)
return process.registeredListenerAddr(ListenerProxyTunnel)
}
// ProxyPeerAddr returns the proxy peer address, if configured and started.
func (process *TeleportProcess) ProxyPeerAddr() (*utils.NetAddr, error) {
return process.registeredListenerAddr(listenerProxyPeer)
return process.registeredListenerAddr(ListenerProxyPeer)
}
func (process *TeleportProcess) registeredListenerAddr(typ listenerType) (*utils.NetAddr, error) {
func (process *TeleportProcess) registeredListenerAddr(typ ListenerType) (*utils.NetAddr, error) {
process.Lock()
defer process.Unlock()

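With the configured ports left at 0, the address getters above become the only reliable way for a test to discover where a service actually came up. A hypothetical test snippet:

addr, err := process.ProxyWebAddr()
require.NoError(t, err)
t.Logf("proxy web listener bound to %s", addr) // the real port, not the :0 from the config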
View file

@ -919,7 +919,7 @@ func NewTeleport(cfg *Config, opts ...NewTeleportOption) (*TeleportProcess, erro
if len(process.Config.AuthServers) != 0 && process.Config.AuthServers[0].Port(0) == 0 {
// port appears undefined, attempt early listener creation so that we can get the real port
listener, err := process.importOrCreateListener(listenerAuth, process.Config.Auth.ListenAddr.Addr)
listener, err := process.importOrCreateListener(ListenerAuth, process.Config.Auth.ListenAddr.Addr)
if err == nil {
process.Config.AuthServers = []utils.NetAddr{utils.FromAddr(listener.Addr())}
}
@ -1597,7 +1597,7 @@ func (process *TeleportProcess) initAuthService() error {
if err != nil {
return trace.Wrap(err)
}
listener, err := process.importOrCreateListener(listenerAuth, cfg.Auth.ListenAddr.Addr)
listener, err := process.importOrCreateListener(ListenerAuth, cfg.Auth.ListenAddr.Addr)
if err != nil {
log.Errorf("PID: %v Failed to bind to address %v: %v, exiting.", os.Getpid(), cfg.Auth.ListenAddr.Addr, err)
return trace.Wrap(err)
@ -2300,7 +2300,7 @@ func (process *TeleportProcess) initSSH() error {
}
if !conn.UseTunnel() {
listener, err := process.importOrCreateListener(listenerNodeSSH, cfg.SSH.Addr.Addr)
listener, err := process.importOrCreateListener(ListenerNodeSSH, cfg.SSH.Addr.Addr)
if err != nil {
return trace.Wrap(err)
}
@ -2518,7 +2518,7 @@ func (process *TeleportProcess) initMetricsService() error {
trace.Component: teleport.Component(teleport.ComponentMetrics, process.id),
})
listener, err := process.importOrCreateListener(listenerMetrics, process.Config.Metrics.ListenAddr.Addr)
listener, err := process.importOrCreateListener(ListenerMetrics, process.Config.Metrics.ListenAddr.Addr)
if err != nil {
return trace.Wrap(err)
}
@ -2675,7 +2675,7 @@ func (process *TeleportProcess) initDiagnosticService() error {
}
})
listener, err := process.importOrCreateListener(listenerDiagnostic, process.Config.DiagnosticAddr.Addr)
listener, err := process.importOrCreateListener(ListenerDiagnostic, process.Config.DiagnosticAddr.Addr)
if err != nil {
return trace.Wrap(err)
}
@ -2973,7 +2973,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
var listeners proxyListeners
if !cfg.Proxy.SSHAddr.IsEmpty() {
listeners.ssh, err = process.importOrCreateListener(listenerProxySSH, cfg.Proxy.SSHAddr.Addr)
listeners.ssh, err = process.importOrCreateListener(ListenerProxySSH, cfg.Proxy.SSHAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -2981,7 +2981,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
if cfg.Proxy.Kube.Enabled && !cfg.Proxy.Kube.ListenAddr.IsEmpty() {
process.log.Debugf("Setup Proxy: turning on Kubernetes proxy.")
listener, err := process.importOrCreateListener(listenerProxyKube, cfg.Proxy.Kube.ListenAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyKube, cfg.Proxy.Kube.ListenAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -2991,7 +2991,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
if !cfg.Proxy.DisableDatabaseProxy {
if !cfg.Proxy.MySQLAddr.IsEmpty() {
process.log.Debugf("Setup Proxy: MySQL proxy address: %v.", cfg.Proxy.MySQLAddr.Addr)
listener, err := process.importOrCreateListener(listenerProxyMySQL, cfg.Proxy.MySQLAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyMySQL, cfg.Proxy.MySQLAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3000,7 +3000,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
if !cfg.Proxy.MongoAddr.IsEmpty() {
process.log.Debugf("Setup Proxy: Mongo proxy address: %v.", cfg.Proxy.MongoAddr.Addr)
listener, err := process.importOrCreateListener(listenerProxyMongo, cfg.Proxy.MongoAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyMongo, cfg.Proxy.MongoAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3009,7 +3009,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
if !cfg.Proxy.PostgresAddr.IsEmpty() {
process.log.Debugf("Setup Proxy: Postgres proxy address: %v.", cfg.Proxy.PostgresAddr.Addr)
listener, err := process.importOrCreateListener(listenerProxyPostgres, cfg.Proxy.PostgresAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyPostgres, cfg.Proxy.PostgresAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3034,7 +3034,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
return nil, trace.Wrap(err)
}
listener, err := process.importOrCreateListener(listenerProxyPeer, addr.String())
listener, err := process.importOrCreateListener(ListenerProxyPeer, addr.String())
if err != nil {
return nil, trace.Wrap(err)
}
@ -3048,7 +3048,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
return &listeners, nil
case cfg.Proxy.ReverseTunnelListenAddr == cfg.Proxy.WebAddr && !cfg.Proxy.DisableTLS:
process.log.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy listen on the same port, multiplexing is on.")
listener, err := process.importOrCreateListener(listenerProxyTunnelAndWeb, cfg.Proxy.WebAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyTunnelAndWeb, cfg.Proxy.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3072,7 +3072,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
return &listeners, nil
case cfg.Proxy.EnableProxyProtocol && !cfg.Proxy.DisableWebService && !cfg.Proxy.DisableTLS:
process.log.Debugf("Setup Proxy: Proxy protocol is enabled for web service, multiplexing is on.")
listener, err := process.importOrCreateListener(listenerProxyWeb, cfg.Proxy.WebAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyWeb, cfg.Proxy.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3088,7 +3088,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
listeners.web = listeners.mux.TLS()
process.muxPostgresOnWebPort(cfg, &listeners)
if !cfg.Proxy.ReverseTunnelListenAddr.IsEmpty() {
listeners.reverseTunnel, err = process.importOrCreateListener(listenerProxyTunnel, cfg.Proxy.ReverseTunnelListenAddr.Addr)
listeners.reverseTunnel, err = process.importOrCreateListener(ListenerProxyTunnel, cfg.Proxy.ReverseTunnelListenAddr.Addr)
if err != nil {
listener.Close()
listeners.Close()
@ -3100,14 +3100,14 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
default:
process.log.Debug("Setup Proxy: Proxy and reverse tunnel are listening on separate ports.")
if !cfg.Proxy.DisableReverseTunnel && !cfg.Proxy.ReverseTunnelListenAddr.IsEmpty() {
listeners.reverseTunnel, err = process.importOrCreateListener(listenerProxyTunnel, cfg.Proxy.ReverseTunnelListenAddr.Addr)
listeners.reverseTunnel, err = process.importOrCreateListener(ListenerProxyTunnel, cfg.Proxy.ReverseTunnelListenAddr.Addr)
if err != nil {
listeners.Close()
return nil, trace.Wrap(err)
}
}
if !cfg.Proxy.DisableWebService && !cfg.Proxy.WebAddr.IsEmpty() {
listener, err := process.importOrCreateListener(listenerProxyWeb, cfg.Proxy.WebAddr.Addr)
listener, err := process.importOrCreateListener(ListenerProxyWeb, cfg.Proxy.WebAddr.Addr)
if err != nil {
listeners.Close()
return nil, trace.Wrap(err)
@ -3138,7 +3138,7 @@ func (process *TeleportProcess) setupProxyListeners(networkingConfig types.Clust
// Even if the web service API was disabled, create a web listener used by the ALPN/SNI service as the master port
if cfg.Proxy.DisableWebService && !cfg.Proxy.DisableTLS && listeners.web == nil {
listeners.web, err = process.importOrCreateListener(listenerProxyWeb, cfg.Proxy.WebAddr.Addr)
listeners.web, err = process.importOrCreateListener(ListenerProxyWeb, cfg.Proxy.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
@ -3326,7 +3326,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
process.RegisterCriticalFunc("proxy.reversetunnel.server", func() error {
utils.Consolef(cfg.Console, log, teleport.ComponentProxy, "Reverse tunnel service %s:%s is starting on %v.",
teleport.Version, teleport.Gitref, cfg.Proxy.ReverseTunnelListenAddr.Addr)
log.Infof("Starting %s:%s on %v using %v", teleport.Version, teleport.Gitref, listeners.reverseTunnel.Addr(), process.Config.CachePolicy)
log.Infof("Starting %s:%s on %v using %v", teleport.Version, teleport.Gitref, cfg.Proxy.ReverseTunnelListenAddr.Addr, process.Config.CachePolicy)
if err := tsrv.Start(); err != nil {
log.Error(err)
return trace.Wrap(err)
@ -3493,7 +3493,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
process.RegisterCriticalFunc("proxy.ssh", func() error {
utils.Consolef(cfg.Console, log, teleport.ComponentProxy, "SSH proxy service %s:%s is starting on %v.",
teleport.Version, teleport.Gitref, cfg.Proxy.SSHAddr.Addr)
log.Infof("SSH proxy service %s:%s is starting on %v", teleport.Version, teleport.Gitref, listeners.ssh.Addr())
log.Infof("SSH proxy service %s:%s is starting on %v", teleport.Version, teleport.Gitref, cfg.Proxy.SSHAddr)
go sshProxy.Serve(listeners.ssh)
// broadcast that the proxy ssh server has started
process.BroadcastEvent(Event{Name: ProxySSHReady, Payload: nil})
@ -3581,7 +3581,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
trace.Component: component,
})
log.Infof("Starting Kube proxy on %v.", listeners.kube.Addr())
log.Infof("Starting Kube proxy on %v.", cfg.Proxy.Kube.ListenAddr.Addr)
err := kubeServer.Serve(listeners.kube)
if err != nil && err != http.ErrServerClosed {
log.Warningf("Kube TLS server exited with error: %v.", err)
@ -3661,7 +3661,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
}
if listeners.db.mysql != nil {
process.RegisterCriticalFunc("proxy.db.mysql", func() error {
log.Infof("Starting Database MySQL proxy server on %v.", listeners.db.mysql.Addr())
log.Infof("Starting MySQL proxy server on %v.", cfg.Proxy.MySQLAddr.Addr)
if err := dbProxyServer.ServeMySQL(listeners.db.mysql); err != nil {
log.WithError(err).Warn("MySQL proxy server exited with error.")
}
@ -3670,7 +3670,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
}
if listeners.db.tls != nil {
process.RegisterCriticalFunc("proxy.db.tls", func() error {
log.Infof("Starting Database TLS proxy server on %v.", listeners.db.tls.Addr())
log.Infof("Starting Database TLS proxy server on %v.", cfg.Proxy.WebAddr.Addr)
if err := dbProxyServer.ServeTLS(listeners.db.tls); err != nil {
log.WithError(err).Warn("Database TLS proxy server exited with error.")
}
@ -3680,7 +3680,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
if listeners.db.mongo != nil {
process.RegisterCriticalFunc("proxy.db.mongo", func() error {
log.Infof("Starting Database Mongo proxy server on %v.", listeners.db.mongo.Addr())
log.Infof("Starting Database Mongo proxy server on %v.", cfg.Proxy.MongoAddr.Addr)
if err := dbProxyServer.ServeMongo(listeners.db.mongo, tlsConfigWeb.Clone()); err != nil {
log.WithError(err).Warn("Database Mongo proxy server exited with error.")
}

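The first hunk in this file shows the crux of the dynamic-port support: when the configured auth port is 0, the listener is created early so the real kernel-assigned port can be written back into Config.AuthServers. The underlying trick is plain stdlib Go; a self-contained illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Ask the kernel for any free port...
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()
	// ...then read the concrete address back so it can be advertised.
	fmt.Println("listening on", l.Addr()) // e.g. 127.0.0.1:54321
}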
View file

@ -200,7 +200,7 @@ func (process *TeleportProcess) closeImportedDescriptors(prefix string) error {
// importOrCreateListener imports a listener passed by the parent process (this happens during a live reload)
// or creates a new listener if none was registered
func (process *TeleportProcess) importOrCreateListener(typ listenerType, address string) (net.Listener, error) {
func (process *TeleportProcess) importOrCreateListener(typ ListenerType, address string) (net.Listener, error) {
l, err := process.importListener(typ, address)
if err == nil {
process.log.Infof("Using file descriptor %v %v passed by the parent process.", typ, address)
@ -230,7 +230,7 @@ func (process *TeleportProcess) importSignalPipe() (*os.File, error) {
// importListener imports a listener passed by the parent process; if no listener is found
// it returns NotFound, otherwise it removes the file from the list
func (process *TeleportProcess) importListener(typ listenerType, address string) (net.Listener, error) {
func (process *TeleportProcess) importListener(typ ListenerType, address string) (net.Listener, error) {
process.Lock()
defer process.Unlock()
@ -252,7 +252,7 @@ func (process *TeleportProcess) importListener(typ listenerType, address string)
}
// createListener creates a listener and adds it to the list of tracked listeners
func (process *TeleportProcess) createListener(typ listenerType, address string) (net.Listener, error) {
func (process *TeleportProcess) createListener(typ ListenerType, address string) (net.Listener, error) {
listenersClosed := func() bool {
process.Lock()
defer process.Unlock()
@ -296,7 +296,7 @@ func (process *TeleportProcess) createListener(typ listenerType, address string)
}
// getListenerNeedsLock tries to get an existing listener that matches the type/addr.
func (process *TeleportProcess) getListenerNeedsLock(typ listenerType, address string) (listener net.Listener, ok bool) {
func (process *TeleportProcess) getListenerNeedsLock(typ ListenerType, address string) (listener net.Listener, ok bool) {
for _, l := range process.registeredListeners {
if l.typ == typ && l.address == address {
return l.listener, true
@ -357,7 +357,7 @@ func importFileDescriptors(log logrus.FieldLogger) ([]FileDescriptor, error) {
// within teleport process, can be passed to child process
type registeredListener struct {
// typ is the listener type, e.g. auth:ssh
typ listenerType
typ ListenerType
// address is the address the listener is serving on, e.g. 127.0.0.1:3025
address string
// listener is the file listener object
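To make the import mechanism above concrete, here is a stdlib-only sketch of descriptor passing between a parent and a child process, which is what importListener consumes on the child side (the child path and fd numbering are illustrative):

package main

import (
	"net"
	"os"
	"os/exec"
)

// parentSide binds a socket and hands it to the child process; extra
// files start at fd 3, after stdin, stdout and stderr.
func parentSide(childPath string) error {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	f, err := l.(*net.TCPListener).File()
	if err != nil {
		return err
	}
	cmd := exec.Command(childPath)
	cmd.ExtraFiles = []*os.File{f}
	return cmd.Start()
}

// childSide rebuilds a net.Listener from the inherited descriptor
// instead of binding the address itself.
func childSide() (net.Listener, error) {
	return net.FileListener(os.NewFile(3, "inherited-listener"))
}

func main() {}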