Fix agentless leaf node authorization (#33993)

* Use remoteClient for remoteSite to ensure the correct authorization mechanism is used for OpenSSH leaf nodes; see the sketch below.

* Use remoteClient only for the auth handler access point.

* Resolve nomenclature comments.

* Update integration test to cover same-name role mapping logic.

* Add nil check.

* Apply suggestions from code review

Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com>

---------

Co-authored-by: rosstimothy <39066650+rosstimothy@users.noreply.github.com>
Brian Joerger 2023-10-30 12:23:57 -07:00 committed by GitHub
parent 3348618d47
commit 129e5901d0
4 changed files with 127 additions and 128 deletions
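
The gist of the change, condensed from the reversetunnel diffs below (a simplified before/after sketch of remoteSite.dialAndForward; unrelated fields elided):

    // Before: a single client served both session recording and authorization.
    // For an agentless (OpenSSH) node in a leaf cluster, authorization was
    // evaluated against the root cluster's roles, bypassing the
    // trusted-cluster role mapping.
    serverConfig := forward.ServerConfig{
        AuthClient: s.localClient,
        // ...
    }

    // After: the concerns are split. Session recording stays in the local
    // (root) cluster, while authorization consults the cluster that owns the
    // target node, so the leaf's mapped roles are applied.
    serverConfig := forward.ServerConfig{
        LocalAuthClient:          s.localClient,
        TargetClusterAccessPoint: s.remoteAccessPoint,
        // ...
    }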

@@ -3796,35 +3796,7 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
}
mainCfg.Listeners = standardPortsOrMuxSetup(t, false, &mainCfg.Fds)
main := helpers.NewInstance(t, mainCfg)
aux := suite.newNamedTeleportInstance(t, clusterAux)
// main cluster has a local user and belongs to role "main-devs" and "main-admins"
mainDevs := "main-devs"
devsRole, err := types.NewRole(mainDevs, types.RoleSpecV6{
Options: types.RoleOptions{
ForwardAgent: types.NewBool(true),
},
Allow: types.RoleConditions{
Logins: []string{username},
NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}},
},
})
// If the test is using labels, the cluster will be labeled
// and user will be granted access if labels match.
// Otherwise, to preserve backwards-compatibility
// roles with no labels will grant access to clusters with no labels.
devsRole.SetClusterLabels(types.Allow, types.Labels{"access": []string{"prod"}})
require.NoError(t, err)
mainAdmins := "main-admins"
adminsRole, err := types.NewRole(mainAdmins, types.RoleSpecV6{
Allow: types.RoleConditions{
Logins: []string{"superuser"},
},
})
require.NoError(t, err)
main.AddUserWithRole(username, devsRole, adminsRole)
leaf := suite.newNamedTeleportInstance(t, clusterAux)
// for role mapping test we turn on Web API on the main cluster
// as it's used
@@ -3839,23 +3811,57 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
defer lib.SetInsecureDevMode(false)
require.NoError(t, main.CreateEx(makeConfig(false)))
require.NoError(t, aux.CreateEx(makeConfig(false)))
require.NoError(t, leaf.CreateEx(makeConfig(false)))
// auxiliary cluster has only a role aux-devs
// connect aux cluster to main cluster
// using trusted clusters, so remote user will be allowed to assume
// role specified by mapping remote role "devs" to local role "local-devs"
auxDevs := "aux-devs"
auxRole, err := types.NewRole(auxDevs, types.RoleSpecV6{
// Root and leaf clusters will both have a role named devsRoleName, with different permissions granted in each cluster.
// If role mapping works as expected, the local devsRoleName role should be authorized using the remote devsRoleName role.
// Previously, there was a bug that caused agentless authorization to bypass role mappings.
devsRoleName := "devs"
// Create a local role that grants no privileges.
mainRole, err := types.NewRole(devsRoleName, types.RoleSpecV6{
Allow: types.RoleConditions{
Logins: []string{username},
// If the test is using labels, the cluster will be labeled
// and user will be granted access if labels match.
// Otherwise, to preserve backwards-compatibility
// roles with no labels will grant access to clusters with no labels.
ClusterLabels: types.Labels{"access": []string{"prod"}},
},
})
require.NoError(t, err)
// main.AddUserWithRole(username, devsRole)
// Create a remote role that grants access to leaf nodes and agent forwarding.
// Local users with the local role should be mapped onto this role, granting access.
leafRole, err := types.NewRole(devsRoleName, types.RoleSpecV6{
Options: types.RoleOptions{
ForwardAgent: types.NewBool(true),
},
Allow: types.RoleConditions{
Logins: []string{username},
NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}},
},
})
require.NoError(t, err)
_, err = aux.Process.GetAuthServer().UpsertRole(ctx, auxRole)
// create roles and local user in backend
_, err = main.Process.GetAuthServer().UpsertRole(ctx, mainRole)
require.NoError(t, err)
_, err = leaf.Process.GetAuthServer().UpsertRole(ctx, leafRole)
require.NoError(t, err)
_, err = main.Process.GetAuthServer().UpsertUser(ctx, &types.UserV2{
Kind: types.KindUser,
Metadata: types.Metadata{
Name: username,
},
Spec: types.UserSpecV2{
Roles: []string{devsRoleName},
},
})
require.NoError(t, err)
// Create trusted cluster.
trustedClusterToken := "trusted-cluster-token"
tokenResource, err := types.NewProvisionToken(trustedClusterToken, []types.SystemRole{types.RoleTrustedCluster}, time.Time{})
require.NoError(t, err)
@@ -3864,10 +3870,8 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
tokenResource.SetMetadata(meta)
err = main.Process.GetAuthServer().UpsertToken(ctx, tokenResource)
require.NoError(t, err)
// Note that the mapping omits admins role, this is to cover the scenario
// when root cluster and leaf clusters have different role sets
trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
{Remote: mainDevs, Local: []string{auxDevs}},
{Remote: devsRoleName, Local: []string{devsRoleName}},
})
// modify trusted cluster resource name, so it would not
@@ -3875,39 +3879,23 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
trustedCluster.SetName(main.Secrets.SiteName + "-cluster")
require.NoError(t, main.Start())
require.NoError(t, aux.Start())
// create user in backend
_, err = main.Process.GetAuthServer().UpsertRole(ctx, devsRole)
require.NoError(t, err)
_, err = main.Process.GetAuthServer().UpsertRole(ctx, adminsRole)
require.NoError(t, err)
_, err = main.Process.GetAuthServer().UpsertUser(ctx, &types.UserV2{
Kind: types.KindUser,
Metadata: types.Metadata{
Name: username,
},
Spec: types.UserSpecV2{
Roles: []string{mainDevs, mainAdmins},
},
})
require.NoError(t, err)
require.NoError(t, leaf.Start())
err = trustedCluster.CheckAndSetDefaults()
require.NoError(t, err)
// try and upsert a trusted cluster
helpers.TryCreateTrustedCluster(t, aux.Process.GetAuthServer(), trustedCluster)
helpers.TryCreateTrustedCluster(t, leaf.Process.GetAuthServer(), trustedCluster)
helpers.WaitForTunnelConnections(t, main.Process.GetAuthServer(), clusterAux, 1)
// Wait for both clusters to see each other via reverse tunnels.
require.Eventually(t, helpers.WaitForClusters(main.Tunnel, 1), 10*time.Second, 1*time.Second,
"Two clusters do not see each other: tunnels are not working.")
require.Eventually(t, helpers.WaitForClusters(aux.Tunnel, 1), 10*time.Second, 1*time.Second,
require.Eventually(t, helpers.WaitForClusters(leaf.Tunnel, 1), 10*time.Second, 1*time.Second,
"Two clusters do not see each other: tunnels are not working.")
// create agentless node in leaf cluster
node := createAgentlessNode(t, aux.Process.GetAuthServer(), clusterAux, "leaf-agentless-node")
node := createAgentlessNode(t, leaf.Process.GetAuthServer(), clusterAux, "leaf-agentless-node")
// connect to leaf agentless node
creds, err := helpers.GenerateUserCreds(helpers.UserCredsRequest{
@@ -3919,17 +3907,19 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
// create client for leaf cluster through root cluster
leafTC, err := main.NewClientWithCreds(helpers.ClientConfig{
Login: username,
Cluster: clusterAux,
Host: aux.InstanceListeners.ReverseTunnel,
TeleportUser: username,
Login: username,
Cluster: clusterAux,
Host: leaf.InstanceListeners.ReverseTunnel,
}, *creds)
require.NoError(t, err)
// create client for root cluster
tc, err := main.NewClient(helpers.ClientConfig{
Login: suite.Me.Username,
Cluster: clusterMain,
Host: main.InstanceListeners.ReverseTunnel,
TeleportUser: username,
Login: username,
Cluster: clusterMain,
Host: main.InstanceListeners.ReverseTunnel,
})
require.NoError(t, err)
@@ -3937,7 +3927,7 @@ func testTrustedClusterAgentless(t *testing.T, suite *integrationTestSuite) {
// Stop clusters and remaining nodes.
require.NoError(t, main.StopAll())
require.NoError(t, aux.StopAll())
require.NoError(t, leaf.StopAll())
}
// TestDiscoveryRecovers ensures that discovery protocol recovers from a bad discovery
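
The scenario the updated test encodes, condensed from the hunks above (a sketch; error handling and unrelated fields elided):

    // Both clusters define a role with the same name but different privileges.
    mainRole, _ := types.NewRole("devs", types.RoleSpecV6{ /* cluster labels only, no node access */ })
    leafRole, _ := types.NewRole("devs", types.RoleSpecV6{ /* node access and agent forwarding */ })

    // The trusted cluster maps the root's "devs" onto the leaf's "devs".
    trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
        {Remote: "devs", Local: []string{"devs"}},
    })

    // With the old bug, agentless authorization bypassed the mapping and
    // evaluated the root's privilege-free "devs" role instead of the leaf's
    // "devs" role that actually grants access to the agentless node.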

@@ -397,29 +397,30 @@ func (s *localSite) dialAndForward(params reversetunnelclient.DialParams) (_ net
// server does not need to close, it will close and release all resources
// once conn is closed.
serverConfig := forward.ServerConfig{
AuthClient: s.client,
UserAgent: userAgent,
IsAgentlessNode: params.IsAgentlessNode,
AgentlessSigner: params.AgentlessSigner,
TargetConn: targetConn,
SrcAddr: params.From,
DstAddr: params.To,
HostCertificate: hostCertificate,
Ciphers: s.srv.Config.Ciphers,
KEXAlgorithms: s.srv.Config.KEXAlgorithms,
MACAlgorithms: s.srv.Config.MACAlgorithms,
DataDir: s.srv.Config.DataDir,
Address: params.Address,
UseTunnel: useTunnel,
HostUUID: s.srv.ID,
Emitter: s.srv.Config.Emitter,
ParentContext: s.srv.Context,
LockWatcher: s.srv.LockWatcher,
TargetID: params.ServerID,
TargetAddr: params.To.String(),
TargetHostname: params.Address,
TargetServer: params.TargetServer,
Clock: s.clock,
LocalAuthClient: s.client,
TargetClusterAccessPoint: s.accessPoint,
UserAgent: userAgent,
IsAgentlessNode: params.IsAgentlessNode,
AgentlessSigner: params.AgentlessSigner,
TargetConn: targetConn,
SrcAddr: params.From,
DstAddr: params.To,
HostCertificate: hostCertificate,
Ciphers: s.srv.Config.Ciphers,
KEXAlgorithms: s.srv.Config.KEXAlgorithms,
MACAlgorithms: s.srv.Config.MACAlgorithms,
DataDir: s.srv.Config.DataDir,
Address: params.Address,
UseTunnel: useTunnel,
HostUUID: s.srv.ID,
Emitter: s.srv.Config.Emitter,
ParentContext: s.srv.Context,
LockWatcher: s.srv.LockWatcher,
TargetID: params.ServerID,
TargetAddr: params.To.String(),
TargetHostname: params.Address,
TargetServer: params.TargetServer,
Clock: s.clock,
}
// Ensure the hostname is set correctly if we have details of the target
if params.TargetServer != nil {
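
For the local site the new split is behavior-preserving: the target cluster is the local cluster, so both fields point at local clients (a schematic restatement of the config above):

    // localSite: recording and authorization both happen locally.
    LocalAuthClient:          s.client,
    TargetClusterAccessPoint: s.accessPoint,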

@@ -851,34 +851,32 @@ func (s *remoteSite) dialAndForward(params reversetunnelclient.DialParams) (_ ne
// Create a forwarding server that serves a single SSH connection on it. This
// server does not need to close, it will close and release all resources
// once conn is closed.
//
// Note: A localClient is passed to the forwarding server to make sure the
// session gets recorded in the local cluster instead of the remote cluster.
serverConfig := forward.ServerConfig{
AuthClient: s.localClient,
UserAgent: userAgent,
IsAgentlessNode: params.IsAgentlessNode,
AgentlessSigner: params.AgentlessSigner,
TargetConn: targetConn,
SrcAddr: params.From,
DstAddr: params.To,
HostCertificate: hostCertificate,
Ciphers: s.srv.Config.Ciphers,
KEXAlgorithms: s.srv.Config.KEXAlgorithms,
MACAlgorithms: s.srv.Config.MACAlgorithms,
DataDir: s.srv.Config.DataDir,
Address: params.Address,
UseTunnel: UseTunnel(s.logger, targetConn),
FIPS: s.srv.FIPS,
HostUUID: s.srv.ID,
Emitter: s.srv.Config.Emitter,
ParentContext: s.srv.Context,
LockWatcher: s.srv.LockWatcher,
TargetID: params.ServerID,
TargetAddr: params.To.String(),
TargetHostname: params.Address,
TargetServer: params.TargetServer,
Clock: s.clock,
LocalAuthClient: s.localClient,
TargetClusterAccessPoint: s.remoteAccessPoint,
UserAgent: userAgent,
IsAgentlessNode: params.IsAgentlessNode,
AgentlessSigner: params.AgentlessSigner,
TargetConn: targetConn,
SrcAddr: params.From,
DstAddr: params.To,
HostCertificate: hostCertificate,
Ciphers: s.srv.Config.Ciphers,
KEXAlgorithms: s.srv.Config.KEXAlgorithms,
MACAlgorithms: s.srv.Config.MACAlgorithms,
DataDir: s.srv.Config.DataDir,
Address: params.Address,
UseTunnel: UseTunnel(s.logger, targetConn),
FIPS: s.srv.FIPS,
HostUUID: s.srv.ID,
Emitter: s.srv.Config.Emitter,
ParentContext: s.srv.Context,
LockWatcher: s.srv.LockWatcher,
TargetID: params.ServerID,
TargetAddr: params.To.String(),
TargetHostname: params.Address,
TargetServer: params.TargetServer,
Clock: s.clock,
}
// Ensure the hostname is set correctly if we have details of the target
if params.TargetServer != nil {

@@ -179,12 +179,19 @@ type Server struct {
// ServerConfig is the configuration needed to create an instance of a Server.
type ServerConfig struct {
AuthClient auth.ClientI
UserAgent teleagent.Agent
TargetConn net.Conn
SrcAddr net.Addr
DstAddr net.Addr
HostCertificate ssh.Signer
// LocalAuthClient is a client that provides access to this local cluster.
// This is used for actions that should always happen on the local cluster
// and not remote clusters, such as session recording.
LocalAuthClient auth.ClientI
// TargetClusterAccessPoint is a client that provides access to the cluster
// of the server being connected to, whether it is the local cluster or a
// remote cluster.
TargetClusterAccessPoint srv.AccessPoint
UserAgent teleagent.Agent
TargetConn net.Conn
SrcAddr net.Addr
DstAddr net.Addr
HostCertificate ssh.Signer
// AgentlessSigner is used for client authentication when no SSH
// user agent is provided, ie when connecting to agentless nodes.
@@ -250,8 +257,11 @@ type ServerConfig struct {
// CheckDefaults makes sure all required parameters are passed in.
func (s *ServerConfig) CheckDefaults() error {
if s.AuthClient == nil {
return trace.BadParameter("auth client required")
if s.LocalAuthClient == nil {
return trace.BadParameter("local auth client required")
}
if s.TargetClusterAccessPoint == nil {
return trace.BadParameter("target cluster access point client required")
}
if s.DataDir == "" {
return trace.BadParameter("missing parameter DataDir")
@@ -332,8 +342,8 @@ func New(c ServerConfig) (*Server, error) {
hostCertificate: c.HostCertificate,
useTunnel: c.UseTunnel,
address: c.Address,
authClient: c.AuthClient,
authService: c.AuthClient,
authClient: c.LocalAuthClient,
authService: c.LocalAuthClient,
dataDir: c.DataDir,
clock: c.Clock,
hostUUID: c.HostUUID,
@@ -366,7 +376,7 @@ func New(c ServerConfig) (*Server, error) {
Server: s,
Component: teleport.ComponentForwardingNode,
Emitter: c.Emitter,
AccessPoint: c.AuthClient,
AccessPoint: c.TargetClusterAccessPoint,
TargetServer: c.TargetServer,
FIPS: c.FIPS,
Clock: c.Clock,
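
Taken together: New wires LocalAuthClient into the server's own auth client (which covers local-cluster concerns such as session recording), hands TargetClusterAccessPoint to the authorization layer, and CheckDefaults now rejects a config missing either one. A caller-side sketch under these field names (other required fields elided):

    cfg := forward.ServerConfig{
        LocalAuthClient:          localClient,       // local cluster: recording, server's own auth
        TargetClusterAccessPoint: targetAccessPoint, // target cluster: authorization and role mapping
        // ... remaining required fields ...
    }
    if err := cfg.CheckDefaults(); err != nil {
        // e.g. "local auth client required" or
        // "target cluster access point client required"
        return trace.Wrap(err)
    }
    server, err := forward.New(cfg)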