Extend host lock enforcement to other built-in roles besides Node (#27018)

This PR extends Teleport's support for applying `tctl lock --server-id=<host_id>` to builtin roles other than `RoleNode`.
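
As a rough illustration of the intended workflow (the UUID is a placeholder; `--ttl` and `--message` are pre-existing flags of the lock command):

    $ tctl lock --server-id=1a2b3c4d-0000-0000-0000-000000000000 --ttl=24h --message="host under investigation"

Unlike the now-deprecated `--node` flag, which only locked the `ssh_service`, this locks every Teleport service running on that host.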

Co-authored-by: Zac Bergquist <zac.bergquist@goteleport.com>
Tiago Silva 2023-06-05 18:08:18 +01:00 committed by GitHub
parent 9f8214867a
commit f2a9311f4d
9 changed files with 1625 additions and 1399 deletions


@@ -4421,7 +4421,11 @@ message LockTarget {
// Node specifies the UUID of a Teleport node.
// A matching node is also prevented from heartbeating to the auth server.
string Node = 4 [(gogoproto.jsontag) = "node,omitempty"];
// DEPRECATED: use ServerID instead.
string Node = 4 [
deprecated = true,
(gogoproto.jsontag) = "node,omitempty"
];
// MFADevice specifies the UUID of a user MFA device.
string MFADevice = 5 [(gogoproto.jsontag) = "mfa_device,omitempty"];
@@ -4435,6 +4439,9 @@ message LockTarget {
// Device is the device ID of a trusted device.
// Requires Teleport Enterprise.
string Device = 8 [(gogoproto.jsontag) = "device,omitempty"];
// ServerID is the host id of the Teleport instance.
string ServerID = 9 [(gogoproto.jsontag) = "server_id,omitempty"];
}
// AddressCondition represents a set of addresses. Presently the addresses are specified


@@ -198,6 +198,13 @@ func (c *LockV2) CheckAndSetDefaults() error {
if c.Spec.Target.IsEmpty() {
return trace.BadParameter("at least one target field must be set")
}
// If the user specifies a server ID but not a node, copy the server ID to the node
// field. This is for backwards compatibility with previous versions of Teleport
// so that locking a node still works.
// TODO: DELETE IN 15.0.0
if c.Spec.Target.ServerID != "" && c.Spec.Target.Node == "" {
c.Spec.Target.Node = c.Spec.Target.ServerID
}
return nil
}
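
A minimal sketch of the backfill above, assuming `types.NewLock` invokes `CheckAndSetDefaults` the way the `api/types` constructors generally do (the helper name, lock name, and host UUID are placeholders):

    import (
        "fmt"

        "github.com/gravitational/teleport/api/types"
    )

    func exampleBackfill() error {
        lock, err := types.NewLock("host-lock", types.LockSpecV2{
            Target: types.LockTarget{ServerID: "1a2b3c4d"},
        })
        if err != nil {
            return err
        }
        // The deprecated Node field was backfilled from ServerID, so auth
        // servers that predate ServerID still enforce this lock against the
        // ssh_service.
        fmt.Println(lock.Target().Node) // "1a2b3c4d"
        return nil
    }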
@@ -229,11 +236,17 @@ func (t LockTarget) Match(lock Lock) bool {
return (t.User == "" || lockTarget.User == t.User) &&
(t.Role == "" || lockTarget.Role == t.Role) &&
(t.Login == "" || lockTarget.Login == t.Login) &&
(t.Node == "" || lockTarget.Node == t.Node) &&
(t.MFADevice == "" || lockTarget.MFADevice == t.MFADevice) &&
(t.WindowsDesktop == "" || lockTarget.WindowsDesktop == t.WindowsDesktop) &&
(t.AccessRequest == "" || lockTarget.AccessRequest == t.AccessRequest) &&
(t.Device == "" || lockTarget.Device == t.Device)
(t.Device == "" || lockTarget.Device == t.Device) &&
((t.Node == "" && t.ServerID == "") ||
// Node lock overrides ServerID lock because we want to keep backwards compatibility
// with previous versions of Teleport where a node lock only locked the ssh_service
// and not the other services running on that host.
// Newer versions of Teleport will lock all services based on the ServerID field.
(lockTarget.Node != "" && lockTarget.Node == t.Node) ||
(lockTarget.ServerID != "" && lockTarget.ServerID == t.ServerID))
}
// String returns string representation of the LockTarget.


@@ -70,4 +70,45 @@ func TestLockTargetMatch(t *testing.T) {
// Empty target should match no lock.
emptyTarget := LockTarget{}
require.False(t, emptyTarget.Match(lock))
// Test that we still support old locks with only Node field set and that
// it only applies to nodes.
// For Nodes, LockTarget Node and ServerID fields are both set at the same
// time.
targetNode := LockTarget{
ServerID: "node-uuid",
Node: "node-uuid",
}
// Create a lock with only Node field set (old lock).
lockNode, err := NewLock("some-lock", LockSpecV2{
Target: LockTarget{
Node: "node-uuid",
},
},
)
require.NoError(t, err)
// Test that the old lock with only Node field set matches a target generated
// from a Node identity (Node and ServerID fields set)
require.True(t, targetNode.Match(lockNode))
// Old locks with Node field should not match new lock targets with ServerID field
// set but Node field unset.
targetServerID := LockTarget{
ServerID: "node-uuid",
}
require.False(t, targetServerID.Match(lockNode))
// Test if locks with ServerID apply to nodes and other locks with ServerID.
lockServerID, err := NewLock("some-lock", LockSpecV2{
Target: LockTarget{
ServerID: "node-uuid",
},
},
)
require.NoError(t, err)
// Test that a lock with ServerID field set matches a target generated from a
// Node identity (Node and ServerID fields set)
require.True(t, targetNode.Match(lockServerID))
// Test that a lock with ServerID field set matches any target with ServerID.
require.True(t, targetServerID.Match(lockServerID))
}

(File diff suppressed because it is too large.)


@@ -1322,13 +1322,31 @@ func (a *Server) generateHostCert(
if err != nil {
return nil, trace.Wrap(err)
}
if p.Role == types.RoleNode {
if lockErr := a.checkLockInForce(authPref.GetLockingMode(),
[]types.LockTarget{{Node: p.HostID}, {Node: HostFQDN(p.HostID, p.ClusterName)}},
); lockErr != nil {
return nil, trace.Wrap(lockErr)
}
var locks []types.LockTarget
switch p.Role {
case types.RoleNode:
// Node role is a special case because it was previously supported as a
// lock target that only locked the `ssh_service`. If the same Teleport server
// had multiple roles, a Node lock would only lock the `ssh_service` while
// other roles would be able to generate certificates without a problem.
// To remove the ambiguity, we now lock the entire Teleport server for
// all roles using the LockTarget.ServerID field, and the `Node` field is
// deprecated.
// In order to support legacy behavior, we need to fill in both the `ServerID`
// and `Node` fields if the role is `Node` so that the previous behavior
// is preserved.
locks = []types.LockTarget{{ServerID: p.HostID, Node: p.HostID}, {ServerID: HostFQDN(p.HostID, p.ClusterName), Node: HostFQDN(p.HostID, p.ClusterName)}}
default:
locks = []types.LockTarget{{ServerID: p.HostID}, {ServerID: HostFQDN(p.HostID, p.ClusterName)}}
}
if lockErr := a.checkLockInForce(authPref.GetLockingMode(),
locks,
); lockErr != nil {
return nil, trace.Wrap(lockErr)
}
return a.Authority.GenerateHostCert(p)
}
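
A worked example of the targets computed above, assuming `HostFQDN` joins the host ID and cluster name with a dot (host ID `h1` and cluster `c1` are placeholders; same `api/types` import as the sketch earlier):

    func exampleTargets() (nodeTargets, otherTargets []types.LockTarget) {
        // RoleNode: both fields are set, so legacy Node locks and new
        // ServerID locks both apply.
        nodeTargets = []types.LockTarget{
            {ServerID: "h1", Node: "h1"},
            {ServerID: "h1.c1", Node: "h1.c1"},
        }
        // Any other builtin role (Proxy, Kube, Database, ...): ServerID only,
        // so legacy Node locks keep ignoring them.
        otherTargets = []types.LockTarget{
            {ServerID: "h1"},
            {ServerID: "h1.c1"},
        }
        return nodeTargets, otherTargets
    }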


@@ -2023,7 +2023,7 @@ func TestGenerateHostCertWithLocks(t *testing.T) {
p.clusterName.GetClusterName(), types.RoleNode, time.Minute)
require.NoError(t, err)
target := types.LockTarget{Node: hostID}
target := types.LockTarget{ServerID: hostID}
lockWatch, err := p.a.lockWatcher.Subscribe(ctx, target)
require.NoError(t, err)
defer lockWatch.Close()
@@ -2044,9 +2044,9 @@
require.Error(t, err)
require.EqualError(t, err, services.LockInForceAccessDenied(lock).Error())
// Locks targeting nodes should not apply to other system roles.
// Locks targeting server IDs should apply to other system roles.
_, err = p.a.GenerateHostCert(ctx, pub, hostID, "test-proxy", []string{}, p.clusterName.GetClusterName(), types.RoleProxy, time.Minute)
require.NoError(t, err)
require.Error(t, err)
}
func TestNewWebSession(t *testing.T) {


@@ -180,11 +180,30 @@ Loop:
}
lockTargets = append(lockTargets, unmappedTarget)
}
if r, ok := c.Identity.(BuiltinRole); ok && r.Role == types.RoleNode {
lockTargets = append(lockTargets,
types.LockTarget{Node: r.GetServerID()},
types.LockTarget{Node: r.Identity.Username},
)
if r, ok := c.Identity.(BuiltinRole); ok {
switch r.Role {
// Node role is a special case because it was previously supported as a
// lock target that only locked the `ssh_service`. If the same Teleport server
// had multiple roles, a Node lock would only lock the `ssh_service` while
// other roles would be able to authenticate into Teleport without a problem.
// To remove the ambiguity, we now lock the entire Teleport server for
// all roles using the LockTarget.ServerID field, and the `Node` field is
// deprecated.
// In order to support legacy behavior, we need to fill in both the `ServerID`
// and `Node` fields if the role is `Node` so that the previous behavior
// is preserved.
case types.RoleNode:
lockTargets = append(lockTargets,
types.LockTarget{Node: r.GetServerID(), ServerID: r.GetServerID()},
types.LockTarget{Node: r.Identity.Username, ServerID: r.Identity.Username},
)
default:
lockTargets = append(lockTargets,
types.LockTarget{ServerID: r.GetServerID()},
types.LockTarget{ServerID: r.Identity.Username},
)
}
}
return lockTargets
}


@@ -44,33 +44,94 @@ const clusterName = "test-cluster"
func TestContextLockTargets(t *testing.T) {
t.Parallel()
authContext := &Context{
Identity: BuiltinRole{
Role: types.RoleNode,
ClusterName: "cluster",
Identity: tlsca.Identity{
Username: "node.cluster",
Groups: []string{"role1", "role2"},
DeviceExtensions: tlsca.DeviceExtensions{
DeviceID: "device1",
},
tests := []struct {
role types.SystemRole
want []types.LockTarget
}{
{
role: types.RoleNode,
want: []types.LockTarget{
{Node: "node", ServerID: "node"},
{Node: "node.cluster", ServerID: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
},
},
{
role: types.RoleAuth,
want: []types.LockTarget{
{ServerID: "node"},
{ServerID: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
},
},
{
role: types.RoleProxy,
want: []types.LockTarget{
{ServerID: "node"},
{ServerID: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
},
},
{
role: types.RoleKube,
want: []types.LockTarget{
{ServerID: "node"},
{ServerID: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
},
},
{
role: types.RoleDatabase,
want: []types.LockTarget{
{ServerID: "node"},
{ServerID: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
},
},
UnmappedIdentity: WrapIdentity(tlsca.Identity{
Username: "node.cluster",
Groups: []string{"mapped-role"},
}),
}
expected := []types.LockTarget{
{Node: "node"},
{Node: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
{Device: "device1"},
for _, tt := range tests {
t.Run(tt.role.String(), func(t *testing.T) {
authContext := &Context{
Identity: BuiltinRole{
Role: tt.role,
ClusterName: "cluster",
Identity: tlsca.Identity{
Username: "node.cluster",
Groups: []string{"role1", "role2"},
DeviceExtensions: tlsca.DeviceExtensions{
DeviceID: "device1",
},
},
},
UnmappedIdentity: WrapIdentity(tlsca.Identity{
Username: "node.cluster",
Groups: []string{"mapped-role"},
}),
}
require.ElementsMatch(t, authContext.LockTargets(), tt.want)
})
}
require.ElementsMatch(t, authContext.LockTargets(), expected)
}
func TestAuthorizeWithLocksForLocalUser(t *testing.T) {
@@ -141,29 +202,32 @@ func TestAuthorizeWithLocksForBuiltinRole(t *testing.T) {
ctx := context.Background()
client, watcher, authorizer := newTestResources(t)
for _, role := range types.LocalServiceMappings() {
t.Run(role.String(), func(t *testing.T) {
builtinRole := BuiltinRole{
Username: "node",
Role: role,
Identity: tlsca.Identity{
Username: "node",
},
}
builtinRole := BuiltinRole{
Username: "node",
Role: types.RoleNode,
Identity: tlsca.Identity{
Username: "node",
},
// Apply a node lock.
nodeLock, err := types.NewLock("node-lock", types.LockSpecV2{
Target: types.LockTarget{ServerID: builtinRole.Identity.Username},
})
require.NoError(t, err)
upsertLockWithPutEvent(ctx, t, client, watcher, nodeLock)
_, err = authorizer.Authorize(context.WithValue(ctx, contextUser, builtinRole))
require.Error(t, err)
require.True(t, trace.IsAccessDenied(err))
builtinRole.Identity.Username = ""
_, err = authorizer.Authorize(context.WithValue(ctx, contextUser, builtinRole))
require.NoError(t, err)
})
}
// Apply a node lock.
nodeLock, err := types.NewLock("node-lock", types.LockSpecV2{
Target: types.LockTarget{Node: builtinRole.Identity.Username},
})
require.NoError(t, err)
upsertLockWithPutEvent(ctx, t, client, watcher, nodeLock)
_, err = authorizer.Authorize(context.WithValue(ctx, contextUser, builtinRole))
require.Error(t, err)
require.True(t, trace.IsAccessDenied(err))
builtinRole.Identity.Username = ""
_, err = authorizer.Authorize(context.WithValue(ctx, contextUser, builtinRole))
require.NoError(t, err)
}
func upsertLockWithPutEvent(ctx context.Context, t *testing.T, client *testClient, watcher *services.LockWatcher, lock types.Lock) {


@@ -47,7 +47,13 @@ func (c *LockCommand) Initialize(app *kingpin.Application, config *servicecfg.Co
c.mainCmd.Flag("user", "Name of a Teleport user to disable.").StringVar(&c.spec.Target.User)
c.mainCmd.Flag("role", "Name of a Teleport role to disable.").StringVar(&c.spec.Target.Role)
c.mainCmd.Flag("login", "Name of a local UNIX user to disable.").StringVar(&c.spec.Target.Login)
c.mainCmd.Flag("node", "UUID of a Teleport node to disable.").StringVar(&c.spec.Target.Node)
// Locking a node is now deprecated, but we still support it for backwards compatibility.
// Previously, locking a node would only block that node's `ssh_service` from
// accessing Teleport, but didn't prevent any other roles running on the same instance.
// Now, `tctl lock --server-id` should be used instead: it blocks the entire server,
// and every role it runs (including the `ssh_service`), from accessing Teleport.
// TODO: DELETE IN 15.0.0
c.mainCmd.Flag("node", "UUID of a Teleport node to disable.").Hidden().StringVar(&c.spec.Target.Node)
c.mainCmd.Flag("mfa-device", "UUID of a user MFA device to disable.").StringVar(&c.spec.Target.MFADevice)
c.mainCmd.Flag("windows-desktop", "Name of a Windows desktop to disable.").StringVar(&c.spec.Target.WindowsDesktop)
c.mainCmd.Flag("access-request", "UUID of an access request to disable.").StringVar(&c.spec.Target.AccessRequest)
@@ -55,6 +61,7 @@ func (c *LockCommand) Initialize(app *kingpin.Application, config *servicecfg.Co
c.mainCmd.Flag("message", "Message to display to locked-out users.").StringVar(&c.spec.Message)
c.mainCmd.Flag("expires", "Time point (RFC3339) when the lock expires.").StringVar(&c.expires)
c.mainCmd.Flag("ttl", "Time duration after which the lock expires.").DurationVar(&c.ttl)
c.mainCmd.Flag("server-id", "UUID of a Teleport server to disable.").StringVar(&c.spec.Target.ServerID)
}
// TryRun attempts to run subcommands.
@@ -70,6 +77,15 @@
// CreateLock creates a lock for the main `tctl lock` command.
func (c *LockCommand) CreateLock(ctx context.Context, client auth.ClientI) error {
// Locking a node is now deprecated, but we still support it for backwards compatibility.
// Previously, locking a node would only block that node's `ssh_service` from
// accessing Teleport, but didn't prevent any other roles running on the same instance.
// Now, `tctl lock --server-id` should be used instead to lock the entire server.
// TODO: DELETE IN 15.0.0
if c.spec.Target.Node != "" {
c.config.Log.Warnf("`tctl lock --node <id>` is now deprecated. Please use `tctl lock --server-id <id>` instead.")
}
lockExpiry, err := computeLockExpiry(c.expires, c.ttl)
if err != nil {
return trace.Wrap(err)
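
To close out the deprecation path, a hedged sketch of what an operator would see when using the hidden flag (the UUID is a placeholder; the warning text comes from the `Warnf` call above, though the exact log prefix may differ):

    $ tctl lock --node=1a2b3c4d
    WARN `tctl lock --node <id>` is now deprecated. Please use `tctl lock --server-id <id>` instead.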