Introduce version v7 to Role resources (#26583)

This PR introduces a new role version - `v7` - to support other
Kubernetes resources such as `Deployments`, `Statefulsets`... while
keeping compatibility with previous role versions.

This PR adds the following validations:
- `v6`, `v5`, `v4`: `kubernetes_resources` can only include `kind: pod`
  resources
- `v7`: `kubernetes_resources` can include wildcards or other
  resources (to be introduced by #26244).

It also adds a role version downgrade (v13 clients connecting to v14
auth server) where we downgrade the role's version and remove unsupported
kubernetes_resources from the resource list.

Part of #20811
This commit is contained in:
Tiago Silva 2023-06-13 09:21:42 +01:00 committed by GitHub
parent 4970c1009c
commit 5bf67192e2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 488 additions and 56 deletions

View file

@ -364,6 +364,9 @@ const (
// KindWatchStatus is a kind for WatchStatus resource which contains information about a successful Watch request.
KindWatchStatus = "watch_status"
// V7 is the seventh version of resources.
V7 = "v7"
// V6 is the sixth version of resources.
V6 = "v6"

View file

@ -244,11 +244,17 @@ type Role interface {
SetGroupLabels(RoleConditionType, Labels)
}
// NewRole constructs new standard V6 role.
// This creates a V6 role with V4+ RBAC semantics.
// NewRole constructs new standard V7 role.
// This creates a V7 role with V4+ RBAC semantics.
func NewRole(name string, spec RoleSpecV6) (Role, error) {
role, err := NewRoleWithVersion(name, V7, spec)
return role, trace.Wrap(err)
}
// NewRoleWithVersion constructs new standard role with the version specified.
func NewRoleWithVersion(name string, version string, spec RoleSpecV6) (Role, error) {
role := RoleV6{
Version: V6,
Version: version,
Metadata: Metadata{
Name: name,
},
@ -390,11 +396,53 @@ func (r *RoleV6) SetKubeGroups(rct RoleConditionType, groups []string) {
// access to.
func (r *RoleV6) GetKubeResources(rct RoleConditionType) []KubernetesResource {
if rct == Allow {
return r.Spec.Allow.KubernetesResources
return r.convertKubernetesResourcesBetweenRoleVersions(r.Spec.Allow.KubernetesResources)
}
return r.Spec.Deny.KubernetesResources
}
// convertKubernetesResourcesBetweenRoleVersions converts Kubernetes resources between role versions.
// This is required to keep compatibility between role versions to avoid breaking changes
// when upgrading Teleport.
// For roles v7, it returns the list as it is.
// For older roles <v7, if the kind is pod and name and namespace are wildcards,
// then return a wildcard resource since RoleV6 and below do not restrict access
// to other resources. This is a simple optimization to reduce the number of resources.
// Finally, if the older role version is not a wildcard, then it returns the pod resources as is
// and appends the other supported resources - KubernetesResourcesKinds - for Role v7.
// Unknown role versions yield nil.
func (r *RoleV6) convertKubernetesResourcesBetweenRoleVersions(resources []KubernetesResource) []KubernetesResource {
	switch r.Version {
	case V7:
		// V7 roles already express resources natively; no conversion needed.
		return resources
	// Teleport does not support role versions < v3.
	case V6, V5, V4, V3:
		switch {
		// If role does not have kube labels, return empty list since it won't match
		// any kubernetes cluster.
		case !r.HasLabelMatchers(Allow, KindKubernetesCluster):
			return nil
		// If role is not V7 and resources is wildcard, return wildcard for kind as well.
		// This is an optimization to avoid appending multiple resources.
		// This check ignores the Kind field because `validateKubeResources` ensures
		// that for older roles, the Kind field can only be pod.
		case len(resources) == 1 && resources[0].Name == Wildcard && resources[0].Namespace == Wildcard:
			return []KubernetesResource{{Kind: Wildcard, Name: Wildcard, Namespace: Wildcard}}
		default:
			for _, resource := range KubernetesResourcesKinds {
				// Ignore Pod resources for older roles because Pods were already supported
				// so we don't need to keep backwards compatibility for them.
				if resource == KindKubePod {
					continue
				}
				// NOTE(review): append may write into the caller's backing array
				// if it has spare capacity — confirm callers don't rely on the
				// input slice being untouched.
				resources = append(resources, KubernetesResource{Kind: resource, Name: Wildcard, Namespace: Wildcard})
			}
			return resources
		}
	default:
		return nil
	}
}
// SetKubeResources configures the Kubernetes Resources for the RoleConditionType.
func (r *RoleV6) SetKubeResources(rct RoleConditionType, pods []KubernetesResource) {
if rct == Allow {
@ -833,8 +881,8 @@ func (r *RoleV6) GetPrivateKeyPolicy() keys.PrivateKeyPolicy {
// setStaticFields sets static resource header and metadata fields.
func (r *RoleV6) setStaticFields() {
r.Kind = KindRole
if r.Version != V3 && r.Version != V4 && r.Version != V5 {
r.Version = V6
if r.Version != V3 && r.Version != V4 && r.Version != V5 && r.Version != V6 {
r.Version = V7
}
}
@ -938,7 +986,7 @@ func (r *RoleV6) CheckAndSetDefaults() error {
case V4, V5:
// Labels default to nil/empty for v4+ roles
// Allow unrestricted access to all pods.
if len(r.Spec.Allow.KubernetesResources) == 0 && len(r.Spec.Allow.KubernetesLabels) > 0 {
if len(r.Spec.Allow.KubernetesResources) == 0 && r.HasLabelMatchers(Allow, KindKubernetesCluster) {
r.Spec.Allow.KubernetesResources = []KubernetesResource{
{
Kind: KindKubePod,
@ -948,12 +996,27 @@ func (r *RoleV6) CheckAndSetDefaults() error {
}
}
if err := validateRoleSpecKubeResources(r.Spec); err != nil {
if err := validateRoleSpecKubeResources(r.Version, r.Spec); err != nil {
return trace.Wrap(err)
}
case V6:
if err := validateRoleSpecKubeResources(r.Spec); err != nil {
if err := validateRoleSpecKubeResources(r.Version, r.Spec); err != nil {
return trace.Wrap(err)
}
case V7:
// Kubernetes resources default to {kind:*, name:*, namespace:*} for v7 roles.
if len(r.Spec.Allow.KubernetesResources) == 0 && r.HasLabelMatchers(Allow, KindKubernetesCluster) {
r.Spec.Allow.KubernetesResources = []KubernetesResource{
{
Kind: Wildcard,
Namespace: Wildcard,
Name: Wildcard,
},
}
}
if err := validateRoleSpecKubeResources(r.Version, r.Spec); err != nil {
return trace.Wrap(err)
}
default:
@ -1512,11 +1575,11 @@ func (r *RoleV6) SetPreviewAsRoles(rct RoleConditionType, roles []string) {
// validateRoleSpecKubeResources validates the Allow/Deny Kubernetes Resources
// entries.
func validateRoleSpecKubeResources(spec RoleSpecV6) error {
if err := validateKubeResources(spec.Allow.KubernetesResources); err != nil {
func validateRoleSpecKubeResources(version string, spec RoleSpecV6) error {
if err := validateKubeResources(version, spec.Allow.KubernetesResources); err != nil {
return trace.Wrap(err)
}
if err := validateKubeResources(spec.Deny.KubernetesResources); err != nil {
if err := validateKubeResources(version, spec.Deny.KubernetesResources); err != nil {
return trace.Wrap(err)
}
return nil
@ -1526,11 +1589,22 @@ func validateRoleSpecKubeResources(spec RoleSpecV6) error {
// - Kind belongs to KubernetesResourcesKinds
// - Name is not empty
// - Namespace is not empty
func validateKubeResources(kubeResources []KubernetesResource) error {
func validateKubeResources(roleVersion string, kubeResources []KubernetesResource) error {
for _, kubeResource := range kubeResources {
if !slices.Contains(KubernetesResourcesKinds, kubeResource.Kind) {
return trace.BadParameter("KubernetesResource kind %q is invalid or unsupported; Supported: %v", kubeResource.Kind, KubernetesResourcesKinds)
if !slices.Contains(KubernetesResourcesKinds, kubeResource.Kind) && kubeResource.Kind != Wildcard {
return trace.BadParameter("KubernetesResource kind %q is invalid or unsupported; Supported: %v", kubeResource.Kind, append([]string{Wildcard}, KubernetesResourcesKinds...))
}
// Only Pod resources are supported in role version <=V6.
// This is mandatory because we must append the other resources to the
// kubernetes resources.
switch roleVersion {
// Teleport does not support role versions < v3.
case V6, V5, V4, V3:
if kubeResource.Kind != KindKubePod {
return trace.BadParameter("KubernetesResource %q is not supported in role version %q. Upgrade the role version to %q", kubeResource.Kind, roleVersion, V7)
}
}
if len(kubeResource.Namespace) == 0 {
return trace.BadParameter("KubernetesResource must include Namespace")
}
@ -1660,6 +1734,14 @@ func (r *RoleV6) SetLabelMatchers(rct RoleConditionType, kind string, labelMatch
return trace.BadParameter("can't set label matchers for resource kind %q", kind)
}
// HasLabelMatchers reports whether the role defines non-empty label matchers
// for the given resource kind under the given condition (allow/deny).
// Unsupported kinds yield false.
func (r *RoleV6) HasLabelMatchers(rct RoleConditionType, kind string) bool {
	matchers, err := r.GetLabelMatchers(rct, kind)
	if err != nil {
		return false
	}
	return !matchers.Empty()
}
// LabelMatcherKinds is the complete list of resource kinds that support label
// matchers.
var LabelMatcherKinds = []string{

View file

@ -143,3 +143,198 @@ func TestAccessReviewConditionsIsEmpty(t *testing.T) {
})
}
}
// TestRole_GetKubeResources checks GetKubeResources(Allow) for roles created at
// different versions: v7 roles return their configured resources as-is, while
// pre-v7 roles are converted for compatibility (wildcard pod entries become a
// full wildcard resource; v5 roles with kube labels get default resources).
// It also verifies that the Deny condition stays empty in every case, since no
// deny resources are configured.
func TestRole_GetKubeResources(t *testing.T) {
	kubeLabels := Labels{
		Wildcard: {Wildcard},
	}
	labelsExpression := "contains(user.spec.traits[\"groups\"], \"prod\")"
	// args holds the role fields that vary per test case.
	type args struct {
		version          string
		labels           Labels
		labelsExpression string
		resources        []KubernetesResource
	}
	tests := []struct {
		name                string
		args                args
		want                []KubernetesResource
		assertErrorCreation require.ErrorAssertionFunc
	}{
		// TODO(tigrato): add more tests once we support other kubernetes resources.
		{
			// Unknown kinds must be rejected at role creation time.
			name: "v7 with error",
			args: args{
				version: V7,
				labels:  kubeLabels,
				resources: []KubernetesResource{
					{
						Kind:      "invalid resource",
						Namespace: "test",
						Name:      "test",
					},
				},
			},
			assertErrorCreation: require.Error,
		},
		{
			name: "v7",
			args: args{
				version: V7,
				labels:  kubeLabels,
				resources: []KubernetesResource{
					{
						Kind:      KindKubePod,
						Namespace: "test",
						Name:      "test",
					},
				},
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      KindKubePod,
					Namespace: "test",
					Name:      "test",
				},
			},
		},
		{
			name: "v7 with labels expression",
			args: args{
				version:          V7,
				labelsExpression: labelsExpression,
				resources: []KubernetesResource{
					{
						Kind:      KindKubePod,
						Namespace: "test",
						Name:      "test",
					},
				},
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      KindKubePod,
					Namespace: "test",
					Name:      "test",
				},
			},
		},
		{
			name: "v6 to v7 without wildcard; labels expression",
			args: args{
				version:          V6,
				labelsExpression: labelsExpression,
				resources: []KubernetesResource{
					{
						Kind:      KindKubePod,
						Namespace: "test",
						Name:      "test",
					},
				},
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      KindKubePod,
					Namespace: "test",
					Name:      "test",
				},
			},
		},
		{
			// A v6 role with a fully-wildcarded pod entry is collapsed into a
			// single {*, *, *} resource.
			name: "v6 to v7 with wildcard",
			args: args{
				version: V6,
				labels:  kubeLabels,
				resources: []KubernetesResource{
					{
						Kind:      KindKubePod,
						Namespace: Wildcard,
						Name:      Wildcard,
					},
				},
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      Wildcard,
					Namespace: Wildcard,
					Name:      Wildcard,
				},
			},
		},
		{
			name: "v6 to v7 without wildcard",
			args: args{
				version: V6,
				labels:  kubeLabels,
				resources: []KubernetesResource{
					{
						Kind:      KindKubePod,
						Namespace: "test",
						Name:      "test",
					},
				},
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      KindKubePod,
					Namespace: "test",
					Name:      "test",
				},
			},
		},
		{
			// v5 roles with kube labels but no resources get the wildcard
			// default on CheckAndSetDefaults.
			name: "v5 to v7: populate with defaults.",
			args: args{
				version:   V5,
				labels:    kubeLabels,
				resources: nil,
			},
			assertErrorCreation: require.NoError,
			want: []KubernetesResource{
				{
					Kind:      Wildcard,
					Namespace: Wildcard,
					Name:      Wildcard,
				},
			},
		},
		{
			// Without kube labels the role matches no cluster, so no resources
			// are returned.
			name: "v5 to v7 without kube labels",
			args: args{
				version:   V5,
				resources: nil,
			},
			assertErrorCreation: require.NoError,
			want:                nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r, err := NewRoleWithVersion(
				"test",
				tt.args.version,
				RoleSpecV6{
					Allow: RoleConditions{
						Namespaces:                 []string{"default"},
						KubernetesLabels:           tt.args.labels,
						KubernetesResources:        tt.args.resources,
						KubernetesLabelsExpression: tt.args.labelsExpression,
					},
				},
			)
			tt.assertErrorCreation(t, err)
			if err != nil {
				// Creation failed as expected; nothing further to verify.
				return
			}
			got := r.GetKubeResources(Allow)
			require.Equal(t, tt.want, got)
			// No deny resources were configured, so Deny must be empty.
			got = r.GetKubeResources(Deny)
			require.Empty(t, got)
		})
	}
}

View file

@ -89,7 +89,7 @@ func (res *streamResource) UnmarshalJSON(raw []byte) error {
}
case types.KindRole:
switch header.Version {
case types.V4, types.V5, types.V6:
case types.V4, types.V5, types.V6, types.V7:
resource = &types.RoleV6{}
default:
return trace.BadParameter("unsupported resource version %s", header.Version)

View file

@ -45,7 +45,10 @@ func (bootstrap *Bootstrap) AddUserWithRoles(name string, roles ...string) (type
}
func (bootstrap *Bootstrap) AddRole(name string, spec types.RoleSpecV6) (types.Role, error) {
role, err := types.NewRole(name, spec)
// TODO(justinas|marcoandredinis): Remove this once Test Integration is updated to build tctl
// instead of using the binary from the release tarball.
// https://github.com/gravitational/teleport/issues/27528
role, err := types.NewRoleWithVersion(name, types.V6, spec)
if err != nil {
return nil, trace.Wrap(err)
}

View file

@ -1946,6 +1946,11 @@ func maybeDowngradeRole(ctx context.Context, role *types.RoleV6) (*types.RoleV6,
return nil, trace.BadParameter("unrecognized client version: %s is not a valid semver", clientVersionString)
}
role, err = maybeDowngradeRoleToV6(ctx, role, clientVersion)
if err != nil {
return nil, trace.Wrap(err)
}
role, err = maybeDowngradeRoleLabelExpressions(ctx, role, clientVersion)
if err != nil {
return nil, trace.Wrap(err)
@ -1991,26 +1996,135 @@ func maybeDowngradeRoleLabelExpressions(ctx context.Context, role *types.RoleV6,
return role, nil
}
// minSupportedRoleV7Version is the first client version that understands V7
// roles; older clients receive a downgraded V6 role.
var minSupportedRoleV7Version = semver.New(utils.VersionBeforeAlpha("14.0.0"))

// maybeDowngradeRoleToV6 tests the client version passed through the GRPC metadata, and
// if the client version is less than the minimum supported version
// for V7 roles returns a shallow copy of the given role downgraded to V6. If
// the passed in role is already V6, it is returned unmodified.
func maybeDowngradeRoleToV6(ctx context.Context, role *types.RoleV6, clientVersion *semver.Version) (*types.RoleV6, error) {
	// Nothing to do for up-to-date clients or roles that are not V7.
	if !clientVersion.LessThan(*minSupportedRoleV7Version) || role.Version != types.V7 {
		return role, nil
	}
	log.Debugf(`Client version "%s" is less than 14.0.0, converting role to v6`, clientVersion.String())

	switch downgraded, isRestricted, err := downgradeRoleToV6(role); {
	case err != nil:
		return nil, trace.Wrap(err)
	case isRestricted:
		// The downgrade tightened the role's Kubernetes access; label the role
		// so clients/operators can see it was modified on the way out.
		reason := fmt.Sprintf(`Client version %q does not support Role v7. `+
			`Role %q will be downgraded by adding more stringent restriction rules for Kubernetes clusters which will affect its behavior before returning to the client. `+
			`In order to guarantee the correct behavior, all clients must be updated to version %q or higher.`,
			clientVersion, downgraded.GetName(), minSupportedRoleV7Version)
		if downgraded.Metadata.Labels == nil {
			downgraded.Metadata.Labels = make(map[string]string, 1)
		}
		downgraded.Metadata.Labels[types.TeleportDowngradedLabel] = reason
		log.Debugf(`Downgrading role %q before returning it to the client: %s`,
			role.GetName(), reason)
		return downgraded, nil
	default:
		// Downgrade was lossless; return it without the warning label.
		return downgraded, nil
	}
}
// downgradeRoleToV6 converts a V7 role to V6 so that it will be compatible with
// older instances. Makes a shallow copy if the conversion is necessary. The
// passed in role will not be mutated.
//
// Returns the (possibly downgraded) role, a boolean reporting whether the
// downgrade had to restrict the role's Kubernetes access compared to the
// original V7 semantics, and an error for unrecognized role versions.
// DELETE IN 15.0.0
func downgradeRoleToV6(r *types.RoleV6) (*types.RoleV6, bool, error) {
	switch r.Version {
	case types.V3, types.V4, types.V5, types.V6:
		// Already at or below V6; nothing to convert.
		return r, false, nil
	case types.V7:
		var (
			downgraded types.RoleV6
			restricted bool
		)
		downgraded = *r
		downgraded.Version = types.V6

		if len(downgraded.GetKubeResources(types.Deny)) > 0 {
			// V6 roles don't know about kubernetes resources besides "pod",
			// so if the role denies any other resources, we need to deny all
			// access to kubernetes.
			// This is more restrictive than the original V7 role and it's the best
			// we can do without leaking access to kubernetes resources that V6
			// doesn't know about.
			hasOtherResources := false
			for _, resource := range downgraded.GetKubeResources(types.Deny) {
				if resource.Kind != types.KindKubePod {
					hasOtherResources = true
					break
				}
			}
			if hasOtherResources {
				// If the role has deny rules for resources other than "pod", we
				// need to deny all access to kubernetes because the Kubernetes
				// service requesting this role isn't able to exclude those resources
				// from the responses and the client will receive them.
				downgraded.SetLabelMatchers(
					types.Deny,
					types.KindKubernetesCluster,
					types.LabelMatchers{
						Labels: types.Labels{
							types.Wildcard: []string{types.Wildcard},
						},
					},
				)
				// Clear out the deny list so that the V6 role doesn't include unknown
				// resources in the deny list.
				downgraded.SetKubeResources(types.Deny, nil)
				restricted = true
			}
		}

		if len(downgraded.GetKubeResources(types.Allow)) > 0 {
			// V6 roles don't know about kubernetes resources besides "pod",
			// so if the role allows any resources, we need remove the role
			// from being used for kubernetes access.
			// If the role specifies any kubernetes resources, the V6 role will
			// be unable to be used for kubernetes access because the labels
			// will be empty and won't match anything.
			downgraded.SetLabelMatchers(
				types.Allow,
				types.KindKubernetesCluster,
				types.LabelMatchers{
					Labels: types.Labels{},
				},
			)
			// Clear out the allow list so that the V6 role doesn't include unknown
			// resources in the allow list.
			downgraded.SetKubeResources(types.Allow, nil)
			restricted = true
		}

		return &downgraded, restricted, nil
	default:
		// r.Version is a string; %q (not %T, which would always print
		// "string") reports the actual unrecognized version value.
		return nil, false, trace.BadParameter("unrecognized role version %q", r.Version)
	}
}
// GetRole retrieves a role by name.
func (g *GRPCServer) GetRole(ctx context.Context, req *proto.GetRoleRequest) (*types.RoleV6, error) {
auth, err := g.authenticate(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
role, err := auth.ServerWithRoles.GetRole(ctx, req.Name)
roleI, err := auth.ServerWithRoles.GetRole(ctx, req.Name)
if err != nil {
return nil, trace.Wrap(err)
}
roleV6, ok := role.(*types.RoleV6)
role, ok := roleI.(*types.RoleV6)
if !ok {
return nil, trace.Errorf("encountered unexpected role type: %T", role)
}
downgraded, err := maybeDowngradeRole(ctx, roleV6)
downgraded, err := maybeDowngradeRole(ctx, role)
if err != nil {
return nil, trace.Wrap(err)
}
return downgraded, nil
}
@ -2020,12 +2134,12 @@ func (g *GRPCServer) GetRoles(ctx context.Context, _ *emptypb.Empty) (*proto.Get
if err != nil {
return nil, trace.Wrap(err)
}
roles, err := auth.ServerWithRoles.GetRoles(ctx)
rolesI, err := auth.ServerWithRoles.GetRoles(ctx)
if err != nil {
return nil, trace.Wrap(err)
}
var rolesV6 []*types.RoleV6
for _, r := range roles {
var roles []*types.RoleV6
for _, r := range rolesI {
role, ok := r.(*types.RoleV6)
if !ok {
return nil, trace.BadParameter("unexpected type %T", r)
@ -2034,10 +2148,10 @@ func (g *GRPCServer) GetRoles(ctx context.Context, _ *emptypb.Empty) (*proto.Get
if err != nil {
return nil, trace.Wrap(err)
}
rolesV6 = append(rolesV6, downgraded)
roles = append(roles, downgraded)
}
return &proto.GetRolesResponse{
Roles: rolesV6,
Roles: roles,
}, nil
}

View file

@ -4185,13 +4185,13 @@ func TestRoleVersions(t *testing.T) {
wildcardLabels := types.Labels{types.Wildcard: {types.Wildcard}}
newRole := func(spec types.RoleSpecV6) types.Role {
role, err := types.NewRole("test_rule", spec)
newRole := func(version string, spec types.RoleSpecV6) types.Role {
role, err := types.NewRoleWithVersion("test_rule", version, spec)
require.NoError(t, err)
return role
}
role := newRole(types.RoleSpecV6{
role := newRole(types.V7, types.RoleSpecV6{
Allow: types.RoleConditions{
NodeLabels: wildcardLabels,
AppLabels: wildcardLabels,
@ -4200,12 +4200,27 @@ func TestRoleVersions(t *testing.T) {
Rules: []types.Rule{
types.NewRule(types.KindRole, services.RW()),
},
KubernetesLabels: wildcardLabels,
KubernetesResources: []types.KubernetesResource{
{
Kind: types.Wildcard,
Namespace: types.Wildcard,
Name: types.Wildcard,
},
},
},
Deny: types.RoleConditions{
KubernetesLabels: types.Labels{"env": {"prod"}},
ClusterLabels: types.Labels{"env": {"prod"}},
ClusterLabelsExpression: `labels["env"] == "prod"`,
WindowsDesktopLabelsExpression: `labels["env"] == "prod"`,
KubernetesResources: []types.KubernetesResource{
{
Kind: types.Wildcard,
Namespace: types.Wildcard,
Name: types.Wildcard,
},
},
},
})
@ -4225,10 +4240,34 @@ func TestRoleVersions(t *testing.T) {
{
desc: "up to date",
clientVersions: []string{
minSupportedLabelExpressionVersion.String(), "13.3.0", "14.0.0-alpha.1", "15.1.2", api.Version, "",
"14.0.0-alpha.1", "15.1.2", api.Version, "",
},
expectedRole: role,
},
{
desc: "downgrade role to v6 but supports label expressions",
clientVersions: []string{
minSupportedLabelExpressionVersion.String(), "13.3.0",
},
expectedRole: newRole(types.V6, types.RoleSpecV6{
Allow: types.RoleConditions{
NodeLabels: wildcardLabels,
AppLabels: wildcardLabels,
AppLabelsExpression: `labels["env"] == "staging"`,
DatabaseLabelsExpression: `labels["env"] == "staging"`,
Rules: []types.Rule{
types.NewRule(types.KindRole, services.RW()),
},
},
Deny: types.RoleConditions{
KubernetesLabels: wildcardLabels,
ClusterLabels: types.Labels{"env": {"prod"}},
ClusterLabelsExpression: `labels["env"] == "prod"`,
WindowsDesktopLabelsExpression: `labels["env"] == "prod"`,
},
}),
expectDowngraded: true,
},
{
desc: "bad client versions",
clientVersions: []string{"Not a version", "13", "13.1"},
@ -4237,7 +4276,7 @@ func TestRoleVersions(t *testing.T) {
{
desc: "label expressions downgraded",
clientVersions: []string{"13.0.11", "12.4.3", "6.0.0"},
expectedRole: newRole(
expectedRole: newRole(types.V6,
types.RoleSpecV6{
Allow: types.RoleConditions{
// None of the allow labels change
@ -4251,7 +4290,7 @@ func TestRoleVersions(t *testing.T) {
},
Deny: types.RoleConditions{
// These fields don't change
KubernetesLabels: types.Labels{"env": {"prod"}},
KubernetesLabels: wildcardLabels,
ClusterLabelsExpression: `labels["env"] == "prod"`,
WindowsDesktopLabelsExpression: `labels["env"] == "prod"`,
// These all get set to wildcard deny because there is

View file

@ -188,7 +188,7 @@ func (e *SessionAccessEvaluator) matchesKind(allow []string) bool {
func RoleSupportsModeratedSessions(roles []types.Role) bool {
for _, role := range roles {
switch role.GetVersion() {
case types.V5, types.V6:
case types.V5, types.V6, types.V7:
return true
}
}

View file

@ -381,11 +381,12 @@ func (a *accessChecker) checkAllowedResources(r AccessCheckable) error {
for _, resourceID := range a.info.AllowedResourceIDs {
if resourceID.ClusterName == a.localCluster &&
// If the allowed resource has `Kind=types.KindKubePod`, we allow the user to
// If the allowed resource has `Kind=types.KindKubePod` or any other
// Kubernetes supported kinds - types.KubernetesResourcesKinds-, we allow the user to
// access the Kubernetes cluster that it belongs to.
// At this point, we do not verify that the accessed resource matches the
// allowed resources, but that verification happens in the caller function.
(resourceID.Kind == r.GetKind() || (resourceID.Kind == types.KindKubePod && r.GetKind() == types.KindKubernetesCluster)) &&
(resourceID.Kind == r.GetKind() || (slices.Contains(types.KubernetesResourcesKinds, resourceID.Kind) && r.GetKind() == types.KindKubernetesCluster)) &&
resourceID.Name == r.GetName() {
// Allowed to access this resource by resource ID, move on to role checks.
if isDebugEnabled {

View file

@ -673,7 +673,7 @@ type roleParser struct {
func (p *roleParser) parse(event backend.Event) (types.Resource, error) {
switch event.Type {
case types.OpDelete:
return resourceHeader(event, types.KindRole, types.V6, 1)
return resourceHeader(event, types.KindRole, types.V7, 1)
case types.OpPut:
resource, err := services.UnmarshalRole(event.Item.Value,
services.WithResourceID(event.Item.ID),

View file

@ -35,7 +35,7 @@ func NewPresetEditorRole() types.Role {
enterprise := modules.GetModules().BuildType() == modules.BuildEnterprise
role := &types.RoleV6{
Kind: types.KindRole,
Version: types.V6,
Version: types.V7,
Metadata: types.Metadata{
Name: teleport.PresetEditorRoleName,
Namespace: apidefaults.Namespace,
@ -107,7 +107,7 @@ func NewPresetAccessRole() types.Role {
enterprise := modules.GetModules().BuildType() == modules.BuildEnterprise
role := &types.RoleV6{
Kind: types.KindRole,
Version: types.V6,
Version: types.V7,
Metadata: types.Metadata{
Name: teleport.PresetAccessRoleName,
Namespace: apidefaults.Namespace,
@ -135,7 +135,7 @@ func NewPresetAccessRole() types.Role {
DatabaseRoles: []string{teleport.TraitInternalDBRolesVariable},
KubernetesResources: []types.KubernetesResource{
{
Kind: types.KindKubePod,
Kind: types.Wildcard,
Namespace: types.Wildcard,
Name: types.Wildcard,
},
@ -172,7 +172,7 @@ func NewPresetAccessRole() types.Role {
func NewPresetAuditorRole() types.Role {
role := &types.RoleV6{
Kind: types.KindRole,
Version: types.V6,
Version: types.V7,
Metadata: types.Metadata{
Name: teleport.PresetAuditorRoleName,
Namespace: apidefaults.Namespace,

View file

@ -3109,6 +3109,8 @@ func UnmarshalRole(bytes []byte, opts ...MarshalOption) (types.Role, error) {
}
switch h.Version {
case types.V7:
fallthrough
case types.V6:
fallthrough
case types.V5:

View file

@ -1166,6 +1166,7 @@ func newRole(mut func(*types.RoleV6)) *types.RoleV6 {
},
}
mut(r)
r.CheckAndSetDefaults()
return r
}
@ -6614,8 +6615,7 @@ func TestGetKubeResources(t *testing.T) {
roles: []types.Role{
newRole(func(r *types.RoleV6) {
r.Spec.Allow.KubernetesLabels = types.Labels{"env": {"prod"}}
r.Spec.Allow.KubernetesResources =
[]types.KubernetesResource{podA, podB}
r.Spec.Allow.KubernetesResources = []types.KubernetesResource{podA, podB}
}),
},
clusterLabels: map[string]string{"env": "prod"},
@ -6626,8 +6626,7 @@ func TestGetKubeResources(t *testing.T) {
roles: []types.Role{
newRole(func(r *types.RoleV6) {
r.Spec.Allow.KubernetesLabelsExpression = `labels["env"] == "prod"`
r.Spec.Allow.KubernetesResources =
[]types.KubernetesResource{podA, podB}
r.Spec.Allow.KubernetesResources = []types.KubernetesResource{podA, podB}
}),
},
clusterLabels: map[string]string{"env": "prod"},
@ -6638,13 +6637,11 @@ func TestGetKubeResources(t *testing.T) {
roles: []types.Role{
newRole(func(r *types.RoleV6) {
r.Spec.Allow.KubernetesLabelsExpression = `labels["env"] == "prod"`
r.Spec.Allow.KubernetesResources =
[]types.KubernetesResource{podA, podB}
r.Spec.Allow.KubernetesResources = []types.KubernetesResource{podA, podB}
}),
newRole(func(r *types.RoleV6) {
r.Spec.Deny.KubernetesLabels = types.Labels{"env": {"prod"}}
r.Spec.Deny.KubernetesResources =
[]types.KubernetesResource{podA}
r.Spec.Deny.KubernetesResources = []types.KubernetesResource{podA}
}),
},
clusterLabels: map[string]string{"env": "prod"},
@ -6656,13 +6653,11 @@ func TestGetKubeResources(t *testing.T) {
roles: []types.Role{
newRole(func(r *types.RoleV6) {
r.Spec.Allow.KubernetesLabelsExpression = `labels["env"] == "staging"`
r.Spec.Allow.KubernetesResources =
[]types.KubernetesResource{podA, podB}
r.Spec.Allow.KubernetesResources = []types.KubernetesResource{podA, podB}
}),
newRole(func(r *types.RoleV6) {
r.Spec.Deny.KubernetesLabels = types.Labels{"env": {"prod"}}
r.Spec.Deny.KubernetesResources =
[]types.KubernetesResource{podA}
r.Spec.Deny.KubernetesResources = []types.KubernetesResource{podA}
}),
},
clusterLabels: map[string]string{"env": "staging"},

View file

@ -96,9 +96,7 @@ type RegexpConfig struct {
// The wildcard (*) expansion is also supported.
func KubeResourceMatchesRegex(input types.KubernetesResource, resources []types.KubernetesResource) (bool, error) {
for _, resource := range resources {
// TODO(tigrato): evaluate if we should support wildcards as well
// for future compatibility.
if input.Kind != resource.Kind {
if input.Kind != resource.Kind && resource.Kind != types.Wildcard {
continue
}
switch ok, err := MatchString(input.Name, resource.Name); {

View file

@ -237,7 +237,7 @@ spec:
default: best_effort
desktop: true
ssh_file_copy: true
version: v6
version: v7
`
role, err := types.NewRole("roleName", types.RoleSpecV6{
Allow: types.RoleConditions{