remove tsh kube prefix matching (#31852)

* remove tsh kube prefix matching

* fix retry with relogin for ambiguous clusters
* fix access request func to only request specified cluster
* consolidate test setup for login/proxy kube selection tests
* add more test cases for kube selection
* remove prefix testing

* fix testism

* add origin cloud label in tests

* refactor the check for multiple cluster login into a func
Gavin Frazar 2023-09-18 10:26:40 -07:00 committed by GitHub
parent 7a9a987847
commit 259d32f5b7
5 changed files with 502 additions and 538 deletions


@ -1210,22 +1210,13 @@ func (c *kubeLoginCommand) run(cf *CLIConf) error {
case !c.all && c.getSelectors().IsEmpty():
return trace.BadParameter("kube-cluster name is required. Check 'tsh kube ls' for a list of available clusters.")
}
// If --all, --query, or --labels and --set-context-name are set, ensure
// that the template is valid and can produce distinct context names for
// each cluster before proceeding.
if c.all || c.labels != "" || c.predicateExpression != "" {
err := kubeconfig.CheckContextOverrideTemplate(c.overrideContextName)
if err != nil {
return trace.Wrap(err)
}
}
// NOTE: in case relogin-retry logic is used, we want to avoid having
- // cf.KubernetesCluster set because kube cluster selection by prefix name is
- // not supported in that flow
+ // cf.KubernetesCluster set because kube cluster selection by discovered
+ // name is not supported in that flow
// (it's equivalent to tsh login --kube-cluster=<name>).
// We will set that flag later, after listing the kube clusters and choosing
- // one by prefix/labels/query (if a cluster name/prefix was given).
+ // one by name/labels/query (if a cluster name was given).
cf.Labels = c.labels
cf.PredicateExpression = c.predicateExpression
@ -1238,6 +1229,18 @@ func (c *kubeLoginCommand) run(cf *CLIConf) error {
cf.disableAccessRequest = c.disableAccessRequest
cf.RequestReason = c.requestReason
cf.ListAll = c.all
// If --all, --query, or --labels and --set-context-name are set, multiple
// kube clusters may be logged into - in that case, ensure that the template
// is valid and can produce distinct context names for each cluster before
// proceeding.
if !shouldLoginToOneKubeCluster(cf) {
err := kubeconfig.CheckContextOverrideTemplate(c.overrideContextName)
if err != nil {
return trace.Wrap(err)
}
}
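// Illustrative sketch of the requirement described above: when several
// clusters may be logged into, the --set-context-name template has to yield a
// different context name per cluster, otherwise later clusters would
// overwrite earlier kubeconfig entries. The helper and the KubeName template
// field are assumptions for the example (stdlib "text/template" and
// "strings"), not the real kubeconfig API:
//
//	func producesDistinctContextNames(tmplText string, kubeNames []string) (bool, error) {
//		tmpl, err := template.New("ctx").Parse(tmplText)
//		if err != nil {
//			return false, err
//		}
//		seen := make(map[string]struct{})
//		for _, name := range kubeNames {
//			var rendered strings.Builder
//			if err := tmpl.Execute(&rendered, struct{ KubeName string }{name}); err != nil {
//				return false, err
//			}
//			if _, dup := seen[rendered.String()]; dup {
//				return false, nil // a constant template collapses all clusters onto one context
//			}
//			seen[rendered.String()] = struct{}{}
//		}
//		return true, nil
//	}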
tc, err := makeClient(cf)
if err != nil {
return trace.Wrap(err)
@ -1245,24 +1248,25 @@ func (c *kubeLoginCommand) run(cf *CLIConf) error {
var kubeStatus *kubernetesStatus
err = retryWithAccessRequest(cf, tc, func() error {
return client.RetryWithRelogin(cf.Context, tc, func() error {
// Check that this kube cluster exists.
err := client.RetryWithRelogin(cf.Context, tc, func() error {
var err error
kubeStatus, err = fetchKubeStatus(cf.Context, tc)
if err != nil {
return trace.Wrap(err)
}
err = c.checkClusterSelection(cf, tc, kubeStatus.kubeClusters)
if err != nil {
if trace.IsNotFound(err) {
// rewrap not found error as access denied, so we can retry
// fetching clusters with an access request.
return trace.AccessDenied(err.Error())
}
return trace.Wrap(err)
}
return nil
return trace.Wrap(err)
})
if err != nil {
return trace.Wrap(err)
}
// Check the kube cluster selection.
err = c.checkClusterSelection(cf, tc, kubeStatus.kubeClusters)
if err != nil {
if trace.IsNotFound(err) {
// rewrap not found errors as access denied, so we can retry
// fetching clusters with an access request.
return trace.AccessDenied(err.Error())
}
return trace.Wrap(err)
}
return nil
}, c.accessRequestForKubeCluster, c.selectorsOrWildcard())
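// Summarized shape of the selection flow above (simplified, error handling
// trimmed): only the fetch runs inside the relogin retry, so an ambiguous or
// unknown cluster name no longer triggers a pointless relogin, while a
// NotFound from the selection check is rewrapped as AccessDenied so the
// access-request fallback in retryWithAccessRequest can still kick in.
//
//	retryWithAccessRequest(cf, tc, func() error {
//		err := client.RetryWithRelogin(cf.Context, tc, func() error {
//			kubeStatus, err = fetchKubeStatus(cf.Context, tc) // network call only
//			return trace.Wrap(err)
//		})
//		if err != nil {
//			return trace.Wrap(err)
//		}
//		return c.checkClusterSelection(cf, tc, kubeStatus.kubeClusters) // outside the relogin retry
//	}, c.accessRequestForKubeCluster, c.selectorsOrWildcard())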
if err != nil {
return trace.Wrap(err)
@ -1300,7 +1304,7 @@ func (c *kubeLoginCommand) selectorsOrWildcard() string {
// checkClusterSelection checks the kube clusters selected by user input.
func (c *kubeLoginCommand) checkClusterSelection(cf *CLIConf, tc *client.TeleportClient, clusters types.KubeClusters) error {
- clusters = matchClustersByName(c.kubeCluster, clusters)
+ clusters = matchClustersByNameOrDiscoveredName(c.kubeCluster, clusters)
err := checkClusterSelection(cf, clusters, c.kubeCluster)
if err != nil {
return trace.Wrap(err)
@ -1320,7 +1324,7 @@ func (c *kubeLoginCommand) checkClusterSelection(cf *CLIConf, tc *client.Telepor
func checkClusterSelection(cf *CLIConf, clusters types.KubeClusters, name string) error {
switch {
// If the user is trying to login to a specific cluster, check that it
- // exists and that a cluster matched the name/prefix unambiguously.
+ // exists and that a cluster matched the name unambiguously.
case name != "" && len(clusters) == 1:
return nil
// allow multiple selection without a name.
@ -1351,34 +1355,19 @@ func (c *kubeLoginCommand) getSelectors() resourceSelectors {
}
}
- func matchClustersByName(nameOrPrefix string, clusters types.KubeClusters) types.KubeClusters {
- if nameOrPrefix == "" {
+ func matchClustersByNameOrDiscoveredName(name string, clusters types.KubeClusters) types.KubeClusters {
+ if name == "" {
return clusters
}
// look for an exact full name match.
for _, kc := range clusters {
- if kc.GetName() == nameOrPrefix {
+ if kc.GetName() == name {
return types.KubeClusters{kc}
}
}
// or look for exact "discovered name" matches.
if clusters, ok := findKubeClustersByDiscoveredName(clusters, nameOrPrefix); ok {
return clusters
}
// or just filter by prefix.
var prefixMatches types.KubeClusters
for _, kc := range clusters {
if strings.HasPrefix(kc.GetName(), nameOrPrefix) {
prefixMatches = append(prefixMatches, kc)
}
}
return prefixMatches
}
func findKubeClustersByDiscoveredName(clusters types.KubeClusters, name string) (types.KubeClusters, bool) {
var out types.KubeClusters
for _, kc := range clusters {
discoveredName, ok := kc.GetLabel(types.DiscoveredNameLabel)
@ -1386,7 +1375,7 @@ func findKubeClustersByDiscoveredName(clusters types.KubeClusters, name string)
out = append(out, kc)
}
}
- return out, len(out) > 0
+ return out
}
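// Condensed view of the matching after this change (reconstructed from the
// hunks above, so details may differ from the actual file): an exact name
// match wins, otherwise clusters whose discovered-name label equals the input
// are returned, and prefix matching is gone entirely.
func matchByNameOrDiscoveredNameSketch(name string, clusters types.KubeClusters) types.KubeClusters {
	if name == "" {
		return clusters
	}
	// an exact full name match wins.
	for _, kc := range clusters {
		if kc.GetName() == name {
			return types.KubeClusters{kc}
		}
	}
	// otherwise collect exact "discovered name" label matches.
	var out types.KubeClusters
	for _, kc := range clusters {
		if discovered, ok := kc.GetLabel(types.DiscoveredNameLabel); ok && discovered == name {
			out = append(out, kc)
		}
	}
	return out
}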
func (c *kubeLoginCommand) printUserMessage(cf *CLIConf, tc *client.TeleportClient, names []string) {
@ -1545,7 +1534,7 @@ func buildKubeConfigUpdate(cf *CLIConf, kubeStatus *kubernetesStatus, overrideCo
return nil, trace.NotFound("Kubernetes cluster %q is not registered in this Teleport cluster; you can list registered Kubernetes clusters using 'tsh kube ls'.", cf.KubernetesCluster)
}
// If ListAll or labels/query is not enabled, update only cf.KubernetesCluster cluster.
if !cf.ListAll && cf.Labels == "" && cf.PredicateExpression == "" {
if shouldLoginToOneKubeCluster(cf) {
clusterNames = []string{cf.KubernetesCluster}
}
}
@ -1649,11 +1638,15 @@ func (c *kubeLoginCommand) accessRequestForKubeCluster(ctx context.Context, cf *
}
defer clt.Close()
predicate := ""
if shouldLoginToOneKubeCluster(cf) {
predicate = makeDiscoveredNameOrNamePredicate(c.kubeCluster)
}
kubes, err := apiclient.GetAllResources[types.KubeCluster](ctx, clt.AuthClient, &proto.ListResourcesRequest{
Namespace: apidefaults.Namespace,
ResourceType: types.KindKubernetesCluster,
UseSearchAsRoles: true,
- PredicateExpression: tc.PredicateExpression,
+ PredicateExpression: makePredicateConjunction(predicate, tc.PredicateExpression),
Labels: tc.Labels,
})
if err != nil {
@ -1696,6 +1689,18 @@ func (c *kubeLoginCommand) accessRequestForKubeCluster(ctx context.Context, cf *
return req, nil
}
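// Sketch of how the access request is narrowed to the named cluster. These
// are hypothetical stand-ins for makeDiscoveredNameOrNamePredicate and
// makePredicateConjunction (uses "fmt"); the real predicate syntax may differ.
func nameOrDiscoveredNamePredicateSketch(name string) string {
	// match either the full resource name or its discovered-name label,
	// e.g. for "baz": name == "baz" || labels[types.DiscoveredNameLabel] == "baz".
	return fmt.Sprintf(`name == %q || labels[%q] == %q`, name, types.DiscoveredNameLabel, name)
}

func predicateConjunctionSketch(a, b string) string {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	}
	// both the name predicate and any user-supplied --query must hold,
	// so the request covers only the specified cluster.
	return fmt.Sprintf("(%s) && (%s)", a, b)
}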
// shouldLoginToOneKubeCluster returns whether a single kube cluster should be
// logged into (meaning update the kube config for one kube cluster).
// NOTE: it does not check `cf.KubernetesCluster != ""` because that flag is not
// populated from the kubeLoginCommand flag until later
// (due to the relogin-retry logic interaction with "discovered name" matching).
// `tsh kube login` requires a kube cluster name arg when none of --all,
// --labels, or --query are given, so when none of those flags is set, it
// implies that a kube cluster name was given.
func shouldLoginToOneKubeCluster(cf *CLIConf) bool {
return !cf.ListAll && cf.Labels == "" && cf.PredicateExpression == ""
}
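// For illustration only (simplified CLIConf literals, using the fields shown
// above): how the selector flags decide between single- and multi-cluster login.
//
//	shouldLoginToOneKubeCluster(&CLIConf{})                                        // true:  `tsh kube login foo`
//	shouldLoginToOneKubeCluster(&CLIConf{ListAll: true})                           // false: `tsh kube login --all`
//	shouldLoginToOneKubeCluster(&CLIConf{Labels: "env=prod"})                      // false: `tsh kube login --labels env=prod`
//	shouldLoginToOneKubeCluster(&CLIConf{PredicateExpression: "name == \"foo\""})  // false: `tsh kube login --query ...`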
// formatAmbiguousKubeCluster is a helper func that formats an ambiguous kube
// cluster error message.
func formatAmbiguousKubeCluster(cf *CLIConf, selectors resourceSelectors, kubeClusters types.KubeClusters) string {


@ -540,11 +540,11 @@ func combineMatchedClusters(matchMap map[string]types.KubeClusters) types.KubeCl
}
// matchClustersByNames maps each name to the clusters it matches by exact name
- // or by prefix.
+ // or by discovered name.
func matchClustersByNames(clusters types.KubeClusters, names ...string) map[string]types.KubeClusters {
matchesForNames := make(map[string]types.KubeClusters)
for _, name := range names {
- matchesForNames[name] = matchClustersByName(name, clusters)
+ matchesForNames[name] = matchClustersByNameOrDiscoveredName(name, clusters)
}
return matchesForNames
}


@ -25,10 +25,8 @@ import (
"path"
"strings"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@ -36,14 +34,9 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/gravitational/teleport/api/types"
apiutils "github.com/gravitational/teleport/api/utils"
"github.com/gravitational/teleport/api/utils/keypaths"
"github.com/gravitational/teleport/lib/kube/kubeconfig"
"github.com/gravitational/teleport/lib/service/servicecfg"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/srv/alpnproxy/common"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/teleport/testenv"
)
func (p *kubeTestPack) testProxyKube(t *testing.T) {
@ -93,233 +86,6 @@ func (p *kubeTestPack) testProxyKube(t *testing.T) {
})
}
func TestProxyKubeComplexSelectors(t *testing.T) {
testenv.WithInsecureDevMode(t, true)
testenv.WithResyncInterval(t, 0)
kubeFoo := "foo"
kubeFooBar := "foo-bar"
kubeBaz := "baz-qux"
kubeBazEKS := "baz-eks-us-west-1-123456789012"
kubeFooLeaf := "foo"
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
s := newTestSuite(t,
withRootConfigFunc(func(cfg *servicecfg.Config) {
cfg.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
cfg.SSH.Enabled = false
cfg.Kube.Enabled = true
cfg.Kube.ListenAddr = utils.MustParseAddr(localListenerAddr())
cfg.Kube.KubeconfigPath = newKubeConfigFile(t, kubeFoo, kubeFooBar, kubeBaz)
cfg.Kube.StaticLabels = map[string]string{"env": "root"}
cfg.Kube.ResourceMatchers = []services.ResourceMatcher{{
Labels: map[string]apiutils.Strings{"*": {"*"}},
}}
}),
withLeafCluster(),
withLeafConfigFunc(
func(cfg *servicecfg.Config) {
cfg.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
cfg.SSH.Enabled = false
cfg.Kube.Enabled = true
cfg.Kube.ListenAddr = utils.MustParseAddr(localListenerAddr())
cfg.Kube.KubeconfigPath = newKubeConfigFile(t, kubeFooLeaf)
cfg.Kube.StaticLabels = map[string]string{"env": "leaf"}
},
),
withValidationFunc(func(s *suite) bool {
rootClusters, err := s.root.GetAuthServer().GetKubernetesServers(ctx)
require.NoError(t, err)
leafClusters, err := s.leaf.GetAuthServer().GetKubernetesServers(ctx)
require.NoError(t, err)
return len(rootClusters) == 3 && len(leafClusters) == 1
}),
)
// setup a fake "discovered" kube cluster by adding a discovered name label
// to a dynamic kube cluster.
kc, err := types.NewKubernetesClusterV3(
types.Metadata{
Name: kubeBazEKS,
Labels: map[string]string{
types.DiscoveredNameLabel: "baz",
types.OriginLabel: types.OriginDynamic,
},
},
types.KubernetesClusterSpecV3{
Kubeconfig: newKubeConfig(t, kubeBazEKS),
},
)
require.NoError(t, err)
err = s.root.GetAuthServer().CreateKubernetesCluster(ctx, kc)
require.NoError(t, err)
require.EventuallyWithT(t, func(c *assert.CollectT) {
servers, err := s.root.GetAuthServer().GetKubernetesServers(ctx)
assert.NoError(c, err)
for _, ks := range servers {
if ks.GetName() == kubeBazEKS {
return
}
}
assert.Fail(c, "kube server not found")
}, time.Second*10, time.Millisecond*500, "failed to find dynamically created kube cluster %v", kubeBazEKS)
rootClusterName := s.root.Config.Auth.ClusterName.GetClusterName()
leafClusterName := s.leaf.Config.Auth.ClusterName.GetClusterName()
tests := []struct {
desc string
makeValidateCmdFn func(*testing.T) func(*exec.Cmd) error
args []string
wantErr string
}{
{
desc: "with full name",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFoo)
return nil
}
},
args: []string{kubeFoo, "--insecure"},
},
{
desc: "with discovered name",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeBazEKS)
return nil
}
},
args: []string{"baz", "--insecure"},
},
{
desc: "with prefix name",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFooBar)
return nil
}
},
args: []string{"foo-b", "--insecure"},
},
{
desc: "with labels",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFoo)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFooBar)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeBaz)
return nil
}
},
args: []string{"--labels", "env=root", "--insecure"},
},
{
desc: "with query",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFoo)
return nil
}
},
args: []string{"--query", `labels["env"]=="root"`, "--insecure"},
},
{
desc: "with labels, query, and prefix",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kubeFoo)
return nil
}
},
args: []string{
"--labels", "env=root",
"--query", `name == "foo"`,
"f", // prefix of "foo".
"--insecure",
},
},
{
desc: "in leaf cluster with prefix name",
makeValidateCmdFn: func(t *testing.T) func(*exec.Cmd) error {
return func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
checkKubeLocalProxyConfig(t, s, config, leafClusterName, kubeFooLeaf)
return nil
}
},
args: []string{
"--cluster", leafClusterName,
"--insecure",
"f", // prefix of "foo" kube cluster in leaf teleport cluster.
},
},
{
desc: "ambiguous name prefix is an error",
args: []string{
"f", // prefix of foo, foo-bar in root cluster.
"--insecure",
},
wantErr: `kubernetes cluster "f" matches multiple`,
},
{
desc: "zero name matches is an error",
args: []string{
"xxx",
"--insecure",
},
wantErr: `kubernetes cluster "xxx" not found`,
},
{
desc: "zero label matches is an error",
args: []string{
"--labels", "env=nonexistent",
"--insecure",
},
wantErr: `kubernetes cluster with labels "env=nonexistent" not found`,
},
{
desc: "zero query matches is an error",
args: []string{
"--query", `labels["env"]=="nonexistent"`,
"--insecure",
},
wantErr: `kubernetes cluster with query (labels["env"]=="nonexistent") not found`,
},
}
for _, test := range tests {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
// login for each parallel test to avoid races when multiple tsh
// clients work in the same profile dir.
tshHome, _ := mustLogin(t, s)
// Set kubeconfig to a nonexistent file to avoid loading other things.
kubeConfigPath := path.Join(tshHome, "kube-config")
var cmdRunner func(*exec.Cmd) error
if test.makeValidateCmdFn != nil {
cmdRunner = test.makeValidateCmdFn(t)
}
err := Run(ctx, append([]string{"proxy", "kube", "--port", ports.Pop()}, test.args...),
setCmdRunner(cmdRunner),
setHomePath(tshHome),
setKubeConfigPath(kubeConfigPath),
)
if test.wantErr != "" {
require.ErrorContains(t, err, test.wantErr)
return
}
require.NoError(t, err)
})
}
}
func kubeConfigFromCmdEnv(t *testing.T, cmd *exec.Cmd) *clientcmdapi.Config {
t.Helper()


@ -20,6 +20,9 @@ import (
"bytes"
"context"
"fmt"
"os/exec"
"os/user"
"path"
"path/filepath"
"reflect"
"strings"
@ -28,6 +31,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/gravitational/trace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
@ -36,6 +40,7 @@ import (
"github.com/gravitational/teleport/api/profile"
"github.com/gravitational/teleport/api/types"
apiutils "github.com/gravitational/teleport/api/utils"
"github.com/gravitational/teleport/api/utils/keypaths"
"github.com/gravitational/teleport/lib"
"github.com/gravitational/teleport/lib/asciitable"
@ -43,6 +48,7 @@ import (
kubeserver "github.com/gravitational/teleport/lib/kube/proxy/testing/kube_server"
"github.com/gravitational/teleport/lib/modules"
"github.com/gravitational/teleport/lib/service/servicecfg"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/common"
"github.com/gravitational/teleport/tool/teleport/testenv"
@ -253,7 +259,8 @@ func (p *kubeTestPack) testListKube(t *testing.T) {
}
}
- func TestKubeLogin(t *testing.T) {
+ // Tests `tsh kube login`, `tsh proxy kube`.
+ func TestKubeSelection(t *testing.T) {
modules.SetTestModules(t,
&modules.TestModules{
TestBuildType: modules.BuildEnterprise,
@ -264,198 +271,21 @@ func TestKubeLogin(t *testing.T) {
)
testenv.WithInsecureDevMode(t, true)
testenv.WithResyncInterval(t, 0)
t.Run("complex filters", testKubeLoginWithFilters)
t.Run("access request", testKubeLoginAccessRequest)
}
func testKubeLoginWithFilters(t *testing.T) {
t.Parallel()
ctx := context.Background()
kubeFoo := "foo"
kubeFooBar := "foo-bar"
kubeBaz := "baz"
staticLabels := map[string]string{
"env": "root",
}
allKubes := []string{kubeFoo, kubeFooBar, kubeBaz}
s := newTestSuite(t,
withRootConfigFunc(func(cfg *servicecfg.Config) {
cfg.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
cfg.Kube.Enabled = true
cfg.Kube.ListenAddr = utils.MustParseAddr(localListenerAddr())
cfg.Kube.KubeconfigPath = newKubeConfigFile(t, allKubes...)
cfg.Kube.StaticLabels = staticLabels
}),
withValidationFunc(func(s *suite) bool {
rootClusters, err := s.root.GetAuthServer().GetKubernetesServers(ctx)
require.NoError(t, err)
return len(rootClusters) == 3
}),
)
tests := []struct {
desc string
args []string
wantLoggedIn []string
wantSelected string
wantErrContains string
}{
{
desc: "login with exact name and set current context",
args: []string{"foo"},
wantLoggedIn: []string{"foo"},
wantSelected: "foo",
},
{
desc: "login with prefix name and set current context",
args: []string{"foo-b"},
wantLoggedIn: []string{"foo-bar"},
wantSelected: "foo-bar",
},
{
desc: "login with all",
args: []string{"--all"},
wantLoggedIn: []string{"foo", "foo-bar", "baz"},
wantSelected: "",
},
{
desc: "login with labels",
args: []string{"--labels", "env=root"},
wantLoggedIn: []string{"foo", "foo-bar", "baz"},
wantSelected: "",
},
{
desc: "login with query",
args: []string{"--query", `name == "foo"`},
wantLoggedIn: []string{"foo"},
wantSelected: "",
},
{
desc: "login to multiple with all and set current context by name",
args: []string{"foo", "--all"},
wantLoggedIn: []string{"foo", "foo-bar", "baz"},
wantSelected: "foo",
},
{
desc: "login to multiple with labels and set current context by name",
args: []string{"foo", "--labels", "env=root"},
wantLoggedIn: []string{"foo", "foo-bar", "baz"},
wantSelected: "foo",
},
{
desc: "login to multiple with query and set current context by prefix name",
args: []string{"foo-b", "--query", `name == "foo-bar" || name == "foo"`},
wantLoggedIn: []string{"foo", "foo-bar"},
wantSelected: "foo-bar",
},
{
desc: "all with labels is an error",
args: []string{"xxx", "--all", "--labels", `env=root`},
wantErrContains: "cannot use",
},
{
desc: "all with query is an error",
args: []string{"xxx", "--all", "--query", `name == "foo-bar" || name == "foo"`},
wantErrContains: "cannot use",
},
{
desc: "missing required args is an error",
args: []string{},
wantErrContains: "required",
},
}
tshHome, _ := mustLogin(t, s)
webProxyAddr, err := utils.ParseAddr(s.root.Config.Proxy.WebAddr.String())
// Create a role that allows the user to request access to a restricted
// cluster but not to access it directly.
user, err := user.Current()
require.NoError(t, err)
// profile kube config path depends on web proxy host
webProxyHost := webProxyAddr.Host()
for _, test := range tests {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
// clone the login dir for each parallel test to avoid profile kube config file races.
tshHome := mustCloneTempDir(t, tshHome)
kubeConfigPath := filepath.Join(t.TempDir(), "kubeconfig")
err := Run(
context.Background(),
append([]string{
"--insecure",
"kube",
"login",
},
test.args...,
),
setHomePath(tshHome),
// set a custom empty kube config for each test, as we do
// not want parallel (or even shuffled sequential) tests
// potentially racing on the same config
setKubeConfigPath(kubeConfigPath),
)
if test.wantErrContains != "" {
require.ErrorContains(t, err, test.wantErrContains)
return
}
require.NoError(t, err)
config, err := kubeconfig.Load(kubeConfigPath)
require.NoError(t, err)
if test.wantSelected == "" {
require.Empty(t, config.CurrentContext)
} else {
require.Equal(t, kubeconfig.ContextName("root", test.wantSelected), config.CurrentContext)
}
for _, name := range allKubes {
contextName := kubeconfig.ContextName("root", name)
if !slices.Contains(test.wantLoggedIn, name) {
require.NotContains(t, config.AuthInfos, contextName, "unexpected kube cluster %v in config update", name)
return
}
require.Contains(t, config.AuthInfos, contextName, "kube cluster %v not in config update", name)
authInfo := config.AuthInfos[contextName]
require.NotNil(t, authInfo)
require.Contains(t, authInfo.Exec.Args, fmt.Sprintf("--kube-cluster=%v", name))
}
// ensure the profile config only contains one kube cluster
profileKubeConfigPath := keypaths.KubeConfigPath(
profile.FullProfilePath(tshHome),
webProxyHost,
s.user.GetName(),
s.root.Config.Auth.ClusterName.GetClusterName(),
test.wantSelected,
)
profileConfig, err := kubeconfig.Load(profileKubeConfigPath)
require.NoError(t, err)
for _, name := range allKubes {
contextName := kubeconfig.ContextName("root", name)
if name != test.wantSelected {
require.NotContains(t, profileConfig.AuthInfos, contextName, "unexpected kube cluster %v in profile config update", name)
return
}
require.Contains(t, profileConfig.AuthInfos, contextName, "kube cluster %v not in profile config update", name)
authInfo := profileConfig.AuthInfos[contextName]
require.NotNil(t, authInfo)
require.Contains(t, authInfo.Exec.Args, fmt.Sprintf("--kube-cluster=%v", name))
}
})
}
}
func testKubeLoginAccessRequest(t *testing.T) {
t.Parallel()
const (
roleName = "requester"
kubeCluster = "root-cluster"
)
// Create a role that allows the user to request access to the cluster but
// not to access it directly.
const roleName = "restricted"
role, err := types.NewRole(
roleName,
types.RoleSpecV6{
Allow: types.RoleConditions{
KubeGroups: []string{user.Username},
KubernetesLabels: types.Labels{
"env": []string{"dev", "prod"},
},
Request: &types.AccessRequestConditions{
SearchAsRoles: []string{"access"},
},
@ -463,88 +293,392 @@ func testKubeLoginAccessRequest(t *testing.T) {
},
)
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
s := newTestSuite(t,
withRootConfigFunc(func(cfg *servicecfg.Config) {
cfg.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
// reconfigure the user to use the new role instead of the default ones
// User is the second bootstrap resource.
user, ok := cfg.Auth.BootstrapResources[1].(types.User)
require.True(t, ok)
user.SetRoles([]string{roleName})
// Add the role to the list of bootstrap resources.
cfg.Auth.BootstrapResources = append(cfg.Auth.BootstrapResources, role)
// Enable kube and set the kubeconfig path.
cfg.Auth.NetworkingConfig.SetProxyListenerMode(types.ProxyListenerMode_Multiplex)
cfg.SSH.Enabled = false
cfg.Kube.Enabled = true
cfg.Kube.ListenAddr = utils.MustParseAddr(localListenerAddr())
cfg.Kube.KubeconfigPath = newKubeConfigFile(t, kubeCluster)
}),
withValidationFunc(func(s *suite) bool {
// Check if the kube cluster was added.
rootClusters, err := s.root.GetAuthServer().GetKubernetesServers(context.Background())
require.NoError(t, err)
return len(rootClusters) == 1
cfg.Kube.ResourceMatchers = []services.ResourceMatcher{{
Labels: map[string]apiutils.Strings{"*": {"*"}},
}}
}),
)
// login as the user.
tshHome, kubeConfig := mustLogin(t, s)
kubeBarEKS := "bar-eks-us-west-1-123456789012"
kubeBazEKS1 := "baz-eks-us-west-1-123456789012"
kubeBazEKS2 := "baz-eks-us-west-2-123456789012"
kubeRootEKS := "root-cluster-eks-us-east-1-123456789012"
mustRegisterKubeClusters(t, ctx, s.root.GetAuthServer(),
mustMakeDynamicKubeCluster(t, kubeBarEKS, "bar", map[string]string{types.DiscoveryLabelRegion: "us-west-1", "env": "dev"}),
mustMakeDynamicKubeCluster(t, kubeBazEKS1, "baz", map[string]string{types.DiscoveryLabelRegion: "us-west-1", "env": "prod"}),
mustMakeDynamicKubeCluster(t, kubeBazEKS2, "baz", map[string]string{types.DiscoveryLabelRegion: "us-west-2", "env": "prod"}),
mustMakeDynamicKubeCluster(t, kubeRootEKS, "root-cluster", map[string]string{types.DiscoveryLabelRegion: "us-east-2", "env": "restricted"}),
)
allKubes := []string{kubeBarEKS, kubeBazEKS1, kubeBazEKS2}
// Run the login command in a goroutine so we can check if the access
// request was created and approved.
// The goroutine will exit when the access request is approved.
wg := &errgroup.Group{}
wg.Go(func() error {
err := Run(
context.Background(),
[]string{
"--insecure",
"kube",
"login",
// use a prefix of the kube cluster name
"root-c",
"--request-reason",
"test",
rootClusterName := s.root.Config.Auth.ClusterName.GetClusterName()
tests := []struct {
desc string
wantLoginCurrentContext string
wantLoggedIn []string
wantProxied []string
args []string
// indicate if a test case is only for one of the `tsh kube login`
// or `tsh proxy kube` test runners.
// A lot of the test cases can be shared to test `tsh kube login` and
// `tsh proxy kube`, but some are specific.
loginTestOnly bool
proxyTestOnly bool
wantErr string
}{
{
desc: "with full name",
wantLoginCurrentContext: kubeBazEKS1,
wantLoggedIn: []string{kubeBazEKS1},
wantProxied: []string{kubeBazEKS1},
args: []string{kubeBazEKS1},
},
{
desc: "with discovered name",
wantLoginCurrentContext: kubeBarEKS,
wantLoggedIn: []string{kubeBarEKS},
wantProxied: []string{kubeBarEKS},
args: []string{"bar"},
},
{
desc: "with labels",
wantLoggedIn: []string{kubeBazEKS1, kubeBazEKS2},
wantProxied: []string{kubeBazEKS1, kubeBazEKS2},
args: []string{"--labels", "env=prod"},
},
{
desc: "with query",
wantLoggedIn: []string{kubeBazEKS1},
wantProxied: []string{kubeBazEKS1},
args: []string{"--query", `labels["env"]=="prod" && labels["region"] == "us-west-1"`},
},
{
desc: "with labels and discovered name",
// both these match the labels, only one of them matches the discovered name to select the context.
wantLoginCurrentContext: kubeBazEKS1,
wantLoggedIn: []string{kubeBarEKS, kubeBazEKS1},
wantProxied: []string{kubeBazEKS1},
args: []string{
"--labels", "region=us-west-1",
"baz",
},
setHomePath(tshHome),
setKubeConfigPath(kubeConfig),
)
return trace.Wrap(err)
})
// Wait for the access request to be created and finally approve it.
var accessRequestID string
require.Eventually(t, func() bool {
accessRequests, err := s.root.GetAuthServer().
GetAccessRequests(
context.Background(),
types.AccessRequestFilter{State: types.RequestState_PENDING},
)
if err != nil || len(accessRequests) != 1 {
return false
},
{
desc: "with query and discovered name",
wantLoginCurrentContext: kubeBazEKS2,
wantLoggedIn: []string{kubeBazEKS2},
wantProxied: []string{kubeBazEKS2},
args: []string{
"--query", `labels["region"] == "us-west-2"`,
"baz",
},
},
{
desc: "ambiguous discovered name is an error",
args: []string{
"baz",
},
wantErr: `kubernetes cluster "baz" matches multiple`,
},
{
desc: "zero name matches is an error",
args: []string{
"xxx",
},
wantErr: `kubernetes cluster "xxx" not found`,
},
{
desc: "zero label matches is an error",
args: []string{
"--labels", "env=nonexistent",
},
wantErr: `kubernetes cluster with labels "env=nonexistent" not found`,
},
{
desc: "zero query matches is an error",
args: []string{
"--query", `labels["env"]=="nonexistent"`,
},
wantErr: `kubernetes cluster with query (labels["env"]=="nonexistent") not found`,
},
// cases specific to `tsh kube login` testing
{
desc: "login to all and set current context by full name",
args: []string{kubeBazEKS1, "--all"},
wantLoginCurrentContext: kubeBazEKS1,
wantLoggedIn: allKubes,
loginTestOnly: true,
},
{
desc: "login to all and set current context by discovered name",
args: []string{kubeBarEKS, "--all"},
wantLoginCurrentContext: kubeBarEKS,
wantLoggedIn: allKubes,
loginTestOnly: true,
},
{
desc: "login to all and set current context by ambiguous discovered name is an error",
args: []string{"baz", "--all"},
loginTestOnly: true,
wantErr: `kubernetes cluster "baz" matches multiple`,
},
{
desc: "login with all",
args: []string{"--all"},
wantLoggedIn: allKubes,
loginTestOnly: true,
},
{
desc: "all with labels is an error",
args: []string{"xxx", "--all", "--labels", `env=root`},
loginTestOnly: true,
wantErr: "cannot use",
},
{
desc: "all with query is an error",
args: []string{"xxx", "--all", "--query", `name == "foo-bar" || name == "foo"`},
loginTestOnly: true,
wantErr: "cannot use",
},
{
desc: "missing required args is an error",
args: []string{},
loginTestOnly: true,
wantErr: "required",
},
// cases specific to `tsh proxy kube` testing
{
desc: "proxy multiple",
wantProxied: []string{kubeBazEKS1, kubeBazEKS2, kubeBarEKS},
args: []string{kubeBazEKS1, kubeBazEKS2, kubeBarEKS},
proxyTestOnly: true,
},
{
desc: "proxy multiple with one ambiguous discovered name",
args: []string{kubeBarEKS, "baz"},
wantErr: "matches multiple",
proxyTestOnly: true,
},
{
desc: "proxy multiple with query resolving ambiguity",
wantProxied: []string{kubeBarEKS, kubeBazEKS2},
args: []string{kubeBarEKS, "baz", "--query", `labels.region == "us-west-2" || labels.env == "dev"`},
proxyTestOnly: true,
},
}
t.Run("proxy", func(t *testing.T) {
t.Parallel()
for _, test := range tests {
if test.loginTestOnly {
// skip test cases specific to `tsh kube login`.
continue
}
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
// login for each parallel test to avoid races when multiple tsh
// clients work in the same profile dir.
tshHome, _ := mustLogin(t, s)
// Set kubeconfig to a nonexistent file to avoid loading other things.
kubeConfigPath := path.Join(tshHome, "kube-config")
var cmdRunner func(*exec.Cmd) error
if len(test.wantProxied) > 0 {
cmdRunner = func(cmd *exec.Cmd) error {
config := kubeConfigFromCmdEnv(t, cmd)
for _, kube := range test.wantProxied {
checkKubeLocalProxyConfig(t, s, config, rootClusterName, kube)
}
return nil
}
}
err := Run(ctx, append([]string{"proxy", "kube", "--insecure", "--port", ports.Pop()}, test.args...),
setCmdRunner(cmdRunner),
setHomePath(tshHome),
setKubeConfigPath(kubeConfigPath),
)
if test.wantErr != "" {
require.ErrorContains(t, err, test.wantErr)
return
}
require.NoError(t, err)
})
}
equal := reflect.DeepEqual(
accessRequests[0].GetRequestedResourceIDs(),
[]types.ResourceID{
{
ClusterName: s.root.Config.Auth.ClusterName.GetClusterName(),
Kind: types.KindKubernetesCluster,
Name: kubeCluster,
},
},
)
accessRequestID = accessRequests[0].GetName()
return equal
}, 10*time.Second, 500*time.Millisecond)
// Approve the access request to release the login command lock.
err = s.root.GetAuthServer().SetAccessRequestState(context.Background(), types.AccessRequestUpdate{
RequestID: accessRequestID,
State: types.RequestState_APPROVED,
})
require.NoError(t, err)
// Wait for the login command to exit after the request is approved
require.NoError(t, wg.Wait())
t.Run("login", func(t *testing.T) {
t.Parallel()
webProxyAddr, err := utils.ParseAddr(s.root.Config.Proxy.WebAddr.String())
require.NoError(t, err)
// profile kube config path depends on web proxy host
webProxyHost := webProxyAddr.Host()
for _, test := range tests {
if test.proxyTestOnly {
continue
}
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
tshHome, kubeConfigPath := mustLogin(t, s)
err := Run(
context.Background(),
append([]string{"kube", "login", "--insecure"},
test.args...,
),
setHomePath(tshHome),
// set a custom empty kube config for each test, as we do
// not want parallel (or even shuffled sequential) tests
// potentially racing on the same config
setKubeConfigPath(kubeConfigPath),
)
if test.wantErr != "" {
require.ErrorContains(t, err, test.wantErr)
return
}
require.NoError(t, err)
// load the global kube config.
config, err := kubeconfig.Load(kubeConfigPath)
require.NoError(t, err)
// check that the kube config context is set to what we expect.
if test.wantLoginCurrentContext == "" {
require.Empty(t, config.CurrentContext)
} else {
require.Equal(t,
kubeconfig.ContextName("root", test.wantLoginCurrentContext),
config.CurrentContext,
)
}
// check which kube clusters were added to the global kubeconfig.
for _, name := range allKubes {
contextName := kubeconfig.ContextName("root", name)
if !slices.Contains(test.wantLoggedIn, name) {
require.NotContains(t, config.AuthInfos, contextName, "unexpected kube cluster %v in config update", name)
continue
}
require.Contains(t, config.AuthInfos, contextName, "kube cluster %v not in config update", name)
authInfo := config.AuthInfos[contextName]
require.NotNil(t, authInfo)
require.Contains(t, authInfo.Exec.Args, fmt.Sprintf("--kube-cluster=%v", name))
}
// ensure the profile config only contains one kube cluster.
profileKubeConfigPath := keypaths.KubeConfigPath(
profile.FullProfilePath(tshHome),
webProxyHost,
s.user.GetName(),
s.root.Config.Auth.ClusterName.GetClusterName(),
test.wantLoginCurrentContext,
)
// load the profile kube config
profileConfig, err := kubeconfig.Load(profileKubeConfigPath)
require.NoError(t, err)
// check that the kube config context is set to what we expect.
if test.wantLoginCurrentContext == "" {
require.Empty(t, profileConfig.CurrentContext)
} else {
require.Equal(t,
kubeconfig.ContextName("root", test.wantLoginCurrentContext),
profileConfig.CurrentContext,
)
}
for _, name := range allKubes {
contextName := kubeconfig.ContextName("root", name)
if name != test.wantLoginCurrentContext {
require.NotContains(t, profileConfig.AuthInfos, contextName, "unexpected kube cluster %v in profile config update", name)
continue
}
require.Contains(t, profileConfig.AuthInfos, contextName, "kube cluster %v not in profile config update", name)
authInfo := profileConfig.AuthInfos[contextName]
require.NotNil(t, authInfo)
require.Contains(t, authInfo.Exec.Args, fmt.Sprintf("--kube-cluster=%v", name))
}
})
}
})
t.Run("access request", func(t *testing.T) {
t.Parallel()
// login as the user.
tshHome, kubeConfig := mustLogin(t, s)
// Run the login command in a goroutine so we can check if the access
// request was created and approved.
// The goroutine will exit when the access request is approved.
wg := &errgroup.Group{}
wg.Go(func() error {
err := Run(
context.Background(),
[]string{
"--insecure",
"kube",
"login",
// by discovered name
"root-cluster",
"--request-reason",
"test",
},
setHomePath(tshHome),
setKubeConfigPath(kubeConfig),
)
// assert that there is no error so we get a more useful error message if the
// access request is never created; use assert instead of require because this
// runs in a goroutine.
assert.NoError(t, err)
return trace.Wrap(err)
})
// Wait for the access request to be created and finally approve it.
var accessRequestID string
require.Eventually(t, func() bool {
accessRequests, err := s.root.GetAuthServer().
GetAccessRequests(
context.Background(),
types.AccessRequestFilter{State: types.RequestState_PENDING},
)
if err != nil || len(accessRequests) != 1 {
return false
}
equal := reflect.DeepEqual(
accessRequests[0].GetRequestedResourceIDs(),
[]types.ResourceID{
{
ClusterName: s.root.Config.Auth.ClusterName.GetClusterName(),
Kind: types.KindKubernetesCluster,
Name: kubeRootEKS,
},
},
)
accessRequestID = accessRequests[0].GetName()
return equal
}, 10*time.Second, 500*time.Millisecond, "waiting for access request to be created")
// Approve the access request to release the login command lock.
err := s.root.GetAuthServer().SetAccessRequestState(context.Background(), types.AccessRequestUpdate{
RequestID: accessRequestID,
State: types.RequestState_APPROVED,
})
require.NoError(t, err)
// Wait for the login command to exit after the request is approved
require.NoError(t, wg.Wait())
})
}
func newKubeConfigFile(t *testing.T, clusterNames ...string) string {


@ -28,12 +28,15 @@ import (
"time"
"github.com/gravitational/trace"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/gravitational/teleport/api/breaker"
apiclient "github.com/gravitational/teleport/api/client"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/integration/helpers"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/cloud"
"github.com/gravitational/teleport/lib/config"
"github.com/gravitational/teleport/lib/service"
@ -413,3 +416,59 @@ func mustCloneTempDir(t *testing.T, srcDir string) string {
require.NoError(t, err)
return dstDir
}
func mustMakeDynamicKubeCluster(t *testing.T, name, discoveredName string, labels map[string]string) types.KubeCluster {
t.Helper()
if labels == nil {
labels = make(map[string]string)
}
labels[types.OriginLabel] = types.OriginDynamic
if discoveredName != "" {
// setup a fake "discovered" kube cluster by adding a discovered name label
labels[types.DiscoveredNameLabel] = discoveredName
labels[types.OriginLabel] = types.OriginCloud
}
kc, err := types.NewKubernetesClusterV3(
types.Metadata{
Name: name,
Labels: labels,
},
types.KubernetesClusterSpecV3{
Kubeconfig: newKubeConfig(t, name),
},
)
require.NoError(t, err)
return kc
}
func mustRegisterKubeClusters(t *testing.T, ctx context.Context, authSrv *auth.Server, clusters ...types.KubeCluster) {
t.Helper()
if len(clusters) == 0 {
return
}
wg, _ := errgroup.WithContext(ctx)
wantNames := make([]string, 0, len(clusters))
for _, kc := range clusters {
kc := kc
wg.Go(func() error {
err := authSrv.CreateKubernetesCluster(ctx, kc)
return trace.Wrap(err)
})
wantNames = append(wantNames, kc.GetName())
}
require.NoError(t, wg.Wait())
require.EventuallyWithT(t, func(c *assert.CollectT) {
servers, err := authSrv.GetKubernetesServers(ctx)
assert.NoError(c, err)
gotNames := map[string]struct{}{}
for _, ks := range servers {
gotNames[ks.GetName()] = struct{}{}
}
for _, name := range wantNames {
assert.Contains(c, gotNames, name, "missing kube cluster")
}
}, time.Second*10, time.Millisecond*500, "dynamically created kube clusters failed to register")
}