2018-06-10 00:21:14 +00:00
|
|
|
/*
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for the kubernetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there would be no way to exclude the user from the list.
* If the `kubernetes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
Copyright 2016-2020 Gravitational, Inc.
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package integration
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"crypto/tls"
|
|
|
|
"crypto/x509"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"os/user"
|
|
|
|
"strconv"
|
2023-05-22 20:48:07 +00:00
|
|
|
"sync"
|
2021-05-27 02:05:46 +00:00
|
|
|
"testing"
|
2018-06-10 00:21:14 +00:00
|
|
|
"time"
|
|
|
|
|
2022-10-28 20:20:28 +00:00
|
|
|
"github.com/gravitational/trace"
|
|
|
|
log "github.com/sirupsen/logrus"
|
2023-05-12 16:38:16 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
2022-10-28 20:20:28 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"golang.org/x/net/http2"
|
2023-05-22 20:48:07 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
2022-10-28 20:20:28 +00:00
|
|
|
v1 "k8s.io/api/core/v1"
|
|
|
|
"k8s.io/apimachinery/pkg/api/errors"
|
|
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
streamspdy "k8s.io/apimachinery/pkg/util/httpstream/spdy"
|
|
|
|
"k8s.io/client-go/kubernetes"
|
|
|
|
"k8s.io/client-go/rest"
|
|
|
|
"k8s.io/client-go/tools/portforward"
|
|
|
|
"k8s.io/client-go/tools/remotecommand"
|
|
|
|
"k8s.io/client-go/transport"
|
|
|
|
"k8s.io/client-go/transport/spdy"
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport"
|
2022-06-03 15:55:56 +00:00
|
|
|
"github.com/gravitational/teleport/api/breaker"
|
2023-05-15 15:11:02 +00:00
|
|
|
"github.com/gravitational/teleport/api/constants"
|
2022-10-28 20:20:28 +00:00
|
|
|
apidefaults "github.com/gravitational/teleport/api/defaults"
|
2021-07-30 22:34:19 +00:00
|
|
|
"github.com/gravitational/teleport/api/profile"
|
|
|
|
"github.com/gravitational/teleport/api/types"
|
2022-06-15 07:07:26 +00:00
|
|
|
"github.com/gravitational/teleport/integration/helpers"
|
2022-09-08 14:27:51 +00:00
|
|
|
"github.com/gravitational/teleport/integration/kube"
|
2018-06-18 00:53:02 +00:00
|
|
|
"github.com/gravitational/teleport/lib"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/auth/testauthority"
|
2022-04-01 17:48:40 +00:00
|
|
|
"github.com/gravitational/teleport/lib/client"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/events"
|
2022-10-28 20:20:28 +00:00
|
|
|
kubeutils "github.com/gravitational/teleport/lib/kube/utils"
|
2023-05-19 13:55:52 +00:00
|
|
|
"github.com/gravitational/teleport/lib/service"
|
2023-03-09 17:48:36 +00:00
|
|
|
"github.com/gravitational/teleport/lib/service/servicecfg"
|
2022-10-03 14:14:01 +00:00
|
|
|
"github.com/gravitational/teleport/lib/services"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/session"
|
2020-04-30 00:27:47 +00:00
|
|
|
"github.com/gravitational/teleport/lib/tlsca"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/utils"
|
|
|
|
)
|
|
|
|
|
|
|
|
type KubeSuite struct {
|
|
|
|
*kubernetes.Clientset
|
2018-09-26 00:11:51 +00:00
|
|
|
|
2021-04-29 16:39:43 +00:00
|
|
|
me *user.User
|
2018-06-10 00:21:14 +00:00
|
|
|
// priv/pub pair to avoid re-generating it
|
|
|
|
priv []byte
|
|
|
|
pub []byte
|
|
|
|
|
2018-09-26 00:11:51 +00:00
|
|
|
// kubeconfigPath is a path to valid kubeconfig
|
|
|
|
kubeConfigPath string
|
|
|
|
|
|
|
|
// kubeConfig is a kubernetes config struct
|
|
|
|
kubeConfig *rest.Config
|
2020-12-07 14:35:15 +00:00
|
|
|
|
|
|
|
// log defines the test-specific logger
|
|
|
|
log utils.Logger
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func newKubeSuite(t *testing.T) *KubeSuite {
|
2020-12-07 14:35:15 +00:00
|
|
|
testEnabled := os.Getenv(teleport.KubeRunTests)
|
|
|
|
if ok, _ := strconv.ParseBool(testEnabled); !ok {
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Skip("Skipping Kubernetes test suite.")
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
suite := &KubeSuite{
|
|
|
|
kubeConfigPath: os.Getenv(teleport.EnvKubeConfig),
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NotEmpty(t, suite.kubeConfigPath, "This test requires path to valid kubeconfig.")
|
2018-06-10 00:21:14 +00:00
|
|
|
var err error
|
2022-04-25 09:26:10 +00:00
|
|
|
suite.priv, suite.pub, err = testauthority.New().GenerateKeyPair()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
suite.me, err = user.Current()
|
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// close & re-open stdin because 'go test' runs with os.stdin connected to /dev/null
|
|
|
|
stdin, err := os.Open("/dev/tty")
|
2020-12-07 14:35:15 +00:00
|
|
|
if err == nil {
|
2018-06-10 00:21:14 +00:00
|
|
|
os.Stdin.Close()
|
|
|
|
os.Stdin = stdin
|
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Cleanup(func() {
|
|
|
|
var err error
|
|
|
|
// restore os.Stdin to its original condition: connected to /dev/null
|
|
|
|
os.Stdin.Close()
|
|
|
|
os.Stdin, err = os.Open("/dev/null")
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
suite.Clientset, suite.kubeConfig, err = kubeutils.GetKubeClient(suite.kubeConfigPath)
|
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// Create test namespace and pod to run k8s commands against.
|
2018-06-10 00:21:14 +00:00
|
|
|
ns := newNamespace(testNamespace)
|
2021-05-27 02:05:46 +00:00
|
|
|
_, err = suite.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
2018-06-10 00:21:14 +00:00
|
|
|
if err != nil {
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, errors.IsAlreadyExists(err), "Failed to create namespace: %v:", err)
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
2020-10-07 20:58:07 +00:00
|
|
|
p := newPod(testNamespace, testPod)
|
2021-05-27 02:05:46 +00:00
|
|
|
_, err = suite.CoreV1().Pods(testNamespace).Create(context.Background(), p, metav1.CreateOptions{})
|
2020-10-07 20:58:07 +00:00
|
|
|
if err != nil {
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, errors.IsAlreadyExists(err), "Failed to create test pod: %v", err)
|
2020-10-07 20:58:07 +00:00
|
|
|
}
|
2023-05-12 16:38:16 +00:00
|
|
|
// Wait for pod to be running.
|
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
rsp, err := suite.CoreV1().Pods(testNamespace).Get(context.Background(), testPod, metav1.GetOptions{})
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return rsp.Status.Phase == v1.PodRunning
|
|
|
|
}, 60*time.Second, time.Millisecond*500)
|
2021-05-27 02:05:46 +00:00
|
|
|
return suite
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
type kubeIntegrationTest func(t *testing.T, suite *KubeSuite)
|
2020-10-07 20:58:07 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func (s *KubeSuite) bind(test kubeIntegrationTest) func(t *testing.T) {
|
|
|
|
return func(t *testing.T) {
|
Attempts to make CI integration test logs more useful (#9626)
Actually tracking down the cause of a failure in the integration tests can
be hard:
* It's hard to get an overall summary of what failed
* The tests sometimes emit no output before timing out, meaning any
diagnostic info is lost
* The emitted logs are too voluminous for a human to parse
* The emitted logs can present information out of order
* It's often hard to tell where the output from one test ends
and the next one begins
This patch attempts to address these concerns without attempting to rewrite
any of the underlying teleport logging.
* It improves the render-tests script to (optionally) report progress per-
test, rather than on a per-package basis. My working hypothesis on the
tests that time out with no output is that go test ./integration is
waiting for the entire set of integration tests tests to be complete
before reporting success or failure. Reporting on a per-test cycle gives
faster feedback and means that any timed-out builds should give at least
some idea of where they are stuck.
* Adds the render-tests filter to the integration and integration-root make
targets. This will show an overall summary of test results, as well as
- Discarding log output from passing tests to increase signal-to-noise
ratio, and
- Strongly delimiting the output from each failed test, making failures
easier to find.
* Removes the notion of a failure-only logger in favour of post-processing
the log events with render-tests. The failure-only logger catches log
output from the tests and only forwards it to the console if the test
fails. Unfortunately, not all log output is guaranteed to pass through
this logger (some teleport packages do not honour the configured logger,
and reports from the go race detector certainly don't), meaning some
output is presented at the time it happens, and other output is batched
and displayed at the end of the test. This makes working out what
happened where harder than it need be.
In addition, this patch also promotes the render-tests script into a fully-
fledged program, with appropriate makefile targets, make clean support, etc.
It is now also more robust in the face on non-JSON output from go test
(which happens if a package fails to compile).
2022-01-04 23:42:07 +00:00
|
|
|
s.log = utils.NewLoggerForTests()
|
2021-05-27 02:05:46 +00:00
|
|
|
os.RemoveAll(profile.FullProfilePath(""))
|
|
|
|
t.Cleanup(func() { s.log = nil })
|
|
|
|
test(t, s)
|
|
|
|
}
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func TestKube(t *testing.T) {
|
|
|
|
suite := newKubeSuite(t)
|
2022-01-28 21:24:22 +00:00
|
|
|
t.Run("Exec", suite.bind(testKubeExec))
|
|
|
|
t.Run("Deny", suite.bind(testKubeDeny))
|
|
|
|
t.Run("PortForward", suite.bind(testKubePortForward))
|
2022-01-14 20:03:40 +00:00
|
|
|
t.Run("TransportProtocol", suite.bind(testKubeTransportProtocol))
|
2022-01-28 21:24:22 +00:00
|
|
|
t.Run("TrustedClustersClientCert", suite.bind(testKubeTrustedClustersClientCert))
|
|
|
|
t.Run("TrustedClustersSNI", suite.bind(testKubeTrustedClustersSNI))
|
|
|
|
t.Run("Disconnect", suite.bind(testKubeDisconnect))
|
2022-02-15 16:02:10 +00:00
|
|
|
t.Run("Join", suite.bind(testKubeJoin))
|
2023-03-08 14:20:47 +00:00
|
|
|
t.Run("IPPinning", suite.bind(testIPPinning))
|
2023-05-19 13:55:52 +00:00
|
|
|
// ExecWithNoAuth tests that a user can get the pod and exec into it when
|
|
|
|
// moderated session is not enforced.
|
|
|
|
// Users under moderated session should only be able to get the pod and shouldn't
|
|
|
|
// be able to exec into a pod
|
|
|
|
t.Run("ExecWithNoAuth", suite.bind(testExecNoAuth))
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
|
|
|
|
2023-03-08 14:20:47 +00:00
|
|
|
func testExec(t *testing.T, suite *KubeSuite, pinnedIP string, clientError string) {
|
2021-05-27 02:05:46 +00:00
|
|
|
tconf := suite.teleKubeConfig(Host)
|
2020-12-07 14:35:15 +00:00
|
|
|
|
2022-07-20 02:04:54 +00:00
|
|
|
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
|
2022-06-15 07:07:26 +00:00
|
|
|
ClusterName: helpers.Site,
|
|
|
|
HostID: helpers.HostID,
|
2018-06-10 00:21:14 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2022-09-08 14:27:51 +00:00
|
|
|
kubeGroups := []string{kube.TestImpersonationGroup}
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeUsers := []string{"alice@example.com"}
|
2023-01-11 16:58:22 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-10 00:21:14 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: {types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2018-06-10 00:21:14 +00:00
|
|
|
},
|
2023-03-08 14:20:47 +00:00
|
|
|
Options: types.RoleOptions{
|
|
|
|
PinSourceIP: pinnedIP != "",
|
|
|
|
},
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
// impersonating client requests will be denied if the headers
|
|
|
|
// are referencing users or groups not allowed by the existing roles
|
2022-09-08 14:27:51 +00:00
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
2023-03-08 14:20:47 +00:00
|
|
|
PinnedIP: pinnedIP,
|
2022-09-08 14:27:51 +00:00
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
Impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{kube.TestImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2023-03-08 14:20:47 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// try get request to fetch a pod
|
2020-09-24 17:59:48 +00:00
|
|
|
ctx := context.Background()
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
// scoped client requests will be allowed, as long as the impersonation headers
|
|
|
|
// are referencing users and groups allowed by existing roles
|
2022-09-08 14:27:51 +00:00
|
|
|
scopedProxyClient, scopedProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
2023-03-08 14:20:47 +00:00
|
|
|
PinnedIP: pinnedIP,
|
2022-09-08 14:27:51 +00:00
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
Impersonation: &rest.ImpersonationConfig{
|
2021-10-05 21:04:18 +00:00
|
|
|
UserName: role.GetKubeUsers(types.Allow)[0],
|
|
|
|
Groups: role.GetKubeGroups(types.Allow),
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
|
|
|
})
|
2023-05-12 16:38:16 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = scopedProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2023-03-08 14:20:47 +00:00
|
|
|
if clientError != "" {
|
|
|
|
require.ErrorContains(t, err, clientError)
|
|
|
|
return
|
|
|
|
}
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// set up kube configuration using proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
|
|
|
KubeUsers: kubeUsers,
|
2023-05-12 16:38:16 +00:00
|
|
|
PinnedIP: pinnedIP,
|
2022-09-08 14:27:51 +00:00
|
|
|
KubeGroups: kubeGroups,
|
2020-04-30 00:27:47 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-09-26 00:11:51 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
2018-06-10 00:21:14 +00:00
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-26 00:11:51 +00:00
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, testNamespace, string(data))
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-06-10 00:21:14 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
2021-05-27 02:05:46 +00:00
|
|
|
case event := <-teleport.UploadEventsC:
|
2018-06-10 00:21:14 +00:00
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := teleport.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-03-11 03:25:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// scoped kube exec is allowed, impersonation headers
|
|
|
|
// are allowed by the role
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(scopedProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
}
|
|
|
|
|
2023-03-08 14:20:47 +00:00
|
|
|
// TestKubeExec tests kubernetes Exec command set
|
|
|
|
func testKubeExec(t *testing.T, suite *KubeSuite) {
|
|
|
|
testExec(t, suite, "", "")
|
|
|
|
}
|
|
|
|
|
|
|
|
func testIPPinning(t *testing.T, suite *KubeSuite) {
|
|
|
|
testCases := []struct {
|
|
|
|
desc string
|
|
|
|
pinnedIP string
|
|
|
|
wantError string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
desc: "pinned correct IP",
|
|
|
|
pinnedIP: "127.0.0.1",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "pinned incorrect IP",
|
|
|
|
pinnedIP: "127.0.0.2",
|
|
|
|
wantError: "pinned IP doesn't match observed client IP",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tt := range testCases {
|
|
|
|
t.Run(tt.desc, func(t *testing.T) {
|
|
|
|
testExec(t, suite, tt.pinnedIP, tt.wantError)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
// TestKubeDeny makes sure that deny rule conflicting with allow
|
2020-05-06 17:11:06 +00:00
|
|
|
// rule takes precedence
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeDeny(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2022-07-20 02:04:54 +00:00
|
|
|
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
|
2022-06-15 07:07:26 +00:00
|
|
|
ClusterName: helpers.Site,
|
|
|
|
HostID: helpers.HostID,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2022-09-08 14:27:51 +00:00
|
|
|
kubeGroups := []string{kube.TestImpersonationGroup}
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeUsers := []string{"alice@example.com"}
|
2023-01-11 16:58:22 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
2021-06-04 20:29:31 +00:00
|
|
|
Deny: types.RoleConditions{
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// set up kube configuration using proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
proxyClient, _, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
2020-04-30 00:27:47 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kubernetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* If email.local encounters an invalid email address,
it will interpolate to an empty value and be removed from the resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-09-24 17:59:48 +00:00
|
|
|
ctx := context.Background()
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestKubePortForward tests kubernetes port forwarding
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubePortForward(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
2020-12-07 14:35:15 +00:00
|
|
|
|
2022-07-20 02:04:54 +00:00
|
|
|
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
|
2022-06-15 07:07:26 +00:00
|
|
|
ClusterName: helpers.Site,
|
|
|
|
HostID: helpers.HostID,
|
2018-06-10 00:21:14 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2022-09-08 14:27:51 +00:00
|
|
|
kubeGroups := []string{kube.TestImpersonationGroup}
|
2023-01-11 16:58:22 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-10 00:21:14 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2018-06-10 00:21:14 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// set up kube configuration using proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
_, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: kubeGroups,
|
2020-04-30 00:27:47 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort := newPortValue()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
|
|
|
podName: testPod,
|
|
|
|
podNamespace: testNamespace,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
2023-05-12 16:38:16 +00:00
|
|
|
assert.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2018-06-10 00:21:14 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2018-06-10 00:21:14 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2022-09-08 14:27:51 +00:00
|
|
|
	// impersonating client requests will be denied
|
|
|
|
_, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
Impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{kube.TestImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort = newPortValue()
|
2019-03-11 03:25:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
|
|
|
podName: testPod,
|
|
|
|
podNamespace: testNamespace,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2019-07-16 00:40:43 +00:00
|
|
|
// TestKubeTrustedClustersClientCert tests scenario with trusted clusters
|
|
|
|
// using metadata encoded in the certificate
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
|
2020-06-15 21:24:34 +00:00
|
|
|
ctx := context.Background()
|
2019-07-16 00:40:43 +00:00
|
|
|
clusterMain := "cluster-main"
|
2021-05-27 02:05:46 +00:00
|
|
|
mainConf := suite.teleKubeConfig(Host)
|
2020-06-08 22:20:50 +00:00
|
|
|
// Main cluster doesn't need a kubeconfig to forward requests to auxiliary
|
|
|
|
// cluster.
|
|
|
|
mainConf.Proxy.Kube.KubeconfigPath = ""
|
2022-07-20 02:04:54 +00:00
|
|
|
main := helpers.NewInstance(t, helpers.InstanceConfig{
|
2019-07-16 00:40:43 +00:00
|
|
|
ClusterName: clusterMain,
|
2022-06-15 07:07:26 +00:00
|
|
|
HostID: helpers.HostID,
|
2019-07-16 00:40:43 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// main cluster has a role and user called main-kube
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2022-09-08 14:27:51 +00:00
|
|
|
mainKubeGroups := []string{kube.TestImpersonationGroup}
|
2023-01-11 16:58:22 +00:00
|
|
|
mainRole, err := types.NewRole("main-kube", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-07-16 00:40:43 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: mainKubeGroups,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2019-07-16 00:40:43 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
main.AddUserWithRole(username, mainRole)
|
|
|
|
|
|
|
|
clusterAux := "cluster-aux"
|
2021-05-27 02:05:46 +00:00
|
|
|
auxConf := suite.teleKubeConfig(Host)
|
2022-07-20 02:04:54 +00:00
|
|
|
aux := helpers.NewInstance(t, helpers.InstanceConfig{
|
2019-07-16 00:40:43 +00:00
|
|
|
ClusterName: clusterAux,
|
2022-06-15 07:07:26 +00:00
|
|
|
HostID: helpers.HostID,
|
2019-07-16 00:40:43 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
lib.SetInsecureDevMode(true)
|
|
|
|
defer lib.SetInsecureDevMode(false)
|
|
|
|
|
|
|
|
mainConf.Proxy.Kube.Enabled = true
|
2021-06-18 19:57:29 +00:00
|
|
|
err = main.CreateEx(t, nil, mainConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = aux.CreateEx(t, nil, auxConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// auxiliary cluster has a role aux-kube
|
|
|
|
// connect aux cluster to main cluster
|
|
|
|
// using trusted clusters, so remote user will be allowed to assume
|
|
|
|
// role specified by mapping remote role "aux-kube" to local role "main-kube"
|
2023-05-12 16:38:16 +00:00
|
|
|
auxKubeGroups := []string{kube.TestImpersonationGroup}
|
2023-01-11 16:58:22 +00:00
|
|
|
auxRole, err := types.NewRole("aux-kube", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-07-16 00:40:43 +00:00
|
|
|
Logins: []string{username},
|
|
|
|
// Note that main cluster can pass it's kubernetes groups
|
|
|
|
// to the remote cluster, and remote cluster
|
|
|
|
// can choose to use them by using special variable
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: auxKubeGroups,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2019-07-16 00:40:43 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2023-10-18 17:06:50 +00:00
|
|
|
auxRole, err = aux.Process.GetAuthServer().UpsertRole(ctx, auxRole)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
trustedClusterToken := "trusted-clsuter-token"
|
2021-03-24 01:26:52 +00:00
|
|
|
err = main.Process.GetAuthServer().UpsertToken(ctx,
|
2022-10-03 14:14:01 +00:00
|
|
|
services.MustCreateProvisionToken(trustedClusterToken, []types.SystemRole{types.RoleTrustedCluster}, time.Time{}))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2021-09-13 09:54:49 +00:00
|
|
|
trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
|
2019-07-16 00:40:43 +00:00
|
|
|
{Remote: mainRole.GetName(), Local: []string{auxRole.GetName()}},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// start both clusters
|
|
|
|
err = main.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer main.StopAll()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
err = aux.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer aux.StopAll()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try and upsert a trusted cluster
|
|
|
|
var upsertSuccess bool
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
|
2020-06-15 21:24:34 +00:00
|
|
|
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(ctx, trustedCluster)
|
2019-07-16 00:40:43 +00:00
|
|
|
if err != nil {
|
|
|
|
if trace.IsConnectionProblem(err) {
|
|
|
|
log.Debugf("retrying on connection problem: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("got non connection problem %v", err)
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
upsertSuccess = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// make sure we upsert a trusted cluster
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, upsertSuccess)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
Removed `TestProxyReverseTunnel`.
`TestProxyReverseTunnel` has been consistently failing with the following errors.
--- FAIL: TestProxyReverseTunnel (11.76s)
sshserver_test.go:1127:
Error Trace: sshserver_test.go:1127
Error: Received unexpected error:
timeout waiting for announce to be sent
Test: TestProxyReverseTunnel
FAIL
FAIL github.com/gravitational/teleport/lib/srv/regular 68.907s
--- FAIL: TestProxyReverseTunnel (23.43s)
assertion_compare.go:332:
Error Trace: sshserver_test.go:1144
Error: "5.775333527" is not less than "5"
Test: TestProxyReverseTunnel
Messages: []
FAIL
FAIL github.com/gravitational/teleport/lib/srv/regular 96.861s
These both appear to be timing related. By removing `t.Parallel()` or
increasing the timeouts it was possible to stabilize this test and have it
consistently pass. However, looking at what `TestProxyReverseTunnel` actually
tested, I don't think it can actually be removed completely. I've outlined
what it tested and why this is no longer necessary and why we should remove it.
* Reverse tunnels can be established
This test was written prior to integration tests existing. Teleport now has
integration tests that are more extensive, robust, and stable which cover
reverse tunnel functionality like `TwoClustersProxy`, `TwoClustersTunnel`, and
`TrustedTunnelNode`. The only thing they were missing that
`TestProxyReverseTunnel` had was checking `LastConnected` time. This PR has
been updated to add that to integration tests.
* Connectivity can be established over reverse tunnels
Similar to the above, we have more extensive, robust, and stable integration
test coverage for establishing connectivity over a reverse tunnel now.
The only bit of functionality that integration don't appear to have is
connecting by DNS name _and_ IP address at sshserver_test.go#L1066-L1067.
However, we do now have a dedicated unit test for this in
`TestProxySubsys_getMatchingServer`.
* Labels are synchronized
While this test does synchronize dynamic labels, it never actually checks if
they were synchronized correctly. That functionality was removed many years
ago in https://github.com/gravitational/teleport/pull/250.
We do now have unit test coverage for dynamic labels at
lib/labels/labels_test.go.
2022-01-30 18:38:29 +00:00
|
|
|
// Wait for both cluster to see each other via reverse tunnels.
|
2022-09-08 14:27:51 +00:00
|
|
|
require.Eventually(t, helpers.WaitForClusters(main.Tunnel, 1), 10*time.Second, 1*time.Second,
|
2022-02-02 16:05:45 +00:00
|
|
|
"Two clusters do not see each other: tunnels are not working.")
|
2022-09-08 14:27:51 +00:00
|
|
|
require.Eventually(t, helpers.WaitForClusters(aux.Tunnel, 1), 10*time.Second, 1*time.Second,
|
2022-02-02 16:05:45 +00:00
|
|
|
"Two clusters do not see each other: tunnels are not working.")
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2023-05-12 16:38:16 +00:00
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
tc, err := main.Process.GetAuthServer().GetRemoteCluster(aux.Secrets.SiteName)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return tc.GetConnectionStatus() == teleport.RemoteClusterStatusOnline
|
|
|
|
}, 60*time.Second, 1*time.Second, "Main cluster does not see aux cluster as connected")
|
|
|
|
|
2019-07-16 00:40:43 +00:00
|
|
|
// impersonating client requests will be denied
|
2022-09-08 14:27:51 +00:00
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: main,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: mainKubeGroups,
|
|
|
|
Impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{kube.TestImpersonationGroup}},
|
|
|
|
RouteToCluster: clusterAux,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// set up kube configuration using main proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: main,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: mainKubeGroups,
|
|
|
|
RouteToCluster: clusterAux,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, pod.Namespace, string(data))
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case event := <-main.UploadEventsC:
|
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := main.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort := newPortValue()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-07-16 00:40:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2019-07-16 00:40:43 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2019-07-16 00:40:43 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// impersonating client requests will be denied
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort = newPortValue()
|
2019-07-16 00:40:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-07-16 00:40:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
|
2020-10-07 21:01:33 +00:00
|
|
|
// TestKubeTrustedClustersSNI tests scenario with trusted clusters
|
2019-07-16 00:40:43 +00:00
|
|
|
// using SNI-forwarding
|
|
|
|
// DELETE IN(4.3.0)
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
|
2020-06-15 21:24:34 +00:00
|
|
|
ctx := context.Background()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
clusterMain := "cluster-main"
|
2021-05-27 02:05:46 +00:00
|
|
|
mainConf := suite.teleKubeConfig(Host)
|
2022-07-20 02:04:54 +00:00
|
|
|
main := helpers.NewInstance(t, helpers.InstanceConfig{
|
2018-06-18 00:53:02 +00:00
|
|
|
ClusterName: clusterMain,
|
2022-06-15 07:07:26 +00:00
|
|
|
HostID: helpers.HostID,
|
2018-06-18 00:53:02 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// main cluster has a role and user called main-kube
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2022-09-08 14:27:51 +00:00
|
|
|
mainKubeGroups := []string{kube.TestImpersonationGroup}
|
2023-01-11 16:58:22 +00:00
|
|
|
mainRole, err := types.NewRole("main-kube", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-18 00:53:02 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: mainKubeGroups,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2018-06-18 00:53:02 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
main.AddUserWithRole(username, mainRole)
|
|
|
|
|
|
|
|
clusterAux := "cluster-aux"
|
2021-05-27 02:05:46 +00:00
|
|
|
auxConf := suite.teleKubeConfig(Host)
|
2022-07-20 02:04:54 +00:00
|
|
|
aux := helpers.NewInstance(t, helpers.InstanceConfig{
|
2018-06-18 00:53:02 +00:00
|
|
|
ClusterName: clusterAux,
|
2022-06-15 07:07:26 +00:00
|
|
|
HostID: helpers.HostID,
|
2018-06-18 00:53:02 +00:00
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
lib.SetInsecureDevMode(true)
|
|
|
|
defer lib.SetInsecureDevMode(false)
|
|
|
|
|
2018-08-02 00:25:16 +00:00
|
|
|
// route all the traffic to the aux cluster
|
|
|
|
mainConf.Proxy.Kube.Enabled = true
|
2019-03-11 03:25:43 +00:00
|
|
|
// ClusterOverride forces connection to be routed
|
|
|
|
// to cluster aux
|
2018-08-02 00:25:16 +00:00
|
|
|
mainConf.Proxy.Kube.ClusterOverride = clusterAux
|
2021-06-18 19:57:29 +00:00
|
|
|
err = main.CreateEx(t, nil, mainConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = aux.CreateEx(t, nil, auxConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// auxiliary cluster has a role aux-kube
|
|
|
|
// connect aux cluster to main cluster
|
|
|
|
// using trusted clusters, so remote user will be allowed to assume
|
|
|
|
// role specified by mapping remote role "aux-kube" to local role "main-kube"
|
2023-05-12 16:38:16 +00:00
|
|
|
auxKubeGroups := []string{kube.TestImpersonationGroup}
|
2023-01-11 16:58:22 +00:00
|
|
|
auxRole, err := types.NewRole("aux-kube", types.RoleSpecV6{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-01-17 02:55:59 +00:00
|
|
|
Logins: []string{username},
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2019-01-17 02:55:59 +00:00
|
|
|
// Note that main cluster can pass it's kubernetes groups
|
|
|
|
// to the remote cluster, and remote cluster
|
|
|
|
// can choose to use them by using special variable
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: auxKubeGroups,
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2018-06-18 00:53:02 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2023-10-18 17:06:50 +00:00
|
|
|
auxRole, err = aux.Process.GetAuthServer().UpsertRole(ctx, auxRole)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-10-07 21:01:33 +00:00
|
|
|
trustedClusterToken := "trusted-cluster-token"
|
2021-03-24 01:26:52 +00:00
|
|
|
err = main.Process.GetAuthServer().UpsertToken(ctx,
|
2022-10-03 14:14:01 +00:00
|
|
|
services.MustCreateProvisionToken(trustedClusterToken, []types.SystemRole{types.RoleTrustedCluster}, time.Time{}))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2021-09-13 09:54:49 +00:00
|
|
|
trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
|
2018-06-18 00:53:02 +00:00
|
|
|
{Remote: mainRole.GetName(), Local: []string{auxRole.GetName()}},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// start both clusters
|
|
|
|
err = main.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer main.StopAll()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
err = aux.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer aux.StopAll()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// try and upsert a trusted cluster
|
|
|
|
var upsertSuccess bool
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
|
2020-06-15 21:24:34 +00:00
|
|
|
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(ctx, trustedCluster)
|
2018-06-18 00:53:02 +00:00
|
|
|
if err != nil {
|
|
|
|
if trace.IsConnectionProblem(err) {
|
|
|
|
log.Debugf("retrying on connection problem: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("got non connection problem %v", err)
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
upsertSuccess = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// make sure we upsert a trusted cluster
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, upsertSuccess)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
Removed `TestProxyReverseTunnel`.
`TestProxyReverseTunnel` has been consistently failing with the following errors.
--- FAIL: TestProxyReverseTunnel (11.76s)
sshserver_test.go:1127:
Error Trace: sshserver_test.go:1127
Error: Received unexpected error:
timeout waiting for announce to be sent
Test: TestProxyReverseTunnel
FAIL
FAIL github.com/gravitational/teleport/lib/srv/regular 68.907s
--- FAIL: TestProxyReverseTunnel (23.43s)
assertion_compare.go:332:
Error Trace: sshserver_test.go:1144
Error: "5.775333527" is not less than "5"
Test: TestProxyReverseTunnel
Messages: []
FAIL
FAIL github.com/gravitational/teleport/lib/srv/regular 96.861s
These both appear to be timing related. By removing `t.Parallel()` or
increasing the timeouts it was possible to stabilize this test and have it
consistently pass. However, looking at what `TestProxyReverseTunnel` actually
tested, I think it can actually be removed completely. I've outlined
what it tested and why this is no longer necessary and why we should remove it.
* Reverse tunnels can be established
This test was written prior to integration tests existing. Teleport now has
integration tests that are more extensive, robust, and stable which cover
reverse tunnel functionality like `TwoClustersProxy`, `TwoClustersTunnel`, and
`TrustedTunnelNode`. The only thing they were missing that
`TestProxyReverseTunnel` had was checking `LastConnected` time. This PR has
been updated to add that to integration tests.
* Connectivity can be established over reverse tunnels
Similar to the above, we have more extensive, robust, and stable integration
test coverage for establishing connectivity over a reverse tunnel now.
The only bit of functionality that integration tests don't appear to have is
connecting by DNS name _and_ IP address at sshserver_test.go#L1066-L1067.
However, we do now have a dedicated unit test for this in
`TestProxySubsys_getMatchingServer`.
* Labels are synchronized
While this test does synchronize dynamic labels, it never actually checks if
they were synchronized correctly. That functionality was removed many years
ago in https://github.com/gravitational/teleport/pull/250.
We do now have unit test coverage for dynamic labels at
lib/labels/labels_test.go.
2022-01-30 18:38:29 +00:00
|
|
|
// Wait for both cluster to see each other via reverse tunnels.
|
2022-09-08 14:27:51 +00:00
|
|
|
require.Eventually(t, helpers.WaitForClusters(main.Tunnel, 1), 10*time.Second, 1*time.Second,
|
2022-02-02 16:05:45 +00:00
|
|
|
"Two clusters do not see each other: tunnels are not working.")
|
2022-09-08 14:27:51 +00:00
|
|
|
require.Eventually(t, helpers.WaitForClusters(aux.Tunnel, 1), 10*time.Second, 1*time.Second,
|
2022-02-02 16:05:45 +00:00
|
|
|
"Two clusters do not see each other: tunnels are not working.")
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2023-05-12 16:38:16 +00:00
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
tc, err := main.Process.GetAuthServer().GetRemoteCluster(aux.Secrets.SiteName)
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return tc.GetConnectionStatus() == teleport.RemoteClusterStatusOnline
|
|
|
|
}, 60*time.Second, 1*time.Second, "Main cluster does not see aux cluster as connected")
|
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating client requests will be denied
|
2022-09-08 14:27:51 +00:00
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: main,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: mainKubeGroups,
|
|
|
|
Impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{kube.TestImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2018-06-18 00:53:02 +00:00
|
|
|
// set up kube configuration using main proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: main,
|
|
|
|
Username: username,
|
|
|
|
KubeGroups: mainKubeGroups,
|
2020-04-30 00:27:47 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-09-26 00:11:51 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
2018-06-18 00:53:02 +00:00
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-26 00:11:51 +00:00
|
|
|
|
2018-06-18 00:53:02 +00:00
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, pod.Namespace, string(data))
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-06-18 00:53:02 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case event := <-main.UploadEventsC:
|
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := main.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-03-11 03:25:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort := newPortValue()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2018-06-18 00:53:02 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2018-06-18 00:53:02 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2018-06-18 00:53:02 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating client requests will be denied
|
2022-09-14 06:53:19 +00:00
|
|
|
localPort = newPortValue()
|
2019-03-11 03:25:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-03-11 03:25:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
|
2019-03-24 20:14:34 +00:00
|
|
|
// testKubeDisconnect tests kubernetes session disconnects under two role
// options: a client idle timeout and a disconnect-on-expired-certificate.
func testKubeDisconnect(t *testing.T, suite *KubeSuite) {
	testCases := []disconnectTestCase{
		{
			name: "idle timeout",
			options: types.RoleOptions{
				// Disconnect the session after 500ms of inactivity.
				ClientIdleTimeout: types.NewDuration(500 * time.Millisecond),
			},
			// Budget for the disconnect to be observed.
			disconnectTimeout: 2 * time.Second,
		},
		{
			name: "expired cert",
			options: types.RoleOptions{
				// Force a disconnect once the short-lived (3s TTL) cert expires.
				DisconnectExpiredCert: types.NewBool(true),
				MaxSessionTTL:         types.NewDuration(3 * time.Second),
			},
			disconnectTimeout: 6 * time.Second,
		},
	}

	// The iteration count is configurable (useful for flake hunting);
	// by default this runs the cases once.
	for i := 0; i < utils.GetIterations(); i++ {
		t.Run(fmt.Sprintf("Iteration=%d", i), func(t *testing.T) {
			for _, tc := range testCases {
				t.Run(tc.name, func(t *testing.T) {
					runKubeDisconnectTest(t, suite, tc)
				})
			}
		})
	}
}
|
|
|
|
|
|
|
|
// runKubeDisconnectTest runs a single kubernetes disconnect scenario: it
// spins up a teleport instance with the role options from tc, starts an
// interactive kube exec session, and verifies the session is torn down
// within tc.disconnectTimeout.
func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase) {
	tconf := suite.teleKubeConfig(Host)

	teleport := helpers.NewInstance(t, helpers.InstanceConfig{
		ClusterName: helpers.Site,
		HostID:      helpers.HostID,
		NodeName:    Host,
		Priv:        suite.priv,
		Pub:         suite.pub,
		Log:         suite.log,
	})

	username := suite.me.Username
	kubeGroups := []string{kube.TestImpersonationGroup}
	// The disconnect behavior under test comes in via tc.options.
	role, err := types.NewRole("kubemaster", types.RoleSpecV6{
		Options: tc.options,
		Allow: types.RoleConditions{
			Logins:     []string{username},
			KubeGroups: kubeGroups,
			KubernetesLabels: types.Labels{
				types.Wildcard: []string{types.Wildcard},
			},
			KubernetesResources: []types.KubernetesResource{
				{
					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
				},
			},
		},
	})
	require.NoError(t, err)
	teleport.AddUserWithRole(username, role)

	err = teleport.CreateEx(t, nil, tconf)
	require.NoError(t, err)

	err = teleport.Start()
	require.NoError(t, err)
	defer teleport.StopAll()

	// set up kube configuration using proxy
	proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
		T:          teleport,
		Username:   username,
		KubeGroups: kubeGroups,
	})
	require.NoError(t, err)

	// try get request to fetch available pods
	ctx := context.Background()
	pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
	require.NoError(t, err)

	// Sanity check: a non-interactive exec works before testing disconnects.
	out := &bytes.Buffer{}
	err = kubeExec(proxyClientConfig, kubeExecArgs{
		podName:      pod.Name,
		podNamespace: pod.Namespace,
		container:    pod.Spec.Containers[0].Name,
		command:      []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
		stdout:       out,
	})
	require.NoError(t, err)

	data := out.Bytes()
	require.Equal(t, pod.Namespace, string(data))

	// interactive command, allocate pty
	term := NewTerminal(250)
	// sessionCtx is canceled when the exec goroutine returns, signaling
	// that the session has ended (by disconnect or otherwise).
	sessionCtx, sessionCancel := context.WithCancel(ctx)
	go func() {
		defer sessionCancel()
		err := kubeExec(proxyClientConfig, kubeExecArgs{
			podName:      pod.Name,
			podNamespace: pod.Namespace,
			container:    pod.Spec.Containers[0].Name,
			command:      []string{"/bin/sh"},
			stdout:       term,
			tty:          true,
			stdin:        term,
		})
		// NOTE(review): require.NoError calls t.FailNow from a non-test
		// goroutine, which testify documents as unsafe — consider
		// assert.NoError or sending err back over a channel.
		require.NoError(t, err)
	}()

	// lets type something followed by "enter" and then hang the session
	require.NoError(t, enterInput(sessionCtx, term, "echo boring platypus\r\n", ".*boring platypus.*"))
	// Give the configured disconnect mechanism time to trigger, then wait
	// up to the same budget again for the session goroutine to observe it.
	time.Sleep(tc.disconnectTimeout)
	select {
	case <-time.After(tc.disconnectTimeout):
		t.Fatalf("timeout waiting for session to exit")
	case <-sessionCtx.Done():
		// session closed
	}
}
|
|
|
|
|
2022-01-14 20:03:40 +00:00
|
|
|
// testKubeTransportProtocol tests the proxy transport protocol capabilities:
// plain GET requests must be served over both HTTP/1.1 and HTTP/2, while
// SPDY exec streaming only works over an h1 transport.
func testKubeTransportProtocol(t *testing.T, suite *KubeSuite) {
	tconf := suite.teleKubeConfig(Host)

	teleport := helpers.NewInstance(t, helpers.InstanceConfig{
		ClusterName: helpers.Site,
		HostID:      helpers.HostID,
		NodeName:    Host,
		Priv:        suite.priv,
		Pub:         suite.pub,
		Log:         suite.log,
	})

	username := suite.me.Username
	kubeGroups := []string{kube.TestImpersonationGroup}
	role, err := types.NewRole("kubemaster", types.RoleSpecV6{
		Allow: types.RoleConditions{
			Logins:     []string{username},
			KubeGroups: kubeGroups,
			KubernetesLabels: types.Labels{
				types.Wildcard: []string{types.Wildcard},
			},
			KubernetesResources: []types.KubernetesResource{
				{
					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
				},
			},
		},
	})
	require.NoError(t, err)
	teleport.AddUserWithRole(username, role)

	err = teleport.CreateEx(t, nil, tconf)
	require.NoError(t, err)

	err = teleport.Start()
	require.NoError(t, err)
	defer teleport.StopAll()

	// set up kube configuration using proxy
	proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
		T:          teleport,
		Username:   username,
		KubeGroups: kubeGroups,
	})
	require.NoError(t, err)

	ctx := context.Background()
	pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
	require.NoError(t, err)

	// Build the raw pod GET URL so we can hit the proxy with hand-rolled
	// HTTP clients instead of the client-go machinery.
	u, err := url.Parse(proxyClientConfig.Host)
	require.NoError(t, err)

	u.Scheme = "https"
	u.Path = fmt.Sprintf("/api/v1/namespaces/%v/pods/%v", pod.Namespace, pod.Name)

	tlsConfig, err := tlsClientConfig(proxyClientConfig)
	require.NoError(t, err)

	trans := &http.Transport{
		TLSClientConfig: tlsConfig,
	}

	// call proxy with an HTTP1 client
	client := &http.Client{Transport: trans}
	resp1, err := client.Get(u.String())
	require.NoError(t, err)
	defer resp1.Body.Close()
	require.Equal(t, 200, resp1.StatusCode)
	require.Equal(t, "HTTP/1.1", resp1.Proto)

	// call proxy with an HTTP2 client (upgrades the same transport in place)
	err = http2.ConfigureTransport(trans)
	require.NoError(t, err)

	resp2, err := client.Get(u.String())
	require.NoError(t, err)
	defer resp2.Body.Close()
	require.Equal(t, 200, resp2.StatusCode)
	require.Equal(t, "HTTP/2.0", resp2.Proto)

	// stream succeeds with an h1 transport
	command := kubeExecArgs{
		podName:      pod.Name,
		podNamespace: pod.Namespace,
		container:    pod.Spec.Containers[0].Name,
		command:      []string{"ls"},
	}

	err = kubeExec(proxyClientConfig, command)
	require.NoError(t, err)

	// stream fails with an h2 transport: force ALPN to h2 only and expect
	// the SPDY upgrade to be rejected.
	proxyClientConfig.TLSClientConfig.NextProtos = []string{"h2"}
	err = kubeExec(proxyClientConfig, command)
	require.Error(t, err)
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// teleKubeConfig sets up teleport with kubernetes turned on. The proxy's
// kube listener is bound to the given hostname on a fresh port and pointed
// at the suite's kubeconfig.
func (s *KubeSuite) teleKubeConfig(hostname string) *servicecfg.Config {
	tconf := servicecfg.MakeDefaultConfig()
	tconf.Console = nil
	tconf.Log = s.log
	tconf.SSH.Enabled = true
	tconf.Proxy.DisableWebInterface = true
	// Short polling/timeouts keep integration tests fast.
	tconf.PollingPeriod = 500 * time.Millisecond
	tconf.Testing.ClientTimeout = time.Second
	tconf.Testing.ShutdownTimeout = 2 * tconf.Testing.ClientTimeout

	// set kubernetes specific parameters
	tconf.Proxy.Kube.Enabled = true
	tconf.Proxy.Kube.ListenAddr.Addr = net.JoinHostPort(hostname, newPortStr())
	tconf.Proxy.Kube.KubeconfigPath = s.kubeConfigPath
	tconf.Proxy.Kube.LegacyKubeProxy = true
	// Disable the circuit breaker so transient test failures don't trip it.
	tconf.CircuitBreakerConfig = breaker.NoopBreakerConfig()

	return tconf
}
|
|
|
|
|
2023-05-19 13:55:52 +00:00
|
|
|
// teleAuthConfig sets up an auth-only teleport config: the proxy and SSH
// services are disabled. The hostname parameter is currently unused.
func (s *KubeSuite) teleAuthConfig(hostname string) *servicecfg.Config {
	tconf := servicecfg.MakeDefaultConfig()
	tconf.Console = nil
	tconf.Log = s.log
	// Short polling/timeouts keep integration tests fast.
	tconf.PollingPeriod = 500 * time.Millisecond
	tconf.Testing.ClientTimeout = time.Second
	tconf.Testing.ShutdownTimeout = 2 * tconf.Testing.ClientTimeout
	tconf.Proxy.Enabled = false
	tconf.SSH.Enabled = false
	// Disable the circuit breaker so transient test failures don't trip it.
	tconf.CircuitBreakerConfig = breaker.NoopBreakerConfig()

	return tconf
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// tlsClientConfig returns TLS configuration for client
|
|
|
|
func tlsClientConfig(cfg *rest.Config) (*tls.Config, error) {
|
|
|
|
cert, err := tls.X509KeyPair(cfg.TLSClientConfig.CertData, cfg.TLSClientConfig.KeyData)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pool := x509.NewCertPool()
|
|
|
|
ok := pool.AppendCertsFromPEM(cfg.TLSClientConfig.CAData)
|
|
|
|
if !ok {
|
|
|
|
return nil, trace.BadParameter("failed to append certs from PEM")
|
|
|
|
}
|
|
|
|
|
2022-09-19 22:38:59 +00:00
|
|
|
return &tls.Config{
|
2018-06-10 00:21:14 +00:00
|
|
|
RootCAs: pool,
|
|
|
|
Certificates: []tls.Certificate{cert},
|
|
|
|
ClientAuth: tls.RequireAndVerifyClientCert,
|
2022-09-19 22:38:59 +00:00
|
|
|
}, nil
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2022-09-08 14:27:51 +00:00
|
|
|
func kubeProxyTLSConfig(cfg kube.ProxyConfig) (*tls.Config, error) {
|
2022-02-15 16:02:10 +00:00
|
|
|
tlsConfig := &tls.Config{}
|
2022-09-08 14:27:51 +00:00
|
|
|
_, kubeConfig, err := kube.ProxyClient(cfg)
|
2022-02-15 16:02:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
caCert, err := tlsca.ParseCertificatePEM(kubeConfig.TLSClientConfig.CAData)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
cert, err := tls.X509KeyPair(kubeConfig.TLSClientConfig.CertData, kubeConfig.TLSClientConfig.KeyData)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2023-05-12 16:38:16 +00:00
|
|
|
tlsConfig.RootCAs = x509.NewCertPool()
|
2022-02-15 16:02:10 +00:00
|
|
|
tlsConfig.RootCAs.AddCert(caCert)
|
|
|
|
tlsConfig.Certificates = []tls.Certificate{cert}
|
|
|
|
tlsConfig.ServerName = kubeConfig.TLSClientConfig.ServerName
|
|
|
|
return tlsConfig, nil
|
|
|
|
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
const (
	// testNamespace is the kubernetes namespace used by these tests.
	testNamespace = "teletest"
	// testPod is the name of the pod the tests create and exec into.
	testPod = "test-pod"
)
|
|
|
|
|
|
|
|
func newNamespace(name string) *v1.Namespace {
|
|
|
|
return &v1.Namespace{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: name,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
func newPod(ns, name string) *v1.Pod {
|
|
|
|
return &v1.Pod{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Namespace: ns,
|
|
|
|
Name: name,
|
|
|
|
},
|
|
|
|
Spec: v1.PodSpec{
|
|
|
|
Containers: []v1.Container{{
|
|
|
|
Name: "nginx",
|
|
|
|
Image: "nginx:alpine",
|
|
|
|
}},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// kubeExecArgs groups the parameters for kubeExec.
type kubeExecArgs struct {
	// podName is the target pod.
	podName string
	// podNamespace is the namespace of the target pod.
	podNamespace string
	// container is the container within the pod to exec into.
	container string
	// command is the command and its arguments to execute.
	command []string
	// stdout, when non-nil, receives the command's stdout.
	stdout io.Writer
	// stderr, when non-nil, receives the command's stderr; kubeExec
	// defaults it to io.Discard when no tty is allocated.
	stderr io.Writer
	// stdin, when non-nil, supplies the command's stdin.
	stdin io.Reader
	// tty requests allocation of a pseudo-terminal.
	tty bool
}
|
|
|
|
|
|
|
|
// kubePortForwardArgs groups the parameters for newPortForwarder.
type kubePortForwardArgs struct {
	// ports lists the forwarding specs, e.g. "8080:80".
	ports []string
	// podName is the target pod.
	podName string
	// podNamespace is the namespace of the target pod.
	podNamespace string
}
|
|
|
|
|
|
|
|
// kubePortForwarder wraps a client-go PortForwarder together with the
// channels that control and observe its lifecycle.
type kubePortForwarder struct {
	*portforward.PortForwarder
	// stopC stops the forwarder when closed.
	stopC chan struct{}
	// readyC is closed by the forwarder once forwarding is established.
	readyC chan struct{}
}
|
|
|
|
|
|
|
|
// newPortForwarder creates a SPDY-based port forwarder for the pod named in
// args, dialing through the kubernetes API endpoint in kubeConfig. If the
// config carries impersonation settings, the HTTP client's transport is
// wrapped to send the corresponding Impersonate-* headers.
func newPortForwarder(kubeConfig *rest.Config, args kubePortForwardArgs) (*kubePortForwarder, error) {
	u, err := url.Parse(kubeConfig.Host)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	u.Scheme = "https"
	u.Path = fmt.Sprintf("/api/v1/namespaces/%v/pods/%v/portforward", args.podNamespace, args.podName)

	// set up port forwarding request
	tlsConfig, err := tlsClientConfig(kubeConfig)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	upgradeRoundTripper := streamspdy.NewRoundTripper(tlsConfig)
	client := &http.Client{
		Transport: upgradeRoundTripper,
	}
	dialer := spdy.NewDialer(upgradeRoundTripper, client, "POST", u)
	// Inject impersonation headers around the upgrade round tripper when
	// the config asks for a different kubernetes identity.
	if kubeConfig.Impersonate.UserName != "" {
		client.Transport = transport.NewImpersonatingRoundTripper(
			transport.ImpersonationConfig{
				UserName: kubeConfig.Impersonate.UserName,
				Groups:   kubeConfig.Impersonate.Groups,
			},
			upgradeRoundTripper)
	}

	stopC, readyC := make(chan struct{}), make(chan struct{})
	fwd, err := portforward.New(dialer, args.ports, stopC, readyC, nil, nil)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &kubePortForwarder{PortForwarder: fwd, stopC: stopC, readyC: readyC}, nil
}
|
|
|
|
|
|
|
|
// kubeExec executes command against kubernetes API server. It builds the
// /exec URL and query parameters from args and streams the command's I/O
// over a SPDY executor until the command completes. Returns the stream
// error, if any.
func kubeExec(kubeConfig *rest.Config, args kubeExecArgs) error {
	query := make(url.Values)
	for _, arg := range args.command {
		query.Add("command", arg)
	}
	if args.stdout != nil {
		query.Set("stdout", "true")
	}
	if args.stdin != nil {
		query.Set("stdin", "true")
	}
	// stderr channel is only set if there is no tty allocated
	// otherwise k8s server gets confused
	if !args.tty && args.stderr == nil {
		args.stderr = io.Discard
	}
	if args.stderr != nil && !args.tty {
		query.Set("stderr", "true")
	}
	if args.tty {
		query.Set("tty", "true")
	}
	query.Set("container", args.container)
	u, err := url.Parse(kubeConfig.Host)
	if err != nil {
		return trace.Wrap(err)
	}
	u.Scheme = "https"
	u.Path = fmt.Sprintf("/api/v1/namespaces/%v/pods/%v/exec", args.podNamespace, args.podName)
	u.RawQuery = query.Encode()
	executor, err := remotecommand.NewSPDYExecutor(kubeConfig, "POST", u)
	if err != nil {
		return trace.Wrap(err)
	}
	opts := remotecommand.StreamOptions{
		Stdin:  args.stdin,
		Stdout: args.stdout,
		Stderr: args.stderr,
		Tty:    args.tty,
	}
	return executor.StreamWithContext(context.Background(), opts)
}
|
2022-02-15 16:02:10 +00:00
|
|
|
|
2023-05-15 15:11:02 +00:00
|
|
|
func kubeJoin(kubeConfig kube.ProxyConfig, tc *client.TeleportClient, meta types.SessionTracker, mode types.SessionParticipantMode) (*client.KubeSession, error) {
|
2022-02-15 16:02:10 +00:00
|
|
|
tlsConfig, err := kubeProxyTLSConfig(kubeConfig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2023-05-15 15:11:02 +00:00
|
|
|
sess, err := client.NewKubeSession(context.TODO(), tc, meta, tc.KubeProxyAddr, "", mode, tlsConfig)
|
2022-02-15 16:02:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2022-04-01 17:48:40 +00:00
|
|
|
return sess, nil
|
2022-02-15 16:02:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// testKubeJoin tests that joining an interactive exec session works.
|
|
|
|
func testKubeJoin(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
|
|
|
|
2022-07-20 02:04:54 +00:00
|
|
|
teleport := helpers.NewInstance(t, helpers.InstanceConfig{
|
2022-06-15 07:07:26 +00:00
|
|
|
ClusterName: helpers.Site,
|
|
|
|
HostID: helpers.HostID,
|
2022-02-15 16:02:10 +00:00
|
|
|
NodeName: Host,
|
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
2022-06-15 07:07:26 +00:00
|
|
|
Log: suite.log,
|
2022-02-15 16:02:10 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
hostUsername := suite.me.Username
|
|
|
|
participantUsername := suite.me.Username + "-participant"
|
2022-09-08 14:27:51 +00:00
|
|
|
kubeGroups := []string{kube.TestImpersonationGroup}
|
2022-02-15 16:02:10 +00:00
|
|
|
kubeUsers := []string{"alice@example.com"}
|
2023-01-11 16:58:22 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV6{
|
2022-02-15 16:02:10 +00:00
|
|
|
Allow: types.RoleConditions{
|
|
|
|
Logins: []string{hostUsername},
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
2023-05-12 16:38:16 +00:00
|
|
|
KubernetesLabels: types.Labels{
|
|
|
|
types.Wildcard: []string{types.Wildcard},
|
|
|
|
},
|
2023-01-11 16:58:22 +00:00
|
|
|
KubernetesResources: []types.KubernetesResource{
|
|
|
|
{
|
2023-07-06 15:39:11 +00:00
|
|
|
Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
|
2023-01-11 16:58:22 +00:00
|
|
|
},
|
|
|
|
},
|
2022-02-15 16:02:10 +00:00
|
|
|
},
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
2023-01-11 16:58:22 +00:00
|
|
|
joinRole, err := types.NewRole("participant", types.RoleSpecV6{
|
2022-05-05 19:42:57 +00:00
|
|
|
Allow: types.RoleConditions{
|
|
|
|
JoinSessions: []*types.SessionJoinPolicy{{
|
|
|
|
Name: "foo",
|
|
|
|
Roles: []string{"kubemaster"},
|
|
|
|
Kinds: []string{string(types.KubernetesSessionKind)},
|
2023-05-15 15:11:02 +00:00
|
|
|
Modes: []string{string(types.SessionPeerMode), string(types.SessionObserverMode)},
|
2022-05-05 19:42:57 +00:00
|
|
|
}},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
2022-02-15 16:02:10 +00:00
|
|
|
teleport.AddUserWithRole(hostUsername, role)
|
2022-05-05 19:42:57 +00:00
|
|
|
teleport.AddUserWithRole(participantUsername, joinRole)
|
2022-02-15 16:02:10 +00:00
|
|
|
|
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
|
|
|
|
|
|
|
ctx := context.Background()
|
|
|
|
|
|
|
|
// set up kube configuration using proxy
|
2022-09-08 14:27:51 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: hostUsername,
|
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
2022-02-15 16:02:10 +00:00
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// try get request to fetch available pods
|
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
group := &errgroup.Group{}
|
|
|
|
|
|
|
|
// Start the main session.
|
|
|
|
group.Go(func() error {
|
|
|
|
err := kubeExec(proxyClientConfig, kubeExecArgs{
|
2022-02-15 16:02:10 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
container: pod.Spec.Containers[0].Name,
|
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
|
|
|
stdin: term,
|
|
|
|
})
|
2023-05-22 20:48:07 +00:00
|
|
|
return trace.Wrap(err)
|
|
|
|
})
|
2022-02-15 16:02:10 +00:00
|
|
|
|
|
|
|
// We need to wait for the exec request to be handled here for the session to be
|
|
|
|
// created. Sadly though the k8s API doesn't give us much indication of when that is.
|
2023-05-12 16:38:16 +00:00
|
|
|
var session types.SessionTracker
|
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
// We need to wait for the session to be created here. We can't use the
|
|
|
|
// session manager's WaitUntilExists method because it doesn't work for
|
|
|
|
// kubernetes sessions.
|
|
|
|
sessions, err := teleport.Process.GetAuthServer().GetActiveSessionTrackers(context.Background())
|
|
|
|
if err != nil || len(sessions) == 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
session = sessions[0]
|
|
|
|
return true
|
|
|
|
}, 10*time.Second, time.Second)
|
2022-02-15 16:02:10 +00:00
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
participantStdinR, participantStdinW, err := os.Pipe()
|
|
|
|
require.NoError(t, err)
|
|
|
|
participantStdoutR, participantStdoutW, err := os.Pipe()
|
|
|
|
require.NoError(t, err)
|
|
|
|
streamsMu := &sync.Mutex{}
|
2023-05-15 15:11:02 +00:00
|
|
|
streams := make([]*client.KubeSession, 0, 3)
|
|
|
|
observerCaptures := make([]*bytes.Buffer, 0, 2)
|
|
|
|
albProxy := helpers.MustStartMockALBProxy(t, teleport.Config.Proxy.WebAddr.Addr)
|
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
// join peer by KubeProxyAddr
|
|
|
|
group.Go(func() error {
|
2023-05-15 15:11:02 +00:00
|
|
|
tc, err := teleport.NewClient(helpers.ClientConfig{
|
|
|
|
Login: hostUsername,
|
|
|
|
Cluster: helpers.Site,
|
|
|
|
Host: Host,
|
|
|
|
})
|
2023-05-22 20:48:07 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2022-04-01 17:48:40 +00:00
|
|
|
|
2023-05-15 15:11:02 +00:00
|
|
|
tc.Stdin = participantStdinR
|
|
|
|
tc.Stdout = participantStdoutW
|
|
|
|
|
|
|
|
stream, err := kubeJoin(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: participantUsername,
|
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
}, tc, session, types.SessionPeerMode)
|
2023-05-22 20:48:07 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
streamsMu.Lock()
|
2023-05-15 15:11:02 +00:00
|
|
|
streams = append(streams, stream)
|
2023-05-22 20:48:07 +00:00
|
|
|
streamsMu.Unlock()
|
|
|
|
stream.Wait()
|
|
|
|
// close participant stdout so that we can read it after till EOF
|
|
|
|
participantStdoutW.Close()
|
|
|
|
return nil
|
2023-05-12 16:38:16 +00:00
|
|
|
})
|
2022-04-01 17:48:40 +00:00
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
// join observer by WebProxyAddr
|
|
|
|
group.Go(func() error {
|
2023-05-15 15:11:02 +00:00
|
|
|
stream, capture := kubeJoinByWebAddr(t, teleport, participantUsername, kubeUsers, kubeGroups)
|
2023-05-22 20:48:07 +00:00
|
|
|
streamsMu.Lock()
|
2023-05-15 15:11:02 +00:00
|
|
|
streams = append(streams, stream)
|
|
|
|
observerCaptures = append(observerCaptures, capture)
|
2023-05-22 20:48:07 +00:00
|
|
|
streamsMu.Unlock()
|
|
|
|
stream.Wait()
|
|
|
|
return nil
|
2023-05-15 15:11:02 +00:00
|
|
|
})
|
2023-05-22 20:48:07 +00:00
|
|
|
|
|
|
|
// join observer with ALPN conn upgrade
|
|
|
|
group.Go(func() error {
|
2023-05-15 15:11:02 +00:00
|
|
|
stream, capture := kubeJoinByALBAddr(t, teleport, participantUsername, kubeUsers, kubeGroups, albProxy.Addr().String())
|
2023-05-22 20:48:07 +00:00
|
|
|
streamsMu.Lock()
|
2023-05-15 15:11:02 +00:00
|
|
|
streams = append(streams, stream)
|
|
|
|
observerCaptures = append(observerCaptures, capture)
|
2023-05-22 20:48:07 +00:00
|
|
|
streamsMu.Unlock()
|
|
|
|
stream.Wait()
|
|
|
|
return nil
|
2023-05-15 15:11:02 +00:00
|
|
|
})
|
2022-04-01 17:48:40 +00:00
|
|
|
|
2022-02-15 16:02:10 +00:00
|
|
|
// We wait again for the second user to finish joining the session.
|
|
|
|
// We allow a bit of time to pass here to give the session manager time to recognize the
|
|
|
|
// new IO streams of the second client.
|
|
|
|
time.Sleep(time.Second * 5)
|
|
|
|
|
2022-04-01 17:48:40 +00:00
|
|
|
// sent a test message from the participant
|
2023-05-15 15:11:02 +00:00
|
|
|
participantStdinW.Write([]byte("\ahi from peer\n\r"))
|
2022-04-01 17:48:40 +00:00
|
|
|
|
2022-02-15 16:02:10 +00:00
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
2023-05-15 15:11:02 +00:00
|
|
|
term.Type("\ahi from term\n\r")
|
2022-02-15 16:02:10 +00:00
|
|
|
|
|
|
|
// Terminate the session after a moment to allow for the IO to reach the second client.
|
2023-05-12 16:38:16 +00:00
|
|
|
time.AfterFunc(5*time.Second, func() {
|
2023-05-22 20:48:07 +00:00
|
|
|
// send exit command to close the session
|
|
|
|
term.Type("exit 0\n\r\a")
|
2023-05-12 16:38:16 +00:00
|
|
|
})
|
2022-02-15 16:02:10 +00:00
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
// wait for all clients to finish
|
|
|
|
require.NoError(t, group.Wait())
|
2023-05-15 15:11:02 +00:00
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
// Verify peer.
|
|
|
|
participantOutput, err := io.ReadAll(participantStdoutR)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Contains(t, string(participantOutput), "hi from term")
|
2023-05-15 15:11:02 +00:00
|
|
|
|
2023-05-22 20:48:07 +00:00
|
|
|
// Verify original session.
|
|
|
|
require.Contains(t, out.String(), "hi from peer")
|
|
|
|
|
|
|
|
// Verify observers.
|
|
|
|
for _, capture := range observerCaptures {
|
|
|
|
require.Contains(t, capture.String(), "hi from peer")
|
|
|
|
require.Contains(t, capture.String(), "hi from term")
|
|
|
|
}
|
2023-05-15 15:11:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func kubeJoinByWebAddr(t *testing.T, teleport *helpers.TeleInstance, username string, kubeUsers, kubeGroups []string) (*client.KubeSession, *bytes.Buffer) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
tc, err := teleport.NewClient(helpers.ClientConfig{
|
|
|
|
Login: username,
|
|
|
|
Cluster: helpers.Site,
|
|
|
|
Host: Host,
|
|
|
|
Proxy: &helpers.ProxyConfig{
|
|
|
|
WebAddr: teleport.Config.Proxy.WebAddr.Addr,
|
|
|
|
KubeAddr: teleport.Config.Proxy.WebAddr.Addr,
|
|
|
|
},
|
|
|
|
})
|
2022-02-15 16:02:10 +00:00
|
|
|
require.NoError(t, err)
|
2023-05-15 15:11:02 +00:00
|
|
|
|
|
|
|
buffer := new(bytes.Buffer)
|
|
|
|
tc.Stdout = buffer
|
|
|
|
return kubeJoinObserverWithSNISet(t, tc, teleport, kubeUsers, kubeGroups), buffer
|
|
|
|
}
|
|
|
|
|
|
|
|
func kubeJoinByALBAddr(t *testing.T, teleport *helpers.TeleInstance, username string, kubeUsers, kubeGroups []string, albAddr string) (*client.KubeSession, *bytes.Buffer) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
tc, err := teleport.NewClient(helpers.ClientConfig{
|
|
|
|
Login: username,
|
|
|
|
Cluster: helpers.Site,
|
|
|
|
Host: Host,
|
|
|
|
ALBAddr: albAddr,
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
buffer := new(bytes.Buffer)
|
|
|
|
tc.Stdout = buffer
|
|
|
|
return kubeJoinObserverWithSNISet(t, tc, teleport, kubeUsers, kubeGroups), buffer
|
|
|
|
}
|
|
|
|
|
|
|
|
func kubeJoinObserverWithSNISet(t *testing.T, tc *client.TeleportClient, teleport *helpers.TeleInstance, kubeUsers, kubeGroups []string) *client.KubeSession {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
sessions, err := teleport.Process.GetAuthServer().GetActiveSessionTrackers(context.Background())
|
|
|
|
require.NoError(t, err)
|
2023-11-06 20:38:38 +00:00
|
|
|
require.NotEmpty(t, sessions)
|
2023-05-15 15:11:02 +00:00
|
|
|
|
|
|
|
stream, err := kubeJoin(kube.ProxyConfig{
|
|
|
|
T: teleport,
|
|
|
|
Username: tc.Username,
|
|
|
|
KubeUsers: kubeUsers,
|
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
CustomTLSServerName: constants.KubeTeleportProxyALPNPrefix + Host,
|
|
|
|
}, tc, sessions[0], types.SessionObserverMode)
|
|
|
|
require.NoError(t, err)
|
|
|
|
return stream
|
2022-02-15 16:02:10 +00:00
|
|
|
}
|
2023-05-19 13:55:52 +00:00
|
|
|
|
|
|
|
// testExecNoAuth verifies behavior when the auth server is unavailable: a user
// whose roles do not require a moderated session can still get a pod and exec
// into it, while a user whose roles require session moderation can get the pod
// but is denied the exec.
|
|
|
|
func testExecNoAuth(t *testing.T, suite *KubeSuite) {
	// Stand up a fresh single-node cluster for this test.
	teleport := helpers.NewInstance(t, helpers.InstanceConfig{
		ClusterName: helpers.Site,
		HostID:      helpers.HostID,
		NodeName:    Host,
		Priv:        suite.priv,
		Pub:         suite.pub,
		Log:         suite.log,
	})

	// Admin role: full pod access, no session-join requirements.
	adminUsername := "admin"
	kubeGroups := []string{kube.TestImpersonationGroup}
	kubeUsers := []string{"alice@example.com"}
	adminRole, err := types.NewRole("admin", types.RoleSpecV6{
		Allow: types.RoleConditions{
			Logins:     []string{adminUsername},
			KubeGroups: kubeGroups,
			KubeUsers:  kubeUsers,
			KubernetesLabels: types.Labels{
				types.Wildcard: {types.Wildcard},
			},
			KubernetesResources: []types.KubernetesResource{
				{
					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
				},
			},
		},
	})
	require.NoError(t, err)
	teleport.AddUserWithRole(adminUsername, adminRole)

	// Regular user role: same pod access, but exec sessions require a
	// moderator holding the admin role — which needs a reachable auth server.
	userUsername := "user"
	userRole, err := types.NewRole("userRole", types.RoleSpecV6{
		Allow: types.RoleConditions{
			Logins:     []string{userUsername},
			KubeGroups: kubeGroups,
			KubeUsers:  kubeUsers,
			KubernetesLabels: types.Labels{
				types.Wildcard: {types.Wildcard},
			},
			KubernetesResources: []types.KubernetesResource{
				{
					Kind: types.KindKubePod, Name: types.Wildcard, Namespace: types.Wildcard, Verbs: []string{types.Wildcard},
				},
			},
			RequireSessionJoin: []*types.SessionRequirePolicy{
				{
					Name:   "Auditor oversight",
					Filter: fmt.Sprintf("contains(user.spec.roles, %q)", adminRole.GetName()),
					Kinds:  []string{"k8s"},
					Modes:  []string{string(types.SessionModeratorMode)},
					Count:  1,
				},
			},
		},
	})
	require.NoError(t, err)
	teleport.AddUserWithRole(userUsername, userRole)

	// Start the auth service first; the proxy is started separately below so
	// the auth server can be stopped independently later.
	authTconf := suite.teleAuthConfig(Host)
	err = teleport.CreateEx(t, nil, authTconf)
	require.NoError(t, err)
	err = teleport.Start()
	require.NoError(t, err)

	// Create a Teleport instance with a Proxy.
	proxyConfig := helpers.ProxyConfig{
		Name:                   "cluster-main-proxy",
		DisableWebService:      true,
		DisableALPNSNIListener: true,
	}
	proxyConfig.SSHAddr = helpers.NewListenerOn(t, teleport.Hostname, service.ListenerNodeSSH, &proxyConfig.FileDescriptors)
	proxyConfig.WebAddr = helpers.NewListenerOn(t, teleport.Hostname, service.ListenerProxyWeb, &proxyConfig.FileDescriptors)
	proxyConfig.KubeAddr = helpers.NewListenerOn(t, teleport.Hostname, service.ListenerProxyKube, &proxyConfig.FileDescriptors)
	proxyConfig.ReverseTunnelAddr = helpers.NewListenerOn(t, teleport.Hostname, service.ListenerProxyTunnel, &proxyConfig.FileDescriptors)

	_, _, err = teleport.StartProxy(proxyConfig, helpers.WithLegacyKubeProxy(suite.kubeConfigPath))
	require.NoError(t, err)

	t.Cleanup(func() {
		teleport.StopAll()
	})
	kubeAddr, err := utils.ParseAddr(proxyConfig.KubeAddr)
	require.NoError(t, err)
	// wait until the proxy and kube are ready
	require.Eventually(t, func() bool {
		// set up kube configuration using proxy
		proxyClient, _, err := kube.ProxyClient(kube.ProxyConfig{
			T:             teleport,
			Username:      adminUsername,
			KubeUsers:     kubeUsers,
			KubeGroups:    kubeGroups,
			TargetAddress: *kubeAddr,
		})
		if err != nil {
			return false
		}
		ctx := context.Background()
		// try get request to fetch available pods
		_, err = proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
		return err == nil
	}, 20*time.Second, 500*time.Millisecond)

	// Build clients for both users now, while the auth server is still up,
	// so each holds a valid certificate before auth goes away.
	adminProxyClient, adminProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
		T:             teleport,
		Username:      adminUsername,
		KubeUsers:     kubeUsers,
		KubeGroups:    kubeGroups,
		TargetAddress: *kubeAddr,
	})
	require.NoError(t, err)

	userProxyClient, userProxyClientConfig, err := kube.ProxyClient(kube.ProxyConfig{
		T:             teleport,
		Username:      userUsername,
		KubeUsers:     kubeUsers,
		KubeGroups:    kubeGroups,
		TargetAddress: *kubeAddr,
	})
	require.NoError(t, err)

	// stop auth server to test that user with moderation is denied when no Auth exists.
	// Both admin and user already have valid certificates.
	require.NoError(t, teleport.StopAuth(true))
	tests := []struct {
		name           string
		user           string
		proxyClient    kubernetes.Interface
		clientConfig   *rest.Config
		assetErr       require.ErrorAssertionFunc
		outputContains string
	}{
		{
			name:           "admin user", // admin user does not require any additional moderation.
			proxyClient:    adminProxyClient,
			clientConfig:   adminProxyClientConfig,
			user:           adminUsername,
			assetErr:       require.NoError,
			outputContains: "echo hi",
		},
		{
			// No outputContains here: the exec is expected to fail, so the
			// output check below degenerates to a trivially-true empty match.
			name:         "user with moderation", // user requires moderation and his session must be denied when no Auth exists.
			user:         userUsername,
			assetErr:     require.Error,
			proxyClient:  userProxyClient,
			clientConfig: userProxyClientConfig,
		},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the parallel subtest closure
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			ctx := context.Background()
			// try get request to fetch available pods
			pod, err := tt.proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
			require.NoError(t, err)

			out := &bytes.Buffer{}
			// interactive command, allocate pty
			term := NewTerminal(250)
			// lets type "echo hi" followed by "enter" and then "exit" + "enter":
			term.Type("\aecho hi\n\r\aexit\n\r\a")
			err = kubeExec(tt.clientConfig, kubeExecArgs{
				podName:      pod.Name,
				podNamespace: pod.Namespace,
				container:    pod.Spec.Containers[0].Name,
				command:      []string{"/bin/sh"},
				stdout:       out,
				stdin:        term,
				tty:          true,
			})
			tt.assetErr(t, err)

			data := out.Bytes()
			require.Contains(t, string(data), tt.outputContains)
		})
	}
}
|