2018-06-10 00:21:14 +00:00
|
|
|
/*
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kubernetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
Copyright 2016-2020 Gravitational, Inc.
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package integration
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"crypto/tls"
|
|
|
|
"crypto/x509"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"os"
|
|
|
|
"os/user"
|
|
|
|
"strconv"
|
2021-05-27 02:05:46 +00:00
|
|
|
"testing"
|
2018-06-10 00:21:14 +00:00
|
|
|
"time"
|
|
|
|
|
2021-06-16 17:17:03 +00:00
|
|
|
"github.com/gravitational/trace"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport"
|
2021-07-30 22:34:19 +00:00
|
|
|
apidefaults "github.com/gravitational/teleport/api/defaults"
|
|
|
|
"github.com/gravitational/teleport/api/profile"
|
|
|
|
"github.com/gravitational/teleport/api/types"
|
2018-06-18 00:53:02 +00:00
|
|
|
"github.com/gravitational/teleport/lib"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/auth/testauthority"
|
|
|
|
"github.com/gravitational/teleport/lib/events"
|
|
|
|
kubeutils "github.com/gravitational/teleport/lib/kube/utils"
|
|
|
|
"github.com/gravitational/teleport/lib/service"
|
|
|
|
"github.com/gravitational/teleport/lib/services"
|
|
|
|
"github.com/gravitational/teleport/lib/session"
|
2020-04-30 00:27:47 +00:00
|
|
|
"github.com/gravitational/teleport/lib/tlsca"
|
2018-06-10 00:21:14 +00:00
|
|
|
"github.com/gravitational/teleport/lib/utils"
|
2021-05-27 02:05:46 +00:00
|
|
|
|
2020-04-10 18:37:09 +00:00
|
|
|
v1 "k8s.io/api/core/v1"
|
2018-06-10 00:21:14 +00:00
|
|
|
"k8s.io/apimachinery/pkg/api/errors"
|
|
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
|
|
streamspdy "k8s.io/apimachinery/pkg/util/httpstream/spdy"
|
|
|
|
"k8s.io/client-go/kubernetes"
|
|
|
|
"k8s.io/client-go/rest"
|
|
|
|
"k8s.io/client-go/tools/portforward"
|
|
|
|
"k8s.io/client-go/tools/remotecommand"
|
2019-03-11 03:25:43 +00:00
|
|
|
"k8s.io/client-go/transport"
|
2018-06-10 00:21:14 +00:00
|
|
|
"k8s.io/client-go/transport/spdy"
|
|
|
|
)
|
|
|
|
|
|
|
|
// KubeSuite holds the shared state for the Kubernetes integration test
// suite: a client connected to the target cluster, the current OS user,
// a reusable key pair, and the kubeconfig used to reach the cluster.
type KubeSuite struct {
	*kubernetes.Clientset

	// me is the OS user running the tests; its username is used as the
	// Teleport login in the test roles.
	me *user.User
	// priv/pub pair to avoid re-generating it
	priv []byte
	pub  []byte

	// kubeConfigPath is a path to valid kubeconfig
	kubeConfigPath string

	// kubeConfig is a kubernetes config struct
	kubeConfig *rest.Config

	// log defines the test-specific logger
	log utils.Logger
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func newKubeSuite(t *testing.T) *KubeSuite {
|
2020-12-07 14:35:15 +00:00
|
|
|
testEnabled := os.Getenv(teleport.KubeRunTests)
|
|
|
|
if ok, _ := strconv.ParseBool(testEnabled); !ok {
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Skip("Skipping Kubernetes test suite.")
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
suite := &KubeSuite{
|
|
|
|
kubeConfigPath: os.Getenv(teleport.EnvKubeConfig),
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NotEmpty(t, suite.kubeConfigPath, "This test requires path to valid kubeconfig.")
|
2020-12-07 14:35:15 +00:00
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
var err error
|
|
|
|
SetTestTimeouts(time.Millisecond * time.Duration(100))
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
suite.priv, suite.pub, err = testauthority.New().GenerateKeyPair("")
|
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
suite.me, err = user.Current()
|
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// close & re-open stdin because 'go test' runs with os.stdin connected to /dev/null
|
|
|
|
stdin, err := os.Open("/dev/tty")
|
2020-12-07 14:35:15 +00:00
|
|
|
if err == nil {
|
2018-06-10 00:21:14 +00:00
|
|
|
os.Stdin.Close()
|
|
|
|
os.Stdin = stdin
|
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Cleanup(func() {
|
|
|
|
var err error
|
|
|
|
// restore os.Stdin to its original condition: connected to /dev/null
|
|
|
|
os.Stdin.Close()
|
|
|
|
os.Stdin, err = os.Open("/dev/null")
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
suite.Clientset, suite.kubeConfig, err = kubeutils.GetKubeClient(suite.kubeConfigPath)
|
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// Create test namespace and pod to run k8s commands against.
|
2018-06-10 00:21:14 +00:00
|
|
|
ns := newNamespace(testNamespace)
|
2021-05-27 02:05:46 +00:00
|
|
|
_, err = suite.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
|
2018-06-10 00:21:14 +00:00
|
|
|
if err != nil {
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, errors.IsAlreadyExists(err), "Failed to create namespace: %v:", err)
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
2020-10-07 20:58:07 +00:00
|
|
|
p := newPod(testNamespace, testPod)
|
2021-05-27 02:05:46 +00:00
|
|
|
_, err = suite.CoreV1().Pods(testNamespace).Create(context.Background(), p, metav1.CreateOptions{})
|
2020-10-07 20:58:07 +00:00
|
|
|
if err != nil {
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, errors.IsAlreadyExists(err), "Failed to create test pod: %v", err)
|
2020-10-07 20:58:07 +00:00
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
|
|
|
|
return suite
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2020-11-09 19:40:02 +00:00
|
|
|
// For this test suite to work, the target Kubernetes cluster must have the
// following RBAC objects configured:
// https://github.com/gravitational/teleport/blob/master/fixtures/ci-teleport-rbac/ci-teleport.yaml
const testImpersonationGroup = "teleport-ci-test-group"
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
// kubeIntegrationTest is the signature shared by all Kubernetes integration
// test functions; they are adapted into standard subtests via bind.
type kubeIntegrationTest func(t *testing.T, suite *KubeSuite)
|
2020-10-07 20:58:07 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func (s *KubeSuite) bind(test kubeIntegrationTest) func(t *testing.T) {
|
|
|
|
return func(t *testing.T) {
|
Attempts to make CI integration test logs more useful (#9626)
Actually tracking down the cause of a failure in the integration tests can
be hard:
* It's hard to get an overall summary of what failed
* The tests sometimes emit no output before timing out, meaning any
diagnostic info is lost
* The emitted logs are too voluminous for a human to parse
* The emitted logs can present information out of order
* It's often hard to tell where the output from one test ends
and the next one begins
This patch attempts to address these concerns without attempting to rewrite
any of the underlying teleport logging.
* It improves the render-tests script to (optionally) report progress per-
test, rather than on a per-package basis. My working hypothesis on the
tests that time out with no output is that go test ./integration is
waiting for the entire set of integration tests tests to be complete
before reporting success or failure. Reporting on a per-test cycle gives
faster feedback and means that any timed-out builds should give at least
some idea of where they are stuck.
* Adds the render-tests filter to the integration and integration-root make
targets. This will show an overall summary of test results, as well as
- Discarding log output from passing tests to increase signal-to-noise
ratio, and
- Strongly delimiting the output from each failed test, making failures
easier to find.
* Removes the notion of a failure-only logger in favour of post-processing
the log events with render-tests. The failure-only logger catches log
output from the tests and only forwards it to the console if the test
fails. Unfortunately, not all log output is guaranteed to pass through
this logger (some teleport packages do not honour the configured logger,
and reports from the go race detector certainly don't), meaning some
output is presented at the time it happens, and other output is batched
and displayed at the end of the test. This makes working out what
happened where harder than it need be.
In addition, this patch also promotes the render-tests script into a fully-
fledged program, with appropriate makefile targets, make clean support, etc.
It is now also more robust in the face on non-JSON output from go test
(which happens if a package fails to compile).
2022-01-04 23:42:07 +00:00
|
|
|
s.log = utils.NewLoggerForTests()
|
2021-05-27 02:05:46 +00:00
|
|
|
os.RemoveAll(profile.FullProfilePath(""))
|
|
|
|
t.Cleanup(func() { s.log = nil })
|
|
|
|
test(t, s)
|
|
|
|
}
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
func TestKube(t *testing.T) {
|
|
|
|
suite := newKubeSuite(t)
|
|
|
|
t.Run("Exec", suite.bind(testKubeExec))
|
|
|
|
t.Run("Deny", suite.bind(testKubeDeny))
|
|
|
|
t.Run("PortForward", suite.bind(testKubePortForward))
|
|
|
|
t.Run("TrustedClustersClientCert", suite.bind(testKubeTrustedClustersClientCert))
|
|
|
|
t.Run("TrustedClustersSNI", suite.bind(testKubeTrustedClustersSNI))
|
|
|
|
t.Run("Disconnect", suite.bind(testKubeDisconnect))
|
2020-12-07 14:35:15 +00:00
|
|
|
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// TestKubeExec tests kubernetes Exec command set
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeExec(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
2020-12-07 14:35:15 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
teleport := NewInstance(InstanceConfig{
|
2018-06-10 00:21:14 +00:00
|
|
|
ClusterName: Site,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2020-10-07 20:58:07 +00:00
|
|
|
kubeGroups := []string{testImpersonationGroup}
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeUsers := []string{"alice@example.com"}
|
2021-06-10 18:52:10 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-10 00:21:14 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
2018-06-10 00:21:14 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
// impersonating client requests will be denied if the headers
|
|
|
|
// are referencing users or groups not allowed by the existing roles
|
2019-07-16 00:40:43 +00:00
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2019-07-16 00:40:43 +00:00
|
|
|
username: username,
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeUsers: kubeUsers,
|
|
|
|
kubeGroups: kubeGroups,
|
2020-10-07 21:01:33 +00:00
|
|
|
impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{testImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// try get request to fetch a pod
|
2020-09-24 17:59:48 +00:00
|
|
|
ctx := context.Background()
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
// scoped client requests will be allowed, as long as the impersonation headers
|
|
|
|
// are referencing users and groups allowed by existing roles
|
|
|
|
scopedProxyClient, scopedProxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2020-04-30 00:27:47 +00:00
|
|
|
username: username,
|
|
|
|
kubeUsers: kubeUsers,
|
|
|
|
kubeGroups: kubeGroups,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
impersonation: &rest.ImpersonationConfig{
|
2021-10-05 21:04:18 +00:00
|
|
|
UserName: role.GetKubeUsers(types.Allow)[0],
|
|
|
|
Groups: role.GetKubeGroups(types.Allow),
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = scopedProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impesrsonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// set up kube configuration using proxy
|
2020-04-30 00:27:47 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2020-04-30 00:27:47 +00:00
|
|
|
username: username,
|
|
|
|
kubeUsers: kubeUsers,
|
|
|
|
kubeGroups: kubeGroups,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-09-26 00:11:51 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
2018-06-10 00:21:14 +00:00
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-26 00:11:51 +00:00
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, testNamespace, string(data))
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-06-10 00:21:14 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
2021-05-27 02:05:46 +00:00
|
|
|
case event := <-teleport.UploadEventsC:
|
2018-06-10 00:21:14 +00:00
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := teleport.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-03-11 03:25:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kubernetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* If email.local encounters an invalid email address,
it will interpolate to an empty value, which will be removed from the
resulting output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the user's role set does not include the actual user name, it will be rejected
(otherwise there would be no way to exclude the user from the list).
* If the `kubernetes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// scoped kube exec is allowed, impersonation headers
|
|
|
|
// are allowed by the role
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(scopedProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestKubeDeny makes sure that deny rule conflicting with allow
|
2020-05-06 17:11:06 +00:00
|
|
|
// rule takes precedence
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeDeny(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
teleport := NewInstance(InstanceConfig{
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
ClusterName: Site,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2020-10-07 20:58:07 +00:00
|
|
|
kubeGroups := []string{testImpersonationGroup}
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeUsers := []string{"alice@example.com"}
|
2021-06-10 18:52:10 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
2021-06-04 20:29:31 +00:00
|
|
|
Deny: types.RoleConditions{
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
|
|
|
KubeUsers: kubeUsers,
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for kuberenetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* In case if the email.local will encounter invalid email address,
it will interpolate to empty value, will be removed from resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the users' role set does not include actual user name, it will be rejected,
otherwise there will be no way to exclude the user from the list).
* If the `kuberentes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// set up kube configuration using proxy
|
2020-04-30 00:27:47 +00:00
|
|
|
proxyClient, _, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2020-04-30 00:27:47 +00:00
|
|
|
username: username,
|
|
|
|
kubeUsers: kubeUsers,
|
|
|
|
kubeGroups: kubeGroups,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
Adds support for kubernetes_users, extend interpolation (#3404) (#3418)
This commit fixes #3369, refs #3374
It adds support for the kubernetes_users section in roles,
allowing Teleport proxy to impersonate user identities.
It also extends variable interpolation syntax by adding
suffix and prefix to variables and function `email.local`:
Example:
```yaml
kind: role
version: v3
metadata:
name: admin
spec:
allow:
# extract email local part from the email claim
logins: ['{{email.local(external.email)}}']
# impersonate a kubernetes user with IAM prefix
kubernetes_users: ['IAM#{{external.email}}']
# the deny section uses the identical format as the 'allow' section.
# the deny rules always override allow rules.
deny: {}
```
Some notes on email.local behavior:
* This is the only function supported in the template variables for now
* If email.local encounters an invalid email address, it will
interpolate to an empty value, which will be removed from the resulting
output.
Changes in impersonation behavior:
* By default, if no kubernetes_users is set, which is a majority of cases,
user will impersonate themselves, which is the backwards-compatible behavior.
* As long as at least one `kubernetes_users` is set, the forwarder will start
limiting the list of users allowed by the client to impersonate.
* If the user's role set does not include the actual user name, it will be rejected
(otherwise there would be no way to exclude the user from the list).
* If the `kubernetes_users` role set includes only one user
(quite frequently that's the real intent), teleport will default to it,
otherwise it will refuse to select.
This will enable the use case when `kubernetes_users` has just one field to
link the user identity with the IAM role, for example `IAM#{{external.email}}`
* Previous versions of the forwarding proxy were denying all external
impersonation headers, this commit allows 'Impersonate-User' and
'Impersonate-Group' header values that are allowed by role set.
* Previous versions of the forwarding proxy ignored 'Deny' section of the roles
when applied to impersonation, this commit fixes that - roles with deny
kubernetes_users and kubernetes_groups section will not allow
impersonation of those users and groups.
2020-03-08 00:32:37 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-09-24 17:59:48 +00:00
|
|
|
ctx := context.Background()
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestKubePortForward tests kubernetes port forwarding
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubePortForward(t *testing.T, suite *KubeSuite) {
|
|
|
|
tconf := suite.teleKubeConfig(Host)
|
2020-12-07 14:35:15 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
teleport := NewInstance(InstanceConfig{
|
2018-06-10 00:21:14 +00:00
|
|
|
ClusterName: Site,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2020-10-07 20:58:07 +00:00
|
|
|
kubeGroups := []string{testImpersonationGroup}
|
2021-06-10 18:52:10 +00:00
|
|
|
role, err := types.NewRole("kubemaster", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-10 00:21:14 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: kubeGroups,
|
2018-06-10 00:21:14 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
teleport.AddUserWithRole(username, role)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = teleport.CreateEx(t, nil, tconf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
err = teleport.Start()
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer teleport.StopAll()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
// set up kube configuration using proxy
|
2020-04-30 00:27:47 +00:00
|
|
|
_, proxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2020-04-30 00:27:47 +00:00
|
|
|
username: username,
|
|
|
|
kubeGroups: kubeGroups,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-10 00:21:14 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort := ports.Pop()
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
|
|
|
podName: testPod,
|
|
|
|
podNamespace: testNamespace,
|
2018-06-10 00:21:14 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2018-06-10 00:21:14 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2018-06-10 00:21:14 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// impersonating client requests will be denied
|
2019-07-16 00:40:43 +00:00
|
|
|
_, impersonatingProxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
2021-05-27 02:05:46 +00:00
|
|
|
t: teleport,
|
2019-07-16 00:40:43 +00:00
|
|
|
username: username,
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeGroups: kubeGroups,
|
2020-10-07 21:01:33 +00:00
|
|
|
impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{testImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort = ports.Pop()
|
2019-03-11 03:25:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
|
|
|
podName: testPod,
|
|
|
|
podNamespace: testNamespace,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2018-06-10 00:21:14 +00:00
|
|
|
}
|
|
|
|
|
2019-07-16 00:40:43 +00:00
|
|
|
// TestKubeTrustedClustersClientCert tests scenario with trusted clusters
|
|
|
|
// using metadata encoded in the certificate
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeTrustedClustersClientCert(t *testing.T, suite *KubeSuite) {
|
2020-06-15 21:24:34 +00:00
|
|
|
ctx := context.Background()
|
2019-07-16 00:40:43 +00:00
|
|
|
clusterMain := "cluster-main"
|
2021-05-27 02:05:46 +00:00
|
|
|
mainConf := suite.teleKubeConfig(Host)
|
2020-06-08 22:20:50 +00:00
|
|
|
// Main cluster doesn't need a kubeconfig to forward requests to auxiliary
|
|
|
|
// cluster.
|
|
|
|
mainConf.Proxy.Kube.KubeconfigPath = ""
|
2019-07-16 00:40:43 +00:00
|
|
|
main := NewInstance(InstanceConfig{
|
|
|
|
ClusterName: clusterMain,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// main cluster has a role and user called main-kube
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2020-10-07 20:58:07 +00:00
|
|
|
mainKubeGroups := []string{testImpersonationGroup}
|
2021-06-10 18:52:10 +00:00
|
|
|
mainRole, err := types.NewRole("main-kube", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-07-16 00:40:43 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: mainKubeGroups,
|
2019-07-16 00:40:43 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
main.AddUserWithRole(username, mainRole)
|
|
|
|
|
|
|
|
clusterAux := "cluster-aux"
|
2021-05-27 02:05:46 +00:00
|
|
|
auxConf := suite.teleKubeConfig(Host)
|
2019-07-16 00:40:43 +00:00
|
|
|
aux := NewInstance(InstanceConfig{
|
|
|
|
ClusterName: clusterAux,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
lib.SetInsecureDevMode(true)
|
|
|
|
defer lib.SetInsecureDevMode(false)
|
|
|
|
|
|
|
|
mainConf.Proxy.Kube.Enabled = true
|
2021-06-18 19:57:29 +00:00
|
|
|
err = main.CreateEx(t, nil, mainConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = aux.CreateEx(t, nil, auxConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// auxiliary cluster has a role aux-kube
|
|
|
|
// connect aux cluster to main cluster
|
|
|
|
// using trusted clusters, so remote user will be allowed to assume
|
|
|
|
// role specified by mapping remote role "aux-kube" to local role "main-kube"
|
2020-04-30 00:27:47 +00:00
|
|
|
auxKubeGroups := []string{teleport.TraitInternalKubeGroupsVariable}
|
2021-06-10 18:52:10 +00:00
|
|
|
auxRole, err := types.NewRole("aux-kube", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-07-16 00:40:43 +00:00
|
|
|
Logins: []string{username},
|
|
|
|
// Note that main cluster can pass it's kubernetes groups
|
|
|
|
// to the remote cluster, and remote cluster
|
|
|
|
// can choose to use them by using special variable
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: auxKubeGroups,
|
2019-07-16 00:40:43 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-06-15 21:24:34 +00:00
|
|
|
err = aux.Process.GetAuthServer().UpsertRole(ctx, auxRole)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
trustedClusterToken := "trusted-clsuter-token"
|
2021-03-24 01:26:52 +00:00
|
|
|
err = main.Process.GetAuthServer().UpsertToken(ctx,
|
2021-06-04 20:29:31 +00:00
|
|
|
services.MustCreateProvisionToken(trustedClusterToken, []types.SystemRole{types.RoleTrustedCluster}, time.Time{}))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2021-09-13 09:54:49 +00:00
|
|
|
trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
|
2019-07-16 00:40:43 +00:00
|
|
|
{Remote: mainRole.GetName(), Local: []string{auxRole.GetName()}},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// start both clusters
|
|
|
|
err = main.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer main.StopAll()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
err = aux.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer aux.StopAll()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try and upsert a trusted cluster
|
|
|
|
var upsertSuccess bool
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
|
2020-06-15 21:24:34 +00:00
|
|
|
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(ctx, trustedCluster)
|
2019-07-16 00:40:43 +00:00
|
|
|
if err != nil {
|
|
|
|
if trace.IsConnectionProblem(err) {
|
|
|
|
log.Debugf("retrying on connection problem: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("got non connection problem %v", err)
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
upsertSuccess = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// make sure we upsert a trusted cluster
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, upsertSuccess)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
|
|
|
|
abortTime := time.Now().Add(time.Second * 10)
|
2021-05-27 02:05:46 +00:00
|
|
|
for len(checkGetClusters(t, main.Tunnel)) < 2 && len(checkGetClusters(t, aux.Tunnel)) < 2 {
|
2019-07-16 00:40:43 +00:00
|
|
|
time.Sleep(time.Millisecond * 2000)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.False(t, time.Now().After(abortTime), "two clusters do not see each other: tunnels are not working")
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// impersonating client requests will be denied
|
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
|
|
|
t: main,
|
|
|
|
username: username,
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeGroups: mainKubeGroups,
|
2020-10-07 21:01:33 +00:00
|
|
|
impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{testImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
routeToCluster: clusterAux,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// set up kube configuration using main proxy
|
|
|
|
proxyClient, proxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
|
|
|
t: main,
|
|
|
|
username: username,
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeGroups: mainKubeGroups,
|
2019-07-16 00:40:43 +00:00
|
|
|
routeToCluster: clusterAux,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, pod.Namespace, string(data))
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case event := <-main.UploadEventsC:
|
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := main.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-07-16 00:40:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-07-16 00:40:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort := ports.Pop()
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-07-16 00:40:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2019-07-16 00:40:43 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2019-07-16 00:40:43 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// impersonating client requests will be denied
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort = ports.Pop()
|
2019-07-16 00:40:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-07-16 00:40:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-07-16 00:40:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-07-16 00:40:43 +00:00
|
|
|
}
|
|
|
|
|
2020-10-07 21:01:33 +00:00
|
|
|
// TestKubeTrustedClustersSNI tests scenario with trusted clusters
|
2019-07-16 00:40:43 +00:00
|
|
|
// using SNI-forwarding
|
|
|
|
// DELETE IN(4.3.0)
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeTrustedClustersSNI(t *testing.T, suite *KubeSuite) {
|
2020-06-15 21:24:34 +00:00
|
|
|
ctx := context.Background()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
clusterMain := "cluster-main"
|
2021-05-27 02:05:46 +00:00
|
|
|
mainConf := suite.teleKubeConfig(Host)
|
2018-06-18 00:53:02 +00:00
|
|
|
main := NewInstance(InstanceConfig{
|
|
|
|
ClusterName: clusterMain,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// main cluster has a role and user called main-kube
|
2021-05-27 02:05:46 +00:00
|
|
|
username := suite.me.Username
|
2020-10-07 21:01:33 +00:00
|
|
|
mainKubeGroups := []string{testImpersonationGroup}
|
2021-06-10 18:52:10 +00:00
|
|
|
mainRole, err := types.NewRole("main-kube", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2018-06-18 00:53:02 +00:00
|
|
|
Logins: []string{username},
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: mainKubeGroups,
|
2018-06-18 00:53:02 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
main.AddUserWithRole(username, mainRole)
|
|
|
|
|
|
|
|
clusterAux := "cluster-aux"
|
2021-05-27 02:05:46 +00:00
|
|
|
auxConf := suite.teleKubeConfig(Host)
|
2018-06-18 00:53:02 +00:00
|
|
|
aux := NewInstance(InstanceConfig{
|
|
|
|
ClusterName: clusterAux,
|
|
|
|
HostID: HostID,
|
|
|
|
NodeName: Host,
|
2021-05-27 02:05:46 +00:00
|
|
|
Priv: suite.priv,
|
|
|
|
Pub: suite.pub,
|
|
|
|
log: suite.log,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
lib.SetInsecureDevMode(true)
|
|
|
|
defer lib.SetInsecureDevMode(false)
|
|
|
|
|
2018-08-02 00:25:16 +00:00
|
|
|
// route all the traffic to the aux cluster
|
|
|
|
mainConf.Proxy.Kube.Enabled = true
|
2019-03-11 03:25:43 +00:00
|
|
|
// ClusterOverride forces connection to be routed
|
|
|
|
// to cluster aux
|
2018-08-02 00:25:16 +00:00
|
|
|
mainConf.Proxy.Kube.ClusterOverride = clusterAux
|
2021-06-18 19:57:29 +00:00
|
|
|
err = main.CreateEx(t, nil, mainConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2021-06-18 19:57:29 +00:00
|
|
|
err = aux.CreateEx(t, nil, auxConf)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// auxiliary cluster has a role aux-kube
|
|
|
|
// connect aux cluster to main cluster
|
|
|
|
// using trusted clusters, so remote user will be allowed to assume
|
|
|
|
// role specified by mapping remote role "aux-kube" to local role "main-kube"
|
2020-04-30 00:27:47 +00:00
|
|
|
auxKubeGroups := []string{teleport.TraitInternalKubeGroupsVariable}
|
2021-06-10 18:52:10 +00:00
|
|
|
auxRole, err := types.NewRole("aux-kube", types.RoleSpecV4{
|
2021-06-04 20:29:31 +00:00
|
|
|
Allow: types.RoleConditions{
|
2019-01-17 02:55:59 +00:00
|
|
|
Logins: []string{username},
|
|
|
|
// Note that main cluster can pass it's kubernetes groups
|
|
|
|
// to the remote cluster, and remote cluster
|
|
|
|
// can choose to use them by using special variable
|
2020-04-30 00:27:47 +00:00
|
|
|
KubeGroups: auxKubeGroups,
|
2018-06-18 00:53:02 +00:00
|
|
|
},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-06-15 21:24:34 +00:00
|
|
|
err = aux.Process.GetAuthServer().UpsertRole(ctx, auxRole)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-10-07 21:01:33 +00:00
|
|
|
trustedClusterToken := "trusted-cluster-token"
|
2021-03-24 01:26:52 +00:00
|
|
|
err = main.Process.GetAuthServer().UpsertToken(ctx,
|
2021-06-04 20:29:31 +00:00
|
|
|
services.MustCreateProvisionToken(trustedClusterToken, []types.SystemRole{types.RoleTrustedCluster}, time.Time{}))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2021-09-13 09:54:49 +00:00
|
|
|
trustedCluster := main.AsTrustedCluster(trustedClusterToken, types.RoleMap{
|
2018-06-18 00:53:02 +00:00
|
|
|
{Remote: mainRole.GetName(), Local: []string{auxRole.GetName()}},
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// start both clusters
|
|
|
|
err = main.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer main.StopAll()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
err = aux.Start()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2020-04-17 16:27:34 +00:00
|
|
|
defer aux.StopAll()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// try and upsert a trusted cluster
|
|
|
|
var upsertSuccess bool
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
|
2020-06-15 21:24:34 +00:00
|
|
|
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(ctx, trustedCluster)
|
2018-06-18 00:53:02 +00:00
|
|
|
if err != nil {
|
|
|
|
if trace.IsConnectionProblem(err) {
|
|
|
|
log.Debugf("retrying on connection problem: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("got non connection problem %v", err)
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
upsertSuccess = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
// make sure we upsert a trusted cluster
|
2021-05-27 02:05:46 +00:00
|
|
|
require.True(t, upsertSuccess)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
|
|
|
|
abortTime := time.Now().Add(time.Second * 10)
|
2021-05-27 02:05:46 +00:00
|
|
|
for len(checkGetClusters(t, main.Tunnel)) < 2 && len(checkGetClusters(t, aux.Tunnel)) < 2 {
|
2018-06-18 00:53:02 +00:00
|
|
|
time.Sleep(time.Millisecond * 2000)
|
|
|
|
if time.Now().After(abortTime) {
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("two clusters do not see each other: tunnels are not working")
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating client requests will be denied
|
2019-07-16 00:40:43 +00:00
|
|
|
impersonatingProxyClient, impersonatingProxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
|
|
|
t: main,
|
|
|
|
username: username,
|
2020-04-30 00:27:47 +00:00
|
|
|
kubeGroups: mainKubeGroups,
|
2020-10-07 21:01:33 +00:00
|
|
|
impersonation: &rest.ImpersonationConfig{UserName: "bob", Groups: []string{testImpersonationGroup}},
|
2019-07-16 00:40:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
_, err = impersonatingProxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2018-06-18 00:53:02 +00:00
|
|
|
// set up kube configuration using main proxy
|
2020-04-30 00:27:47 +00:00
|
|
|
proxyClient, proxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
|
|
|
|
t: main,
|
|
|
|
username: username,
|
|
|
|
kubeGroups: mainKubeGroups,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// try get request to fetch available pods
|
2020-10-07 20:58:07 +00:00
|
|
|
pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
out := &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-09-26 00:11:51 +00:00
|
|
|
command: []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
|
2018-06-18 00:53:02 +00:00
|
|
|
stdout: out,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-09-26 00:11:51 +00:00
|
|
|
|
2018-06-18 00:53:02 +00:00
|
|
|
data := out.Bytes()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, pod.Namespace, string(data))
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term := NewTerminal(250)
|
|
|
|
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(proxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2018-06-18 00:53:02 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2018-06-18 00:53:02 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// verify the session stream output
|
|
|
|
sessionStream := out.String()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Contains(t, sessionStream, "echo hi")
|
|
|
|
require.Contains(t, sessionStream, "exit")
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
// verify traffic capture and upload, wait for the upload to hit
|
|
|
|
var sessionID string
|
|
|
|
timeoutC := time.After(10 * time.Second)
|
|
|
|
loop:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case event := <-main.UploadEventsC:
|
|
|
|
sessionID = event.SessionID
|
|
|
|
break loop
|
|
|
|
case <-timeoutC:
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for upload of session to complete")
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// read back the entire session and verify that it matches the stated output
|
2021-06-08 19:08:55 +00:00
|
|
|
capturedStream, err := main.Process.GetAuthServer().GetSessionChunk(apidefaults.Namespace, session.ID(sessionID), 0, events.MaxChunkBytes)
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Equal(t, sessionStream, string(capturedStream))
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating kube exec should be denied
|
|
|
|
// interactive command, allocate pty
|
|
|
|
term = NewTerminal(250)
|
|
|
|
term.Type("\aecho hi\n\r\aexit\n\r\a")
|
|
|
|
out = &bytes.Buffer{}
|
|
|
|
err = kubeExec(impersonatingProxyClientConfig, kubeExecArgs{
|
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
2020-04-30 00:27:47 +00:00
|
|
|
container: pod.Spec.Containers[0].Name,
|
2019-03-11 03:25:43 +00:00
|
|
|
command: []string{"/bin/sh"},
|
|
|
|
stdout: out,
|
|
|
|
tty: true,
|
2020-04-17 15:55:48 +00:00
|
|
|
stdin: term,
|
2019-03-11 03:25:43 +00:00
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
// forward local port to target port 80 of the nginx container
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort := ports.Pop()
|
2018-06-18 00:53:02 +00:00
|
|
|
|
|
|
|
forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2018-06-18 00:53:02 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
forwarderCh := make(chan error)
|
|
|
|
|
|
|
|
go func() { forwarderCh <- forwarder.ForwardPorts() }()
|
|
|
|
defer func() {
|
|
|
|
require.NoError(t, <-forwarderCh, "Forward ports exited with error")
|
2018-06-18 00:53:02 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(5 * time.Second):
|
2021-05-27 02:05:46 +00:00
|
|
|
t.Fatalf("Timeout waiting for port forwarding.")
|
2018-06-18 00:53:02 +00:00
|
|
|
case <-forwarder.readyC:
|
|
|
|
}
|
|
|
|
defer close(forwarder.stopC)
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", localPort))
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
|
|
require.NoError(t, resp.Body.Close())
|
2018-06-18 00:53:02 +00:00
|
|
|
|
2019-03-11 03:25:43 +00:00
|
|
|
// impersonating client requests will be denied
|
2021-04-29 16:39:43 +00:00
|
|
|
localPort = ports.Pop()
|
2019-03-11 03:25:43 +00:00
|
|
|
impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
|
2020-10-07 20:58:07 +00:00
|
|
|
ports: []string{fmt.Sprintf("%v:80", localPort)},
|
2019-03-11 03:25:43 +00:00
|
|
|
podName: pod.Name,
|
|
|
|
podNamespace: pod.Namespace,
|
|
|
|
})
|
2021-05-27 02:05:46 +00:00
|
|
|
require.NoError(t, err)
|
2019-03-11 03:25:43 +00:00
|
|
|
|
|
|
|
// This request should be denied
|
|
|
|
err = impersonatingForwarder.ForwardPorts()
|
2021-05-27 02:05:46 +00:00
|
|
|
require.Error(t, err)
|
|
|
|
require.Regexp(t, ".*impersonation request has been denied.*", err.Error())
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2018-06-18 00:53:02 +00:00
|
|
|
}
|
|
|
|
|
2019-03-24 20:14:34 +00:00
|
|
|
// TestKubeDisconnect tests kubernetes session disconnects
|
2021-05-27 02:05:46 +00:00
|
|
|
func testKubeDisconnect(t *testing.T, suite *KubeSuite) {
|
2019-03-24 20:14:34 +00:00
|
|
|
testCases := []disconnectTestCase{
|
|
|
|
{
|
2021-06-04 20:29:31 +00:00
|
|
|
options: types.RoleOptions{
|
|
|
|
ClientIdleTimeout: types.NewDuration(500 * time.Millisecond),
|
2019-03-24 20:14:34 +00:00
|
|
|
},
|
|
|
|
disconnectTimeout: 2 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
2021-06-04 20:29:31 +00:00
|
|
|
options: types.RoleOptions{
|
|
|
|
DisconnectExpiredCert: types.NewBool(true),
|
|
|
|
MaxSessionTTL: types.NewDuration(3 * time.Second),
|
2019-03-24 20:14:34 +00:00
|
|
|
},
|
|
|
|
disconnectTimeout: 6 * time.Second,
|
|
|
|
},
|
|
|
|
}
|
2018-12-12 00:22:44 +00:00
|
|
|
for i := 0; i < utils.GetIterations(); i++ {
|
2021-05-27 02:05:46 +00:00
|
|
|
for j, tc := range testCases {
|
|
|
|
t.Run(fmt.Sprintf("#%02d_iter_%d", j, i), func(t *testing.T) {
|
|
|
|
runKubeDisconnectTest(t, suite, tc)
|
|
|
|
})
|
2019-03-24 20:14:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// runKubeDisconnectTest runs a single kubernetes disconnect scenario: it
// spins up a teleport instance with the role options from tc, opens an
// interactive kube exec session, leaves it hanging, and verifies the session
// is forcibly closed within tc.disconnectTimeout.
func runKubeDisconnectTest(t *testing.T, suite *KubeSuite, tc disconnectTestCase) {
	tconf := suite.teleKubeConfig(Host)

	teleport := NewInstance(InstanceConfig{
		ClusterName: Site,
		HostID:      HostID,
		NodeName:    Host,
		Priv:        suite.priv,
		Pub:         suite.pub,
		log:         suite.log,
	})

	// create a role carrying the scenario-specific options (idle timeout or
	// expired-cert disconnect) for the suite's OS user
	username := suite.me.Username
	kubeGroups := []string{testImpersonationGroup}
	role, err := types.NewRole("kubemaster", types.RoleSpecV4{
		Options: tc.options,
		Allow: types.RoleConditions{
			Logins:     []string{username},
			KubeGroups: kubeGroups,
		},
	})
	require.NoError(t, err)
	teleport.AddUserWithRole(username, role)

	err = teleport.CreateEx(t, nil, tconf)
	require.NoError(t, err)

	err = teleport.Start()
	require.NoError(t, err)
	defer teleport.StopAll()

	// set up kube configuration using proxy
	proxyClient, proxyClientConfig, err := kubeProxyClient(kubeProxyConfig{
		t:          teleport,
		username:   username,
		kubeGroups: kubeGroups,
	})
	require.NoError(t, err)

	// try get request to fetch available pods
	ctx := context.Background()
	pod, err := proxyClient.CoreV1().Pods(testNamespace).Get(ctx, testPod, metav1.GetOptions{})
	require.NoError(t, err)

	// sanity check: a non-interactive exec through the proxy works before we
	// start the disconnect scenario
	out := &bytes.Buffer{}
	err = kubeExec(proxyClientConfig, kubeExecArgs{
		podName:      pod.Name,
		podNamespace: pod.Namespace,
		container:    pod.Spec.Containers[0].Name,
		command:      []string{"/bin/cat", "/var/run/secrets/kubernetes.io/serviceaccount/namespace"},
		stdout:       out,
	})
	require.NoError(t, err)

	data := out.Bytes()
	require.Equal(t, pod.Namespace, string(data))

	// interactive command, allocate pty
	term := NewTerminal(250)
	sessionCtx, sessionCancel := context.WithCancel(ctx)
	go func() {
		// sessionCancel signals the main goroutine that the exec session
		// has terminated (which is what this test is waiting for)
		defer sessionCancel()
		err := kubeExec(proxyClientConfig, kubeExecArgs{
			podName:      pod.Name,
			podNamespace: pod.Namespace,
			container:    pod.Spec.Containers[0].Name,
			command:      []string{"/bin/sh"},
			stdout:       term,
			tty:          true,
			stdin:        term,
		})
		require.NoError(t, err)
	}()

	// lets type something followed by "enter" and then hang the session
	require.NoError(t, enterInput(sessionCtx, term, "echo boring platapus\r\n", ".*boring platapus.*"))
	// let the idle timeout / cert expiry elapse, then give the server the
	// same window again to actually tear the session down
	time.Sleep(tc.disconnectTimeout)
	select {
	case <-time.After(tc.disconnectTimeout):
		t.Fatalf("timeout waiting for session to exit")
	case <-sessionCtx.Done():
		// session closed
	}
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// teleKubeConfig sets up teleport with kubernetes turned on
|
|
|
|
func (s *KubeSuite) teleKubeConfig(hostname string) *service.Config {
|
|
|
|
tconf := service.MakeDefaultConfig()
|
2020-12-07 14:35:15 +00:00
|
|
|
tconf.Console = nil
|
|
|
|
tconf.Log = s.log
|
2018-06-10 00:21:14 +00:00
|
|
|
tconf.SSH.Enabled = true
|
|
|
|
tconf.Proxy.DisableWebInterface = true
|
|
|
|
tconf.PollingPeriod = 500 * time.Millisecond
|
|
|
|
tconf.ClientTimeout = time.Second
|
|
|
|
tconf.ShutdownTimeout = 2 * tconf.ClientTimeout
|
|
|
|
|
|
|
|
// set kubernetes specific parameters
|
2018-08-02 00:25:16 +00:00
|
|
|
tconf.Proxy.Kube.Enabled = true
|
2021-04-29 16:39:43 +00:00
|
|
|
tconf.Proxy.Kube.ListenAddr.Addr = net.JoinHostPort(hostname, ports.Pop())
|
2019-03-11 03:25:43 +00:00
|
|
|
tconf.Proxy.Kube.KubeconfigPath = s.kubeConfigPath
|
2021-05-26 00:50:35 +00:00
|
|
|
tconf.Proxy.Kube.LegacyKubeProxy = true
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
return tconf
|
|
|
|
}
|
|
|
|
|
|
|
|
// tlsClientConfig returns TLS configuration for client
|
|
|
|
func tlsClientConfig(cfg *rest.Config) (*tls.Config, error) {
|
|
|
|
cert, err := tls.X509KeyPair(cfg.TLSClientConfig.CertData, cfg.TLSClientConfig.KeyData)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pool := x509.NewCertPool()
|
|
|
|
ok := pool.AppendCertsFromPEM(cfg.TLSClientConfig.CAData)
|
|
|
|
if !ok {
|
|
|
|
return nil, trace.BadParameter("failed to append certs from PEM")
|
|
|
|
}
|
|
|
|
|
|
|
|
tlsConfig := &tls.Config{
|
|
|
|
RootCAs: pool,
|
|
|
|
Certificates: []tls.Certificate{cert},
|
|
|
|
ClientAuth: tls.RequireAndVerifyClientCert,
|
|
|
|
}
|
|
|
|
tlsConfig.BuildNameToCertificate()
|
|
|
|
return tlsConfig, nil
|
|
|
|
}
|
|
|
|
|
2019-07-16 00:40:43 +00:00
|
|
|
// kubeProxyConfig bundles the parameters kubeProxyClient uses to mint a
// client certificate and build a kubernetes client that talks through the
// teleport proxy.
type kubeProxyConfig struct {
	// t is the teleport instance whose auth server signs the client cert.
	t *TeleInstance
	// username is the teleport user the certificate is issued for.
	username string
	// kubeUsers are kubernetes users encoded into the certificate identity.
	kubeUsers []string
	// kubeGroups are kubernetes groups encoded into the certificate identity.
	kubeGroups []string
	// impersonation, when set, is copied into the rest.Config so every
	// request carries kubernetes impersonation headers.
	impersonation *rest.ImpersonationConfig
	// routeToCluster is stamped into the identity; presumably used to route
	// requests to a particular cluster — see kubeProxyClient callers.
	routeToCluster string
	// customTLSServerName overrides the TLS ServerName used when dialing.
	customTLSServerName string
	// targetAddress, when non-empty, replaces the proxy kube listen address
	// as the host the client connects to.
	targetAddress utils.NetAddr
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// kubeProxyClient returns kubernetes client using local teleport proxy.
// It mints a short-lived client certificate for cfg.username (embedding the
// requested kubernetes users/groups and route-to-cluster) signed by the
// cluster's host CA, then builds a clientset pointed at the proxy's kube
// listener (or cfg.targetAddress, if set). The rest.Config is returned
// alongside the clientset so callers can build exec/port-forward requests.
func kubeProxyClient(cfg kubeProxyConfig) (*kubernetes.Clientset, *rest.Config, error) {
	authServer := cfg.t.Process.GetAuthServer()
	clusterName, err := authServer.GetClusterName()
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}

	// Fetch user info to get roles and max session TTL.
	user, err := authServer.GetUser(cfg.username, false)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	roles, err := services.FetchRoles(user.GetRoles(), authServer, user.GetTraits())
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	// cap the requested 10 minute cert lifetime by the roles' max session TTL
	ttl := roles.AdjustSessionTTL(10 * time.Minute)

	// load the host CA and its signing key to issue the client certificate
	ca, err := authServer.GetCertAuthority(types.CertAuthID{
		Type:       types.HostCA,
		DomainName: clusterName.GetClusterName(),
	}, true)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	caCert, signer, err := authServer.GetKeyStore().GetTLSCertAndSigner(ca)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	tlsCA, err := tlsca.FromCertAndSigner(caCert, signer)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	// generate a fresh key pair for the client certificate
	privPEM, _, err := authServer.GenerateKeyPair("")
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	priv, err := tlsca.ParsePrivateKeyPEM(privPEM)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}

	// encode the teleport identity (user, roles, kube users/groups, route)
	// into the certificate subject
	id := tlsca.Identity{
		Username:         cfg.username,
		Groups:           user.GetRoles(),
		KubernetesUsers:  cfg.kubeUsers,
		KubernetesGroups: cfg.kubeGroups,
		RouteToCluster:   cfg.routeToCluster,
	}
	subj, err := id.Subject()
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	cert, err := tlsCA.GenerateCertificate(tlsca.CertificateRequest{
		Clock:     authServer.GetClock(),
		PublicKey: priv.Public(),
		Subject:   subj,
		NotAfter:  authServer.GetClock().Now().Add(ttl),
	})
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}

	tlsClientConfig := rest.TLSClientConfig{
		CAData:     ca.GetActiveKeys().TLS[0].Cert,
		CertData:   cert,
		KeyData:    privPEM,
		ServerName: cfg.customTLSServerName,
	}
	config := &rest.Config{
		Host:            "https://" + cfg.t.Config.Proxy.Kube.ListenAddr.Addr,
		TLSClientConfig: tlsClientConfig,
	}
	// allow tests to dial something other than the proxy's kube listener
	if !cfg.targetAddress.IsEmpty() {
		config.Host = "https://" + cfg.targetAddress.Addr
	}
	if cfg.impersonation != nil {
		config.Impersonate = *cfg.impersonation
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}

	return client, config, nil
}
|
|
|
|
|
|
|
|
const (
	// testNamespace is the kubernetes namespace the tests operate in.
	testNamespace = "teletest"
	// testPod is the name of the pod the tests exec into and port-forward to.
	testPod = "test-pod"
)
|
|
|
|
|
|
|
|
func newNamespace(name string) *v1.Namespace {
|
|
|
|
return &v1.Namespace{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: name,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-07 20:58:07 +00:00
|
|
|
func newPod(ns, name string) *v1.Pod {
|
|
|
|
return &v1.Pod{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Namespace: ns,
|
|
|
|
Name: name,
|
|
|
|
},
|
|
|
|
Spec: v1.PodSpec{
|
|
|
|
Containers: []v1.Container{{
|
|
|
|
Name: "nginx",
|
|
|
|
Image: "nginx:alpine",
|
|
|
|
}},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-10 00:21:14 +00:00
|
|
|
// kubeExecArgs describes a single exec request against the kubernetes API:
// the target pod/container, the command to run, and the attached streams.
type kubeExecArgs struct {
	// podName is the name of the target pod.
	podName string
	// podNamespace is the namespace of the target pod.
	podNamespace string
	// container is the container within the pod to exec into.
	container string
	// command is the command line to execute, one element per argument.
	command []string
	// stdout receives the command's standard output.
	stdout io.Writer
	// stderr receives standard error; only wired up when no tty is allocated.
	stderr io.Writer
	// stdin supplies the command's standard input.
	stdin io.Reader
	// tty requests a pseudo-terminal for the session.
	tty bool
}
|
|
|
|
|
|
|
|
// kubePortForwardArgs describes a port-forwarding request: the port
// mappings and the pod to forward to.
type kubePortForwardArgs struct {
	// ports holds "local:remote" port pairs, e.g. "8080:80".
	ports []string
	// podName is the name of the target pod.
	podName string
	// podNamespace is the namespace of the target pod.
	podNamespace string
}
|
|
|
|
|
|
|
|
// kubePortForwarder wraps the client-go port forwarder together with the
// channels used to stop it and to learn when it is ready.
type kubePortForwarder struct {
	*portforward.PortForwarder
	// stopC, when closed, tells the forwarder to shut down.
	stopC chan struct{}
	// readyC is closed by the forwarder once it is ready to forward.
	readyC chan struct{}
}
|
|
|
|
|
|
|
|
func newPortForwarder(kubeConfig *rest.Config, args kubePortForwardArgs) (*kubePortForwarder, error) {
|
|
|
|
u, err := url.Parse(kubeConfig.Host)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
u.Scheme = "https"
|
|
|
|
u.Path = fmt.Sprintf("/api/v1/namespaces/%v/pods/%v/portforward", args.podNamespace, args.podName)
|
|
|
|
|
|
|
|
// set up port forwarding request
|
|
|
|
tlsConfig, err := tlsClientConfig(kubeConfig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
2019-03-11 03:25:43 +00:00
|
|
|
|
2020-09-24 17:59:48 +00:00
|
|
|
upgradeRoundTripper := streamspdy.NewRoundTripper(tlsConfig, true, false)
|
2018-06-10 00:21:14 +00:00
|
|
|
client := &http.Client{
|
|
|
|
Transport: upgradeRoundTripper,
|
|
|
|
}
|
|
|
|
dialer := spdy.NewDialer(upgradeRoundTripper, client, "POST", u)
|
2019-03-11 03:25:43 +00:00
|
|
|
if kubeConfig.Impersonate.UserName != "" {
|
|
|
|
client.Transport = transport.NewImpersonatingRoundTripper(
|
|
|
|
transport.ImpersonationConfig{
|
|
|
|
UserName: kubeConfig.Impersonate.UserName,
|
|
|
|
Groups: kubeConfig.Impersonate.Groups,
|
|
|
|
},
|
|
|
|
upgradeRoundTripper)
|
|
|
|
}
|
2018-06-10 00:21:14 +00:00
|
|
|
|
|
|
|
stopC, readyC := make(chan struct{}), make(chan struct{})
|
2020-06-08 22:20:50 +00:00
|
|
|
fwd, err := portforward.New(dialer, args.ports, stopC, readyC, nil, nil)
|
2018-06-10 00:21:14 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
return &kubePortForwarder{PortForwarder: fwd, stopC: stopC, readyC: readyC}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// kubeExec executes command against kubernetes API server
|
|
|
|
func kubeExec(kubeConfig *rest.Config, args kubeExecArgs) error {
|
|
|
|
query := make(url.Values)
|
|
|
|
for _, arg := range args.command {
|
|
|
|
query.Add("command", arg)
|
|
|
|
}
|
|
|
|
if args.stdout != nil {
|
|
|
|
query.Set("stdout", "true")
|
|
|
|
}
|
|
|
|
if args.stdin != nil {
|
|
|
|
query.Set("stdin", "true")
|
|
|
|
}
|
|
|
|
// stderr channel is only set if there is no tty allocated
|
|
|
|
// otherwise k8s server gets confused
|
|
|
|
if !args.tty && args.stderr == nil {
|
|
|
|
args.stderr = ioutil.Discard
|
|
|
|
}
|
|
|
|
if args.stderr != nil && !args.tty {
|
|
|
|
query.Set("stderr", "true")
|
|
|
|
}
|
|
|
|
if args.tty {
|
|
|
|
query.Set("tty", "true")
|
|
|
|
}
|
|
|
|
query.Set("container", args.container)
|
|
|
|
u, err := url.Parse(kubeConfig.Host)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
u.Scheme = "https"
|
|
|
|
u.Path = fmt.Sprintf("/api/v1/namespaces/%v/pods/%v/exec", args.podNamespace, args.podName)
|
|
|
|
u.RawQuery = query.Encode()
|
|
|
|
executor, err := remotecommand.NewSPDYExecutor(kubeConfig, "POST", u)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
opts := remotecommand.StreamOptions{
|
|
|
|
Stdin: args.stdin,
|
|
|
|
Stdout: args.stdout,
|
|
|
|
Stderr: args.stderr,
|
|
|
|
Tty: args.tty,
|
|
|
|
}
|
|
|
|
return executor.Stream(opts)
|
|
|
|
}
|