mirror of
https://github.com/gravitational/teleport
synced 2024-10-21 09:44:51 +00:00
Fixed performance issues with the Web UI.
Fixed two issues that were causing performance problems with the Web UI. The first issue was that when an "Authorizer" was created at process startup by the Auth Service, it was bypassing the cache and always hitting the backend directly. All services have been updated to now use a cached access point. The second issue was that the Web UI was not using the local cache when fetching the list of roles for a user. The Web UI has been updated to now use the local cached access point.
This commit is contained in:
parent
5eb7cc071f
commit
2ccd36b2fe
|
@ -345,7 +345,7 @@ func serverWithAllowRules(t *testing.T, srv *TestAuthServer, allowRules []types.
|
|||
require.NoError(t, err)
|
||||
|
||||
localUser := LocalUser{Username: username, Identity: tlsca.Identity{Username: username}}
|
||||
authContext, err := contextForLocalUser(localUser, srv.AuthServer.Identity, srv.AuthServer.Access)
|
||||
authContext, err := contextForLocalUser(localUser, srv.AuthServer)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &ServerWithRoles{
|
||||
|
|
|
@ -320,7 +320,7 @@ func NewTestAuthServer(cfg TestAuthServerConfig) (*TestAuthServer, error) {
|
|||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
||||
srv.Authorizer, err = NewAuthorizer(srv.ClusterName, srv.AuthServer.Access, srv.AuthServer.Identity, srv.AuthServer.Trust)
|
||||
srv.Authorizer, err = NewAuthorizer(srv.ClusterName, srv.AuthServer)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
|
|
@ -48,20 +48,17 @@ func NewBuiltinRoleContext(role types.SystemRole) (*Context, error) {
|
|||
}
|
||||
|
||||
// NewAuthorizer returns new authorizer using backends
|
||||
func NewAuthorizer(clusterName string, access services.Access, identity services.UserGetter, trust services.Trust) (Authorizer, error) {
|
||||
func NewAuthorizer(clusterName string, accessPoint ReadAccessPoint) (Authorizer, error) {
|
||||
if clusterName == "" {
|
||||
return nil, trace.BadParameter("missing parameter clusterName")
|
||||
}
|
||||
if access == nil {
|
||||
return nil, trace.BadParameter("missing parameter access")
|
||||
if accessPoint == nil {
|
||||
return nil, trace.BadParameter("missing parameter accessPoint")
|
||||
}
|
||||
if identity == nil {
|
||||
return nil, trace.BadParameter("missing parameter identity")
|
||||
}
|
||||
if trust == nil {
|
||||
return nil, trace.BadParameter("missing parameter trust")
|
||||
}
|
||||
return &authorizer{clusterName: clusterName, access: access, identity: identity, trust: trust}, nil
|
||||
return &authorizer{
|
||||
clusterName: clusterName,
|
||||
accessPoint: accessPoint,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Authorizer authorizes identity and returns auth context
|
||||
|
@ -73,9 +70,7 @@ type Authorizer interface {
|
|||
// authorizer creates new local authorizer
|
||||
type authorizer struct {
|
||||
clusterName string
|
||||
access services.Access
|
||||
identity services.UserGetter
|
||||
trust services.Trust
|
||||
accessPoint ReadAccessPoint
|
||||
}
|
||||
|
||||
// AuthContext is authorization context
|
||||
|
@ -128,12 +123,15 @@ func (a *authorizer) fromUser(ctx context.Context, userI interface{}) (*Context,
|
|||
|
||||
// authorizeLocalUser returns authz context based on the username
|
||||
func (a *authorizer) authorizeLocalUser(u LocalUser) (*Context, error) {
|
||||
return contextForLocalUser(u, a.identity, a.access)
|
||||
return contextForLocalUser(u, a.accessPoint)
|
||||
}
|
||||
|
||||
// authorizeRemoteUser returns checker based on cert authority roles
|
||||
func (a *authorizer) authorizeRemoteUser(u RemoteUser) (*Context, error) {
|
||||
ca, err := a.trust.GetCertAuthority(types.CertAuthID{Type: types.UserCA, DomainName: u.ClusterName}, false)
|
||||
ca, err := a.accessPoint.GetCertAuthority(types.CertAuthID{
|
||||
Type: types.UserCA,
|
||||
DomainName: u.ClusterName,
|
||||
}, false)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
@ -170,7 +168,7 @@ func (a *authorizer) authorizeRemoteUser(u RemoteUser) (*Context, error) {
|
|||
}
|
||||
log.Debugf("Mapped roles %v of remote user %q to local roles %v and traits %v.",
|
||||
u.RemoteRoles, u.Username, roleNames, traits)
|
||||
checker, err := services.FetchRoles(roleNames, a.access, traits)
|
||||
checker, err := services.FetchRoles(roleNames, a.accessPoint, traits)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
@ -608,17 +606,17 @@ func contextForBuiltinRole(r BuiltinRole, recConfig types.SessionRecordingConfig
|
|||
}, nil
|
||||
}
|
||||
|
||||
func contextForLocalUser(u LocalUser, identity services.UserGetter, access services.Access) (*Context, error) {
|
||||
func contextForLocalUser(u LocalUser, accessPoint ReadAccessPoint) (*Context, error) {
|
||||
// User has to be fetched to check if it's a blocked username
|
||||
user, err := identity.GetUser(u.Username, false)
|
||||
user, err := accessPoint.GetUser(u.Username, false)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
roles, traits, err := services.ExtractFromIdentity(identity, u.Identity)
|
||||
roles, traits, err := services.ExtractFromIdentity(accessPoint, u.Identity)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
checker, err := services.FetchRoles(roles, access, traits)
|
||||
checker, err := services.FetchRoles(roles, accessPoint, traits)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
|
|
@ -115,7 +115,7 @@ func (process *TeleportProcess) initDatabaseService() (retErr error) {
|
|||
|
||||
clusterName := conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority]
|
||||
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, conn.Client, conn.Client, conn.Client)
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, accessPoint)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
|
|
@ -178,7 +178,7 @@ func (process *TeleportProcess) initKubernetesService(log *logrus.Entry, conn *C
|
|||
teleportClusterName := conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority]
|
||||
|
||||
// Create the kube server to service listener.
|
||||
authorizer, err := auth.NewAuthorizer(teleportClusterName, conn.Client, conn.Client, conn.Client)
|
||||
authorizer, err := auth.NewAuthorizer(teleportClusterName, accessPoint)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
|
|
@ -1198,7 +1198,7 @@ func (process *TeleportProcess) initAuthService() error {
|
|||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
authorizer, err := auth.NewAuthorizer(cfg.Auth.ClusterName.GetClusterName(), authServer.Access, authServer.Identity, authServer.Trust)
|
||||
authorizer, err := auth.NewAuthorizer(cfg.Auth.ClusterName.GetClusterName(), authServer)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
@ -2803,7 +2803,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
|
|||
|
||||
var kubeServer *kubeproxy.TLSServer
|
||||
if listeners.kube != nil && !process.Config.Proxy.DisableReverseTunnel {
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, conn.Client, conn.Client, conn.Client)
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, accessPoint)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
@ -2866,7 +2866,7 @@ func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
|
|||
// then routing them to a respective database server over the reverse tunnel
|
||||
// framework.
|
||||
if !listeners.db.Empty() && !process.Config.Proxy.DisableReverseTunnel {
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, conn.Client, conn.Client, conn.Client)
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, accessPoint)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
@ -3149,7 +3149,7 @@ func (process *TeleportProcess) initApps() {
|
|||
}
|
||||
clusterName := conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority]
|
||||
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, conn.Client, conn.Client, conn.Client)
|
||||
authorizer, err := auth.NewAuthorizer(clusterName, accessPoint)
|
||||
if err != nil {
|
||||
return trace.Wrap(err)
|
||||
}
|
||||
|
|
|
@ -200,7 +200,7 @@ func (s *Suite) SetUpTest(c *check.C) {
|
|||
), 0755)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
authorizer, err := auth.NewAuthorizer("cluster-name", s.authClient, s.authClient, s.authClient)
|
||||
authorizer, err := auth.NewAuthorizer("cluster-name", s.authClient)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
s.appServer, err = New(context.Background(), &Config{
|
||||
|
|
|
@ -636,7 +636,7 @@ func setupTestContext(ctx context.Context, t *testing.T, withDatabases ...withDa
|
|||
// Auth client/authorizer for database proxy.
|
||||
proxyAuthClient, err := testCtx.tlsServer.NewClient(auth.TestBuiltin(types.RoleProxy))
|
||||
require.NoError(t, err)
|
||||
proxyAuthorizer, err := auth.NewAuthorizer(testCtx.clusterName, proxyAuthClient, proxyAuthClient, proxyAuthClient)
|
||||
proxyAuthorizer, err := auth.NewAuthorizer(testCtx.clusterName, proxyAuthClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
// TLS config for database proxy and database service.
|
||||
|
@ -702,7 +702,7 @@ func (c *testContext) setupDatabaseServer(ctx context.Context, t *testing.T, hos
|
|||
require.NoError(t, err)
|
||||
|
||||
// Database service authorizer.
|
||||
dbAuthorizer, err := auth.NewAuthorizer(c.clusterName, c.authClient, c.authClient, c.authClient)
|
||||
dbAuthorizer, err := auth.NewAuthorizer(c.clusterName, c.authClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test database auth tokens generator.
|
||||
|
|
|
@ -507,7 +507,14 @@ func (h *Handler) getUserStatus(w http.ResponseWriter, r *http.Request, _ httpro
|
|||
// GET /webapi/sites/:site/context
|
||||
//
|
||||
func (h *Handler) getUserContext(w http.ResponseWriter, r *http.Request, p httprouter.Params, c *SessionContext, site reversetunnel.RemoteSite) (interface{}, error) {
|
||||
roleset, err := c.GetCertRoles()
|
||||
cn, err := h.cfg.AccessPoint.GetClusterName()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
if cn.GetClusterName() != site.GetName() {
|
||||
return nil, trace.BadParameter("endpoint only implemented for root cluster")
|
||||
}
|
||||
roleset, err := c.GetUserRoles()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
@ -1254,31 +1261,19 @@ type CreateSessionResponse struct {
|
|||
}
|
||||
|
||||
func newSessionResponse(ctx *SessionContext) (*CreateSessionResponse, error) {
|
||||
clt, err := ctx.GetClient()
|
||||
roleset, err := ctx.GetUserRoles()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
token, err := ctx.getToken()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
user, err := clt.GetUser(ctx.GetUser(), false)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
var roles services.RoleSet
|
||||
for _, roleName := range user.GetRoles() {
|
||||
role, err := clt.GetRole(context.TODO(), roleName)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
roles = append(roles, role)
|
||||
}
|
||||
_, err = roles.CheckLoginDuration(0)
|
||||
_, err = roleset.CheckLoginDuration(0)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
||||
token, err := ctx.getToken()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
return &CreateSessionResponse{
|
||||
TokenType: roundtrip.AuthBearer,
|
||||
Token: token.GetName(),
|
||||
|
@ -2411,12 +2406,12 @@ func (h *Handler) AuthenticateRequest(w http.ResponseWriter, r *http.Request, ch
|
|||
// ProxyWithRoles returns a reverse tunnel proxy verifying the permissions
|
||||
// of the given user.
|
||||
func (h *Handler) ProxyWithRoles(ctx *SessionContext) (reversetunnel.Tunnel, error) {
|
||||
roles, err := ctx.GetCertRoles()
|
||||
roleset, err := ctx.GetUserRoles()
|
||||
if err != nil {
|
||||
h.log.WithError(err).Warn("Failed to get client roles.")
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
return reversetunnel.NewTunnelWithRoles(h.cfg.Proxy, roles, h.cfg.AccessPoint), nil
|
||||
return reversetunnel.NewTunnelWithRoles(h.cfg.Proxy, roleset, h.cfg.AccessPoint), nil
|
||||
}
|
||||
|
||||
// ProxyHostPort returns the address of the proxy server using --proxy
|
||||
|
|
|
@ -55,9 +55,22 @@ import (
|
|||
// each web session generated for the user and provides
|
||||
// a basic client cache for remote auth server connections.
|
||||
type SessionContext struct {
|
||||
log logrus.FieldLogger
|
||||
user string
|
||||
clt *auth.Client
|
||||
log logrus.FieldLogger
|
||||
user string
|
||||
|
||||
// clt holds a connection to the root auth. Note that requests made using this
|
||||
// client are made with the identity of the user and are NOT cached.
|
||||
clt *auth.Client
|
||||
|
||||
// unsafeCachedAuthClient holds a read-only cache to root auth. Note this access
|
||||
// point cache is authenticated with the identity of the node, not of the
|
||||
// user. This is why its prefixed with "unsafe".
|
||||
//
|
||||
// This access point should only be used if the identity of the caller will
|
||||
// not affect the result of the RPC. For example, never use it to call
|
||||
// "GetNodes".
|
||||
unsafeCachedAuthClient auth.ReadAccessPoint
|
||||
|
||||
parent *sessionCache
|
||||
// resources is persistent resource store this context is bound to.
|
||||
// The store maintains a list of resources between session renewals
|
||||
|
@ -305,9 +318,9 @@ func (c *SessionContext) GetX509Certificate() (*x509.Certificate, error) {
|
|||
return tlsCert, nil
|
||||
}
|
||||
|
||||
// GetCertRoles extracts roles from the *ssh.Certificate associated with this
|
||||
// session.
|
||||
func (c *SessionContext) GetCertRoles() (services.RoleSet, error) {
|
||||
// GetUserRoles return roles from the SSH certificate associated with
|
||||
// this session.
|
||||
func (c *SessionContext) GetUserRoles() (services.RoleSet, error) {
|
||||
cert, err := c.GetSSHCertificate()
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
|
@ -316,7 +329,7 @@ func (c *SessionContext) GetCertRoles() (services.RoleSet, error) {
|
|||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
roleset, err := services.FetchRoles(roles, c.clt, traits)
|
||||
roleset, err := services.FetchRoles(roles, c.unsafeCachedAuthClient, traits)
|
||||
if err != nil {
|
||||
return nil, trace.Wrap(err)
|
||||
}
|
||||
|
@ -759,12 +772,13 @@ func (s *sessionCache) newSessionContextFromSession(session types.WebSession) (*
|
|||
}
|
||||
|
||||
ctx := &SessionContext{
|
||||
clt: userClient,
|
||||
remoteClt: make(map[string]auth.ClientI),
|
||||
user: session.GetUser(),
|
||||
session: session,
|
||||
parent: s,
|
||||
resources: s.upsertSessionContext(session.GetUser()),
|
||||
clt: userClient,
|
||||
unsafeCachedAuthClient: s.accessPoint,
|
||||
remoteClt: make(map[string]auth.ClientI),
|
||||
user: session.GetUser(),
|
||||
session: session,
|
||||
parent: s,
|
||||
resources: s.upsertSessionContext(session.GetUser()),
|
||||
log: s.log.WithFields(logrus.Fields{
|
||||
"user": session.GetUser(),
|
||||
"session": session.GetShortName(),
|
||||
|
|
Loading…
Reference in a new issue