/*
Copyright 2015-2017 Gravitational, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package service implements the teleport running service, taking care
// of initialization, cleanup and shutdown procedures.
package service

import (
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/ssh"

	"github.com/gravitational/teleport"
	"github.com/gravitational/teleport/lib/auth"
	"github.com/gravitational/teleport/lib/auth/native"
	"github.com/gravitational/teleport/lib/backend"
	"github.com/gravitational/teleport/lib/backend/boltbk"
	"github.com/gravitational/teleport/lib/backend/dir"
	"github.com/gravitational/teleport/lib/backend/dynamo"
	"github.com/gravitational/teleport/lib/backend/etcdbk"
	"github.com/gravitational/teleport/lib/defaults"
	"github.com/gravitational/teleport/lib/events"
	"github.com/gravitational/teleport/lib/events/dynamoevents"
	"github.com/gravitational/teleport/lib/events/filesessions"
	"github.com/gravitational/teleport/lib/events/s3sessions"
	kubeproxy "github.com/gravitational/teleport/lib/kube/proxy"
	"github.com/gravitational/teleport/lib/limiter"
	"github.com/gravitational/teleport/lib/multiplexer"
	"github.com/gravitational/teleport/lib/reversetunnel"
	"github.com/gravitational/teleport/lib/services"
	"github.com/gravitational/teleport/lib/session"
	"github.com/gravitational/teleport/lib/srv/regular"
	"github.com/gravitational/teleport/lib/state"
	"github.com/gravitational/teleport/lib/system"
	"github.com/gravitational/teleport/lib/utils"
	"github.com/gravitational/teleport/lib/web"
	"github.com/gravitational/trace"

	"github.com/gravitational/roundtrip"
	"github.com/jonboulle/clockwork"
	"github.com/pborman/uuid"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithFields(logrus.Fields{
	trace.Component: teleport.ComponentProcess,
})

const (
	// AuthIdentityEvent is generated when the Auth Server's identity has been
	// initialized in the backend.
	AuthIdentityEvent = "AuthIdentity"

	// ProxyIdentityEvent is generated by the supervisor when the proxy's
	// identity has been registered with the Auth Server.
	ProxyIdentityEvent = "ProxyIdentity"

	// SSHIdentityEvent is generated when the node's identity has been registered
	// with the Auth Server.
	SSHIdentityEvent = "SSHIdentity"

	// AuthTLSReady is generated when the Auth Server has initialized the
	// TLS Mutual Auth endpoint and is ready to start accepting connections.
	AuthTLSReady = "AuthTLSReady"

	// ProxyWebServerReady is generated when the proxy has initialized the web
	// server and is ready to start accepting connections.
	ProxyWebServerReady = "ProxyWebServerReady"

	// ProxyReverseTunnelReady is generated when the proxy has initialized the
	// reverse tunnel server and is ready to start accepting connections.
	ProxyReverseTunnelReady = "ProxyReverseTunnelReady"

	// ProxyAgentPoolReady is generated when the proxy has initialized the agent
	// pool (a pool of connections from a remote cluster to a main cluster) and is
	// ready to start accepting connections.
	ProxyAgentPoolReady = "ProxyAgentPoolReady"

	// ProxySSHReady is generated when the proxy has initialized an SSH server
	// and is ready to start accepting connections.
	ProxySSHReady = "ProxySSHReady"

	// NodeSSHReady is generated when the Teleport node has initialized an SSH server
	// and is ready to start accepting SSH connections.
	NodeSSHReady = "NodeReady"

	// TeleportExitEvent is generated when the Teleport process begins closing
	// all listening sockets and exiting.
	TeleportExitEvent = "TeleportExit"

	// TeleportReloadEvent is generated to trigger an in-process teleport
	// service reload - all servers and clients will be re-created
	// in a graceful way.
	TeleportReloadEvent = "TeleportReload"

	// TeleportPhaseChangeEvent is generated to indicate that the teleport
	// CA rotation phase has been updated, used in tests.
	TeleportPhaseChangeEvent = "TeleportPhaseChange"

	// TeleportReadyEvent is generated to signal that all teleport
	// internal components have started successfully.
	TeleportReadyEvent = "TeleportReady"
)

// RoleConfig is a configuration for a server role (either proxy or node)
type RoleConfig struct {
	DataDir     string
	HostUUID    string
	HostName    string
	AuthServers []utils.NetAddr
	Auth        AuthConfig
	Console     io.Writer
}

// Connector has all resources a process needs to connect
// to other parts of the cluster: client and identity.
type Connector struct {
	// ClientIdentity is the identity to be used in internal cluster
	// clients to the auth service.
	ClientIdentity *auth.Identity
	// ServerIdentity is the identity to be used in servers - serving SSH
	// and x509 certificates to clients.
	ServerIdentity *auth.Identity
	// Client is an authenticated client with credentials from ClientIdentity.
	Client *auth.Client
	// AuthServer is the auth server, used in connectors created for auth
	// service components.
	AuthServer *auth.AuthServer
}

// ReRegister receives new identity credentials for proxy, node and auth.
// In the case of auth servers, the role is 'TeleportAdmin' and instead of using
// a TLS client this method uses the local auth server.
func (c *Connector) ReRegister(additionalPrincipals []string) (*auth.Identity, error) {
	if c.ClientIdentity.ID.Role == teleport.RoleAdmin || c.ClientIdentity.ID.Role == teleport.RoleAuth {
		return auth.GenerateIdentity(c.AuthServer, c.ClientIdentity.ID, additionalPrincipals)
	}
	return auth.ReRegister(c.Client, c.ClientIdentity.ID, additionalPrincipals)
}

// GetCertAuthority returns a cert authority by ID.
// In the case of auth servers, the role is 'TeleportAdmin' and instead of using
// a TLS client this method uses the local auth server.
func (c *Connector) GetCertAuthority(id services.CertAuthID, loadPrivateKeys bool) (services.CertAuthority, error) {
	if c.ClientIdentity.ID.Role == teleport.RoleAdmin || c.ClientIdentity.ID.Role == teleport.RoleAuth {
		return c.AuthServer.GetCertAuthority(id, loadPrivateKeys)
	}
	return c.Client.GetCertAuthority(id, loadPrivateKeys)
}

// TeleportProcess structure holds the state of the Teleport daemon, controlling
// execution and configuration of the teleport services: ssh, auth and proxy.
type TeleportProcess struct {
	clockwork.Clock
	sync.Mutex
	Supervisor
	Config *Config
	// localAuth is set to the local auth server in case this process
	// has started with the auth server role enabled
	localAuth *auth.AuthServer
	// backend is the process' backend
	backend backend.Backend
	// auditLog is the initialized audit log
	auditLog events.IAuditLog

	// Identities of this process (credentials to the auth server, basically)
	Identities map[teleport.Role]*auth.Identity

	// connectors is a list of connected clients and their identities
	connectors map[teleport.Role]*Connector

	// registeredListeners keeps track of all listeners created by the process,
	// used to pass listeners to child processes during live reload
	registeredListeners []RegisteredListener
	// importedDescriptors is a list of imported file descriptors
	// passed by the parent process
	importedDescriptors []FileDescriptor

	// forkedPIDs is a collection of teleport processes forked
	// during restart, used to collect their status in case the
	// child process crashed.
	forkedPIDs []int

	// storage is a server local storage
	storage *auth.ProcessStorage

	// id is a process id - used to identify different processes
	// during in-process reloads.
	id string

	// Entry is a process-local log entry.
	*logrus.Entry
}

// processID is an internal process counter
// used to help differentiate between two different teleport processes
// during an in-process reload.
var processID int32
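// nextProcessID returns the next unique in-process id by atomically
// incrementing the package-level counter.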
func nextProcessID() int32 {
	return atomic.AddInt32(&processID, 1)
}

// GetAuthServer returns the process' auth server
func (process *TeleportProcess) GetAuthServer() *auth.AuthServer {
	return process.localAuth
}

// GetAuditLog returns the process' audit log
func (process *TeleportProcess) GetAuditLog() events.IAuditLog {
	return process.auditLog
}

// GetBackend returns the process' backend
func (process *TeleportProcess) GetBackend() backend.Backend {
	return process.backend
}
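// backendSupportsForks reports whether the process backend can be shared with
// child processes forked during an in-process restart; the Bolt backend holds
// an exclusive lock on its data file, so it cannot be shared.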
func (process *TeleportProcess) backendSupportsForks() bool {
	switch process.backend.(type) {
	case *boltbk.BoltBackend:
		return false
	default:
		return true
	}
}
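// findStaticIdentity returns the identity from the static identities
// configured in the process config that matches the given id, if any.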
func (process *TeleportProcess) findStaticIdentity(id auth.IdentityID) (*auth.Identity, error) {
	for i := range process.Config.Identities {
		identity := process.Config.Identities[i]
		if identity.ID.Equals(id) {
			return identity, nil
		}
	}
	return nil, trace.NotFound("identity %v not found", &id)
}
// getConnectors returns a copy of the registered connectors
func (process *TeleportProcess) getConnectors() []*Connector {
	process.Lock()
	defer process.Unlock()

	out := make([]*Connector, 0, len(process.connectors))
	for role := range process.connectors {
		out = append(out, process.connectors[role])
	}
	return out
}

// addConnector adds a connector to the registered connectors list,
// overwriting any previous connector for the same role
func (process *TeleportProcess) addConnector(connector *Connector) {
	process.Lock()
	defer process.Unlock()

	process.connectors[connector.ClientIdentity.ID.Role] = connector
}

// GetIdentity returns the process identity (credentials to the auth server) for a given
// teleport Role. A teleport process can have any combination of 3 roles: auth, node and
// proxy, and each of them has its own identity.
func (process *TeleportProcess) GetIdentity(role teleport.Role) (i *auth.Identity, err error) {
	var found bool

	process.Lock()
	defer process.Unlock()

	i, found = process.Identities[role]
	if found {
		return i, nil
	}
	i, err = process.storage.ReadIdentity(auth.IdentityCurrent, role)
	id := auth.IdentityID{
		Role:     role,
		HostUUID: process.Config.HostUUID,
		NodeName: process.Config.Hostname,
	}
	if err != nil {
		if !trace.IsNotFound(err) {
			return nil, trace.Wrap(err)
		}
		if role == teleport.RoleAdmin {
			// for the admin identity use the local auth server
			// because the admin identity is requested by the auth server
			// itself
			principals, err := process.getAdditionalPrincipals(role)
			if err != nil {
				return nil, trace.Wrap(err)
			}
			i, err = auth.GenerateIdentity(process.localAuth, id, principals)
			if err != nil {
				return nil, trace.Wrap(err)
			}
		} else {
			// try to locate a static identity provided in the config file
			i, err = process.findStaticIdentity(id)
			if err != nil {
				return nil, trace.Wrap(err)
			}
			process.Infof("Found static identity %v in the config file, writing to disk.", &id)
			if err = process.storage.WriteIdentity(auth.IdentityCurrent, *i); err != nil {
				return nil, trace.Wrap(err)
			}
		}
	}
	process.Identities[role] = i
	return i, nil
}
// Process is an interface for processes
type Process interface {
	// Closer closes all resources used by the process
	io.Closer
	// Start starts the process in a non-blocking way
	Start() error
	// WaitForSignals waits for and handles system process signals.
	WaitForSignals(context.Context) error
	// ExportFileDescriptors exports service listeners
	// file descriptors used by the process.
	ExportFileDescriptors() ([]FileDescriptor, error)
	// Shutdown starts graceful shutdown of the process,
	// blocks until all resources are freed and go-routines are
	// shut down.
	Shutdown(context.Context)
	// WaitForEvent waits for event to occur, sends event to the channel,
	// this is a non-blocking function.
	WaitForEvent(ctx context.Context, name string, eventC chan Event)
	// WaitWithContext waits for the service to stop. This is a blocking
	// function.
	WaitWithContext(ctx context.Context)
}

// NewProcess is a function that creates a new teleport process from the config
type NewProcess func(cfg *Config) (Process, error)
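// newTeleportProcess is the default NewProcess implementation used by Run.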
func newTeleportProcess(cfg *Config) (Process, error) {
	return NewTeleport(cfg)
}

// Run starts teleport processes, waits for signals
// and handles internal process reloads.
func Run(ctx context.Context, cfg Config, newTeleport NewProcess) error {
	if newTeleport == nil {
		newTeleport = newTeleportProcess
	}
	copyCfg := cfg
	srv, err := newTeleport(&copyCfg)
	if err != nil {
		return trace.Wrap(err, "initialization failed")
	}
	if srv == nil {
		return trace.BadParameter("process has returned nil server")
	}
	if err := srv.Start(); err != nil {
		return trace.Wrap(err, "startup failed")
	}
	// Wait and reload until asked to exit.
	for {
		srv, err = waitAndReload(ctx, cfg, srv, newTeleport)
		if err != nil {
			// This error means it was a clean shutdown
			// and no reload is necessary.
			if err == ErrTeleportExited {
				return nil
			}
			return trace.Wrap(err)
		}
	}
}
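// waitAndReload blocks until the process signals an exit or an in-process
// reload. On reload it creates and starts a replacement process with the
// inherited file descriptors, waits for it to report readiness, and then
// gracefully shuts the old process down before returning the new one.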
func waitAndReload(ctx context.Context, cfg Config, srv Process, newTeleport NewProcess) (Process, error) {
	err := srv.WaitForSignals(ctx)
	if err == nil {
		return nil, ErrTeleportExited
	}
	if err != ErrTeleportReloading {
		return nil, trace.Wrap(err)
	}
	log.Infof("Started in-process service reload.")
	fileDescriptors, err := srv.ExportFileDescriptors()
	if err != nil {
		warnOnErr(srv.Close())
		return nil, trace.Wrap(err)
	}
	newCfg := cfg
	newCfg.FileDescriptors = fileDescriptors
	newSrv, err := newTeleport(&newCfg)
	if err != nil {
		warnOnErr(srv.Close())
		return nil, trace.Wrap(err, "failed to create a new service")
	}
	log.Infof("Created new process.")
	if err := newSrv.Start(); err != nil {
		warnOnErr(srv.Close())
		return nil, trace.Wrap(err, "failed to start a new service")
	}
	// Wait for the new server to report that it has started
	// before shutting down the old one.
	startTimeoutCtx, startCancel := context.WithTimeout(ctx, signalPipeTimeout)
	defer startCancel()
	eventC := make(chan Event, 1)
	newSrv.WaitForEvent(startTimeoutCtx, TeleportReadyEvent, eventC)
	select {
	case <-eventC:
		log.Infof("New service has started successfully.")
	case <-startTimeoutCtx.Done():
		warnOnErr(newSrv.Close())
		warnOnErr(srv.Close())
		return nil, trace.BadParameter("the new service has failed to start")
	}
	shutdownTimeout := cfg.ShutdownTimeout
	if shutdownTimeout == 0 {
		// The default shutdown timeout is very generous to avoid disrupting
		// longer running connections.
		shutdownTimeout = defaults.DefaultIdleConnectionDuration
	}
	log.Infof("Shutting down the old service with timeout %v.", shutdownTimeout)
	// After the new process has started, initiate the graceful shutdown of the
	// old process: the new process could have generated connections to the old
	// process's server, so not all connections can be kept forever.
	timeoutCtx, cancel := context.WithTimeout(ctx, shutdownTimeout)
	defer cancel()
	srv.Shutdown(timeoutCtx)
	if timeoutCtx.Err() == context.DeadlineExceeded {
		// The new service can start initiating connections to the old service
		// keeping it from shutting down gracefully, or some external
		// connections can keep hanging the old auth service and prevent
		// the services from shutting down, so abort the graceful way
		// after some time to keep going.
		log.Infof("Some connections to the old service were aborted after timeout of %v.", shutdownTimeout)
		// Make sure that all parts of the service have exited: this function
		// cannot allow execution to continue if the shutdown is not complete,
		// otherwise subsequent Run executions will hold system resources in case
		// old versions of the service are not exiting completely.
		timeoutCtx, cancel := context.WithTimeout(ctx, shutdownTimeout)
		defer cancel()
		srv.WaitWithContext(timeoutCtx)
		if timeoutCtx.Err() == context.DeadlineExceeded {
			return nil, trace.BadParameter("the old service has failed to exit.")
		}
	} else {
		log.Infof("The old service was successfully shut down gracefully.")
	}
	return newSrv, nil
}
// NewTeleport takes the daemon configuration, instantiates all required services
// and starts them under a supervisor, returning the supervisor object.
func NewTeleport(cfg *Config) (*TeleportProcess, error) {
	// before we do anything reset the SIGINT handler back to the default
	system.ResetInterruptSignalHandler()

	if err := validateConfig(cfg); err != nil {
		return nil, trace.Wrap(err, "configuration error")
	}

	// create the data directory if it's missing
	_, err := os.Stat(cfg.DataDir)
	if os.IsNotExist(err) {
		err := os.MkdirAll(cfg.DataDir, os.ModeDir|0700)
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}

	if len(cfg.FileDescriptors) == 0 {
		cfg.FileDescriptors, err = importFileDescriptors()
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}

	// if there's no host uuid initialized yet, try to read one from
	// one of the identities
	cfg.HostUUID, err = utils.ReadHostUUID(cfg.DataDir)
	if err != nil {
		if !trace.IsNotFound(err) {
			return nil, trace.Wrap(err)
		}
		if len(cfg.Identities) != 0 {
			cfg.HostUUID = cfg.Identities[0].ID.HostUUID
			log.Infof("Taking host UUID from first identity: %v.", cfg.HostUUID)
		} else {
			cfg.HostUUID = uuid.New()
			log.Infof("Generating new host UUID: %v.", cfg.HostUUID)
		}
		if err := utils.WriteHostUUID(cfg.DataDir, cfg.HostUUID); err != nil {
			return nil, trace.Wrap(err)
		}
	}

	// if the user started auth and another service without providing the auth
	// address for that service, the address of the in-process auth will be used
	if cfg.Auth.Enabled && len(cfg.AuthServers) == 0 {
		cfg.AuthServers = []utils.NetAddr{cfg.Auth.SSHAddr}
	}

	// if the user did not provide an auth domain name, use this host's name
	if cfg.Auth.Enabled && cfg.Auth.ClusterName == nil {
		cfg.Auth.ClusterName, err = services.NewClusterName(services.ClusterNameSpecV2{
			ClusterName: cfg.Hostname,
		})
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}

	storage, err := auth.NewProcessStorage(filepath.Join(cfg.DataDir, teleport.ComponentProcess))
	if err != nil {
		return nil, trace.Wrap(err)
	}

	processID := fmt.Sprintf("%v", nextProcessID())
	process := &TeleportProcess{
		Clock:               clockwork.NewRealClock(),
		Supervisor:          NewSupervisor(processID),
		Config:              cfg,
		Identities:          make(map[teleport.Role]*auth.Identity),
		connectors:          make(map[teleport.Role]*Connector),
		importedDescriptors: cfg.FileDescriptors,
		storage:             storage,
		id:                  processID,
	}

	process.Entry = logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentProcess, process.id),
	})

	serviceStarted := false

	if !cfg.DiagnosticAddr.IsEmpty() {
		if err := process.initDiagnosticService(); err != nil {
			return nil, trace.Wrap(err)
		}
	} else {
		warnOnErr(process.closeImportedDescriptors(teleport.ComponentDiagnostic))
	}

	// Create a process wide key generator that will be shared. This is so the
	// key generator can pre-generate keys and share these across services.
	if cfg.Keygen == nil {
		precomputeCount := native.PrecomputedNum
		// if neither the auth nor the proxy service is enabled,
		// there is no need to precompute any SSH keys in the pool
		if !cfg.Auth.Enabled && !cfg.Proxy.Enabled {
			precomputeCount = 0
		}
		var err error
		cfg.Keygen, err = native.New(native.PrecomputeKeys(precomputeCount))
		if err != nil {
			return nil, trace.Wrap(err)
		}
	}

	// Produce the global TeleportReadyEvent
	// when all components have started
	eventMapping := EventMapping{
		Out: TeleportReadyEvent,
	}
	if cfg.Auth.Enabled {
		eventMapping.In = append(eventMapping.In, AuthTLSReady)
	}
	if cfg.SSH.Enabled {
		eventMapping.In = append(eventMapping.In, NodeSSHReady)
	}
	if cfg.Proxy.Enabled {
		eventMapping.In = append(eventMapping.In, ProxySSHReady)
	}
	process.RegisterEventMapping(eventMapping)

	if cfg.Auth.Enabled {
		if err := process.initAuthService(); err != nil {
			return nil, trace.Wrap(err)
		}
		serviceStarted = true
	} else {
		warnOnErr(process.closeImportedDescriptors(teleport.ComponentAuth))
	}

	if cfg.SSH.Enabled {
		if err := process.initSSH(); err != nil {
			return nil, err
		}
		serviceStarted = true
	} else {
		warnOnErr(process.closeImportedDescriptors(teleport.ComponentNode))
	}

	if cfg.Proxy.Enabled {
		if err := process.initProxy(); err != nil {
			return nil, err
		}
		serviceStarted = true
	} else {
		warnOnErr(process.closeImportedDescriptors(teleport.ComponentProxy))
	}

	process.RegisterFunc("common.rotate", process.periodicSyncRotationState)

	if !serviceStarted {
		return nil, trace.BadParameter("all services failed to start")
	}

	// create the new pid file only after the process has started successfully
	if cfg.PIDFile != "" {
		f, err := os.OpenFile(cfg.PIDFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
		if err != nil {
			return nil, trace.ConvertSystemError(err)
		}
		fmt.Fprintf(f, "%v", os.Getpid())
		defer f.Close()
	}

	// notify the parent process that this process has started
	go process.notifyParent()

	return process, nil
}
// notifyParent notifies the parent process that this process has started
// by writing to the in-memory pipe used as a communication channel.
func (process *TeleportProcess) notifyParent() {
	ctx, cancel := context.WithTimeout(process.ExitContext(), signalPipeTimeout)
	defer cancel()

	eventC := make(chan Event, 1)
	process.WaitForEvent(ctx, TeleportReadyEvent, eventC)
	select {
	case <-eventC:
		process.Infof("New service has started successfully.")
	case <-ctx.Done():
		process.Errorf("Timeout waiting for process to start: %v", ctx.Err())
		return
	}

	if err := process.writeToSignalPipe(fmt.Sprintf("Process %v has started.", os.Getpid())); err != nil {
		process.Warningf("Failed to write to signal pipe: %v", err)
		// despite the failure, it's ok to proceed:
		// it could mean that the parent process has crashed and the pipe
		// is no longer valid.
	}
}
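// setLocalAuth sets the local auth server for the process under lock.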
func (process *TeleportProcess) setLocalAuth(a *auth.AuthServer) {
	process.Lock()
	defer process.Unlock()
	process.localAuth = a
}
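// getLocalAuth returns the local auth server of the process under lock.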
func (process *TeleportProcess) getLocalAuth() *auth.AuthServer {
	process.Lock()
	defer process.Unlock()
	return process.localAuth
}

// adminCreds returns admin UID and GID settings based on the OS
func adminCreds() (*int, *int, error) {
	if runtime.GOOS != teleport.LinuxOS {
		return nil, nil, nil
	}
	// if the user is a member of the adm linux group,
	// make the audit log folder readable by admins
	isAdmin, err := utils.IsGroupMember(teleport.LinuxAdminGID)
	if err != nil {
		return nil, nil, trace.Wrap(err)
	}
	if !isAdmin {
		return nil, nil, nil
	}
	uid := os.Getuid()
	gid := teleport.LinuxAdminGID
	return &uid, &gid, nil
}
// initUploadHandler initializes the upload handler based on the config settings;
// the currently supported upload handlers are S3 and the local filesystem.
// The call can return trace.NotFound if no upload handler is set up.
func initUploadHandler(auditConfig services.AuditConfig) (events.UploadHandler, error) {
	if auditConfig.AuditSessionsURI == "" {
		return nil, trace.NotFound("no upload handler is set up")
	}
	uri, err := utils.ParseSessionsURI(auditConfig.AuditSessionsURI)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	switch uri.Scheme {
	case teleport.SchemeS3:
		return s3sessions.NewHandler(s3sessions.Config{
			Bucket: uri.Host,
			Region: auditConfig.Region,
			Path:   uri.Path,
		})
	case teleport.SchemeFile:
		return filesessions.NewHandler(filesessions.Config{
			Directory: uri.Path,
		})
	default:
		return nil, trace.BadParameter(
			"unsupported scheme for audit_sessions_uri: %q, currently supported schemes are %q and %q",
			uri.Scheme, teleport.SchemeS3, teleport.SchemeFile)
	}
}
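// For reference, a minimal sketch of the teleport.yaml storage section that
// feeds initUploadHandler above and initExternalLog below (the bucket and
// table names are illustrative, not defaults):
//
//	teleport:
//	  storage:
//	    type: dir
//	    audit_events_uri: [file:///var/lib/teleport/events, dynamodb://events]
//	    audit_sessions_uri: s3://example-session-records
//
// audit_sessions_uri selects the session upload handler (s3:// or file://),
// while each entry of audit_events_uri selects an external event log backend
// (dynamodb:// or file://).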

// initExternalLog initializes external audit log storage; if no such storage
// is set up, it returns trace.NotFound
func initExternalLog(auditConfig services.AuditConfig) (events.IAuditLog, error) {
	if auditConfig.AuditTableName != "" {
		auditConfig.AuditEventsURI = append(auditConfig.AuditEventsURI, fmt.Sprintf("%v://%v", dynamo.GetName(), auditConfig.AuditTableName))
	}
	if len(auditConfig.AuditEventsURI) > 0 && !auditConfig.ShouldUploadSessions() {
		return nil, trace.BadParameter("please specify audit_sessions_uri when using external audit backends")
	}
	var hasNonFileLog bool
	var loggers []events.IAuditLog
	for _, eventsURI := range auditConfig.AuditEventsURI {
		uri, err := utils.ParseSessionsURI(eventsURI)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		switch uri.Scheme {
		case dynamo.GetName():
			hasNonFileLog = true
			logger, err := dynamoevents.New(dynamoevents.Config{
				Tablename: uri.Host,
				Region:    auditConfig.Region,
			})
			if err != nil {
				return nil, trace.Wrap(err)
			}
			loggers = append(loggers, logger)
		case teleport.SchemeFile:
			logger, err := events.NewFileLog(events.FileLogConfig{
				Dir: uri.Path,
			})
			if err != nil {
				return nil, trace.Wrap(err)
			}
			loggers = append(loggers, logger)
		default:
			return nil, trace.BadParameter(
				"unsupported scheme for audit_events_uri: %q, currently supported schemes are %q and %q",
				uri.Scheme, dynamo.GetName(), teleport.SchemeFile)
		}
	}
	// file-only external loggers are prohibited: they are not supposed
	// to be used on their own, only in combination with other external
	// loggers, and they don't implement certain features, so they would
	// be inefficient when used standalone
	switch len(loggers) {
	case 0:
		return nil, trace.NotFound("no external log is defined")
	case 1:
		if !hasNonFileLog {
			return nil, trace.BadParameter("a file:// log cannot be used on its own, it can only be used in combination with external session logs, e.g. dynamodb://")
		}
		return loggers[0], nil
	default:
		if !hasNonFileLog {
			return nil, trace.BadParameter("a file:// log cannot be used on its own, it can only be used in combination with external session logs, e.g. dynamodb://")
		}
		return events.NewMultiLog(loggers...), nil
	}
}
// initAuthService can be called to initialize the auth server service
func (process *TeleportProcess) initAuthService() error {
	var err error

	cfg := process.Config

	// Initialize the storage back-ends for keys, events and records
	b, err := process.initAuthStorage()
	if err != nil {
		return trace.Wrap(err)
	}
	process.backend = b

	// create the audit log, which will be consuming (and recording) all events
	// and recording all sessions.
	if cfg.Auth.NoAudit {
		// this is for teleconsole
		process.auditLog = events.NewDiscardAuditLog()

		warningMessage := "Warning: Teleport audit and session recording have been " +
			"turned off. This is dangerous, you will not be able to view audit events " +
			"or save and playback recorded sessions."
		process.Warn(warningMessage)
	} else {
		// check if session recording has been disabled. note, we will continue
		// logging audit events, we just won't record sessions.
		recordSessions := true
		if cfg.Auth.ClusterConfig.GetSessionRecording() == services.RecordOff {
			recordSessions = false

			warningMessage := "Warning: Teleport session recording has been turned off. " +
				"This is dangerous, you will not be able to save and playback sessions."
			process.Warn(warningMessage)
		}

		auditConfig := cfg.Auth.ClusterConfig.GetAuditConfig()
		uploadHandler, err := initUploadHandler(auditConfig)
		if err != nil {
			if !trace.IsNotFound(err) {
				return trace.Wrap(err)
			}
		}

		externalLog, err := initExternalLog(auditConfig)
		if err != nil {
			if !trace.IsNotFound(err) {
				return trace.Wrap(err)
			}
		}

		auditServiceConfig := events.AuditLogConfig{
			DataDir:        filepath.Join(cfg.DataDir, teleport.LogsDir),
			RecordSessions: recordSessions,
			ServerID:       cfg.HostUUID,
			UploadHandler:  uploadHandler,
			ExternalLog:    externalLog,
		}
		auditServiceConfig.UID, auditServiceConfig.GID, err = adminCreds()
		if err != nil {
			return trace.Wrap(err)
		}
		process.auditLog, err = events.NewAuditLog(auditServiceConfig)
		if err != nil {
			return trace.Wrap(err)
		}
	}

	// first, create the AuthServer
	authServer, err := auth.Init(auth.InitConfig{
		Backend:              b,
		Authority:            cfg.Keygen,
		ClusterConfiguration: cfg.ClusterConfiguration,
		ClusterConfig:        cfg.Auth.ClusterConfig,
		ClusterName:          cfg.Auth.ClusterName,
		AuthServiceName:      cfg.Hostname,
		DataDir:              cfg.DataDir,
		HostUUID:             cfg.HostUUID,
		NodeName:             cfg.Hostname,
		Authorities:          cfg.Auth.Authorities,
		ReverseTunnels:       cfg.ReverseTunnels,
		Trust:                cfg.Trust,
		Presence:             cfg.Presence,
		Provisioner:          cfg.Provisioner,
		Identity:             cfg.Identity,
		Access:               cfg.Access,
		StaticTokens:         cfg.Auth.StaticTokens,
		Roles:                cfg.Auth.Roles,
		AuthPreference:       cfg.Auth.Preference,
		OIDCConnectors:       cfg.OIDCConnectors,
		AuditLog:             process.auditLog,
		KubeCACertPath:       cfg.Auth.KubeCACertPath,
	})
	if err != nil {
		return trace.Wrap(err)
	}

	process.setLocalAuth(authServer)

	connector, err := process.connectToAuthService(teleport.RoleAdmin)
	if err != nil {
		return trace.Wrap(err)
	}

	// second, create the API Server: it's actually a collection of API servers,
	// each serving requests for a "role" which is assigned to every connected
	// client based on their certificate (user, server, admin, etc)
	sessionService, err := session.New(b)
	if err != nil {
		return trace.Wrap(err)
	}
	authorizer, err := auth.NewAuthorizer(authServer.Access, authServer.Identity, authServer.Trust)
	if err != nil {
		return trace.Wrap(err)
	}
	apiConf := &auth.APIConfig{
		AuthServer:     authServer,
		SessionService: sessionService,
		Authorizer:     authorizer,
		AuditLog:       process.auditLog,
	}

	// the admin access point is a caching access point used for data
	// frequently accessed by the auth server, e.g. cert authorities, users and roles
	adminAuthServer, err := auth.NewAdminAuthServer(authServer, sessionService, process.auditLog)
	if err != nil {
		return trace.Wrap(err)
	}
	adminAccessPoint, err := process.newLocalCache(adminAuthServer, []string{"auth"})
	if err != nil {
		return trace.Wrap(err)
	}

	log := logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentAuth, process.id),
	})

	// Register the TLS endpoint of the auth service
	tlsConfig, err := connector.ServerIdentity.TLSConfig()
	if err != nil {
		return trace.Wrap(err)
	}
	tlsServer, err := auth.NewTLSServer(auth.TLSServerConfig{
		TLS:           tlsConfig,
		APIConfig:     *apiConf,
		LimiterConfig: cfg.Auth.Limiter,
		AccessPoint:   adminAccessPoint,
		Component:     teleport.Component(teleport.ComponentAuth, process.id),
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// the auth server listens on SSH and TLS, reusing the same socket
	listener, err := process.importOrCreateListener(teleport.ComponentAuth, cfg.Auth.SSHAddr.Addr)
	if err != nil {
		log.Errorf("PID: %v Failed to bind to address %v: %v, exiting.", os.Getpid(), cfg.Auth.SSHAddr.Addr, err)
		return trace.Wrap(err)
	}
	// clean up unused descriptors passed to this service, but not used by it
	warnOnErr(process.closeImportedDescriptors(teleport.ComponentAuth))
	if cfg.Auth.EnableProxyProtocol {
		log.Infof("Starting Auth service with PROXY protocol support.")
	}
	mux, err := multiplexer.New(multiplexer.Config{
		EnableProxyProtocol: cfg.Auth.EnableProxyProtocol,
		Listener:            listener,
		ID:                  teleport.Component(process.id),
	})
	if err != nil {
		listener.Close()
		return trace.Wrap(err)
	}
	go mux.Serve()
	process.RegisterFunc("auth.tls", func() error {
		utils.Consolef(cfg.Console, teleport.ComponentAuth, "Auth service is starting on %v.", cfg.Auth.SSHAddr.Addr)

		// since tlsServer.Serve is a blocking call, we emit this event right before
		// the service has started
		process.BroadcastEvent(Event{Name: AuthTLSReady, Payload: nil})
		err := tlsServer.Serve(mux.TLS())
		if err != nil && err != http.ErrServerClosed {
			log.Warningf("TLS server exited with error: %v.", err)
		}
		return nil
	})
	process.RegisterFunc("auth.heartbeat.broadcast", func() error {
		// Heartbeat the auth server's presence. This is not the best place for
		// this logic; consolidate it into the auth package later.
		connector, err := process.connectToAuthService(teleport.RoleAdmin)
		if err != nil {
			return trace.Wrap(err)
		}
		// External integrations rely on this event:
		process.BroadcastEvent(Event{Name: AuthIdentityEvent, Payload: connector})
		process.onExit("auth.broadcast", func(payload interface{}) {
			connector.Client.Close()
		})
		return nil
	})
	process.RegisterFunc("auth.heartbeat", func() error {
		srv := services.ServerV2{
			Kind:    services.KindAuthServer,
			Version: services.V2,
			Metadata: services.Metadata{
				Namespace: defaults.Namespace,
				Name:      process.Config.HostUUID,
			},
			Spec: services.ServerSpecV2{
				Addr:     cfg.Auth.SSHAddr.Addr,
				Hostname: process.Config.Hostname,
			},
		}
		host, port, err := net.SplitHostPort(srv.GetAddr())
		if err != nil {
			return trace.Wrap(err)
		}
		// advertise-ip is explicitly set:
		if process.Config.AdvertiseIP != "" {
			ahost, aport, err := utils.ParseAdvertiseAddr(process.Config.AdvertiseIP)
			if err != nil {
				return trace.Wrap(err)
			}
			// if the port is not set in the advertise addr, use the default one
			if aport == "" {
				aport = port
			}
			srv.SetAddr(fmt.Sprintf("%v:%v", ahost, aport))
		} else {
			// advertise-ip is not set, while the CA is listening on 0.0.0.0? let's try
			// to guess the 'advertise ip' then:
			if net.ParseIP(host).IsUnspecified() {
				ip, err := utils.GuessHostIP()
				if err != nil {
					log.Warn(err)
				} else {
					srv.SetAddr(net.JoinHostPort(ip.String(), port))
				}
			}
			log.Warnf("Configuration setting auth_service/advertise_ip is not set, guessing %v.", srv.GetAddr())
		}
		// immediately register, and then keep repeating in a loop:
		ticker := time.NewTicker(defaults.ServerHeartbeatTTL / 2)
		defer ticker.Stop()
	announce:
		for {
			state, err := process.storage.GetState(teleport.RoleAdmin)
			if err != nil {
				if !trace.IsNotFound(err) {
					log.Warningf("Failed to get rotation state: %v.", err)
				}
			} else {
				srv.Spec.Rotation = state.Spec.Rotation
			}
			srv.SetTTL(process, defaults.ServerHeartbeatTTL)
			err = authServer.UpsertAuthServer(&srv)
			if err != nil {
				log.Warningf("Failed to announce presence: %v.", err)
			}
			select {
			case <-process.ExitContext().Done():
				break announce
			case <-ticker.C:
			}
		}
		log.Infof("Heartbeat to other auth servers exited.")
		return nil
	})
	// execute this when the process is asked to exit:
	process.onExit("auth.shutdown", func(payload interface{}) {
		// The listeners have to be closed here, because if shutdown
		// was called before the start of the http server,
		// the http server would not have started tracking the listeners
		// and http.Shutdown will do nothing.
		if mux != nil {
			warnOnErr(mux.Close())
		}
		if listener != nil {
			warnOnErr(listener.Close())
		}
		if payload == nil {
			log.Info("Shutting down immediately.")
			warnOnErr(tlsServer.Close())
		} else {
			log.Info("Shutting down gracefully.")
			ctx := payloadContext(payload)
			warnOnErr(tlsServer.Shutdown(ctx))
		}
		log.Info("Exited.")
	})
	return nil
}
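// payloadContext extracts a context.Context from an exit event payload,
// falling back to context.TODO() if the payload is of an unexpected type.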
func payloadContext(payload interface{}) context.Context {
	ctx, ok := payload.(context.Context)
	if ok {
		return ctx
	}
	log.Errorf("expected context, got %T", payload)
	return context.TODO()
}
// onExit allows individual services to register a callback function which will be
// called when the Teleport Process is asked to exit. Usually services terminate
// themselves when the callback is called.
func (process *TeleportProcess) onExit(serviceName string, callback func(interface{})) {
	process.RegisterFunc(serviceName, func() error {
		eventC := make(chan Event)
		process.WaitForEvent(context.TODO(), TeleportExitEvent, eventC)
		select {
		case event := <-eventC:
			callback(event.Payload)
		}
		return nil
	})
}

// newLocalCache returns a new local cache access point
func (process *TeleportProcess) newLocalCache(clt auth.ClientI, cacheName []string) (auth.AccessPoint, error) {
	// if caching is disabled, return the access point
	if !process.Config.CachePolicy.Enabled {
		return clt, nil
	}
	path := filepath.Join(append([]string{process.Config.DataDir, "cache"}, cacheName...)...)
	if err := os.MkdirAll(path, teleport.SharedDirMode); err != nil {
		return nil, trace.ConvertSystemError(err)
	}
	cacheBackend, err := dir.New(backend.Params{"path": path})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return state.NewCachingAuthClient(state.Config{
		AccessPoint:    clt,
		Backend:        cacheBackend,
		NeverExpires:   process.Config.CachePolicy.NeverExpires,
		RecentCacheTTL: process.Config.CachePolicy.GetRecentTTL(),
		CacheMaxTTL:    process.Config.CachePolicy.TTL,
	})
}
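// getRotation returns the CA rotation state for the given role as recorded
// in the process storage.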
func (process *TeleportProcess) getRotation(role teleport.Role) (*services.Rotation, error) {
	state, err := process.storage.GetState(role)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return &state.Spec.Rotation, nil
}
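// proxyPublicAddr returns the first configured public address of the proxy,
// or an empty address if none was configured.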
func (process *TeleportProcess) proxyPublicAddr() utils.NetAddr {
	if len(process.Config.Proxy.PublicAddrs) == 0 {
		return utils.NetAddr{}
	}
	return process.Config.Proxy.PublicAddrs[0]
}
// initSSH initializes the "node" role, i.e. a simple SSH server connected to the auth server.
func (process *TeleportProcess) initSSH() error {
	process.registerWithAuthServer(teleport.RoleNode, SSHIdentityEvent)
	eventsC := make(chan Event)
	process.WaitForEvent(process.ExitContext(), SSHIdentityEvent, eventsC)

	var s *regular.Server

	log := logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentNode, process.id),
	})

	process.RegisterFunc("ssh.node", func() error {
		var event Event
		select {
		case event = <-eventsC:
			log.Debugf("Received event %q.", event.Name)
		case <-process.ExitContext().Done():
			log.Debugf("Process is exiting.")
			return nil
		}

		conn, ok := (event.Payload).(*Connector)
		if !ok {
			return trace.BadParameter("unsupported connector type: %T", event.Payload)
		}

		cfg := process.Config

		limiter, err := limiter.NewLimiter(cfg.SSH.Limiter)
		if err != nil {
			return trace.Wrap(err)
		}

		authClient, err := process.newLocalCache(conn.Client, []string{"node"})
		if err != nil {
			return trace.Wrap(err)
		}

		// make sure the namespace exists
		namespace := services.ProcessNamespace(cfg.SSH.Namespace)
		_, err = authClient.GetNamespace(namespace)
		if err != nil {
			if trace.IsNotFound(err) {
				return trace.NotFound(
					"namespace %v is not found, ask your system administrator to create this namespace so you can register nodes there.", namespace)
			}
			return trace.Wrap(err)
		}

		listener, err := process.importOrCreateListener(teleport.ComponentNode, cfg.SSH.Addr.Addr)
		if err != nil {
			return trace.Wrap(err)
		}
		// clean up unused descriptors passed to this service, but not used by it
		warnOnErr(process.closeImportedDescriptors(teleport.ComponentNode))

		s, err = regular.New(cfg.SSH.Addr,
			cfg.Hostname,
			[]ssh.Signer{conn.ServerIdentity.KeySigner},
			authClient,
			cfg.DataDir,
			cfg.AdvertiseIP,
			process.proxyPublicAddr(),
			regular.SetLimiter(limiter),
			regular.SetShell(cfg.SSH.Shell),
			regular.SetAuditLog(conn.Client),
			regular.SetSessionServer(conn.Client),
			regular.SetLabels(cfg.SSH.Labels, cfg.SSH.CmdLabels),
			regular.SetNamespace(namespace),
			regular.SetPermitUserEnvironment(cfg.SSH.PermitUserEnvironment),
			regular.SetCiphers(cfg.Ciphers),
			regular.SetKEXAlgorithms(cfg.KEXAlgorithms),
			regular.SetMACAlgorithms(cfg.MACAlgorithms),
			regular.SetPAMConfig(cfg.SSH.PAM),
			regular.SetRotationGetter(process.getRotation),
		)
		if err != nil {
			return trace.Wrap(err)
		}

		// init the uploader service for the recording SSH node, but only if the
		// proxy is not enabled on this node, because the proxy starts the
		// uploader service as well
		if !cfg.Proxy.Enabled {
			if err := process.initUploaderService(authClient, conn.Client); err != nil {
				return trace.Wrap(err)
			}
		}

		log.Infof("Service is starting on %v %v.", cfg.SSH.Addr.Addr, process.Config.CachePolicy)
		utils.Consolef(cfg.Console, teleport.ComponentNode, "Service is starting on %v.", cfg.SSH.Addr.Addr)
		go s.Serve(listener)

		// broadcast that the node has started
		process.BroadcastEvent(Event{Name: NodeSSHReady, Payload: nil})

		// block and wait while the node is running
		s.Wait()
		log.Infof("Exited.")
		return nil
	})
	// execute this when the process is asked to exit:
	process.onExit("ssh.shutdown", func(payload interface{}) {
		if payload == nil {
			log.Infof("Shutting down immediately.")
			if s != nil {
				warnOnErr(s.Close())
			}
		} else {
			log.Infof("Shutting down gracefully.")
			if s != nil {
				warnOnErr(s.Shutdown(payloadContext(payload)))
			}
		}
		log.Infof("Exited.")
	})

	return nil
}
// registerWithAuthServer uses the one-time provisioning token obtained earlier
// from the server to get a pair of SSH keys signed by the Auth Server's host
// certificate authority
func (process *TeleportProcess) registerWithAuthServer(role teleport.Role, eventName string) {
	var authClient *auth.Client
	process.RegisterFunc(fmt.Sprintf("register.%v", strings.ToLower(role.String())), func() error {
		retryTime := defaults.ServerHeartbeatTTL / 3
		for {
			connector, err := process.connectToAuthService(role)
			if err == nil {
				process.BroadcastEvent(Event{Name: eventName, Payload: connector})
				authClient = connector.Client
				return nil
			}
			// in between attempts, check if teleport is shutting down
			select {
			case <-process.ExitContext().Done():
				process.Infof("%v stopping connection attempts, teleport is shutting down.", role)
				return ErrTeleportExited
			default:
			}
			if trace.IsConnectionProblem(err) {
				process.Infof("%v failed attempt connecting to auth server: %v.", role, err)
				time.Sleep(retryTime)
				continue
			}
			if !trace.IsNotFound(err) {
				return trace.Wrap(err)
			}
		}
	})

	process.onExit("auth.client", func(interface{}) {
		if authClient != nil {
			authClient.Close()
		}
	})
}
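// initUploaderService prepares the directory structure for session
// recordings and starts the uploader service that submits completed
// recordings to the audit log.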
func (process *TeleportProcess) initUploaderService(accessPoint auth.AccessPoint, auditLog events.IAuditLog) error {
	log := logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentAuditLog, process.id),
	})
	// create folder for uploads
	uid, gid, err := adminCreds()
	if err != nil {
		return trace.Wrap(err)
	}

	// prepare dirs for uploader
	path := []string{process.Config.DataDir, teleport.LogsDir, teleport.ComponentUpload, events.SessionLogsDir, defaults.Namespace}
	for i := 1; i < len(path); i++ {
		dir := filepath.Join(path[:i+1]...)
		log.Infof("Creating directory %v.", dir)
		err := os.Mkdir(dir, 0755)
		err = trace.ConvertSystemError(err)
		if err != nil {
			if !trace.IsAlreadyExists(err) {
				return trace.Wrap(err)
			}
		}
		if uid != nil && gid != nil {
			log.Infof("Setting directory %v owner to %v:%v.", dir, *uid, *gid)
			err := os.Chown(dir, *uid, *gid)
			if err != nil {
				return trace.ConvertSystemError(err)
			}
		}
	}

	uploader, err := events.NewUploader(events.UploaderConfig{
		DataDir:   filepath.Join(process.Config.DataDir, teleport.LogsDir),
		Namespace: defaults.Namespace,
		ServerID:  teleport.ComponentUpload,
		AuditLog:  auditLog,
		EventsC:   process.Config.UploadEventsC,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	process.RegisterFunc("uploader.service", func() error {
		err := uploader.Serve()
		if err != nil {
			log.Errorf("Uploader server exited with error: %v.", err)
		}
		return nil
	})

	process.onExit("uploader.shutdown", func(payload interface{}) {
		log.Infof("Shutting down.")
		warnOnErr(uploader.Stop())
		log.Infof("Exited.")
	})
	return nil
}
// initDiagnosticService starts the diagnostic service, currently serving
// the healthz and prometheus endpoints
func (process *TeleportProcess) initDiagnosticService() error {
	mux := http.NewServeMux()
	mux.Handle("/metrics", prometheus.Handler())

	if process.Config.Debug {
		log.Infof("Adding diagnostic debugging handlers. To connect with profiler, use `go tool pprof %v`.", process.Config.DiagnosticAddr.Addr)

		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}

	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		roundtrip.ReplyJSON(w, http.StatusOK, map[string]interface{}{"status": "ok"})
	})
	listener, err := process.importOrCreateListener(teleport.ComponentDiagnostic, process.Config.DiagnosticAddr.Addr)
	if err != nil {
		return trace.Wrap(err)
	}
	warnOnErr(process.closeImportedDescriptors(teleport.ComponentDiagnostic))

	server := &http.Server{
		Handler: mux,
	}

	log := logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentDiagnostic, process.id),
	})

	log.Infof("Starting diagnostic service on %v.", process.Config.DiagnosticAddr.Addr)

	process.RegisterFunc("diagnostic.service", func() error {
		err := server.Serve(listener)
		if err != nil && err != http.ErrServerClosed {
			log.Warningf("Diagnostic server exited with error: %v.", err)
		}
		return nil
	})

	process.onExit("diagnostic.shutdown", func(payload interface{}) {
		if payload == nil {
			log.Infof("Shutting down immediately.")
			warnOnErr(server.Close())
		} else {
			log.Infof("Shutting down gracefully.")
			ctx := payloadContext(payload)
			warnOnErr(server.Shutdown(ctx))
		}
		log.Infof("Exited.")
	})
	return nil
}
// getAdditionalPrincipals returns a list of additional principals to add
// to a role's service certificates.
func (process *TeleportProcess) getAdditionalPrincipals(role teleport.Role) ([]string, error) {
	var principals []string
	if process.Config.Hostname != "" {
		principals = append(principals, process.Config.Hostname)
	}
	var addrs []utils.NetAddr
	switch role {
	case teleport.RoleProxy:
		addrs = append(process.Config.Proxy.PublicAddrs, utils.NetAddr{Addr: reversetunnel.RemoteKubeProxy})
	case teleport.RoleAuth, teleport.RoleAdmin:
		addrs = process.Config.Auth.PublicAddrs
	case teleport.RoleNode:
		addrs = process.Config.SSH.PublicAddrs
	}
	for _, addr := range addrs {
		host, err := utils.Host(addr.Addr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		principals = append(principals, host)
	}
	return principals, nil
}
// initProxy gets called if teleport runs with the 'proxy' role enabled.
// This means it will do three things:
// 1. serve a web UI
// 2. proxy SSH connections to nodes running with the 'node' role
// 3. take care of reverse tunnels
func (process *TeleportProcess) initProxy() error {
	// if no TLS key was provided for the web UI, generate a self-signed cert
	if process.Config.Proxy.TLSKey == "" && !process.Config.Proxy.DisableTLS && !process.Config.Proxy.DisableWebService {
		err := initSelfSignedHTTPSCert(process.Config)
		if err != nil {
			return trace.Wrap(err)
		}
	}
	process.registerWithAuthServer(teleport.RoleProxy, ProxyIdentityEvent)
	process.RegisterFunc("proxy.init", func() error {
		eventsC := make(chan Event)
		process.WaitForEvent(process.ExitContext(), ProxyIdentityEvent, eventsC)

		var event Event
		select {
		case event = <-eventsC:
			process.Debugf("Received event %q.", event.Name)
		case <-process.ExitContext().Done():
			process.Debugf("Process is exiting.")
			return nil
		}

		conn, ok := (event.Payload).(*Connector)
		if !ok {
			return trace.BadParameter("unsupported connector type: %T", event.Payload)
		}

		err := process.initProxyEndpoint(conn)
		if err != nil {
			return trace.Wrap(err)
		}

		return nil
	})
	return nil
}
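// proxyListeners groups the listeners a proxy service may open: an optional
// multiplexer plus the web, reverse tunnel and kubernetes listeners.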
type proxyListeners struct {
	mux           *multiplexer.Mux
	web           net.Listener
	reverseTunnel net.Listener
	kube          net.Listener
}

func (l *proxyListeners) Close() {
	if l.mux != nil {
		l.mux.Close()
	}
	if l.web != nil {
		l.web.Close()
	}
	if l.reverseTunnel != nil {
		l.reverseTunnel.Close()
	}
	if l.kube != nil {
		l.kube.Close()
	}
}
// setupProxyListeners sets up web proxy listeners based on the configuration
func (process *TeleportProcess) setupProxyListeners() (*proxyListeners, error) {
	cfg := process.Config
	process.Debugf("Setup Proxy: Web Proxy Address: %v, Reverse Tunnel Proxy Address: %v", cfg.Proxy.WebAddr.Addr, cfg.Proxy.ReverseTunnelListenAddr.Addr)
	var err error
	var listeners proxyListeners

	if !cfg.Proxy.KubeListenAddr.IsEmpty() {
		process.Debugf("Setup Proxy: turning on Kubernetes proxy.")
		listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "kube"), cfg.Proxy.KubeListenAddr.Addr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		listeners.kube = listener
	}

	switch {
	case cfg.Proxy.DisableWebService && cfg.Proxy.DisableReverseTunnel:
		process.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy are disabled.")
		return &listeners, nil
	case cfg.Proxy.ReverseTunnelListenAddr.Equals(cfg.Proxy.WebAddr) && !cfg.Proxy.DisableTLS:
		process.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy listen on the same port, multiplexing is on.")
		listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel", "web"), cfg.Proxy.WebAddr.Addr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		listeners.mux, err = multiplexer.New(multiplexer.Config{
			EnableProxyProtocol: cfg.Proxy.EnableProxyProtocol,
			Listener:            listener,
			DisableTLS:          cfg.Proxy.DisableWebService,
			DisableSSH:          cfg.Proxy.DisableReverseTunnel,
			ID:                  teleport.Component(teleport.ComponentProxy, "tunnel", "web", process.id),
		})
		if err != nil {
			listener.Close()
			return nil, trace.Wrap(err)
		}
		listeners.web = listeners.mux.TLS()
		listeners.reverseTunnel = listeners.mux.SSH()
		go listeners.mux.Serve()
		return &listeners, nil
	case cfg.Proxy.EnableProxyProtocol && !cfg.Proxy.DisableWebService && !cfg.Proxy.DisableTLS:
		process.Debugf("Setup Proxy: Proxy protocol is enabled for web service, multiplexing is on.")
		listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "web"), cfg.Proxy.WebAddr.Addr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		listeners.mux, err = multiplexer.New(multiplexer.Config{
			EnableProxyProtocol: cfg.Proxy.EnableProxyProtocol,
			Listener:            listener,
			DisableTLS:          false,
			DisableSSH:          true,
			ID:                  teleport.Component(teleport.ComponentProxy, "web", process.id),
		})
		if err != nil {
			listener.Close()
			return nil, trace.Wrap(err)
		}
		listeners.web = listeners.mux.TLS()
		listeners.reverseTunnel, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel"), cfg.Proxy.ReverseTunnelListenAddr.Addr)
		if err != nil {
			listener.Close()
			listeners.Close()
			return nil, trace.Wrap(err)
		}
		go listeners.mux.Serve()
		return &listeners, nil
	default:
		process.Debugf("Setup Proxy: Reverse tunnel proxy and web proxy are listening on separate ports.")
		if !cfg.Proxy.DisableReverseTunnel {
			listeners.reverseTunnel, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "tunnel"), cfg.Proxy.ReverseTunnelListenAddr.Addr)
			if err != nil {
				listeners.Close()
				return nil, trace.Wrap(err)
			}
		}
		if !cfg.Proxy.DisableWebService {
			listeners.web, err = process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "web"), cfg.Proxy.WebAddr.Addr)
			if err != nil {
				listeners.Close()
				return nil, trace.Wrap(err)
			}
		}
		return &listeners, nil
	}
}
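// initProxyEndpoint starts the actual proxy servers once the proxy identity
// is available: the reverse tunnel server and the web proxy shown below and,
// where configured, the remaining proxy listeners, each guarded by its
// config switches.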
func (process *TeleportProcess) initProxyEndpoint(conn *Connector) error {
|
|
// clean up unused descriptors passed for proxy, but not used by it
|
|
defer process.closeImportedDescriptors(teleport.ComponentProxy)
|
|
	var err error
	cfg := process.Config

	proxyLimiter, err := limiter.NewLimiter(cfg.Proxy.Limiter)
	if err != nil {
		return trace.Wrap(err)
	}

	reverseTunnelLimiter, err := limiter.NewLimiter(cfg.Proxy.Limiter)
	if err != nil {
		return trace.Wrap(err)
	}

	// make a caching auth client for the auth server:
	accessPoint, err := process.newLocalCache(conn.Client, []string{"proxy"})
	if err != nil {
		return trace.Wrap(err)
	}

	clientTLSConfig, err := conn.ClientIdentity.TLSConfig()
	if err != nil {
		return trace.Wrap(err)
	}

	listeners, err := process.setupProxyListeners()
	if err != nil {
		return trace.Wrap(err)
	}

	// Register reverse tunnel agents pool
	agentPool, err := reversetunnel.NewAgentPool(reversetunnel.AgentPoolConfig{
		HostUUID:     conn.ServerIdentity.ID.HostUUID,
		Client:       conn.Client,
		AccessPoint:  accessPoint,
		HostSigners:  []ssh.Signer{conn.ServerIdentity.KeySigner},
		Cluster:      conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority],
		KubeDialAddr: utils.DialAddrFromListenAddr(cfg.Proxy.KubeListenAddr),
	})
	if err != nil {
		return trace.Wrap(err)
	}

	log := logrus.WithFields(logrus.Fields{
		trace.Component: teleport.Component(teleport.ComponentReverseTunnelServer, process.id),
	})

	// register SSH reverse tunnel server that accepts connections
	// from remote teleport nodes
	var tsrv reversetunnel.Server
	if !process.Config.Proxy.DisableReverseTunnel {
		tsrv, err = reversetunnel.NewServer(
			reversetunnel.Config{
				ID:                    process.Config.HostUUID,
				ClusterName:           conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority],
				ClientTLS:             clientTLSConfig,
				Listener:              listeners.reverseTunnel,
				HostSigners:           []ssh.Signer{conn.ServerIdentity.KeySigner},
				LocalAuthClient:       conn.Client,
				LocalAccessPoint:      accessPoint,
				NewCachingAccessPoint: process.newLocalCache,
				Limiter:               reverseTunnelLimiter,
				DirectClusters: []reversetunnel.DirectCluster{
					{
						Name:   conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority],
						Client: conn.Client,
					},
				},
				KeyGen:        cfg.Keygen,
				Ciphers:       cfg.Ciphers,
				KEXAlgorithms: cfg.KEXAlgorithms,
				MACAlgorithms: cfg.MACAlgorithms,
				DataDir:       process.Config.DataDir,
				PollingPeriod: process.Config.PollingPeriod,
			})
		if err != nil {
			return trace.Wrap(err)
		}
process.RegisterFunc("proxy.reveresetunnel.server", func() error {
|
|
utils.Consolef(cfg.Console, teleport.ComponentProxy, "Reverse tunnel service is starting on %v.", cfg.Proxy.ReverseTunnelListenAddr.Addr)
|
|
log.Infof("Starting on %v using %v", cfg.Proxy.ReverseTunnelListenAddr.Addr, process.Config.CachePolicy)
|
|
if err := tsrv.Start(); err != nil {
|
|
log.Error(err)
|
|
return trace.Wrap(err)
|
|
}
|
|
|
|
// notify parties that we've started reverse tunnel server
|
|
process.BroadcastEvent(Event{Name: ProxyReverseTunnelReady, Payload: tsrv})
|
|
tsrv.Wait()
|
|
return nil
|
|
})
|
|
}
|
|
|
|
// Register web proxy server
|
|
var webServer *http.Server
|
|
var webHandler *web.RewritingHandler
|
|
if !process.Config.Proxy.DisableWebService {
|
|
webHandler, err = web.NewHandler(
|
|
web.Config{
|
|
Proxy: tsrv,
|
|
AuthServers: cfg.AuthServers[0],
|
|
DomainName: cfg.Hostname,
|
|
ProxyClient: conn.Client,
|
|
DisableUI: process.Config.Proxy.DisableWebInterface,
|
|
ProxySSHAddr: cfg.Proxy.SSHAddr,
|
|
ProxyWebAddr: cfg.Proxy.WebAddr,
|
|
})
|
|
if err != nil {
|
|
return trace.Wrap(err)
|
|
}
|
|
proxyLimiter.WrapHandle(webHandler)
|
|
if !process.Config.Proxy.DisableTLS {
|
|
log.Infof("Using TLS cert %v, key %v", cfg.Proxy.TLSCert, cfg.Proxy.TLSKey)
|
|
tlsConfig, err := utils.CreateTLSConfiguration(cfg.Proxy.TLSCert, cfg.Proxy.TLSKey)
|
|
if err != nil {
|
|
return trace.Wrap(err)
|
|
}
|
|
listeners.web = tls.NewListener(listeners.web, tlsConfig)
|
|
}
|
|
webServer = &http.Server{
|
|
Handler: proxyLimiter,
|
|
}
|
|
process.RegisterFunc("proxy.web", func() error {
|
|
utils.Consolef(cfg.Console, teleport.ComponentProxy, "Web proxy service is starting on %v.", cfg.Proxy.WebAddr.Addr)
|
|
log.Infof("Web proxy service is starting on %v.", cfg.Proxy.WebAddr.Addr)
|
|
defer webHandler.Close()
|
|
process.BroadcastEvent(Event{Name: ProxyWebServerReady, Payload: webHandler})
|
|
if err := webServer.Serve(listeners.web); err != nil && err != http.ErrServerClosed {
|
|
log.Warningf("Error while serving web requests: %v", err)
|
|
}
|
|
log.Infof("Exited.")
|
|
return nil
|
|
})
|
|
} else {
|
|
log.Infof("Web UI is disabled.")
|
|
}
|
|
|
|
// Register SSH proxy server - SSH jumphost proxy server
|
|
listener, err := process.importOrCreateListener(teleport.Component(teleport.ComponentProxy, "ssh"), cfg.Proxy.SSHAddr.Addr)
|
|
	if err != nil {
		return trace.Wrap(err)
	}
	sshProxy, err := regular.New(cfg.Proxy.SSHAddr,
		cfg.Hostname,
		[]ssh.Signer{conn.ServerIdentity.KeySigner},
		accessPoint,
		cfg.DataDir,
		"",
		process.proxyPublicAddr(),
		regular.SetLimiter(proxyLimiter),
		regular.SetProxyMode(tsrv),
		regular.SetSessionServer(conn.Client),
		regular.SetAuditLog(conn.Client),
		regular.SetCiphers(cfg.Ciphers),
		regular.SetKEXAlgorithms(cfg.KEXAlgorithms),
		regular.SetMACAlgorithms(cfg.MACAlgorithms),
		regular.SetNamespace(defaults.Namespace),
		regular.SetRotationGetter(process.getRotation),
	)
	if err != nil {
		return trace.Wrap(err)
	}
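
	// Note: regular.New above follows the functional-options pattern; each
	// regular.Set* argument returns an option that is applied while the SSH
	// proxy server is constructed, which keeps the constructor signature
	// stable as the set of options grows.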
process.RegisterFunc("proxy.ssh", func() error {
|
|
utils.Consolef(cfg.Console, teleport.ComponentProxy, "SSH proxy service is starting on %v.", cfg.Proxy.SSHAddr.Addr)
|
|
log.Infof("SSH proxy service is starting on %v", cfg.Proxy.SSHAddr.Addr)
|
|
go sshProxy.Serve(listener)
|
|
// broadcast that the proxy ssh server has started
|
|
process.BroadcastEvent(Event{Name: ProxySSHReady, Payload: nil})
|
|
return nil
|
|
})
|
|
|
|
process.RegisterFunc("proxy.reversetunnel.agent", func() error {
|
|
log := logrus.WithFields(logrus.Fields{
|
|
trace.Component: teleport.Component(teleport.ComponentReverseTunnelAgent, process.id),
|
|
})
|
|
log.Infof("Starting reverse tunnel agent pool.")
|
|
if err := agentPool.Start(); err != nil {
|
|
log.Errorf("Failed to start: %v.", err)
|
|
return trace.Wrap(err)
|
|
}
|
|
process.BroadcastEvent(Event{Name: ProxyAgentPoolReady, Payload: agentPool})
|
|
agentPool.Wait()
|
|
return nil
|
|
})
|
|
|
|
var kubeServer *kubeproxy.TLSServer
|
|
if listeners.kube != nil && !process.Config.Proxy.DisableReverseTunnel {
|
|
authorizer, err := auth.NewAuthorizer(conn.Client, conn.Client, conn.Client)
|
|
if err != nil {
|
|
return trace.Wrap(err)
|
|
}
|
|
// Register TLS endpoint of the Kube proxy service
|
|
tlsConfig, err := conn.ServerIdentity.TLSConfig()
|
|
if err != nil {
|
|
return trace.Wrap(err)
|
|
}
|
|
kubeServer, err = kubeproxy.NewTLSServer(kubeproxy.TLSServerConfig{
|
|
ForwarderConfig: kubeproxy.ForwarderConfig{
|
|
Namespace: defaults.Namespace,
|
|
Keygen: cfg.Keygen,
|
|
ClusterName: conn.ServerIdentity.Cert.Extensions[utils.CertExtensionAuthority],
|
|
Tunnel: tsrv,
|
|
Auth: authorizer,
|
|
Client: conn.Client,
|
|
DataDir: cfg.DataDir,
|
|
AccessPoint: accessPoint,
|
|
AuditLog: conn.Client,
|
|
ServerID: cfg.HostUUID,
|
|
TargetAddr: cfg.Proxy.KubeAPIAddr.Addr,
|
|
ClusterOverride: cfg.Proxy.KubeClusterOverride,
|
|
},
|
|
TLS: tlsConfig,
|
|
LimiterConfig: cfg.Proxy.Limiter,
|
|
AccessPoint: accessPoint,
|
|
Component: teleport.Component(teleport.ComponentProxy, teleport.ComponentKube),
|
|
})
|
|
if err != nil {
|
|
return trace.Wrap(err)
|
|
}
|
|
process.RegisterFunc("proxy.kube", func() error {
|
|
log := logrus.WithFields(logrus.Fields{
|
|
trace.Component: teleport.Component(teleport.ComponentKube),
|
|
})
|
|
log.Infof("Starting Kube proxy on %v.", cfg.Proxy.KubeListenAddr.Addr)
|
|
err := kubeServer.Serve(listeners.kube)
|
|
if err != nil && err != http.ErrServerClosed {
|
|
log.Warningf("Kube TLS server exited with error: %v.", err)
|
|
}
|
|
return nil
|
|
})
|
|
}
	// Execute this when the process is asked to exit.
	process.onExit("proxy.shutdown", func(payload interface{}) {
		agentPool.Stop()
		defer listeners.Close()
		// Close the kube listener first: during a graceful shutdown, if Serve
		// was never called on the TLS server, Shutdown would be a no-op
		// because the server never started tracking the listener. Closing a
		// listener several times is safe.
		if listeners.kube != nil {
			listeners.kube.Close()
		}
		if payload == nil {
			log.Infof("Shutting down immediately.")
			if tsrv != nil {
				warnOnErr(tsrv.Close())
			}
			if webServer != nil {
				warnOnErr(webServer.Close())
			}
			if webHandler != nil {
				warnOnErr(webHandler.Close())
			}
			warnOnErr(sshProxy.Close())
			if kubeServer != nil {
				warnOnErr(kubeServer.Close())
			}
		} else {
			log.Infof("Shutting down gracefully.")
			ctx := payloadContext(payload)
			if tsrv != nil {
				warnOnErr(tsrv.Shutdown(ctx))
			}
			warnOnErr(sshProxy.Shutdown(ctx))
			if webServer != nil {
				warnOnErr(webServer.Shutdown(ctx))
			}
			if kubeServer != nil {
				warnOnErr(kubeServer.Shutdown(ctx))
			}
			if webHandler != nil {
				warnOnErr(webHandler.Close())
			}
		}
		log.Infof("Exited.")
	})
	if err := process.initUploaderService(accessPoint, conn.Client); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
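
// The proxy services registered above (reverse tunnel server, web proxy, SSH
// proxy, agent pool, and optionally the kube proxy) all start concurrently;
// the ProxyReverseTunnelReady, ProxyWebServerReady, ProxySSHReady and
// ProxyAgentPoolReady events broadcast above give dependent components a way
// to wait for each of them individually.
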
func warnOnErr(err error) {
	if err != nil {
		// Don't warn on a double close; it sometimes happens when calling
		// Accept on a closed listener.
		if strings.Contains(err.Error(), teleport.UseOfClosedNetworkConnection) {
			return
		}
		log.Warningf("Got error while cleaning up: %v.", err)
	}
}
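
// Note: the string match in warnOnErr reflects the Go versions this code
// targets; on Go 1.16 and later, errors.Is(err, net.ErrClosed) would be the
// equivalent, and more robust, check.
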
// initAuthStorage initializes the storage backend for the auth service.
func (process *TeleportProcess) initAuthStorage() (bk backend.Backend, err error) {
	bc := &process.Config.Auth.StorageConfig
	process.Debugf("Using %v backend.", bc.Type)
	switch bc.Type {
	// legacy bolt backend:
	case boltbk.GetName():
		bk, err = boltbk.New(bc.Params)
	// filesystem backend:
	case dir.GetName():
		bk, err = dir.New(bc.Params)
	// DynamoDB backend:
	case dynamo.GetName():
		bk, err = dynamo.New(bc.Params)
	// etcd backend:
	case etcdbk.GetName():
		bk, err = etcdbk.New(bc.Params)
	default:
		err = trace.BadParameter("unsupported secrets storage type: %q", bc.Type)
	}
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return bk, nil
}
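
// For illustration only, a storage section in teleport.yaml that would be
// routed to the etcd case above might look like this (the key names are
// representative, not authoritative):
//
//	teleport:
//	  storage:
//	    type: etcd
//	    peers: ["https://127.0.0.1:2379"]
//	    prefix: teleport
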
// WaitWithContext waits until all internal services stop.
func (process *TeleportProcess) WaitWithContext(ctx context.Context) {
	local, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		process.Supervisor.Wait()
	}()
	<-local.Done()
}
// StartShutdown launches a non-blocking graceful shutdown process and returns
// a context that is closed once the shutdown completes.
func (process *TeleportProcess) StartShutdown(ctx context.Context) context.Context {
	process.BroadcastEvent(Event{Name: TeleportExitEvent, Payload: ctx})
	localCtx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		process.Supervisor.Wait()
		process.Debugf("All supervisor functions are completed.")
		localAuth := process.getLocalAuth()
		if localAuth != nil {
			// Close the copy fetched via the accessor; reading
			// process.localAuth directly here would bypass its locking.
			if err := localAuth.Close(); err != nil {
				process.Warningf("Failed closing auth server: %v.", err)
			}
		}
	}()
	return localCtx
}
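
// Illustrative use of StartShutdown from a caller that wants to block, for
// example a signal handler (Shutdown below is the blocking convenience
// wrapper that does exactly this):
//
//	ctx := process.StartShutdown(context.Background())
//	<-ctx.Done() // all supervised services have stopped
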
// Shutdown launches the graceful shutdown process and waits
// for it to complete.
func (process *TeleportProcess) Shutdown(ctx context.Context) {
	localCtx := process.StartShutdown(ctx)
	// wait until the shutdown context closes
	<-localCtx.Done()
	process.Debugf("Process completed.")
}
// Close broadcasts close signals and exits immediately.
func (process *TeleportProcess) Close() error {
	process.BroadcastEvent(Event{Name: TeleportExitEvent})

	process.Config.Keygen.Close()

	var errors []error
	localAuth := process.getLocalAuth()
	if localAuth != nil {
		// Close the copy fetched via the accessor rather than re-reading
		// process.localAuth without synchronization.
		errors = append(errors, localAuth.Close())
	}

	if process.storage != nil {
		errors = append(errors, process.storage.Close())
	}

	return trace.NewAggregate(errors...)
}
func validateConfig(cfg *Config) error {
	if !cfg.Auth.Enabled && !cfg.SSH.Enabled && !cfg.Proxy.Enabled {
		return trace.BadParameter(
			"config: supply at least one of Auth, SSH or Proxy roles")
	}

	if cfg.DataDir == "" {
		return trace.BadParameter("config: please supply data directory")
	}

	if cfg.Console == nil {
		cfg.Console = ioutil.Discard
	}

	if (cfg.Proxy.TLSKey == "" && cfg.Proxy.TLSCert != "") || (cfg.Proxy.TLSKey != "" && cfg.Proxy.TLSCert == "") {
		return trace.BadParameter("please supply both TLS key and certificate")
	}

	if len(cfg.AuthServers) == 0 {
		return trace.BadParameter("auth_servers is empty")
	}
	for i := range cfg.Auth.Authorities {
		if err := cfg.Auth.Authorities[i].Check(); err != nil {
			return trace.Wrap(err)
		}
	}
	for _, tun := range cfg.ReverseTunnels {
		if err := tun.Check(); err != nil {
			return trace.Wrap(err)
		}
	}

	if cfg.PollingPeriod == 0 {
		cfg.PollingPeriod = defaults.HighResPollingPeriod
	}

	cfg.SSH.Namespace = services.ProcessNamespace(cfg.SSH.Namespace)

	return nil
}
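
// As a minimal sketch of a configuration that passes the checks above
// (MakeDefaultConfig is assumed to exist elsewhere in this package; any
// populated *Config works):
//
//	cfg := MakeDefaultConfig() // assumed helper
//	cfg.DataDir = "/var/lib/teleport"
//	cfg.AuthServers = []utils.NetAddr{{AddrNetwork: "tcp", Addr: "127.0.0.1:3025"}}
//	cfg.SSH.Enabled = true
//	err := validateConfig(cfg)
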
// initSelfSignedHTTPSCert generates and self-signs a TLS key+cert pair for
// HTTPS connections to the proxy server.
func initSelfSignedHTTPSCert(cfg *Config) (err error) {
	log.Warningf("No TLS Keys provided, using self-signed certificate.")

	keyPath := filepath.Join(cfg.DataDir, defaults.SelfSignedKeyPath)
	certPath := filepath.Join(cfg.DataDir, defaults.SelfSignedCertPath)

	cfg.Proxy.TLSKey = keyPath
	cfg.Proxy.TLSCert = certPath

	// return the existing pair if it has already been generated:
	_, err = tls.LoadX509KeyPair(certPath, keyPath)
	if err == nil {
		return nil
	}
	if !os.IsNotExist(err) {
		return trace.Wrap(err, "unrecognized error reading certs")
	}
	log.Warningf("Generating self-signed key and cert to %v %v.", keyPath, certPath)

	creds, err := utils.GenerateSelfSignedCert([]string{cfg.Hostname, "localhost"})
	if err != nil {
		return trace.Wrap(err)
	}

	if err := ioutil.WriteFile(keyPath, creds.PrivateKey, 0600); err != nil {
		return trace.Wrap(err, "error writing key PEM")
	}
	if err := ioutil.WriteFile(certPath, creds.Cert, 0600); err != nil {
		return trace.Wrap(err, "error writing cert PEM")
	}
	return nil
}
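
// A quick, illustrative way to confirm the generated pair is usable after the
// function above runs, reusing the same loader it calls internally:
//
//	if _, err := tls.LoadX509KeyPair(cfg.Proxy.TLSCert, cfg.Proxy.TLSKey); err != nil {
//		// the self-signed pair failed to load; regenerate or investigate
//	}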