teleport/lib/service/cfg.go

779 lines
25 KiB
Go
Raw Normal View History

2015-10-31 18:56:49 +00:00
/*
Copyright 2015-2021 Gravitational, Inc.
2015-10-31 18:56:49 +00:00
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
2017-04-07 23:51:31 +00:00
"fmt"
2016-03-11 01:03:01 +00:00
"io"
"net"
"net/url"
"os"
"path/filepath"
"strconv"
2017-04-07 23:51:31 +00:00
"time"
"golang.org/x/crypto/ssh"
"k8s.io/apimachinery/pkg/util/validation"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/backend/lite"
"github.com/gravitational/teleport/lib/backend/memory"
"github.com/gravitational/teleport/lib/bpf"
"github.com/gravitational/teleport/lib/defaults"
External events and sessions storage. Updates #1755 Design ------ This commit adds support for pluggable events and sessions recordings and adds several plugins. In case if external sessions recording storage is used, nodes or proxies depending on configuration store the session recordings locally and then upload the recordings in the background. Non-print session events are always sent to the remote auth server as usual. In case if remote events storage is used, auth servers download recordings from it during playbacks. DynamoDB event backend ---------------------- Transient DynamoDB backend is added for events storage. Events are stored with default TTL of 1 year. External lambda functions should be used to forward events from DynamoDB. Parameter audit_table_name in storage section turns on dynamodb backend. The table will be auto created. S3 sessions backend ------------------- If audit_sessions_uri is specified to s3://bucket-name node or proxy depending on recording mode will start uploading the recorded sessions to the bucket. If the bucket does not exist, teleport will attempt to create a bucket with versioning and encryption turned on by default. Teleport will turn on bucket-side encryption for the tarballs using aws:kms key. File sessions backend --------------------- If audit_sessions_uri is specified to file:///folder teleport will start writing tarballs to this folder instead of sending records to the file server. This is helpful for plugin writers who can use fuse or NFS mounted storage to handle the data. Working dynamic configuration.
2018-03-04 02:26:44 +00:00
"github.com/gravitational/teleport/lib/events"
2015-12-03 09:26:34 +00:00
"github.com/gravitational/teleport/lib/limiter"
2018-02-24 01:23:09 +00:00
"github.com/gravitational/teleport/lib/pam"
"github.com/gravitational/teleport/lib/plugin"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/sshca"
2021-01-15 02:21:38 +00:00
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/ghodss/yaml"
"github.com/gravitational/trace"
2018-10-26 22:20:02 +00:00
"github.com/jonboulle/clockwork"
)
// Config structure is used to initialize _all_ services Teleport can run.
// Some settings are global (like DataDir) while others are grouped into
// sections, like AuthConfig.
type Config struct {
	// DataDir provides directory where teleport stores its permanent state
	// (in case of auth server backed by BoltDB) or local state, e.g. keys
	DataDir string
	// Hostname is a node host name
	Hostname string
	// Token is used to register this Teleport instance with the auth server
	Token string
	// AuthServers is a list of auth servers, proxies and peer auth servers to
	// connect to. Yes, this is not just auth servers, the field name is
	// misleading.
	AuthServers []utils.NetAddr
	// Identities is an optional list of pre-generated key pairs
	// for teleport roles, this is helpful when server is preconfigured
	Identities []*auth.Identity
	// AdvertiseIP is used to "publish" an alternative IP address or hostname this node
	// can be reached on, if running behind NAT
	AdvertiseIP string
	// CachePolicy sets caching policy for nodes and proxies
	// in case if they lose connection to auth servers
	CachePolicy CachePolicy
	// Auth service configuration. Manages cluster state and configuration.
	Auth AuthConfig
	// Proxy service configuration. Manages incoming and outbound
	// connections to the cluster.
	Proxy ProxyConfig
	// SSH service configuration. Manages SSH servers running within the cluster.
	SSH SSHConfig
	// App service configuration. Manages applications running within the cluster.
	Apps AppsConfig
	// Databases defines database proxy service configuration.
	Databases DatabasesConfig
	// Keygen points to a key generator implementation
	Keygen sshca.Authority
	// HostUUID is a unique UUID of this host (it will be known via this UUID within
	// a teleport cluster). It's automatically generated on 1st start
	HostUUID string
	// Console is a writer to speak to a user
	Console io.Writer
	// ReverseTunnels is a list of reverse tunnels to create on the
	// first cluster start
	ReverseTunnels []services.ReverseTunnel
	// OIDCConnectors is a list of trusted OpenID Connect identity providers
	OIDCConnectors []services.OIDCConnector
	// PIDFile is a full path of the PID file for teleport daemon
	PIDFile string
	// Trust is a service that manages certificate authorities
	Trust services.Trust
	// Presence service is a discovery and heartbeat tracker
	Presence services.Presence
	// Events is events service
	Events services.Events
	// Provisioner is a service that keeps track of provisioning tokens
	Provisioner services.Provisioner
	// Identity is a service that manages users and credentials
	Identity services.Identity
	// Access is a service that controls access
	Access services.Access
	// ClusterConfiguration is a service that provides cluster configuration
	ClusterConfiguration services.ClusterConfiguration
	// CipherSuites is a list of TLS ciphersuites that Teleport supports. If
	// omitted, a Teleport selected list of defaults will be used.
	CipherSuites []uint16
	// Ciphers is a list of SSH ciphers that the server supports. If omitted,
	// the defaults will be used.
	Ciphers []string
	// KEXAlgorithms is a list of SSH key exchange (KEX) algorithms that the
	// server supports. If omitted, the defaults will be used.
	KEXAlgorithms []string
	// MACAlgorithms is a list of SSH message authentication codes (MAC) that
	// the server supports. If omitted the defaults will be used.
	MACAlgorithms []string
	// CASignatureAlgorithm is an SSH Certificate Authority (CA) signature
	// algorithm that the server uses for signing user and host certificates.
	// If omitted, the default will be used.
	CASignatureAlgorithm *string
	// DiagnosticAddr is an address for diagnostic and healthz endpoint service
	DiagnosticAddr utils.NetAddr
	// Debug sets debugging mode, results in diagnostic address
	// endpoint extended with additional /debug handlers
	Debug bool
	// UploadEventsC is a channel for upload events
	// used in tests
	UploadEventsC chan events.UploadEvent `json:"-"`
	// FileDescriptors is an optional list of file descriptors for the process
	// to inherit and use for listeners, used for in-process updates.
	FileDescriptors []FileDescriptor
	// PollingPeriod is set to override default internal polling periods
	// of sync agents, used to speed up integration tests.
	PollingPeriod time.Duration
	// ClientTimeout is set to override default client timeouts
	// used by internal clients, used to speed up integration tests.
	ClientTimeout time.Duration
	// ShutdownTimeout is set to override default shutdown timeout.
	ShutdownTimeout time.Duration
	// CAPin is the SPKI hash of the CA used to verify the Auth Server.
	CAPin string
	// Clock is used to control time in tests.
	Clock clockwork.Clock
	// FIPS means FedRAMP/FIPS 140-2 compliant configuration was requested.
	FIPS bool
	// BPFConfig holds configuration for the BPF service.
	BPFConfig *bpf.Config
	// Kube is a Kubernetes API gateway using Teleport client identities.
	Kube KubeConfig
	// Log optionally specifies the logger
	Log utils.Logger
	// PluginRegistry allows adding enterprise logic to Teleport services
	PluginRegistry plugin.Registry
}
// ApplyToken assigns a given token to all internal services but only if token
// is not an empty string.
//
// returns:
// true, nil if the token has been modified
// false, nil if the token has not been modified
// false, err if there was an error
func (cfg *Config) ApplyToken(token string) (bool, error) {
if token != "" {
var err error
cfg.Token, err = utils.ReadToken(token)
if err != nil {
return false, trace.Wrap(err)
}
return true, nil
}
return false, nil
}
2016-03-11 01:03:01 +00:00
// RoleConfig is a config for particular Teleport role
2015-10-27 00:58:39 +00:00
func (cfg *Config) RoleConfig() RoleConfig {
return RoleConfig{
DataDir: cfg.DataDir,
2016-03-05 00:27:52 +00:00
HostUUID: cfg.HostUUID,
HostName: cfg.Hostname,
2015-10-27 00:58:39 +00:00
AuthServers: cfg.AuthServers,
Auth: cfg.Auth,
Console: cfg.Console,
2015-10-27 00:58:39 +00:00
}
}
2016-03-11 01:03:01 +00:00
// DebugDumpToYAML is useful for debugging: it dumps the Config structure into
// a string
func (cfg *Config) DebugDumpToYAML() string {
shallow := *cfg
// do not copy sensitive data to stdout
shallow.Identities = nil
shallow.Auth.Authorities = nil
out, err := yaml.Marshal(shallow)
if err != nil {
return err.Error()
}
return string(out)
}
2017-04-07 23:51:31 +00:00
// CachePolicy sets caching policy for proxies and nodes
type CachePolicy struct {
	// Type sets the cache type
	Type string
	// Enabled enables or disables caching
	Enabled bool
	// TTL sets maximum TTL for the cached values
	// without explicit TTL set
	TTL time.Duration
	// NeverExpires means that cache values without TTL
	// set by the auth server won't expire
	NeverExpires bool
	// RecentTTL is the recently accessed items cache TTL
	RecentTTL *time.Duration
}
// GetRecentTTL either returns TTL that was set,
// or default recent TTL value
func (c *CachePolicy) GetRecentTTL() time.Duration {
if c.RecentTTL == nil {
return defaults.RecentCacheTTL
}
return *c.RecentTTL
2017-04-07 23:51:31 +00:00
}
// CheckAndSetDefaults checks and sets default values
func (c *CachePolicy) CheckAndSetDefaults() error {
switch c.Type {
case "", lite.GetName():
c.Type = lite.GetName()
case memory.GetName():
default:
return trace.BadParameter("unsupported cache type %q, supported values are %q and %q",
c.Type, lite.GetName(), memory.GetName())
}
return nil
}
2017-04-07 23:51:31 +00:00
// String returns human-friendly representation of the policy
func (c CachePolicy) String() string {
if !c.Enabled {
return "no cache policy"
}
recentCachePolicy := ""
if c.GetRecentTTL() == 0 {
recentCachePolicy = "will not cache frequently accessed items"
} else {
recentCachePolicy = fmt.Sprintf("will cache frequently accessed items for %v", c.GetRecentTTL())
}
2017-04-07 23:51:31 +00:00
if c.NeverExpires {
return fmt.Sprintf("%v cache that will not expire in case if connection to database is lost, %v", c.Type, recentCachePolicy)
2017-04-07 23:51:31 +00:00
}
if c.TTL == 0 {
return fmt.Sprintf("%v cache that will expire after connection to database is lost after %v, %v", c.Type, defaults.CacheTTL, recentCachePolicy)
2017-04-07 23:51:31 +00:00
}
return fmt.Sprintf("%v cache that will expire after connection to database is lost after %v, %v", c.Type, c.TTL, recentCachePolicy)
2017-04-07 23:51:31 +00:00
}
// ProxyConfig specifies configuration for proxy service
type ProxyConfig struct {
	// Enabled turns proxy role on or off for this process
	Enabled bool
	// DisableTLS is enabled if we don't want self-signed certs
	DisableTLS bool
	// DisableWebInterface allows to turn off serving the Web UI interface
	DisableWebInterface bool
	// DisableWebService turns off serving web service completely, including web UI
	DisableWebService bool
	// DisableReverseTunnel disables reverse tunnel on the proxy
	DisableReverseTunnel bool
	// DisableDatabaseProxy disables database access proxy listener
	DisableDatabaseProxy bool
	// ReverseTunnelListenAddr is address where reverse tunnel dialers connect to
	ReverseTunnelListenAddr utils.NetAddr
	// EnableProxyProtocol enables proxy protocol support
	EnableProxyProtocol bool
	// WebAddr is address for web portal of the proxy
	WebAddr utils.NetAddr
	// SSHAddr is address of ssh proxy
	SSHAddr utils.NetAddr
	// MySQLAddr is address of MySQL proxy.
	MySQLAddr utils.NetAddr
	// Limiter limits the connection and request rates.
	Limiter limiter.Config
	// PublicAddrs is a list of the public addresses the proxy advertises
	// for the HTTP endpoint. The hosts in PublicAddrs are included in the
	// list of host principals on the TLS and SSH certificate.
	PublicAddrs []utils.NetAddr
	// SSHPublicAddrs is a list of the public addresses the proxy advertises
	// for the SSH endpoint. The hosts in SSHPublicAddrs are included in the
	// list of host principals on the TLS and SSH certificate.
	SSHPublicAddrs []utils.NetAddr
	// TunnelPublicAddrs is a list of the public addresses the proxy advertises
	// for the tunnel endpoint. The hosts in TunnelPublicAddrs are included in the
	// list of host principals on the TLS and SSH certificate.
	TunnelPublicAddrs []utils.NetAddr
	// Kube specifies kubernetes proxy configuration
	Kube KubeProxyConfig
	// KeyPairs are the key and certificate pairs that the proxy will load.
	KeyPairs []KeyPairPath
	// ACME is ACME protocol support config
	ACME ACME
}
// ACME configures ACME automatic certificate renewal
type ACME struct {
	// Enabled enables or disables ACME support
	Enabled bool
	// Email receives notifications from ACME server
	Email string
	// URI is ACME server URI
	URI string
}
// KeyPairPath are paths to a key and certificate file.
type KeyPairPath struct {
	// PrivateKey is the path to a PEM encoded private key.
	PrivateKey string
	// Certificate is the path to a PEM encoded certificate.
	Certificate string
}
// KubeAddr returns the address for the Kubernetes endpoint on this proxy that
// can be reached by clients.
func (c ProxyConfig) KubeAddr() (string, error) {
	if !c.Kube.Enabled {
		return "", trace.NotFound("kubernetes support not enabled on this proxy")
	}
	// An explicitly configured kube public address wins.
	if len(c.Kube.PublicAddrs) > 0 {
		return fmt.Sprintf("https://%s", c.Kube.PublicAddrs[0].Addr), nil
	}
	// Otherwise guess the hostname from the HTTP public_addr.
	host := "<proxyhost>"
	if len(c.PublicAddrs) > 0 {
		host = c.PublicAddrs[0].Host()
	}
	addr := url.URL{
		Scheme: "https",
		Host:   net.JoinHostPort(host, strconv.Itoa(c.Kube.ListenAddr.Port(defaults.KubeListenPort))),
	}
	return addr.String(), nil
}
// KubeProxyConfig specifies configuration for the kubernetes proxy service
type KubeProxyConfig struct {
	// Enabled turns kubernetes proxy role on or off for this process
	Enabled bool
	// ListenAddr is the address to listen on for incoming kubernetes requests.
	ListenAddr utils.NetAddr
	// ClusterOverride causes all traffic to go to a specific remote
	// cluster, used only in tests
	ClusterOverride string
	// PublicAddrs is a list of the public addresses the Teleport Kube proxy can be accessed by,
	// it also affects the host principals and routing logic
	PublicAddrs []utils.NetAddr
	// KubeconfigPath is a path to kubeconfig
	KubeconfigPath string
}
// AuthConfig is a configuration of the auth server
type AuthConfig struct {
	// Enabled turns auth role on or off for this process
	Enabled bool
	// EnableProxyProtocol enables proxy protocol support
	EnableProxyProtocol bool
	// SSHAddr is the listening address of SSH tunnel to HTTP service
	SSHAddr utils.NetAddr
	// Authorities is a set of trusted certificate authorities
	// that will be added by this auth server on the first start
	Authorities []services.CertAuthority
	// Resources is a set of previously backed up resources
	// used to bootstrap backend state on the first start.
	Resources []services.Resource
	// Roles is a set of roles to pre-provision for this cluster
	Roles []services.Role
	// ClusterName is a name that identifies this authority and all
	// host nodes in the cluster that will share this authority domain name
	// as a base name, e.g. if authority domain name is example.com,
	// all nodes in the cluster will have UUIDs in the form: <uuid>.example.com
	ClusterName services.ClusterName
	// StaticTokens are pre-defined host provisioning tokens supplied via config file for
	// environments where paranoid security is not needed
	StaticTokens services.StaticTokens
	// StorageConfig contains configuration settings for the storage backend.
	StorageConfig backend.Config
	// Limiter limits the connection and request rates.
	Limiter limiter.Config
	// NoAudit, when set to true, disables session recording and event audit
	NoAudit bool
	// Preference defines the authentication preference (type and second factor) for
	// the auth server.
	Preference services.AuthPreference
	// ClusterConfig stores cluster level configuration.
	ClusterConfig services.ClusterConfig
	// LicenseFile is a full path to the license file
	LicenseFile string
	// PublicAddrs affects the SSH host principals and DNS names added to the SSH and TLS certs.
	PublicAddrs []utils.NetAddr
}
// SSHConfig configures SSH server node role
type SSHConfig struct {
	// Enabled turns the SSH node role on or off for this process
	Enabled bool
	// Addr is the listening address for the SSH service
	Addr utils.NetAddr
	// Namespace is the namespace this node belongs to
	Namespace string
	// Shell is the shell to use for sessions
	Shell string
	// Limiter limits the connection and request rates
	Limiter limiter.Config
	// Labels is a map of static labels for this node
	Labels map[string]string
	// CmdLabels is a set of dynamic labels generated by command output
	CmdLabels services.CommandLabels
	// PermitUserEnvironment enables applying user environment settings to
	// sessions — NOTE(review): confirm exact semantics against node code.
	PermitUserEnvironment bool
	// PAM holds PAM configuration for Teleport.
	PAM *pam.Config
	// PublicAddrs affects the SSH host principals and DNS names added to the SSH and TLS certs.
	PublicAddrs []utils.NetAddr
	// BPF holds BPF configuration for Teleport.
	BPF *bpf.Config
}
// KubeConfig specifies configuration for kubernetes service
type KubeConfig struct {
	// Enabled turns kubernetes service role on or off for this process
	Enabled bool
	// ListenAddr is the address to listen on for incoming kubernetes requests.
	// Optional.
	ListenAddr *utils.NetAddr
	// PublicAddrs is a list of the public addresses the Teleport kubernetes
	// service can be reached by the proxy service.
	PublicAddrs []utils.NetAddr
	// KubeClusterName is the name of a kubernetes cluster this proxy is running
	// in. If empty, defaults to the Teleport cluster name.
	KubeClusterName string
	// KubeconfigPath is a path to kubeconfig
	KubeconfigPath string
	// StaticLabels is a map of static labels, used for RBAC on clusters.
	StaticLabels map[string]string
	// DynamicLabels is a set of command-generated labels, used for RBAC on clusters.
	DynamicLabels services.CommandLabels
	// Limiter limits the connection and request rates.
	Limiter limiter.Config
}
// DatabasesConfig configures the database proxy service.
type DatabasesConfig struct {
	// Enabled enables the database proxy service.
	Enabled bool
	// Databases is a list of databases proxied by this service.
	Databases []Database
}
// Database represents a single database that's being proxied.
type Database struct {
	// Name is the database name, used to refer to in CLI.
	Name string
	// Description is a free-form database description.
	Description string
	// Protocol is the database type, e.g. postgres or mysql.
	Protocol string
	// URI is the database endpoint to connect to.
	URI string
	// StaticLabels is a map of database static labels.
	StaticLabels map[string]string
	// DynamicLabels is a list of database dynamic labels.
	DynamicLabels services.CommandLabels
	// CACert is an optional database CA certificate.
	CACert []byte
	// AWS contains AWS specific settings for RDS/Aurora.
	AWS DatabaseAWS
}
// DatabaseAWS contains AWS specific settings for RDS/Aurora databases.
type DatabaseAWS struct {
	// Region is the cloud region database is running in when using AWS RDS.
	Region string
}
// Check validates the database proxy configuration.
func (d *Database) Check() error {
	if d.Name == "" {
		return trace.BadParameter("empty database name")
	}
	// Unlike application access proxy, database proxy name doesn't necessarily
	// need to be a valid subdomain but use the same validation logic for the
	// simplicity and consistency.
	if errs := validation.IsDNS1035Label(d.Name); len(errs) > 0 {
		return trace.BadParameter("invalid database %q name: %v", d.Name, errs)
	}
	if !utils.SliceContainsStr(defaults.DatabaseProtocols, d.Protocol) {
		return trace.BadParameter("unsupported database %q protocol %q, supported are: %v",
			d.Name, d.Protocol, defaults.DatabaseProtocols)
	}
	// The endpoint must be in "host:port" form.
	_, _, err := net.SplitHostPort(d.URI)
	if err != nil {
		return trace.BadParameter("invalid database %q address %q: %v",
			d.Name, d.URI, err)
	}
	// CA certificate is optional; when supplied it must be valid PEM.
	if len(d.CACert) == 0 {
		return nil
	}
	if _, err := tlsca.ParseCertificatePEM(d.CACert); err != nil {
		return trace.BadParameter("provided database %q CA doesn't appear to be a valid x509 certificate: %v",
			d.Name, err)
	}
	return nil
}
// AppsConfig configures application proxy service.
type AppsConfig struct {
	// Enabled enables application proxying service.
	Enabled bool
	// DebugApp enables a header dumping debugging application.
	DebugApp bool
	// Apps is the list of applications that are being proxied.
	Apps []App
}
// App is the specific application that will be proxied by the application
// service. This needs to exist because if the "config" package tries to
// directly create a services.App it will get into circular imports.
type App struct {
	// Name of the application.
	Name string
	// URI is the internal address of the application.
	URI string
	// PublicAddr is the public address of the application. This is the
	// address users will access the application at.
	PublicAddr string
	// StaticLabels is a map of static labels to apply to this application.
	StaticLabels map[string]string
	// DynamicLabels is a list of dynamic labels to apply to this application.
	DynamicLabels services.CommandLabels
	// InsecureSkipVerify is used to skip validating the server's certificate.
	InsecureSkipVerify bool
	// Rewrite defines a block that is used to rewrite requests and responses.
	Rewrite *Rewrite
}
// Check validates an application.
func (a App) Check() error {
	switch {
	case a.Name == "":
		return trace.BadParameter("missing application name")
	case a.URI == "":
		return trace.BadParameter("missing application URI")
	}
	// The name must be a valid subdomain because for trusted clusters it is
	// used to construct the domain the application will be available at.
	if errs := validation.IsDNS1035Label(a.Name); len(errs) > 0 {
		return trace.BadParameter("application name %q must be a valid DNS subdomain: https://goteleport.com/teleport/docs/application-access/#application-name", a.Name)
	}
	// Make sure the internal address parses as a URL.
	if _, err := url.Parse(a.URI); err != nil {
		return trace.BadParameter("application URI invalid: %v", err)
	}
	if a.PublicAddr == "" {
		return nil
	}
	// The public address must not carry a port (apps are served on the web
	// proxy's port) and must not be an IP literal (routing is DNS based).
	if _, _, err := net.SplitHostPort(a.PublicAddr); err == nil {
		return trace.BadParameter("public_addr %q can not contain a port, applications will be available on the same port as the web proxy", a.PublicAddr)
	}
	if net.ParseIP(a.PublicAddr) != nil {
		return trace.BadParameter("public_addr %q can not be an IP address, Teleport Application Access uses DNS names for routing", a.PublicAddr)
	}
	return nil
}
// Rewrite is a list of rewriting rules to apply to requests and responses.
type Rewrite struct {
	// Redirect is a list of hosts that should be rewritten to the public address.
	Redirect []string
}
// MakeDefaultConfig creates a new Config structure and populates it with defaults
func MakeDefaultConfig() *Config {
	cfg := &Config{}
	ApplyDefaults(cfg)
	return cfg
}
2016-02-17 19:58:28 +00:00
// ApplyDefaults applies default values to the existing config structure
func ApplyDefaults(cfg *Config) {
// Get defaults for Cipher, Kex algorithms, and MAC algorithms from
// golang.org/x/crypto/ssh default config.
var sc ssh.Config
sc.SetDefaults()
if cfg.Log == nil {
cfg.Log = utils.NewLogger()
}
// Remove insecure and (borderline insecure) cryptographic primitives from
// default configuration. These can still be added back in file configuration by
// users, but not supported by default by Teleport. See #1856 for more
// details.
kex := utils.RemoveFromSlice(sc.KeyExchanges,
defaults.DiffieHellmanGroup1SHA1,
defaults.DiffieHellmanGroup14SHA1)
macs := utils.RemoveFromSlice(sc.MACs,
defaults.HMACSHA1,
defaults.HMACSHA196)
hostname, err := os.Hostname()
if err != nil {
hostname = "localhost"
cfg.Log.Errorf("Failed to determine hostname: %v.", err)
}
// Global defaults.
cfg.Hostname = hostname
cfg.DataDir = defaults.DataDir
cfg.Console = os.Stdout
2018-06-08 23:50:43 +00:00
cfg.CipherSuites = utils.DefaultCipherSuites()
cfg.Ciphers = sc.Ciphers
cfg.KEXAlgorithms = kex
cfg.MACAlgorithms = macs
// Auth service defaults.
cfg.Auth.Enabled = true
cfg.Auth.SSHAddr = *defaults.AuthListenAddr()
cfg.Auth.StorageConfig.Type = lite.GetName()
cfg.Auth.StorageConfig.Params = backend.Params{defaults.BackendPath: filepath.Join(cfg.DataDir, defaults.BackendDir)}
2017-10-11 19:09:06 +00:00
cfg.Auth.StaticTokens = services.DefaultStaticTokens()
cfg.Auth.ClusterConfig = services.DefaultClusterConfig()
cfg.Auth.Preference = services.DefaultAuthPreference()
defaults.ConfigureLimiter(&cfg.Auth.Limiter)
cfg.Auth.LicenseFile = filepath.Join(cfg.DataDir, defaults.LicenseFile)
// Proxy service defaults.
cfg.Proxy.Enabled = true
cfg.Proxy.SSHAddr = *defaults.ProxyListenAddr()
cfg.Proxy.WebAddr = *defaults.ProxyWebListenAddr()
cfg.Proxy.ReverseTunnelListenAddr = *defaults.ReverseTunnelListenAddr()
defaults.ConfigureLimiter(&cfg.Proxy.Limiter)
// Kubernetes proxy service defaults.
cfg.Proxy.Kube.Enabled = false
cfg.Proxy.Kube.ListenAddr = *defaults.KubeProxyListenAddr()
// SSH service defaults.
cfg.SSH.Enabled = true
2016-02-16 21:18:58 +00:00
cfg.SSH.Shell = defaults.DefaultShell
defaults.ConfigureLimiter(&cfg.SSH.Limiter)
2018-02-24 01:23:09 +00:00
cfg.SSH.PAM = &pam.Config{Enabled: false}
cfg.SSH.BPF = &bpf.Config{Enabled: false}
// Kubernetes service defaults.
cfg.Kube.Enabled = false
defaults.ConfigureLimiter(&cfg.Kube.Limiter)
// Apps service defaults. It's disabled by default.
cfg.Apps.Enabled = false
2021-01-15 02:21:38 +00:00
// Databases proxy service is disabled by default.
cfg.Databases.Enabled = false
}
// ApplyFIPSDefaults updates default configuration to be FedRAMP/FIPS 140-2
// compliant. Call after ApplyDefaults: it overrides the cryptographic
// primitives and auth settings that ApplyDefaults set.
func ApplyFIPSDefaults(cfg *Config) {
	cfg.FIPS = true

	// Update TLS and SSH cryptographic primitives to the FIPS-approved sets.
	cfg.CipherSuites = defaults.FIPSCipherSuites
	cfg.Ciphers = defaults.FIPSCiphers
	cfg.KEXAlgorithms = defaults.FIPSKEXAlgorithms
	cfg.MACAlgorithms = defaults.FIPSMACAlgorithms

	// Only SSO based authentication is supported in FIPS mode. The SSO
	// provider is where any FedRAMP/FIPS 140-2 compliance (like password
	// complexity) should be enforced.
	cfg.Auth.ClusterConfig.SetLocalAuth(false)

	// Update cluster configuration to record sessions at node, this way the
	// entire cluster is FedRAMP/FIPS 140-2 compliant.
	cfg.Auth.ClusterConfig.SetSessionRecording(services.RecordAtNode)
}