teleport/constants.go

/*
Copyright 2018-2019 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package teleport
import (
"strings"
"time"
)

// WebAPIVersion is the current web API version
const WebAPIVersion = "v1"

// ForeverTTL means that the object's TTL will not expire unless deleted
const ForeverTTL time.Duration = 0
const (
// SSHAuthSock is the environment variable pointing to the
// Unix socket the SSH agent is running on.
SSHAuthSock = "SSH_AUTH_SOCK"
// SSHAgentPID is the environment variable pointing to the agent
// process ID
SSHAgentPID = "SSH_AGENT_PID"
// SSHTeleportUser is the current Teleport user that is logged in.
SSHTeleportUser = "SSH_TELEPORT_USER"
// SSHSessionWebproxyAddr is the address of the web proxy.
SSHSessionWebproxyAddr = "SSH_SESSION_WEBPROXY_ADDR"
// SSHTeleportClusterName is the name of the cluster this node belongs to.
SSHTeleportClusterName = "SSH_TELEPORT_CLUSTER_NAME"
// SSHTeleportHostUUID is the UUID of the host.
SSHTeleportHostUUID = "SSH_TELEPORT_HOST_UUID"
// SSHSessionID is the UUID of the current session.
SSHSessionID = "SSH_SESSION_ID"
)
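
// For illustration, a client can reach the user's SSH agent through
// SSHAuthSock with the golang.org/x/crypto/ssh/agent package; a minimal
// sketch, assuming the agent is running:
//
//	conn, err := net.Dial("unix", os.Getenv(SSHAuthSock))
//	if err != nil {
//		return err // no agent running, or the socket is stale
//	}
//	keyring := agent.NewClient(conn)
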
const (
// HTTPSProxy is an environment variable pointing to an HTTPS proxy.
HTTPSProxy = "HTTPS_PROXY"
// HTTPProxy is an environment variable pointing to an HTTP proxy.
HTTPProxy = "HTTP_PROXY"
// NoProxy is an environment variable listing the hosts for which
// HTTPS_PROXY and HTTP_PROXY should be ignored
NoProxy = "NO_PROXY"
)
const (
// TOTPValidityPeriod is the number of seconds a TOTP token is valid.
TOTPValidityPeriod uint = 30
// TOTPSkew is the number of periods before and after the current one
// that are also accepted, to tolerate clock skew.
TOTPSkew uint = 1
)
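
// For illustration, a submitted code is accepted for the current period plus
// TOTPSkew periods on either side, so the effective validity window is:
//
//	window := time.Duration((2*TOTPSkew+1)*TOTPValidityPeriod) * time.Second // 90s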
const (
// ComponentMemory is a memory backend
ComponentMemory = "memory"
// ComponentAuthority is a TLS and an SSH certificate authority
ComponentAuthority = "ca"
// ComponentProcess is a main control process
ComponentProcess = "proc"
// ComponentServer is a server subcomponent of some services
ComponentServer = "server"
// ComponentReverseTunnelServer is the reverse tunnel server that, together
// with the agent, establishes a bi-directional SSH reverse tunnel
// to bypass firewall restrictions
ComponentReverseTunnelServer = "proxy:server"
// ComponentReverseTunnelAgent is the reverse tunnel agent that, together
// with the server, establishes a bi-directional SSH reverse tunnel
// to bypass firewall restrictions
ComponentReverseTunnelAgent = "proxy:agent"
// ComponentLabel is a component label name used in reporting
ComponentLabel = "component"
// ComponentKube is a kubernetes proxy
ComponentKube = "proxy:kube"
// ComponentAuth is the cluster CA node (auth server API)
ComponentAuth = "auth"
// ComponentGRPC is the gRPC server
ComponentGRPC = "grpc"
// ComponentMigrate is responsible for data migrations
ComponentMigrate = "migrate"
// ComponentNode is SSH node (SSH server serving requests)
ComponentNode = "node"
// ComponentForwardingNode is a forwarding SSH node (an in-memory SSH server
// that forwards requests to the target host)
ComponentForwardingNode = "node:forward"
// ComponentProxy is SSH proxy (SSH server forwarding connections)
ComponentProxy = "proxy"
// ComponentDiagnostic is a diagnostic service
ComponentDiagnostic = "diag"
// ComponentClient is a client
ComponentClient = "client"
// ComponentTunClient is a tunnel client
ComponentTunClient = "client:tunnel"
// ComponentCache is a cache component
ComponentCache = "cache"
// ComponentBackend is a backend component
ComponentBackend = "backend"
// ComponentCachingClient is a caching auth client
ComponentCachingClient = "client:cache"
// ComponentSubsystemProxy is the proxy subsystem.
ComponentSubsystemProxy = "subsystem:proxy"
// ComponentLocalTerm is a terminal on a regular SSH node.
ComponentLocalTerm = "term:local"
// ComponentRemoteTerm is a terminal on a forwarding SSH node.
ComponentRemoteTerm = "term:remote"
// ComponentRemoteSubsystem is subsystem on a forwarding SSH node.
ComponentRemoteSubsystem = "subsystem:remote"
// ComponentAuditLog is audit log component
ComponentAuditLog = "audit"
// ComponentKeyAgent is an agent that has loaded the sessions keys and
// certificates for a user connected to a proxy.
ComponentKeyAgent = "keyagent"
// ComponentKeyStore is all sessions keys and certificates a user has on disk
// for all proxies.
ComponentKeyStore = "keystore"
// ComponentConnectProxy is the HTTP CONNECT proxy used to tunnel connections.
ComponentConnectProxy = "http:proxy"
// ComponentSOCKS is a SOCKS5 proxy.
ComponentSOCKS = "socks"
// ComponentKeyGen is the public/private keypair generator.
ComponentKeyGen = "keygen"
// ComponentFirestore represents firestore clients
ComponentFirestore = "firestore"
// ComponentSession is an active session.
ComponentSession = "session"
// ComponentDynamoDB represents dynamodb clients
ComponentDynamoDB = "dynamodb"
// ComponentPAM is the pluggable authentication module (PAM)
ComponentPAM = "pam"
// ComponentUpload is a session recording upload server
ComponentUpload = "upload"
// ComponentWeb is a web server
ComponentWeb = "web"
// ComponentWebsocket is websocket server that the web client connects to.
ComponentWebsocket = "websocket"
// ComponentRBAC is role-based access control.
ComponentRBAC = "rbac"
// ComponentKeepAlive is keep-alive messages sent from clients to servers
// and vice versa.
ComponentKeepAlive = "keepalive"
// ComponentTSH is the "tsh" binary.
ComponentTSH = "tsh"
// ComponentKubeClient is the Kubernetes client.
ComponentKubeClient = "client:kube"
// ComponentBuffer is in-memory event circular buffer
// used to broadcast events to subscribers.
ComponentBuffer = "buffer"
// ComponentBPF is the eBPF package.
ComponentBPF = "bpf"
// ComponentCgroup is the cgroup package.
ComponentCgroup = "cgroups"
// DebugEnvVar tells tests to use verbose debug output
DebugEnvVar = "DEBUG"
// VerboseLogsEnvVar forces all logs to be verbose (down to DEBUG level)
VerboseLogsEnvVar = "TELEPORT_DEBUG"
// IterationsEnvVar sets the number of test iterations to run
IterationsEnvVar = "ITERATIONS"
// DefaultTerminalWidth defines the default width of a server-side allocated
// pseudo TTY
DefaultTerminalWidth = 80
// DefaultTerminalHeight defines the default height of a server-side allocated
// pseudo TTY
DefaultTerminalHeight = 25
// SafeTerminalType is the fallback TTY type to use when $TERM
// is not defined
SafeTerminalType = "xterm"
// ConnectorOIDC means connector type OIDC
ConnectorOIDC = "oidc"
// ConnectorSAML means connector type SAML
ConnectorSAML = "saml"
// ConnectorGithub means connector type Github
ConnectorGithub = "github"
// DataDirParameterName is the name of the data dir configuration parameter passed
// to all backends during initialization
DataDirParameterName = "data_dir"
// KeepAliveReqType is the SSH request type used to keep the connection
// alive. A client and a server keep pinging each other with it (see the
// illustrative sketch after this const block).
KeepAliveReqType = "keepalive@openssh.com"
// RecordingProxyReqType is the name of a global request which returns
// whether or not the proxy is recording sessions.
RecordingProxyReqType = "recording-proxy@teleport.com"
// OTP means One-time Password Algorithm for Two-Factor Authentication.
OTP = "otp"
// TOTP means Time-based One-time Password Algorithm for Two-Factor Authentication.
TOTP = "totp"
// HOTP means HMAC-based One-time Password Algorithm for Two-Factor Authentication.
HOTP = "hotp"
// U2F means Universal 2nd Factor for Two-Factor Authentication.
U2F = "u2f"
// OFF means no second factor for Two-Factor Authentication.
OFF = "off"
// Local means authentication will happen locally within the Teleport cluster.
Local = "local"
// OIDC means authentication will happen remotely using an OIDC connector.
OIDC = ConnectorOIDC
// SAML means authentication will happen remotely using a SAML connector.
SAML = ConnectorSAML
// Github means authentication will happen remotely using a Github connector.
Github = ConnectorGithub
// JSON means JSON serialization format
JSON = "json"
// YAML means YAML serialization format
YAML = "yaml"
// Text means text serialization format
Text = "text"
// LinuxAdminGID is the ID of the standard adm group on Linux
LinuxAdminGID = 4
// LinuxOS is the GOOS constant used for Linux.
LinuxOS = "linux"
// WindowsOS is the GOOS constant used for Microsoft Windows.
WindowsOS = "windows"
// DarwinOS is the GOOS constant for Apple macOS/darwin.
DarwinOS = "darwin"
// DirMaskSharedGroup is the mask for a directory accessible
// by the owner and group
DirMaskSharedGroup = 0770
// FileMaskOwnerOnly is the file mask that allows read/write access
// to the owner only
FileMaskOwnerOnly = 0600
// On means mode is on
On = "on"
// Off means mode is off
Off = "off"
// SchemeS3 is the S3 URI scheme, meaning upload to or download from
// S3-like object storage
SchemeS3 = "s3"
// SchemeGCS is the GCS URI scheme, meaning upload to or download from
// GCS-like object storage
SchemeGCS = "gs"
// Region is AWS region parameter
Region = "region"
// SchemeFile is a local disk file storage
SchemeFile = "file"
// SchemeStdout outputs audit log entries to stdout
SchemeStdout = "stdout"
// LogsDir is a log subdirectory for events and logs
LogsDir = "log"
// Syslog is a mode for syslog logging
Syslog = "syslog"
// HumanDateFormat is a human-readable date format
HumanDateFormat = "Jan _2 15:04 UTC"
// HumanDateFormatSeconds is a human-readable date format with seconds
HumanDateFormatSeconds = "Jan _2 15:04:05 UTC"
// HumanDateFormatMilli is a human-readable date format with milliseconds
HumanDateFormatMilli = "Jan _2 15:04:05.000 UTC"
// DebugLevel is a debug logging level name
DebugLevel = "debug"
)
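
// For illustration, the keepalive sketch referenced above: a minimal loop a
// client might run over an established golang.org/x/crypto/ssh connection,
// assuming conn is an ssh.Conn:
//
//	for range time.Tick(30 * time.Second) {
//		if _, _, err := conn.SendRequest(KeepAliveReqType, true, nil); err != nil {
//			return err // the connection is dead
//		}
//	}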
// Component generates "component:subcomponent1:subcomponent2" strings used
// in debugging
func Component(components ...string) string {
return strings.Join(components, ":")
}
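
// For example, building a log prefix for a subcomponent:
//
//	prefix := Component(ComponentReverseTunnelServer, "agent") // "proxy:server:agent"
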
const (
// AuthorizedKeys are public keys that are checked against User CAs.
AuthorizedKeys = "authorized_keys"
// KnownHosts are public keys that are checked against Host CAs.
KnownHosts = "known_hosts"
)
const (
// CertExtensionPermitAgentForwarding allows agent forwarding for certificate
CertExtensionPermitAgentForwarding = "permit-agent-forwarding"
// CertExtensionPermitPTY allows user to request PTY
CertExtensionPermitPTY = "permit-pty"
// CertExtensionPermitPortForwarding allows user to request port forwarding
CertExtensionPermitPortForwarding = "permit-port-forwarding"
// CertExtensionTeleportRoles is used to propagate teleport roles
CertExtensionTeleportRoles = "teleport-roles"
// CertExtensionTeleportRouteToCluster is used to encode
// the target cluster to route to in the certificate
CertExtensionTeleportRouteToCluster = "teleport-route-to-cluster"
// CertExtensionTeleportTraits is used to propagate traits about the user.
CertExtensionTeleportTraits = "teleport-traits"
// CertExtensionTeleportActiveRequests is used to track which privilege
// escalation requests were used to construct the certificate.
CertExtensionTeleportActiveRequests = "teleport-active-requests"
)
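
// For illustration, these extensions travel in the Permissions of an SSH
// certificate (golang.org/x/crypto/ssh); a minimal sketch of minting one:
//
//	cert := &ssh.Certificate{
//		Permissions: ssh.Permissions{
//			Extensions: map[string]string{
//				CertExtensionPermitPTY:            "",
//				CertExtensionPermitPortForwarding: "",
//			},
//		},
//	}
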
const (
// NetIQ is an identity provider.
NetIQ = "netiq"
// ADFS is Microsoft Active Directory Federation Services
ADFS = "adfs"
)
const (
// RemoteCommandSuccess is returned when a command has successfully executed.
RemoteCommandSuccess = 0
// RemoteCommandFailure is returned when a command has failed to execute and
// we don't have another status code for it.
RemoteCommandFailure = 255
)
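
// For illustration, a handler running a remote command might map its outcome
// to these codes when no more specific exit status is available:
//
//	if err := cmd.Run(); err != nil {
//		os.Exit(RemoteCommandFailure)
//	}
//	os.Exit(RemoteCommandSuccess)
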
// MaxEnvironmentFileLines is the maximum number of lines in an environment file.
const MaxEnvironmentFileLines = 1000
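
// For illustration, a reader enforcing this cap might look like the sketch
// below, assuming r is an io.Reader over the environment file:
//
//	scanner := bufio.NewScanner(r)
//	for n := 0; scanner.Scan(); n++ {
//		if n > MaxEnvironmentFileLines {
//			return nil, fmt.Errorf("environment file exceeds %v lines", MaxEnvironmentFileLines)
//		}
//		// parse KEY=VALUE from scanner.Text()
//	}
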
const (
// CertificateFormatOldSSH is used to make Teleport interoperate with older
// versions of OpenSSH.
CertificateFormatOldSSH = "oldssh"
// CertificateFormatStandard is used for normal Teleport operation without any
// compatibility modes.
CertificateFormatStandard = "standard"
// CertificateFormatUnspecified is used to check if the format was specified
// or not.
CertificateFormatUnspecified = ""
// DurationNever is a human-friendly shortcut that is interpreted as a Duration of 0
DurationNever = "never"
)
const (
// TraitInternalPrefix is the role variable prefix that indicates it's for
// local accounts.
TraitInternalPrefix = "internal"
// TraitLogins is the name of the role variable used to store
// allowed logins.
TraitLogins = "logins"
// TraitKubeGroups is the name of the role variable used to store
// allowed kubernetes groups
TraitKubeGroups = "kubernetes_groups"
// TraitInternalLoginsVariable is the variable used to store allowed
// logins for local accounts.
TraitInternalLoginsVariable = "{{internal.logins}}"
// TraitInternalKubeGroupsVariable is the variable used to store allowed
// kubernetes groups for local accounts.
TraitInternalKubeGroupsVariable = "{{internal.kubernetes_groups}}"
)
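
// For illustration, a simplified expansion of the internal logins variable,
// assuming traits is a map[string][]string of the user's traits and allowed
// holds a role's logins template string:
//
//	logins := strings.Join(traits[TraitLogins], ",")
//	expanded := strings.Replace(allowed, TraitInternalLoginsVariable, logins, -1)
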
const (
// GSuiteIssuerURL is the issuer URL used for the GSuite provider
GSuiteIssuerURL = "https://accounts.google.com"
// GSuiteGroupsEndpoint is the GSuite admin groups API endpoint
GSuiteGroupsEndpoint = "https://www.googleapis.com/admin/directory/v1/groups"
// GSuiteGroupsScope is the scope used to get access to the admin groups API
GSuiteGroupsScope = "https://www.googleapis.com/auth/admin.directory.group.readonly"
// GSuiteDomainClaim is the domain name claim for GSuite
GSuiteDomainClaim = "hd"
)
// SCP is Secure Copy.
const SCP = "scp"
// Root is *nix system administrator account name.
const Root = "root"

// AdminRoleName is the name of the default admin role for all local users if
// another role is not explicitly assigned (Enterprise only).
const AdminRoleName = "admin"
// DefaultImplicitRole is the implicit role that gets added to all
// service.RoleSet objects.
const DefaultImplicitRole = "default-implicit-role"
// APIDomain is the default domain name for the Auth server API
const APIDomain = "teleport.cluster.local"
// MinClientVersion is the minimum client version required by the server.
const MinClientVersion = "3.0.0"
const (
// RemoteClusterStatusOffline indicates that the cluster is considered
// offline, since it has missed a series of heartbeats
RemoteClusterStatusOffline = "offline"
// RemoteClusterStatusOnline indicates that the cluster is sending heartbeats
// at the expected interval
RemoteClusterStatusOnline = "online"
)
const (
// SharedDirMode is a mode for a directory shared with group
SharedDirMode = 0750
// PrivateDirMode is a mode for private directories
PrivateDirMode = 0700
)
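
// For illustration, creating a private data directory with the mode above:
//
//	if err := os.MkdirAll(dataDir, PrivateDirMode); err != nil {
//		return err
//	}
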
const (
// SessionEvent is sent by servers to clients when an audit event occurs on
// the session.
SessionEvent = "x-teleport-event"
// VersionRequest is sent by clients to server requesting the Teleport
// version they are running.
VersionRequest = "x-teleport-version"
)
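
// For illustration, a server might push an audit event to a connected client
// over the session channel, assuming ch is an ssh.Channel and payload is a
// marshaled event:
//
//	_, err := ch.SendRequest(SessionEvent, false, payload)
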
const (
// EnvKubeConfig is environment variable for kubeconfig
EnvKubeConfig = "KUBECONFIG"
// KubeConfigDir is a default directory where k8s stores its user local config
KubeConfigDir = ".kube"
// KubeConfigFile is a default filename where k8s stores its user local config
KubeConfigFile = "config"
// EnvHome is home environment variable
EnvHome = "HOME"
// EnvUserProfile is the home directory environment variable on Windows.
EnvUserProfile = "USERPROFILE"
// KubeServiceAddr is an address for kubernetes endpoint service
KubeServiceAddr = "kubernetes.default.svc.cluster.local:443"
// KubeCAPath is the hardcoded path to the CA certificate mounted inside
// every K8s pod
KubeCAPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
// KubeKindCSR is a certificate signing request
KubeKindCSR = "CertificateSigningRequest"
// KubeKindPod is a kubernetes pod
KubeKindPod = "Pod"
// KubeMetadataNameSelector is a selector for name metadata in API requests
KubeMetadataNameSelector = "metadata.name"
// KubeMetadataLabelSelector is a selector for label
KubeMetadataLabelSelector = "metadata.label"
// KubeRunTests turns on kubernetes tests
KubeRunTests = "TEST_KUBE"
// KubeSystemMasters is the name of the built-in kubernetes group for master nodes
KubeSystemMasters = "system:masters"
// KubeSystemAuthenticated is a builtin group that allows
// any user to access common API methods, e.g. discovery methods
// required for initial client usage
KubeSystemAuthenticated = "system:authenticated"
// UsageKubeOnly specifies certificate usage metadata
// that limits the certificate to be used only for kubernetes proxying
UsageKubeOnly = "usage:kube"
)
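
// For illustration, the default kubeconfig path is assembled from these
// constants (EnvUserProfile replaces EnvHome on Windows):
//
//	path := filepath.Join(os.Getenv(EnvHome), KubeConfigDir, KubeConfigFile)
//	// e.g. /home/alice/.kube/config
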
const (
// UseOfClosedNetworkConnection is a special string used by parts of the Go
// standard library that is the only way to identify certain errors
UseOfClosedNetworkConnection = "use of closed network connection"
)
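
// For illustration, the typical check that tells a deliberate listener
// shutdown apart from a genuine network failure:
//
//	if err != nil && strings.Contains(err.Error(), UseOfClosedNetworkConnection) {
//		return nil // the listener was closed intentionally
//	}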
const (
// OpenBrowserLinux is the command used to open a web browser on Linux.
OpenBrowserLinux = "xdg-open"
// OpenBrowserDarwin is the command used to open a web browser on macOS/Darwin.
OpenBrowserDarwin = "open"
// OpenBrowserWindows is the command used to open a web browser on Windows.
OpenBrowserWindows = "rundll32.exe"
)
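
// For illustration, picking the opener by platform, assuming url holds the
// address to open (on Windows, rundll32.exe is invoked with
// url.dll,FileProtocolHandler):
//
//	var cmd *exec.Cmd
//	switch runtime.GOOS {
//	case LinuxOS:
//		cmd = exec.Command(OpenBrowserLinux, url)
//	case DarwinOS:
//		cmd = exec.Command(OpenBrowserDarwin, url)
//	case WindowsOS:
//		cmd = exec.Command(OpenBrowserWindows, "url.dll,FileProtocolHandler", url)
//	}
//	err := cmd.Start()
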
const (
// EnhancedRecordingMinKernel is the minimum kernel version for the enhanced
// recording feature.
EnhancedRecordingMinKernel = "4.18.0"
// EnhancedRecordingCommand is a role option that implies command events are
// captured.
EnhancedRecordingCommand = "command"
// EnhancedRecordingDisk is a role option that implies disk events are captured.
EnhancedRecordingDisk = "disk"
// EnhancedRecordingNetwork is a role option that implies network events
// are captured.
EnhancedRecordingNetwork = "network"
)
const (
// ExecSubCommand is the sub-command Teleport uses to re-exec itself.
ExecSubCommand = "exec"
)
// RSAKeySize is the size of the RSA key.
const RSAKeySize = 2048
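
// For illustration, generating a keypair of this size with the standard
// library's crypto/rsa and crypto/rand packages:
//
//	priv, err := rsa.GenerateKey(rand.Reader, RSAKeySize)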