teleport/integration/helpers.go


package integration
import (
"context"
"crypto/rsa"
"crypto/x509/pkix"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/native"
"github.com/gravitational/teleport/lib/auth/testauthority"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/backend/dir"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/reversetunnel"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/sshutils"
"github.com/gravitational/teleport/lib/teleagent"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
log "github.com/sirupsen/logrus"
)
// SetTestTimeouts affects global timeouts inside Teleport, making connections
// work faster but consuming more CPU (useful for integration testing)
func SetTestTimeouts(t time.Duration) {
defaults.ReverseTunnelAgentHeartbeatPeriod = t
defaults.ServerHeartbeatTTL = t
defaults.SessionRefreshPeriod = t
}
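// Example (sketch): a test suite would typically shorten the global timeouts
// before creating any instances, for example from its suite setup hook. The
// 100ms value below is illustrative, not a recommendation:
//
//	SetTestTimeouts(100 * time.Millisecond)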
// TeleInstance represents an in-memory instance of a teleport
// process for testing
type TeleInstance struct {
// Secrets holds the keys (pub, priv and derived cert) of this instance
Secrets InstanceSecrets
// Ports is a slice of TCP ports used by Teleport services
Ports []int
// Hostname is the name of the host where the instance is running
Hostname string
// Internal stuff...
Process *service.TeleportProcess
Config *service.Config
Tunnel reversetunnel.Server
Pool *reversetunnel.AgentPool
// Nodes is a list of additional nodes
// started with this instance
Nodes []*service.TeleportProcess
// UploadEventsC is a channel for upload events
UploadEventsC chan *events.UploadEvent
}
type User struct {
Username string `json:"username"`
AllowedLogins []string `json:"logins"`
Key *client.Key `json:"key"`
Roles []services.Role `json:"-"`
}
type InstanceSecrets struct {
// instance name (aka "site name")
SiteName string `json:"site_name"`
// instance keys+cert (reused for hostCA and userCA)
// PubKey is instance public key
PubKey []byte `json:"pub"`
// PrivKey is instance private key
PrivKey []byte `json:"priv"`
// Cert is SSH host certificate
Cert []byte `json:"cert"`
// TLSCACert is the certificate of the trusted certificate authority
TLSCACert []byte `json:"tls_ca_cert"`
// TLSCert is client TLS X509 certificate
TLSCert []byte `json:"tls_cert"`
// ListenAddr is a reverse tunnel listening address, allowing
// other sites to connect to this instance. Set to empty
// string if this instance is not allowing incoming tunnels
ListenAddr string `json:"tunnel_addr"`
// WebProxyAddr is address for web proxy
WebProxyAddr string `json:"web_proxy_addr"`
// list of users this instance trusts (key in the map is username)
Users map[string]*User `json:"users"`
}
func (s *InstanceSecrets) String() string {
bytes, _ := json.MarshalIndent(s, "", "\t")
return string(bytes)
}
// InstanceConfig is an instance configuration
type InstanceConfig struct {
// ClusterName is a cluster name of the instance
ClusterName string
// HostID is a host id of the instance
HostID string
// NodeName is a node name of the instance
NodeName string
// Ports is a list of assigned ports to use
Ports []int
// Priv is SSH private key of the instance
Priv []byte
// Pub is SSH public key of the instance
Pub []byte
// MultiplexProxy uses the same port for web and SSH reverse tunnel proxy
MultiplexProxy bool
}
// NewInstance creates a new Teleport process instance
func NewInstance(cfg InstanceConfig) *TeleInstance {
var err error
if len(cfg.Ports) < 5 {
fatalIf(fmt.Errorf("not enough free ports given: %v", cfg.Ports))
}
if cfg.NodeName == "" {
cfg.NodeName, err = os.Hostname()
fatalIf(err)
}
// generate instance secrets (keys):
keygen, err := native.New(native.PrecomputeKeys(0))
fatalIf(err)
if cfg.Priv == nil || cfg.Pub == nil {
cfg.Priv, cfg.Pub, _ = keygen.GenerateKeyPair("")
}
rsaKey, err := ssh.ParseRawPrivateKey(cfg.Priv)
fatalIf(err)
tlsCAKey, tlsCACert, err := tlsca.GenerateSelfSignedCAWithPrivateKey(rsaKey.(*rsa.PrivateKey), pkix.Name{
CommonName: cfg.ClusterName,
Organization: []string{cfg.ClusterName},
}, nil, defaults.CATTL)
fatalIf(err)
cert, err := keygen.GenerateHostCert(services.HostCertParams{
PrivateCASigningKey: cfg.Priv,
PublicHostKey: cfg.Pub,
HostID: cfg.HostID,
NodeName: cfg.NodeName,
ClusterName: cfg.ClusterName,
Roles: teleport.Roles{teleport.RoleAdmin},
TTL: 24 * time.Hour,
})
fatalIf(err)
tlsCA, err := tlsca.New(tlsCACert, tlsCAKey)
fatalIf(err)
cryptoPubKey, err := sshutils.CryptoPublicKey(cfg.Pub)
fatalIf(err)
identity := tlsca.Identity{
Username: fmt.Sprintf("%v.%v", cfg.HostID, cfg.ClusterName),
Groups: []string{string(teleport.RoleAdmin)},
}
clock := clockwork.NewRealClock()
tlsCert, err := tlsCA.GenerateCertificate(tlsca.CertificateRequest{
Clock: clock,
PublicKey: cryptoPubKey,
Subject: identity.Subject(),
NotAfter: clock.Now().UTC().Add(time.Hour * 24),
})
fatalIf(err)
i := &TeleInstance{
Ports: cfg.Ports,
Hostname: cfg.NodeName,
UploadEventsC: make(chan *events.UploadEvent, 100),
}
secrets := InstanceSecrets{
SiteName: cfg.ClusterName,
PrivKey: cfg.Priv,
PubKey: cfg.Pub,
Cert: cert,
TLSCACert: tlsCACert,
TLSCert: tlsCert,
ListenAddr: net.JoinHostPort(cfg.NodeName, i.GetPortReverseTunnel()),
WebProxyAddr: net.JoinHostPort(cfg.NodeName, i.GetPortWeb()),
Users: make(map[string]*User),
}
if cfg.MultiplexProxy {
secrets.ListenAddr = secrets.WebProxyAddr
}
i.Secrets = secrets
return i
}
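// Example (sketch): constructing an instance. The cluster name, host ID and
// port values below are illustrative; NewInstance requires at least 5 free
// ports (SSH node, auth, proxy SSH, proxy web, reverse tunnel):
//
//	inst := NewInstance(InstanceConfig{
//		ClusterName: "example.com",
//		HostID:      "00000000-0000-0000-0000-000000000001",
//		NodeName:    "localhost",
//		Ports:       []int{5000, 5001, 5002, 5003, 5004},
//	})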
// GetRoles returns a list of roles to initialize for this secret
func (s *InstanceSecrets) GetRoles() []services.Role {
var roles []services.Role
for _, ca := range s.GetCAs() {
if ca.GetType() != services.UserCA {
continue
}
role := services.RoleForCertAuthority(ca)
role.SetLogins(services.Allow, s.AllowedLogins())
roles = append(roles, role)
}
return roles
}
// GetCAs returns an array of CAs stored by the secrets object. In this
// case we always return hard-coded userCA + hostCA (and they share keys
// for simplicity)
func (s *InstanceSecrets) GetCAs() []services.CertAuthority {
hostCA := services.NewCertAuthority(services.HostCA, s.SiteName, [][]byte{s.PrivKey}, [][]byte{s.PubKey}, []string{})
hostCA.SetTLSKeyPairs([]services.TLSKeyPair{{Cert: s.TLSCACert, Key: s.PrivKey}})
return []services.CertAuthority{
hostCA,
services.NewCertAuthority(services.UserCA, s.SiteName, [][]byte{s.PrivKey}, [][]byte{s.PubKey}, []string{services.RoleNameForCertAuthority(s.SiteName)}),
}
}
func (s *InstanceSecrets) AllowedLogins() []string {
var logins []string
for i := range s.Users {
logins = append(logins, s.Users[i].AllowedLogins...)
}
return logins
}
func (s *InstanceSecrets) AsTrustedCluster(token string, roleMap services.RoleMap) services.TrustedCluster {
return &services.TrustedClusterV2{
Kind: services.KindTrustedCluster,
Version: services.V2,
Metadata: services.Metadata{
Name: s.SiteName,
},
Spec: services.TrustedClusterSpecV2{
Token: token,
Enabled: true,
ProxyAddress: s.WebProxyAddr,
ReverseTunnelAddress: s.ListenAddr,
RoleMap: roleMap,
},
}
}
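// Example (sketch): turning instance secrets into a trusted cluster resource
// that another cluster can create via its auth API. The token value and the
// role map literal are illustrative, and the literal assumes services.RoleMap
// is a slice of {Remote, Local} role mappings:
//
//	tc := secrets.AsTrustedCluster("token", services.RoleMap{
//		{Remote: "admin", Local: []string{"admin"}},
//	})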
func (s *InstanceSecrets) AsSlice() []*InstanceSecrets {
return []*InstanceSecrets{s}
}
func (s *InstanceSecrets) GetIdentity() *auth.Identity {
i, err := auth.ReadIdentityFromKeyPair(s.PrivKey, s.Cert, s.TLSCert, [][]byte{s.TLSCACert})
fatalIf(err)
return i
}
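// The Ports slice is interpreted positionally by the getters below:
// Ports[0] is the SSH node port, Ports[1] the auth server port, Ports[2] the
// proxy SSH port, Ports[3] the proxy web port and Ports[4] the reverse
// tunnel port.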
func (i *TeleInstance) GetPortSSHInt() int {
return i.Ports[0]
}
func (i *TeleInstance) GetPortSSH() string {
return strconv.Itoa(i.GetPortSSHInt())
}
func (i *TeleInstance) GetPortAuth() string {
return strconv.Itoa(i.Ports[1])
}
func (i *TeleInstance) GetPortProxy() string {
return strconv.Itoa(i.Ports[2])
}
func (i *TeleInstance) GetPortWeb() string {
return strconv.Itoa(i.Ports[3])
}
func (i *TeleInstance) GetPortReverseTunnel() string {
return strconv.Itoa(i.Ports[4])
}
// GetSiteAPI is a helper which returns an API endpoint to a site with
// a given name. This endpoint implements HTTP-over-SSH access to the
// site's auth server.
func (i *TeleInstance) GetSiteAPI(siteName string) auth.ClientI {
siteTunnel, err := i.Tunnel.GetSite(siteName)
if err != nil {
log.Warn(err)
return nil
}
siteAPI, err := siteTunnel.GetClient()
if err != nil {
log.Warn(err)
return nil
}
return siteAPI
}
// Create creates a new instance of Teleport which trusts a list of other clusters (other
// instances)
func (i *TeleInstance) Create(trustedSecrets []*InstanceSecrets, enableSSH bool, console io.Writer) error {
tconf := service.MakeDefaultConfig()
tconf.SSH.Enabled = enableSSH
tconf.Console = console
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
return i.CreateEx(trustedSecrets, tconf)
}
// UserCreds holds user client credentials
type UserCreds struct {
// Key is user client key and certificate
Key client.Key
// HostCA is a trusted host certificate authority
HostCA services.CertAuthority
}
// SetupUserCreds sets up user credentials for client
func SetupUserCreds(tc *client.TeleportClient, proxyHost string, creds UserCreds) error {
_, err := tc.AddKey(proxyHost, &creds.Key)
if err != nil {
return trace.Wrap(err)
}
err = tc.AddTrustedCA(creds.HostCA)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// SetupUser sets up user in the cluster
func SetupUser(process *service.TeleportProcess, username string, roles []services.Role) error {
auth := process.GetAuthServer()
teleUser, err := services.NewUser(username)
if err != nil {
return trace.Wrap(err)
}
if len(roles) == 0 {
role := services.RoleForUser(teleUser)
role.SetLogins(services.Allow, []string{username})
// allow tests to forward agent, still needs to be passed in client
roleOptions := role.GetOptions()
roleOptions.ForwardAgent = services.NewBool(true)
role.SetOptions(roleOptions)
err = auth.UpsertRole(role, backend.Forever)
if err != nil {
return trace.Wrap(err)
}
teleUser.AddRole(role.GetMetadata().Name)
roles = append(roles, role)
} else {
for _, role := range roles {
err := auth.UpsertRole(role, backend.Forever)
if err != nil {
return trace.Wrap(err)
}
teleUser.AddRole(role.GetName())
}
}
err = auth.UpsertUser(teleUser)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// GenerateUserCreds generates client credentials (key pair, certificates and host CA) for the given user
func GenerateUserCreds(process *service.TeleportProcess, username string) (*UserCreds, error) {
priv, pub, err := testauthority.New().GenerateKeyPair("")
if err != nil {
return nil, trace.Wrap(err)
}
a := process.GetAuthServer()
sshCert, x509Cert, err := a.GenerateUserCerts(pub, username, time.Hour, teleport.CertificateFormatStandard)
if err != nil {
return nil, trace.Wrap(err)
}
clusterName, err := a.GetClusterName()
if err != nil {
return nil, trace.Wrap(err)
}
ca, err := a.GetCertAuthority(services.CertAuthID{
Type: services.HostCA,
DomainName: clusterName.GetClusterName(),
}, false)
if err != nil {
return nil, trace.Wrap(err)
}
return &UserCreds{
HostCA: ca,
Key: client.Key{
Priv: priv,
Pub: pub,
Cert: sshCert,
TLSCert: x509Cert,
},
}, nil
}
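// Example (sketch): creating a user on a running instance and building an
// authenticated client for it. The username, host and error handling are
// illustrative/elided:
//
//	err := SetupUser(inst.Process, "alice", nil)
//	creds, err := GenerateUserCreds(inst.Process, "alice")
//	tc, err := inst.NewClientWithCreds(ClientConfig{
//		Login:   "alice",
//		Cluster: inst.Secrets.SiteName,
//		Host:    "localhost",
//		Port:    inst.GetPortSSHInt(),
//	}, *creds)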
// GenerateConfig generates instance config
func (i *TeleInstance) GenerateConfig(trustedSecrets []*InstanceSecrets, tconf *service.Config) (*service.Config, error) {
var err error
dataDir, err := ioutil.TempDir("", "cluster-"+i.Secrets.SiteName)
if err != nil {
return nil, trace.Wrap(err)
}
if tconf == nil {
tconf = service.MakeDefaultConfig()
}
tconf.DataDir = dataDir
tconf.UploadEventsC = i.UploadEventsC
tconf.Auth.ClusterName, err = services.NewClusterName(services.ClusterNameSpecV2{
ClusterName: i.Secrets.SiteName,
})
if err != nil {
return nil, trace.Wrap(err)
}
tconf.Auth.StaticTokens, err = services.NewStaticTokens(services.StaticTokensSpecV2{
StaticTokens: []services.ProvisionToken{
{
Roles: []teleport.Role{teleport.RoleNode, teleport.RoleProxy, teleport.RoleTrustedCluster},
Token: "token",
},
},
})
if err != nil {
return nil, trace.Wrap(err)
}
tconf.Auth.Authorities = append(tconf.Auth.Authorities, i.Secrets.GetCAs()...)
tconf.Identities = append(tconf.Identities, i.Secrets.GetIdentity())
for _, trusted := range trustedSecrets {
tconf.Auth.Authorities = append(tconf.Auth.Authorities, trusted.GetCAs()...)
tconf.Auth.Roles = append(tconf.Auth.Roles, trusted.GetRoles()...)
tconf.Identities = append(tconf.Identities, trusted.GetIdentity())
if trusted.ListenAddr != "" {
tconf.ReverseTunnels = []services.ReverseTunnel{
services.NewReverseTunnel(trusted.SiteName, []string{trusted.ListenAddr}),
}
}
}
tconf.Proxy.ReverseTunnelListenAddr.Addr = i.Secrets.ListenAddr
tconf.HostUUID = i.Secrets.GetIdentity().ID.HostUUID
tconf.SSH.Addr.Addr = net.JoinHostPort(i.Hostname, i.GetPortSSH())
tconf.Auth.SSHAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortAuth())
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortProxy())
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortWeb())
tconf.Proxy.PublicAddrs = []utils.NetAddr{{Addr: i.Hostname}}
tconf.AuthServers = append(tconf.AuthServers, tconf.Auth.SSHAddr)
tconf.Auth.StorageConfig = backend.Config{
Type: dir.GetName(),
Params: backend.Params{"path": dataDir},
}
tconf.Keygen = testauthority.New()
i.Config = tconf
return tconf, nil
}
// CreateEx creates a new instance of Teleport which trusts a list of other clusters (other
// instances)
//
// Unlike Create() it allows for greater customization because it accepts
// a full Teleport config structure
func (i *TeleInstance) CreateEx(trustedSecrets []*InstanceSecrets, tconf *service.Config) error {
tconf, err := i.GenerateConfig(trustedSecrets, tconf)
if err != nil {
return trace.Wrap(err)
}
i.Config = tconf
i.Process, err = service.NewTeleport(tconf)
if err != nil {
return trace.Wrap(err)
}
// if the auth server is not enabled, nothing more needs to be done
if !tconf.Auth.Enabled {
return nil
}
// if this instance contains an auth server, configure the auth server as well.
// create users and roles if they don't exist, or sign their keys if they're
// already present
auth := i.Process.GetAuthServer()
for _, user := range i.Secrets.Users {
teleUser, err := services.NewUser(user.Username)
if err != nil {
return trace.Wrap(err)
}
var roles []services.Role
if len(user.Roles) == 0 {
role := services.RoleForUser(teleUser)
role.SetLogins(services.Allow, user.AllowedLogins)
// allow tests to forward agent, still needs to be passed in client
roleOptions := role.GetOptions()
roleOptions.ForwardAgent = services.NewBool(true)
role.SetOptions(roleOptions)
err = auth.UpsertRole(role, backend.Forever)
if err != nil {
return trace.Wrap(err)
}
teleUser.AddRole(role.GetMetadata().Name)
roles = append(roles, role)
} else {
roles = user.Roles
for _, role := range user.Roles {
err := auth.UpsertRole(role, backend.Forever)
if err != nil {
return trace.Wrap(err)
}
teleUser.AddRole(role.GetName())
}
}
err = auth.UpsertUser(teleUser)
if err != nil {
return trace.Wrap(err)
}
// if user keys are not present, auto-generate keys:
if user.Key == nil || len(user.Key.Pub) == 0 {
priv, pub, _ := tconf.Keygen.GenerateKeyPair("")
user.Key = &client.Key{
Priv: priv,
Pub: pub,
}
}
// sign user's keys:
ttl := 24 * time.Hour
user.Key.Cert, user.Key.TLSCert, err = auth.GenerateUserCerts(user.Key.Pub, teleUser.GetName(), ttl, teleport.CertificateFormatStandard)
if err != nil {
return err
}
}
return nil
}
// StartNode starts an SSH node and connects it to the cluster.
func (i *TeleInstance) StartNode(name string, sshPort int) (*service.TeleportProcess, error) {
dataDir, err := ioutil.TempDir("", "cluster-"+i.Secrets.SiteName)
if err != nil {
return nil, trace.Wrap(err)
}
tconf := service.MakeDefaultConfig()
authServer := utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortAuth()))
tconf.AuthServers = append(tconf.AuthServers, *authServer)
tconf.Token = "token"
tconf.HostUUID = name
tconf.Hostname = name
tconf.DataDir = dataDir
tconf.UploadEventsC = i.UploadEventsC
var ttl time.Duration
tconf.CachePolicy = service.CachePolicy{
Enabled: true,
RecentTTL: &ttl,
}
tconf.Auth.Enabled = false
tconf.Proxy.Enabled = false
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", sshPort))
// Create a new Teleport process and add it to the list of nodes that
// compose this "cluster".
process, err := service.NewTeleport(tconf)
if err != nil {
return nil, trace.Wrap(err)
}
i.Nodes = append(i.Nodes, process)
// Build a list of expected events to wait for before unblocking based off
// the configuration passed in.
expectedEvents := []string{
service.NodeSSHReady,
}
// Start the process and block until the expected events have arrived.
receivedEvents, err := startAndWait(process, expectedEvents)
if err != nil {
return nil, trace.Wrap(err)
}
log.Debugf("Teleport node (in instance %v) started: %v/%v events received.",
i.Secrets.SiteName, len(expectedEvents), len(receivedEvents))
return process, nil
}
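// Example (sketch): adding a second SSH node to a running instance. The node
// name and port are illustrative:
//
//	nodeProcess, err := inst.StartNode("node-2", 5100)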
// StartNodeAndProxy starts an SSH node and a Proxy Server and connects it to
// the cluster.
func (i *TeleInstance) StartNodeAndProxy(name string, sshPort, proxyWebPort, proxySSHPort int) error {
dataDir, err := ioutil.TempDir("", "cluster-"+i.Secrets.SiteName)
if err != nil {
return trace.Wrap(err)
}
tconf := service.MakeDefaultConfig()
authServer := utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortAuth()))
tconf.AuthServers = append(tconf.AuthServers, *authServer)
tconf.Token = "token"
tconf.HostUUID = name
tconf.Hostname = name
tconf.UploadEventsC = i.UploadEventsC
tconf.DataDir = dataDir
var ttl time.Duration
tconf.CachePolicy = service.CachePolicy{
Enabled: true,
RecentTTL: &ttl,
}
tconf.Auth.Enabled = false
tconf.Proxy.Enabled = true
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", proxySSHPort))
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", proxyWebPort))
tconf.Proxy.DisableReverseTunnel = true
tconf.Proxy.DisableWebService = true
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", sshPort))
// Create a new Teleport process and add it to the list of nodes that
// compose this "cluster".
process, err := service.NewTeleport(tconf)
if err != nil {
return trace.Wrap(err)
}
i.Nodes = append(i.Nodes, process)
// Build a list of expected events to wait for before unblocking based off
// the configuration passed in.
expectedEvents := []string{
service.ProxySSHReady,
service.NodeSSHReady,
}
// Start the process and block until the expected events have arrived.
receivedEvents, err := startAndWait(process, expectedEvents)
if err != nil {
return trace.Wrap(err)
}
log.Debugf("Teleport node and proxy (in instance %v) started: %v/%v events received.",
i.Secrets.SiteName, len(expectedEvents), len(receivedEvents))
return nil
}
// ProxyConfig is a set of configuration parameters for Proxy
type ProxyConfig struct {
// Name is a proxy name
Name string
// SSHPort is SSH proxy port
SSHPort int
// WebPort is web proxy port
WebPort int
// ReverseTunnelPort is a port for reverse tunnel addresses
ReverseTunnelPort int
}
// StartProxy starts another Proxy Server and connects it to the cluster.
func (i *TeleInstance) StartProxy(cfg ProxyConfig) error {
dataDir, err := ioutil.TempDir("", "cluster-"+i.Secrets.SiteName+"-"+cfg.Name)
if err != nil {
return trace.Wrap(err)
}
tconf := service.MakeDefaultConfig()
authServer := utils.MustParseAddr(net.JoinHostPort(i.Hostname, i.GetPortAuth()))
tconf.AuthServers = append(tconf.AuthServers, *authServer)
tconf.CachePolicy = service.CachePolicy{Enabled: true}
tconf.DataDir = dataDir
tconf.UploadEventsC = i.UploadEventsC
tconf.HostUUID = cfg.Name
tconf.Hostname = cfg.Name
tconf.Token = "token"
tconf.Auth.Enabled = false
tconf.SSH.Enabled = false
tconf.Proxy.Enabled = true
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", cfg.SSHPort))
tconf.Proxy.ReverseTunnelListenAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", cfg.ReverseTunnelPort))
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, fmt.Sprintf("%v", cfg.WebPort))
tconf.Proxy.DisableReverseTunnel = false
tconf.Proxy.DisableWebService = true
// Create a new Teleport process and add it to the list of nodes that
// compose this "cluster".
process, err := service.NewTeleport(tconf)
if err != nil {
return trace.Wrap(err)
}
i.Nodes = append(i.Nodes, process)
// Build a list of expected events to wait for before unblocking based off
// the configuration passed in.
expectedEvents := []string{
service.ProxyReverseTunnelReady,
service.ProxySSHReady,
}
// Start the process and block until the expected events have arrived.
receivedEvents, err := startAndWait(process, expectedEvents)
if err != nil {
return trace.Wrap(err)
}
log.Debugf("Teleport proxy (in instance %v) started: %v/%v events received.",
i.Secrets.SiteName, len(expectedEvents), len(receivedEvents))
return nil
}
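// Example (sketch): adding a second proxy to a running instance. Names and
// ports are illustrative:
//
//	err := inst.StartProxy(ProxyConfig{
//		Name:              "proxy-2",
//		SSHPort:           5200,
//		WebPort:           5201,
//		ReverseTunnelPort: 5202,
//	})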
// Reset re-creates the teleport instance based on the same configuration
// This is needed if you want to stop the instance, reset it and start again
func (i *TeleInstance) Reset() (err error) {
i.Process, err = service.NewTeleport(i.Config)
if err != nil {
return trace.Wrap(err)
}
return nil
}
// AddUserWithRole adds a user with an assigned role
func (i *TeleInstance) AddUserWithRole(username string, role services.Role) *User {
user := &User{
Username: username,
Roles: []services.Role{role},
}
i.Secrets.Users[username] = user
return user
}
// AddUser adds a new user to this Teleport instance. 'mappings' is a list
// of allowed OS logins for the new user
func (i *TeleInstance) AddUser(username string, mappings []string) *User {
log.Infof("teleInstance.AddUser(%v) mapped to %v", username, mappings)
if mappings == nil {
mappings = make([]string, 0)
}
user := &User{
Username: username,
AllowedLogins: mappings,
}
i.Secrets.Users[username] = user
return user
}
// Start will start the TeleInstance and then block until it is ready to
// process requests based off the passed in configuration.
func (i *TeleInstance) Start() error {
// Build a list of expected events to wait for before unblocking based off
// the configuration passed in.
expectedEvents := []string{}
if i.Config.Auth.Enabled {
expectedEvents = append(expectedEvents, service.AuthTLSReady)
}
if i.Config.Proxy.Enabled {
expectedEvents = append(expectedEvents, service.ProxyReverseTunnelReady)
expectedEvents = append(expectedEvents, service.ProxySSHReady)
expectedEvents = append(expectedEvents, service.ProxyAgentPoolReady)
if !i.Config.Proxy.DisableWebService {
expectedEvents = append(expectedEvents, service.ProxyWebServerReady)
}
}
if i.Config.SSH.Enabled {
expectedEvents = append(expectedEvents, service.NodeSSHReady)
}
// Start the process and block until the expected events have arrived.
receivedEvents, err := startAndWait(i.Process, expectedEvents)
if err != nil {
return trace.Wrap(err)
}
// Extract and set reversetunnel.Server and reversetunnel.AgentPool upon
// receipt of a ProxyReverseTunnelReady and ProxyAgentPoolReady respectively.
for _, re := range receivedEvents {
switch re.Name {
case service.ProxyReverseTunnelReady:
ts, ok := re.Payload.(reversetunnel.Server)
if ok {
i.Tunnel = ts
}
case service.ProxyAgentPoolReady:
ap, ok := re.Payload.(*reversetunnel.AgentPool)
if ok {
i.Pool = ap
}
}
}
log.Debugf("Teleport instance %v started: %v/%v events received.",
i.Secrets.SiteName, len(receivedEvents), len(expectedEvents))
return nil
}
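// Example (sketch): a typical single-cluster flow, continuing from the
// NewInstance example above (login is illustrative, error handling elided):
//
//	inst.AddUser("alice", []string{"alice"})
//	err := inst.Create(nil, true, nil)
//	err = inst.Start()
//	defer inst.Stop(true)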
// ClientConfig is a client configuration
type ClientConfig struct {
// Login is SSH login name
Login string
// Cluster is a cluster name to connect to
Cluster string
// Host string is a target host to connect to
Host string
// Port is a target port to connect to
Port int
// Proxy is an optional alternative proxy to use
Proxy *ProxyConfig
// ForwardAgent controls if the client requests its agent be forwarded to
// the server.
ForwardAgent bool
}
// NewClientWithCreds creates client with credentials
func (i *TeleInstance) NewClientWithCreds(cfg ClientConfig, creds UserCreds) (tc *client.TeleportClient, err error) {
clt, err := i.NewUnauthenticatedClient(cfg)
if err != nil {
return nil, trace.Wrap(err)
}
err = SetupUserCreds(clt, i.Config.Proxy.SSHAddr.Addr, creds)
if err != nil {
return nil, trace.Wrap(err)
}
return clt, nil
}
// NewUnauthenticatedClient returns a fully configured client that has not
// been authenticated yet (no trusted CAs or signed session key loaded)
func (i *TeleInstance) NewUnauthenticatedClient(cfg ClientConfig) (tc *client.TeleportClient, err error) {
keyDir, err := ioutil.TempDir(i.Config.DataDir, "tsh")
if err != nil {
return nil, err
}
// break down proxy address into host, ssh_port and web_port:
proxyConf := &i.Config.Proxy
proxyHost, sp, err := net.SplitHostPort(proxyConf.SSHAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
// use alternative proxy if necessary
var proxySSHPort, proxyWebPort int
if cfg.Proxy == nil {
proxySSHPort, err = strconv.Atoi(sp)
if err != nil {
return nil, trace.Wrap(err)
}
_, sp, err = net.SplitHostPort(proxyConf.WebAddr.Addr)
if err != nil {
return nil, trace.Wrap(err)
}
proxyWebPort, err = strconv.Atoi(sp)
if err != nil {
return nil, trace.Wrap(err)
}
} else {
proxySSHPort, proxyWebPort = cfg.Proxy.SSHPort, cfg.Proxy.WebPort
}
cconf := &client.Config{
Username: cfg.Login,
Host: cfg.Host,
HostPort: cfg.Port,
HostLogin: cfg.Login,
InsecureSkipVerify: true,
KeysDir: keyDir,
SiteName: cfg.Cluster,
ForwardAgent: cfg.ForwardAgent,
}
cconf.SetProxy(proxyHost, proxyWebPort, proxySSHPort, 0)
return client.NewClient(cconf)
}
// NewClient returns a fully configured and pre-authenticated client
// (pre-authenticated with server CAs and signed session key)
func (i *TeleInstance) NewClient(cfg ClientConfig) (*client.TeleportClient, error) {
tc, err := i.NewUnauthenticatedClient(cfg)
if err != nil {
return nil, trace.Wrap(err)
}
// configure the client to authenticate using the keys from 'secrets':
user, ok := i.Secrets.Users[cfg.Login]
if !ok {
return nil, trace.BadParameter("unknown login %q", cfg.Login)
}
if user.Key == nil {
return nil, trace.BadParameter("user %q has no key", cfg.Login)
}
_, err = tc.AddKey(cfg.Host, user.Key)
if err != nil {
return nil, trace.Wrap(err)
}
// tell the client to trust given CAs (from secrets). this is the
// equivalent of 'known hosts' in openssh
cas := i.Secrets.GetCAs()
for i := range cas {
err = tc.AddTrustedCA(cas[i])
if err != nil {
return nil, trace.Wrap(err)
}
}
return tc, nil
}
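// Example (sketch): opening a client for a user previously added with
// AddUser and created via Create/CreateEx. Login, host and port values are
// illustrative:
//
//	tc, err := inst.NewClient(ClientConfig{
//		Login:   "alice",
//		Cluster: inst.Secrets.SiteName,
//		Host:    "localhost",
//		Port:    inst.GetPortSSHInt(),
//	})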
// StopProxy loops over the extra nodes in a TeleInstance and stops all
// nodes where the proxy server is enabled.
func (i *TeleInstance) StopProxy() error {
var errors []error
for _, p := range i.Nodes {
if p.Config.Proxy.Enabled {
if err := p.Close(); err != nil {
errors = append(errors, err)
log.Errorf("Failed closing extra proxy: %v.", err)
}
if err := p.Wait(); err != nil {
errors = append(errors, err)
log.Errorf("Failed to stop extra proxy: %v.", err)
}
}
}
return trace.NewAggregate(errors...)
}
// StopNodes stops additional nodes
func (i *TeleInstance) StopNodes() error {
var errors []error
for _, node := range i.Nodes {
if err := node.Close(); err != nil {
errors = append(errors, err)
log.Errorf("failed closing extra node %v", err)
}
if err := node.Wait(); err != nil {
errors = append(errors, err)
log.Errorf("failed stopping extra node %v", err)
}
}
return trace.NewAggregate(errors...)
}
func (i *TeleInstance) Stop(removeData bool) error {
if i.Config != nil && removeData {
err := os.RemoveAll(i.Config.DataDir)
if err != nil {
log.Error("failed removing temporary local Teleport directory", err)
}
}
log.Infof("Asking Teleport to stop")
err := i.Process.Close()
if err != nil {
log.Error(err)
return trace.Wrap(err)
}
defer func() {
log.Infof("Teleport instance '%v' stopped!", i.Secrets.SiteName)
}()
return i.Process.Wait()
}
func startAndWait(process *service.TeleportProcess, expectedEvents []string) ([]service.Event, error) {
// register to listen for all ready events on the broadcast channel
broadcastCh := make(chan service.Event)
for _, eventName := range expectedEvents {
process.WaitForEvent(context.TODO(), eventName, broadcastCh)
}
// start the process
err := process.Start()
if err != nil {
return nil, trace.Wrap(err)
}
// wait for all events to arrive or a timeout. if all the expected events
// from above are not received, this instance will not start
receivedEvents := []service.Event{}
timeoutCh := time.After(10 * time.Second)
for idx := 0; idx < len(expectedEvents); idx++ {
select {
case e := <-broadcastCh:
receivedEvents = append(receivedEvents, e)
case <-timeoutCh:
return nil, trace.BadParameter("timed out, only %v/%v events received. received: %v, expected: %v",
len(receivedEvents), len(expectedEvents), receivedEvents, expectedEvents)
}
}
// Not all services follow a non-blocking Start/Wait pattern. This means a
// *Ready event may be emitted slightly before the service actually starts for
// blocking services. Long term those services should be re-factored, until
// then sleep for 250ms to handle this situation.
time.Sleep(250 * time.Millisecond)
return receivedEvents, nil
}
type proxyServer struct {
sync.Mutex
count int
}
// ServeHTTP only accepts the CONNECT verb and will tunnel your connection to
// the specified host. Also tracks the number of connections that it proxies for
// debugging purposes.
func (p *proxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Validate http connect parameters.
if r.Method != http.MethodConnect {
trace.WriteError(w, trace.BadParameter("%v not supported", r.Method))
return
}
if r.Host == "" {
trace.WriteError(w, trace.BadParameter("host not set"))
return
}
// Dial to the target host, this is done before hijacking the connection to
// ensure the target host is accessible.
dconn, err := net.Dial("tcp", r.Host)
if err != nil {
trace.WriteError(w, err)
return
}
defer dconn.Close()
// Once the client receives 200 OK, the rest of the data will no longer be
// http, but whatever protocol is being tunneled.
w.WriteHeader(http.StatusOK)
// Hijack request so we can get underlying connection.
hj, ok := w.(http.Hijacker)
if !ok {
trace.WriteError(w, trace.AccessDenied("unable to hijack connection"))
return
}
sconn, _, err := hj.Hijack()
if err != nil {
trace.WriteError(w, err)
return
}
defer sconn.Close()
// Success, we're proxying data now.
p.Lock()
p.count = p.count + 1
p.Unlock()
// Copy from src to dst and dst to src.
errc := make(chan error, 2)
replicate := func(dst io.Writer, src io.Reader) {
_, err := io.Copy(dst, src)
errc <- err
}
go replicate(sconn, dconn)
go replicate(dconn, sconn)
// Wait until done, an error, or a 10 second timeout.
select {
case <-time.After(10 * time.Second):
case <-errc:
}
}
// Count returns the number of connections that have been proxied.
func (p *proxyServer) Count() int {
p.Lock()
defer p.Unlock()
return p.count
}
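// Example (sketch): the proxyServer can be exposed with net/http/httptest and
// used as an HTTP CONNECT proxy (for instance via the HTTPS_PROXY environment
// variable), while Count verifies that traffic actually went through it:
//
//	ps := &proxyServer{}
//	ts := httptest.NewServer(ps)
//	defer ts.Close()
//	os.Setenv("HTTPS_PROXY", ts.URL)
//	// ... run the connection under test ...
//	if ps.Count() == 0 { /* the proxy was bypassed */ }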
// discardServer is an SSH server that discards SSH exec requests and starts
// with the passed in host signer.
type discardServer struct {
sshServer *sshutils.Server
}
func newDiscardServer(host string, port int, hostSigner ssh.Signer) (*discardServer, error) {
ds := &discardServer{}
// create underlying ssh server
sshServer, err := sshutils.NewServer(
"integration-discard-server",
utils.NetAddr{AddrNetwork: "tcp", Addr: fmt.Sprintf("%v:%v", host, port)},
ds,
[]ssh.Signer{hostSigner},
sshutils.AuthMethods{
PublicKey: ds.userKeyAuth,
},
)
if err != nil {
return nil, trace.Wrap(err)
}
ds.sshServer = sshServer
return ds, nil
}
func (s *discardServer) userKeyAuth(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
return nil, nil
}
func (s *discardServer) Start() error {
return s.sshServer.Start()
}
func (s *discardServer) Stop() {
s.sshServer.Close()
}
func (s *discardServer) HandleNewChan(conn net.Conn, sconn *ssh.ServerConn, newChannel ssh.NewChannel) {
channel, reqs, err := newChannel.Accept()
if err != nil {
sconn.Close()
conn.Close()
return
}
go s.handleChannel(channel, reqs)
}
func (s *discardServer) handleChannel(channel ssh.Channel, reqs <-chan *ssh.Request) {
defer channel.Close()
for {
select {
case req := <-reqs:
if req == nil {
return
}
if req.Type == "exec" {
successPayload := ssh.Marshal(struct{ C uint32 }{C: uint32(0)})
channel.SendRequest("exit-status", false, successPayload)
if req.WantReply {
req.Reply(true, nil)
}
return
}
if req.WantReply {
req.Reply(true, nil)
}
}
}
}
// commandOptions controls how the SSH command is built.
type commandOptions struct {
forwardAgent bool
forcePTY bool
controlPath string
socketPath string
proxyPort string
nodePort string
command string
}
// externalSSHCommand builds an external SSH command (using the ssh binary
// found in PATH) with the passed in parameters.
func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
var execArgs []string
// Don't check the host certificate as part of the testing an external SSH
// client, this is done elsewhere.
execArgs = append(execArgs, "-oStrictHostKeyChecking=no")
execArgs = append(execArgs, "-oUserKnownHostsFile=/dev/null")
// ControlMaster is often used by applications like Ansible.
if o.controlPath != "" {
execArgs = append(execArgs, "-oControlMaster=auto")
execArgs = append(execArgs, "-oControlPersist=1s")
execArgs = append(execArgs, "-oConnectTimeout=2")
execArgs = append(execArgs, fmt.Sprintf("-oControlPath=%v", o.controlPath))
}
// The -tt flag is used to force PTY allocation. It's often used by
// applications like Ansible.
if o.forcePTY {
execArgs = append(execArgs, "-tt")
}
// Connect to node on the passed in port.
execArgs = append(execArgs, fmt.Sprintf("-p %v", o.nodePort))
// Build proxy command.
proxyCommand := []string{"ssh"}
proxyCommand = append(proxyCommand, "-oStrictHostKeyChecking=no")
proxyCommand = append(proxyCommand, "-oUserKnownHostsFile=/dev/null")
if o.forwardAgent {
proxyCommand = append(proxyCommand, "-oForwardAgent=yes")
}
proxyCommand = append(proxyCommand, fmt.Sprintf("-p %v", o.proxyPort))
proxyCommand = append(proxyCommand, `%r@localhost -s proxy:%h:%p`)
// Add in ProxyCommand option, needed for all Teleport connections.
execArgs = append(execArgs, fmt.Sprintf("-oProxyCommand=%v", strings.Join(proxyCommand, " ")))
// Add in the host to connect to and the command to run when connected.
execArgs = append(execArgs, Host)
execArgs = append(execArgs, o.command)
// Find the OpenSSH binary.
sshpath, err := exec.LookPath("ssh")
if err != nil {
return nil, trace.Wrap(err)
}
// Create an exec.Command and tell it where to find the SSH agent.
cmd := exec.Command(sshpath, execArgs...)
cmd.Env = []string{fmt.Sprintf("SSH_AUTH_SOCK=%v", o.socketPath)}
return cmd, nil
}
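// Example (sketch): building an OpenSSH command that dials a node through the
// Teleport proxy via ProxyCommand. The socket path, ports and command below
// come from the surrounding test setup and are illustrative here:
//
//	cmd, err := externalSSHCommand(commandOptions{
//		forcePTY:   true,
//		socketPath: sockPath,
//		proxyPort:  inst.GetPortProxy(),
//		nodePort:   inst.GetPortSSH(),
//		command:    "echo hello",
//	})
//	output, err := cmd.Output()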
// createAgent creates an SSH agent with the passed in private key and
// certificate that can be used in tests. This is useful so tests don't
// clobber your system agent.
func createAgent(me *user.User, privateKeyByte []byte, certificateBytes []byte) (*teleagent.AgentServer, string, string, error) {
// create a path to the unix socket
sockDir, err := ioutil.TempDir("", "int-test")
if err != nil {
return nil, "", "", trace.Wrap(err)
}
sockPath := filepath.Join(sockDir, "agent.sock")
uid, err := strconv.Atoi(me.Uid)
if err != nil {
return nil, "", "", trace.Wrap(err)
}
gid, err := strconv.Atoi(me.Gid)
if err != nil {
return nil, "", "", trace.Wrap(err)
}
// transform the key and certificate bytes into something the agent can understand
publicKey, _, _, _, err := ssh.ParseAuthorizedKey(certificateBytes)
if err != nil {
return nil, "", "", trace.Wrap(err)
}
privateKey, err := ssh.ParseRawPrivateKey(privateKeyByte)
if err != nil {
return nil, "", "", trace.Wrap(err)
}
agentKey := agent.AddedKey{
PrivateKey: privateKey,
Certificate: publicKey.(*ssh.Certificate),
Comment: "",
LifetimeSecs: 0,
ConfirmBeforeUse: false,
}
// create a (unstarted) agent and add the key to it
teleAgent := teleagent.NewServer()
teleAgent.Add(agentKey)
// start the SSH agent
err = teleAgent.ListenUnixSocket(sockPath, uid, gid, 0600)
if err != nil {
return nil, "", "", trace.Wrap(err)
}
go teleAgent.Serve()
return teleAgent, sockDir, sockPath, nil
}
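// Example (sketch): running a throwaway agent for a test user's key pair and
// cleaning it up afterwards. Here userKey stands for a *client.Key obtained
// from the instance secrets; error handling is elided:
//
//	me, _ := user.Current()
//	teleAgent, sockDir, sockPath, err := createAgent(me, userKey.Priv, userKey.Cert)
//	defer closeAgent(teleAgent, sockDir)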
func closeAgent(teleAgent *teleagent.AgentServer, socketDirPath string) error {
err := teleAgent.Close()
if err != nil {
return trace.Wrap(err)
}
err = os.RemoveAll(socketDirPath)
if err != nil {
return trace.Wrap(err)
}
return nil
}
func fatalIf(err error) {
if err != nil {
log.Fatalf("%v at %v", err, string(debug.Stack()))
}
}
func makeKey() (priv, pub []byte) {
k, err := native.New(native.PrecomputeKeys(0))
if err != nil {
panic(err)
}
priv, pub, _ = k.GenerateKeyPair("")
return priv, pub
}