2016-12-30 09:21:28 +00:00
|
|
|
/*
|
|
|
|
Copyright 2015 Gravitational, Inc.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
2017-10-06 22:38:15 +00:00
|
|
|
|
2016-12-30 09:21:28 +00:00
|
|
|
package reversetunnel
|
|
|
|
|
|
|
|
import (
|
2017-10-06 22:38:15 +00:00
|
|
|
"context"
|
2016-12-30 09:21:28 +00:00
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2017-12-05 01:51:59 +00:00
|
|
|
"golang.org/x/crypto/ssh"
|
|
|
|
"golang.org/x/crypto/ssh/agent"
|
|
|
|
|
2017-12-28 02:51:46 +00:00
|
|
|
"github.com/gravitational/teleport"
|
2016-12-30 09:21:28 +00:00
|
|
|
"github.com/gravitational/teleport/lib/auth"
|
|
|
|
"github.com/gravitational/teleport/lib/defaults"
|
2017-10-06 00:29:31 +00:00
|
|
|
"github.com/gravitational/teleport/lib/services"
|
2017-12-05 01:51:59 +00:00
|
|
|
"github.com/gravitational/teleport/lib/srv/forward"
|
2016-12-30 09:21:28 +00:00
|
|
|
"github.com/gravitational/teleport/lib/utils"
|
|
|
|
"github.com/gravitational/trace"
|
|
|
|
|
2018-05-19 23:58:14 +00:00
|
|
|
oxyforward "github.com/gravitational/oxy/forward"
|
2017-12-28 02:51:46 +00:00
|
|
|
"github.com/gravitational/roundtrip"
|
2017-10-08 01:11:03 +00:00
|
|
|
"github.com/jonboulle/clockwork"
|
|
|
|
log "github.com/sirupsen/logrus"
|
2016-12-30 09:21:28 +00:00
|
|
|
)
|
|
|
|
|
2017-10-06 00:29:31 +00:00
|
|
|
// remoteSite is a remote site that established the inbound connection to
// the local reverse tunnel server, and now it can provide access to the
// cluster behind it.
type remoteSite struct {
	sync.RWMutex

	*log.Entry

	// domainName is the name of the remote cluster this site represents.
	domainName string
	// connections are the active reverse tunnel connections established
	// by the remote cluster's agents.
	connections []*remoteConn
	// lastUsed is the round-robin cursor used by nextConn to balance
	// load across connections.
	lastUsed int
	// srv is the reverse tunnel server this site belongs to.
	srv *server
	// transport proxies HTTP requests over the tunnel (see handleAuthProxy).
	transport *http.Transport

	// connInfo describes this tunnel connection; it is cloned and refreshed
	// on every registered heartbeat.
	connInfo services.TunnelConnection
	// lastConnInfo is the most recent connection info recorded by
	// registerHeartbeat; it carries the last heartbeat time and expiry.
	lastConnInfo services.TunnelConnection

	// ctx is canceled when the site is closed.
	ctx context.Context
	// cancel cancels ctx, stopping the site's background goroutines.
	cancel context.CancelFunc
	// clock is the time source used for heartbeat expiry and
	// online/offline status calculations.
	clock clockwork.Clock

	// certificateCache caches host certificates for the forwarding server.
	certificateCache *certificateCache

	// localClient provides access to the Auth Server API of the cluster
	// within which reversetunnel.Server is running.
	localClient auth.ClientI
	// remoteClient provides access to the Auth Server API of the remote cluster that
	// this site belongs to.
	remoteClient auth.ClientI
	// localAccessPoint provides access to a cached subset of the Auth Server API of
	// the local cluster.
	localAccessPoint auth.AccessPoint
	// remoteAccessPoint provides access to a cached subset of the Auth Server API of
	// the remote cluster this site belongs to.
	remoteAccessPoint auth.AccessPoint

	// remoteCA is the last remote certificate authority recorded by the client.
	// It is used to detect CA rotation status changes. If the rotation
	// state has been changed, the tunnel will reconnect to re-create the client
	// with new settings.
	remoteCA services.CertAuthority
}
|
|
|
|
|
2017-11-25 01:09:11 +00:00
|
|
|
// getRemoteClient returns an Auth Server API client for the remote cluster.
// It returns a TLS client when the local cluster already has the remote
// cluster's TLS key pairs; otherwise it falls back to a legacy SSH-tunneled
// client. The bool return value reports whether the legacy client was used.
func (s *remoteSite) getRemoteClient() (auth.ClientI, bool, error) {
	// check if all cert authorities are initiated and if everything is OK
	ca, err := s.srv.localAccessPoint.GetCertAuthority(services.CertAuthID{Type: services.HostCA, DomainName: s.domainName}, false)
	if err != nil {
		return nil, false, trace.Wrap(err)
	}
	keys := ca.GetTLSKeyPairs()
	// the fact that cluster has keys to remote CA means that the key exchange has completed
	if len(keys) != 0 {
		s.Debugf("Using TLS client to remote cluster.")
		pool, err := services.CertPool(ca)
		if err != nil {
			return nil, false, trace.Wrap(err)
		}
		tlsConfig := s.srv.ClientTLS.Clone()
		tlsConfig.RootCAs = pool
		// encode the name of this cluster to identify this cluster,
		// connecting to the remote one (it is used to find the right certificate
		// authority to verify)
		tlsConfig.ServerName = auth.EncodeClusterName(s.srv.ClusterName)
		// dial through the reverse tunnel rather than directly over the network
		clt, err := auth.NewTLSClientWithDialer(s.authServerContextDialer, tlsConfig)
		if err != nil {
			return nil, false, trace.Wrap(err)
		}
		return clt, false, nil
	}
	// create legacy client that will continue to perform certificate
	// exchange attempts
	s.Debugf("Created legacy SSH client to remote cluster.")
	// the URL is a stub: the client dials through dialAccessPoint, not TCP
	clt, err := auth.NewClient("http://stub:0", s.dialAccessPoint)
	if err != nil {
		return nil, false, trace.Wrap(err)
	}
	return clt, true, nil
}
|
|
|
|
|
|
|
|
// authServerContextDialer adapts DialAuthServer to the context-dialer
// signature expected by auth.NewTLSClientWithDialer. The ctx, network and
// address arguments are ignored: the target is always the remote cluster's
// auth server reached through the reverse tunnel.
func (s *remoteSite) authServerContextDialer(ctx context.Context, network, address string) (net.Conn, error) {
	return s.DialAuthServer()
}
|
|
|
|
|
2018-07-21 01:50:29 +00:00
|
|
|
// GetTunnelsCount returns the number of active reverse tunnel connections
// currently established by the remote cluster's agents.
func (s *remoteSite) GetTunnelsCount() int {
	return s.connectionCount()
}
|
|
|
|
|
2017-04-07 23:51:31 +00:00
|
|
|
// CachingAccessPoint returns a cached access point to the Auth Server API
// of the remote cluster this site belongs to.
func (s *remoteSite) CachingAccessPoint() (auth.AccessPoint, error) {
	return s.remoteAccessPoint, nil
}
|
|
|
|
|
|
|
|
// GetClient returns a client to the Auth Server API of the remote cluster.
func (s *remoteSite) GetClient() (auth.ClientI, error) {
	return s.remoteClient, nil
}
|
|
|
|
|
|
|
|
func (s *remoteSite) String() string {
|
|
|
|
return fmt.Sprintf("remoteSite(%v)", s.domainName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// connectionCount returns the current number of tunnel connections,
// including connections that may already be marked invalid.
func (s *remoteSite) connectionCount() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.connections)
}
|
|
|
|
|
2017-10-12 23:51:18 +00:00
|
|
|
func (s *remoteSite) hasValidConnections() bool {
|
2017-10-13 16:11:13 +00:00
|
|
|
s.RLock()
|
|
|
|
defer s.RUnlock()
|
2017-10-12 23:51:18 +00:00
|
|
|
|
|
|
|
for _, conn := range s.connections {
|
|
|
|
if !conn.isInvalid() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-11-25 01:09:11 +00:00
|
|
|
// Clos closes remote cluster connections
|
|
|
|
func (s *remoteSite) Close() error {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
2017-12-28 02:51:46 +00:00
|
|
|
s.cancel()
|
2017-11-25 01:09:11 +00:00
|
|
|
for i := range s.connections {
|
|
|
|
s.connections[i].Close()
|
|
|
|
}
|
|
|
|
s.connections = []*remoteConn{}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-12-30 09:21:28 +00:00
|
|
|
// nextConn returns the next active tunnel connection using round-robin
// selection, pruning connections that have been marked invalid along the
// way. It returns a NotFound error once no connections are left.
func (s *remoteSite) nextConn() (*remoteConn, error) {
	s.Lock()
	defer s.Unlock()

	for {
		if len(s.connections) == 0 {
			return nil, trace.NotFound("no active tunnels to cluster %v", s.GetName())
		}
		// advance the round-robin cursor
		s.lastUsed = (s.lastUsed + 1) % len(s.connections)
		remoteConn := s.connections[s.lastUsed]
		if !remoteConn.isInvalid() {
			return remoteConn, nil
		}
		// remove the invalid connection from the list and close it in a
		// separate goroutine so we don't block while holding the lock
		s.connections = append(s.connections[:s.lastUsed], s.connections[s.lastUsed+1:]...)
		s.lastUsed = 0
		go remoteConn.Close()
	}
}
|
|
|
|
|
|
|
|
// addConn helper adds a new active remote cluster connection to the list
|
|
|
|
// of such connections
|
|
|
|
func (s *remoteSite) addConn(conn net.Conn, sshConn ssh.Conn) (*remoteConn, error) {
|
|
|
|
rc := &remoteConn{
|
|
|
|
sshConn: sshConn,
|
|
|
|
conn: conn,
|
2017-10-06 22:38:15 +00:00
|
|
|
log: s.Entry,
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
s.connections = append(s.connections, rc)
|
|
|
|
s.lastUsed = 0
|
|
|
|
return rc, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *remoteSite) GetStatus() string {
|
2018-09-21 20:07:48 +00:00
|
|
|
connInfo, err := s.getLastConnInfo()
|
2017-10-06 00:29:31 +00:00
|
|
|
if err != nil {
|
2017-12-28 02:51:46 +00:00
|
|
|
return teleport.RemoteClusterStatusOffline
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
2017-12-28 02:51:46 +00:00
|
|
|
return services.TunnelConnectionStatus(s.clock, connInfo)
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
|
|
|
|
2017-10-13 16:11:13 +00:00
|
|
|
// copyConnInfo returns a clone of the site's tunnel connection info,
// safe for the caller to mutate without holding the lock.
func (s *remoteSite) copyConnInfo() services.TunnelConnection {
	s.RLock()
	defer s.RUnlock()
	return s.connInfo.Clone()
}
|
|
|
|
|
2018-09-21 20:07:48 +00:00
|
|
|
// setLastConnInfo records a clone of connInfo as the most recently
// registered connection state.
func (s *remoteSite) setLastConnInfo(connInfo services.TunnelConnection) {
	s.Lock()
	defer s.Unlock()
	s.lastConnInfo = connInfo.Clone()
}
|
|
|
|
|
|
|
|
// getLastConnInfo returns a clone of the most recently registered
// connection state, or a NotFound error if no heartbeat has been
// recorded yet.
func (s *remoteSite) getLastConnInfo() (services.TunnelConnection, error) {
	s.RLock()
	defer s.RUnlock()
	if s.lastConnInfo == nil {
		return nil, trace.NotFound("no last connection found")
	}
	return s.lastConnInfo.Clone(), nil
}
|
|
|
|
|
2017-10-06 00:29:31 +00:00
|
|
|
func (s *remoteSite) registerHeartbeat(t time.Time) {
|
2017-10-13 16:11:13 +00:00
|
|
|
connInfo := s.copyConnInfo()
|
|
|
|
connInfo.SetLastHeartbeat(t)
|
|
|
|
connInfo.SetExpiry(s.clock.Now().Add(defaults.ReverseTunnelOfflineThreshold))
|
2018-09-21 20:07:48 +00:00
|
|
|
s.setLastConnInfo(connInfo)
|
2017-12-05 01:51:59 +00:00
|
|
|
err := s.localAccessPoint.UpsertTunnelConnection(connInfo)
|
2017-10-06 00:29:31 +00:00
|
|
|
if err != nil {
|
2017-10-09 01:07:01 +00:00
|
|
|
s.Warningf("failed to register heartbeat: %v", err)
|
2017-10-06 00:29:31 +00:00
|
|
|
}
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
|
|
|
|
2017-10-12 23:51:18 +00:00
|
|
|
// deleteConnectionRecord deletes connection record to let know peer proxies
// that this node lost the connection and needs to be discovered.
// The deletion is best-effort: the returned error is deliberately ignored.
func (s *remoteSite) deleteConnectionRecord() {
	s.localAccessPoint.DeleteTunnelConnection(s.connInfo.GetClusterName(), s.connInfo.GetName())
}
|
|
|
|
|
2017-10-14 02:26:49 +00:00
|
|
|
// handleHearbeat receives heartbeat messages from the connected agent
|
|
|
|
// if the agent has missed several heartbeats in a row, Proxy marks
|
|
|
|
// the connection as invalid.
|
2016-12-30 09:21:28 +00:00
|
|
|
func (s *remoteSite) handleHeartbeat(conn *remoteConn, ch ssh.Channel, reqC <-chan *ssh.Request) {
|
|
|
|
defer func() {
|
2017-10-06 22:38:15 +00:00
|
|
|
s.Infof("cluster connection closed")
|
2016-12-30 09:21:28 +00:00
|
|
|
conn.Close()
|
|
|
|
}()
|
|
|
|
for {
|
|
|
|
select {
|
2017-10-06 22:38:15 +00:00
|
|
|
case <-s.ctx.Done():
|
|
|
|
s.Infof("closing")
|
|
|
|
return
|
2016-12-30 09:21:28 +00:00
|
|
|
case req := <-reqC:
|
|
|
|
if req == nil {
|
2017-10-12 23:51:18 +00:00
|
|
|
s.Infof("cluster agent disconnected")
|
2016-12-30 09:21:28 +00:00
|
|
|
conn.markInvalid(trace.ConnectionProblem(nil, "agent disconnected"))
|
2017-10-12 23:51:18 +00:00
|
|
|
if !s.hasValidConnections() {
|
|
|
|
s.Debugf("deleting connection record")
|
|
|
|
s.deleteConnectionRecord()
|
|
|
|
}
|
2016-12-30 09:21:28 +00:00
|
|
|
return
|
|
|
|
}
|
2017-10-09 01:07:01 +00:00
|
|
|
var timeSent time.Time
|
|
|
|
var roundtrip time.Duration
|
|
|
|
if req.Payload != nil {
|
|
|
|
if err := timeSent.UnmarshalText(req.Payload); err == nil {
|
|
|
|
roundtrip = s.srv.Clock.Now().Sub(timeSent)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if roundtrip != 0 {
|
2017-10-10 01:58:24 +00:00
|
|
|
s.WithFields(log.Fields{"latency": roundtrip}).Debugf("ping <- %v", conn.conn.RemoteAddr())
|
2017-10-09 01:07:01 +00:00
|
|
|
} else {
|
|
|
|
s.Debugf("ping <- %v", conn.conn.RemoteAddr())
|
|
|
|
}
|
2017-10-06 00:29:31 +00:00
|
|
|
go s.registerHeartbeat(time.Now())
|
2017-10-18 01:29:54 +00:00
|
|
|
// since we block on select, time.After is re-created everytime we process a request.
|
2017-10-09 01:07:01 +00:00
|
|
|
case <-time.After(defaults.ReverseTunnelOfflineThreshold):
|
|
|
|
conn.markInvalid(trace.ConnectionProblem(nil, "no heartbeats for %v", defaults.ReverseTunnelOfflineThreshold))
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetName returns the name of the remote cluster.
func (s *remoteSite) GetName() string {
	return s.domainName
}
|
|
|
|
|
|
|
|
func (s *remoteSite) GetLastConnected() time.Time {
|
2018-09-21 20:07:48 +00:00
|
|
|
connInfo, err := s.getLastConnInfo()
|
2017-10-06 00:29:31 +00:00
|
|
|
if err != nil {
|
|
|
|
return time.Time{}
|
|
|
|
}
|
|
|
|
return connInfo.GetLastHeartbeat()
|
2016-12-30 09:21:28 +00:00
|
|
|
}
|
|
|
|
|
2018-04-08 21:37:33 +00:00
|
|
|
func (s *remoteSite) compareAndSwapCertAuthority(ca services.CertAuthority) error {
|
|
|
|
s.Lock()
|
|
|
|
defer s.Unlock()
|
|
|
|
|
|
|
|
if s.remoteCA == nil {
|
|
|
|
s.remoteCA = ca
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
rotation := s.remoteCA.GetRotation()
|
|
|
|
if rotation.Matches(ca.GetRotation()) {
|
|
|
|
s.remoteCA = ca
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
s.remoteCA = ca
|
|
|
|
return trace.CompareFailed("remote certificate authority rotation has been updated")
|
|
|
|
}
|
|
|
|
|
2017-10-06 22:38:15 +00:00
|
|
|
func (s *remoteSite) periodicSendDiscoveryRequests() {
|
|
|
|
ticker := time.NewTicker(defaults.ReverseTunnelAgentHeartbeatPeriod)
|
|
|
|
defer ticker.Stop()
|
|
|
|
if err := s.sendDiscoveryRequest(); err != nil {
|
2017-10-08 01:11:03 +00:00
|
|
|
s.Warningf("failed to send discovery: %v", err)
|
2017-10-06 22:38:15 +00:00
|
|
|
}
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-s.ctx.Done():
|
|
|
|
s.Debugf("closing")
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
err := s.sendDiscoveryRequest()
|
|
|
|
if err != nil {
|
2017-10-08 01:11:03 +00:00
|
|
|
s.Warningf("could not send discovery request: %v", trace.DebugReport(err))
|
2017-10-06 22:38:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-08 21:37:33 +00:00
|
|
|
// updateCertAuthorities updates local and remote cert authorities:
// it pushes this cluster's host and user CAs to the remote cluster, and
// pulls the remote cluster's host CA into the local cluster. It returns
// a CompareFailed error if the remote CA's rotation state changed.
func (s *remoteSite) updateCertAuthorities() error {
	// update main cluster cert authorities on the remote side
	// remote side makes sure that only relevant fields
	// are updated
	hostCA, err := s.localClient.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: s.srv.ClusterName,
	}, false)
	if err != nil {
		return trace.Wrap(err)
	}
	err = s.remoteClient.RotateExternalCertAuthority(hostCA)
	if err != nil {
		return trace.Wrap(err)
	}

	userCA, err := s.localClient.GetCertAuthority(services.CertAuthID{
		Type:       services.UserCA,
		DomainName: s.srv.ClusterName,
	}, false)
	if err != nil {
		return trace.Wrap(err)
	}
	err = s.remoteClient.RotateExternalCertAuthority(userCA)
	if err != nil {
		return trace.Wrap(err)
	}

	// update remote cluster's host cert authority on a local cluster
	// local proxy is authorized to perform this operation only for
	// host authorities of remote clusters.
	remoteCA, err := s.remoteClient.GetCertAuthority(services.CertAuthID{
		Type:       services.HostCA,
		DomainName: s.domainName,
	}, false)
	if err != nil {
		return trace.Wrap(err)
	}

	// sanity check: refuse a CA that claims a different cluster name
	if remoteCA.GetClusterName() != s.domainName {
		return trace.BadParameter(
			"remote cluster sent different cluster name %v instead of expected one %v",
			remoteCA.GetClusterName(), s.domainName)
	}
	err = s.localClient.UpsertCertAuthority(remoteCA)
	if err != nil {
		return trace.Wrap(err)
	}

	// detect rotation changes; CompareFailed triggers a reconnect upstream
	return s.compareAndSwapCertAuthority(remoteCA)
}
|
|
|
|
|
|
|
|
// periodicUpdateCertAuthorities exchanges certificate authorities with the
// remote cluster on every polling period. When the remote CA's rotation
// state has changed (CompareFailed), the site is removed and closed to
// force a reconnect with the updated authorities.
func (s *remoteSite) periodicUpdateCertAuthorities() {
	s.Debugf("Ticking with period %v", s.srv.PollingPeriod)
	ticker := time.NewTicker(s.srv.PollingPeriod)
	defer ticker.Stop()
	for {
		select {
		case <-s.ctx.Done():
			s.Debugf("Context is closing.")
			return
		case <-ticker.C:
			err := s.updateCertAuthorities()
			if err != nil {
				switch {
				case trace.IsNotFound(err):
					s.Debugf("Remote cluster %v does not support cert authorities rotation yet.", s.domainName)
				case trace.IsCompareFailed(err):
					s.Infof("Remote cluster has updated certificate authorities, going to force reconnect.")
					// tear the site down so the tunnel is re-established
					// with the new certificate authorities
					s.srv.RemoveSite(s.domainName)
					s.Close()
					return
				case trace.IsConnectionProblem(err):
					s.Debugf("Remote cluster %v is offline.", s.domainName)
				default:
					s.Warningf("Could not perform cert authorities updated: %v.", trace.DebugReport(err))
				}
			}
		}
	}
}
|
|
|
|
|
2017-10-08 01:11:03 +00:00
|
|
|
// isOnline reports whether the given tunnel connection is currently
// considered online according to its heartbeat status.
func (s *remoteSite) isOnline(conn services.TunnelConnection) bool {
	return services.TunnelConnectionStatus(s.clock, conn) == teleport.RemoteClusterStatusOnline
}
|
|
|
|
|
2017-10-18 22:46:34 +00:00
|
|
|
// findDisconnectedProxies finds proxies that do not have inbound reverse tunnel
|
|
|
|
// connections
|
2017-10-06 22:38:15 +00:00
|
|
|
func (s *remoteSite) findDisconnectedProxies() ([]services.Server, error) {
|
2017-10-18 22:46:34 +00:00
|
|
|
connInfo := s.copyConnInfo()
|
2018-09-27 18:50:13 +00:00
|
|
|
conns, err := s.localAccessPoint.GetTunnelConnections(s.domainName, services.SkipValidation())
|
2017-10-06 22:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
connected := make(map[string]bool)
|
|
|
|
for _, conn := range conns {
|
2017-10-09 01:07:01 +00:00
|
|
|
if s.isOnline(conn) {
|
|
|
|
connected[conn.GetProxyName()] = true
|
|
|
|
}
|
2017-10-06 22:38:15 +00:00
|
|
|
}
|
2017-12-05 01:51:59 +00:00
|
|
|
proxies, err := s.localAccessPoint.GetProxies()
|
2017-10-06 22:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
var missing []services.Server
|
|
|
|
for i := range proxies {
|
|
|
|
proxy := proxies[i]
|
2017-10-18 22:46:34 +00:00
|
|
|
// do not add this proxy to the list of disconnected proxies
|
|
|
|
if !connected[proxy.GetName()] && proxy.GetName() != connInfo.GetProxyName() {
|
2017-10-06 22:38:15 +00:00
|
|
|
missing = append(missing, proxy)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return missing, nil
|
|
|
|
}
|
|
|
|
|
2017-10-14 02:26:49 +00:00
|
|
|
// sendDiscoveryRequest sends special "discovery requests"
// back to the connected agent.
// A discovery request consists of the proxies that are part
// of the cluster, but did not receive the connection from the agent.
// The agent will act on a discovery request attempting
// to establish connection to the proxies that were not discovered.
// See package documentation for more details.
func (s *remoteSite) sendDiscoveryRequest() error {
	disconnectedProxies, err := s.findDisconnectedProxies()
	if err != nil {
		return trace.Wrap(err)
	}
	// nothing to discover: every proxy already has a tunnel
	if len(disconnectedProxies) == 0 {
		return nil
	}
	clusterName, err := s.localAccessPoint.GetDomainName()
	if err != nil {
		return trace.Wrap(err)
	}
	connInfo := s.copyConnInfo()
	s.Debugf("proxy %q is going to request discovery for: %q", connInfo.GetProxyName(), Proxies(disconnectedProxies))
	req := discoveryRequest{
		ClusterName: clusterName,
		Proxies:     disconnectedProxies,
	}
	payload, err := marshalDiscoveryRequest(req)
	if err != nil {
		return trace.Wrap(err)
	}
	// send delivers the discovery payload over one tunnel connection,
	// marking that connection invalid if the request cannot be sent
	send := func() error {
		remoteConn, err := s.nextConn()
		if err != nil {
			return trace.Wrap(err)
		}
		discoveryC, err := remoteConn.openDiscoveryChannel()
		if err != nil {
			return trace.Wrap(err)
		}
		_, err = discoveryC.SendRequest("discovery", false, payload)
		if err != nil {
			remoteConn.markInvalid(err)
			s.Errorf("disconnecting cluster on %v, err: %v",
				remoteConn.conn.RemoteAddr(),
				err)
			return trace.Wrap(err)
		}
		return nil
	}

	// loop over existing connections (reverse tunnels) and try to send discovery
	// requests to the remote cluster
	for i := 0; i < s.connectionCount(); i++ {
		err := send()
		if err != nil {
			s.Warningf("%v", err)
		}
	}
	return nil
}
|
|
|
|
|
2016-12-30 09:21:28 +00:00
|
|
|
// dialAccessPoint establishes a connection from the proxy (reverse tunnel server)
// back into the client using previously established tunnel.
// The network and addr arguments are ignored: the access point is always
// reached via a dedicated SSH channel over one of the tunnel connections.
func (s *remoteSite) dialAccessPoint(network, addr string) (net.Conn, error) {
	try := func() (net.Conn, error) {
		remoteConn, err := s.nextConn()
		if err != nil {
			return nil, trace.Wrap(err)
		}
		// open an SSH channel dedicated to access point traffic
		ch, _, err := remoteConn.sshConn.OpenChannel(chanAccessPoint, nil)
		if err != nil {
			remoteConn.markInvalid(err)
			s.Errorf("disconnecting cluster on %v, err: %v",
				remoteConn.conn.RemoteAddr(),
				err)
			return nil, trace.Wrap(err)
		}
		s.Debugf("success dialing to cluster")
		return utils.NewChConn(remoteConn.sshConn, ch), nil
	}

	// retry over the remaining tunnel connections; nextConn prunes invalid
	// ones, and its NotFound error (no tunnels left) terminates the loop
	for {
		conn, err := try()
		if err != nil {
			if trace.IsNotFound(err) {
				return nil, trace.Wrap(err)
			}
			continue
		}
		return conn, nil
	}
}
|
|
|
|
|
2017-12-05 01:51:59 +00:00
|
|
|
// DialAuthServer establishes a connection to the Auth Server of the remote
// cluster through the reverse tunnel.
func (s *remoteSite) DialAuthServer() (conn net.Conn, err error) {
	return s.connThroughTunnel(chanTransportDialReq, RemoteAuthServer)
}
|
|
|
|
|
2016-12-30 09:21:28 +00:00
|
|
|
// Dial is used to connect a requesting client (say, tsh) to an SSH server
// located in a remote connected site, the connection goes through the
// reverse proxy tunnel.
// When the cluster records sessions at the proxy, the caller must supply a
// user agent so the session can be terminated and recorded in-proxy.
func (s *remoteSite) Dial(from net.Addr, to net.Addr, userAgent agent.Agent) (net.Conn, error) {
	clusterConfig, err := s.localAccessPoint.GetClusterConfig()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// if the proxy is in recording mode use the agent to dial and build a
	// in-memory forwarding server
	if clusterConfig.GetSessionRecording() == services.RecordAtProxy {
		if userAgent == nil {
			return nil, trace.BadParameter("user agent missing")
		}
		return s.dialWithAgent(from, to, userAgent)
	}
	// otherwise dial a plain TCP connection through the tunnel
	return s.DialTCP(from, to)
}
|
|
|
|
|
2018-08-02 00:25:16 +00:00
|
|
|
// DialTCP dials a plain TCP connection from addr "from" to addr "to"
// through the reverse tunnel, without any in-proxy session recording.
func (s *remoteSite) DialTCP(from, to net.Addr) (net.Conn, error) {
	s.Debugf("Dialing from %v to %v", from, to)

	conn, err := s.connThroughTunnel(chanTransportDialReq, to.String())
	if err != nil {
		return nil, trace.Wrap(err)
	}

	return conn, nil
}
|
|
|
|
|
|
|
|
// dialWithAgent dials the target node through the reverse tunnel and wraps
// the connection in an in-memory SSH forwarding server so the session can
// be recorded at the proxy. The supplied user agent is used to authenticate
// against the target node.
func (s *remoteSite) dialWithAgent(from, to net.Addr, userAgent agent.Agent) (net.Conn, error) {
	s.Debugf("Dialing with an agent from %v to %v", from, to)

	// get a host certificate for the forwarding node from the cache
	hostCertificate, err := s.certificateCache.GetHostCertificate(to.String())
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// dial the actual target through the tunnel
	targetConn, err := s.connThroughTunnel(chanTransportDialReq, to.String())
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// create a forwarding server that serves a single ssh connection on it. we
	// don't need to close this server it will close and release all resources
	// once conn is closed.
	//
	// note, a localClient is passed to the forwarding server, that's to make
	// sure that the session gets recorded in the local cluster instead of the
	// remote cluster.
	serverConfig := forward.ServerConfig{
		AuthClient:      s.localClient,
		UserAgent:       userAgent,
		TargetConn:      targetConn,
		SrcAddr:         from,
		DstAddr:         to,
		HostCertificate: hostCertificate,
		Ciphers:         s.srv.Config.Ciphers,
		KEXAlgorithms:   s.srv.Config.KEXAlgorithms,
		MACAlgorithms:   s.srv.Config.MACAlgorithms,
		DataDir:         s.srv.Config.DataDir,
	}
	remoteServer, err := forward.New(serverConfig)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	go remoteServer.Serve()

	// return a connection to the forwarding server
	conn, err := remoteServer.Dial()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	return conn, nil
}
|
|
|
|
|
|
|
|
// connThroughTunnel opens a connection to the remote cluster by sending a
// transport request of transportType with the given payload (typically the
// target address) over one of the established reverse tunnels. It retries
// across tunnel connections until one succeeds or chanTransportConn
// signals that retrying is pointless (stop == true).
func (s *remoteSite) connThroughTunnel(transportType string, data string) (conn net.Conn, err error) {
	var stop bool

	s.Debugf("Requesting %v connection to remote site with payload: %v", transportType, data)

	// loop through existing TCP/IP connections (reverse tunnels) and try
	// to establish an inbound connection-over-ssh-channel to the remote
	// cluster (AKA "remotetunnel agent"):
	for i := 0; i < s.connectionCount() && !stop; i++ {
		conn, stop, err = s.chanTransportConn(transportType, data)
		if err == nil {
			return conn, nil
		}
		s.Warnf("Request for %v connection to remote site failed: %v", transportType, err)
	}
	// didn't connect and no error? this means we didn't have any connected
	// tunnels to try
	if err == nil {
		err = trace.ConnectionProblem(nil, "%v is offline", s.GetName())
	}
	return nil, err
}
|
|
|
|
|
2017-12-05 01:51:59 +00:00
|
|
|
// chanTransportConn attempts one transport dial over a single tunnel
// connection. The returned bool tells the caller whether to stop retrying
// on other tunnels: it becomes true once the transport request reached the
// remote agent (even if the agent's dial failed), and stays false for
// channel-level failures that are worth retrying elsewhere.
func (s *remoteSite) chanTransportConn(transportType string, addr string) (net.Conn, bool, error) {
	var stop bool

	remoteConn, err := s.nextConn()
	if err != nil {
		return nil, stop, trace.Wrap(err)
	}
	var ch ssh.Channel
	ch, _, err = remoteConn.sshConn.OpenChannel(chanTransport, nil)
	if err != nil {
		// channel failure means this tunnel is broken; mark it so it is pruned
		remoteConn.markInvalid(err)
		return nil, stop, trace.Wrap(err)
	}
	// send a special SSH out-of-band request called "teleport-transport"
	// the agent on the other side will create a new TCP/IP connection to
	// 'addr' on its network and will start proxying that connection over
	// this SSH channel:
	var dialed bool
	dialed, err = ch.SendRequest(transportType, true, []byte(addr))
	if err != nil {
		return nil, stop, trace.Wrap(err)
	}
	stop = true
	if !dialed {
		defer ch.Close()
		// pull the error message from the tunnel client (remote cluster)
		// passed to us via stderr:
		errMessage, _ := ioutil.ReadAll(ch.Stderr())
		if errMessage == nil {
			errMessage = []byte("failed connecting to " + addr)
		}
		return nil, stop, trace.Errorf(strings.TrimSpace(string(errMessage)))
	}
	return utils.NewChConn(remoteConn.sshConn, ch), stop, nil
}
|
|
|
|
|
2016-12-30 09:21:28 +00:00
|
|
|
// handleAuthProxy forwards an incoming HTTP request to the remote cluster
// over the reverse tunnel via the site's transport.
func (s *remoteSite) handleAuthProxy(w http.ResponseWriter, r *http.Request) {
	s.Debugf("handleAuthProxy()")

	fwd, err := oxyforward.New(oxyforward.RoundTripper(s.transport), oxyforward.Logger(s.Entry))
	if err != nil {
		roundtrip.ReplyJSON(w, http.StatusInternalServerError, err.Error())
		return
	}
	// scheme/host are stubs: s.transport does the actual dialing
	r.URL.Scheme = "http"
	r.URL.Host = "stub"
	fwd.ServeHTTP(w, r)
}
|