errcheck: add missing error checks in lib/reversetunnel

Andrew Lytvynov 2020-05-29 09:39:15 -07:00 committed by Andrew Lytvynov
parent 1d9e01bb80
commit 0529395df0
7 changed files with 42 additions and 34 deletions
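The changes below are driven by errcheck (github.com/kisielk/errcheck), a linter that reports call sites where a returned error is dropped. A minimal sketch of the bug class it catches, using illustrative code that is not from this repo:

package example

import "os"

// Before: errcheck flags this call because os.Remove's error vanishes.
func cleanup(path string) {
    os.Remove(path)
}

// After: the error reaches the caller, which can log, retry, or wrap it.
func cleanupChecked(path string) error {
    return os.Remove(path)
}

Running the linter over the package (for example, errcheck ./lib/reversetunnel/...) lists each offending call; the fixes below then choose, per call site, between returning the error, logging it, or removing the error from the signature entirely.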

View file

@@ -402,7 +402,9 @@ func (a *Agent) processRequests(conn *ssh.Client) error {
 	newDiscoveryC := conn.HandleChannelOpen(chanDiscovery)
 	// send first ping right away, then start a ping timer:
-	hb.SendRequest("ping", false, nil)
+	if _, err := hb.SendRequest("ping", false, nil); err != nil {
+		return trace.Wrap(err)
+	}
 	for {
 		select {
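For reference, the shape of this fix in isolation. This is a hedged sketch, with pingOnce and the package name invented for illustration: ssh.Channel.SendRequest returns (bool, error), and discarding the error hides a dead tunnel, so the heartbeat now fails fast.

package example

import (
    "github.com/gravitational/trace"
    "golang.org/x/crypto/ssh"
)

// pingOnce sends a keepalive request on an SSH channel and surfaces any
// transport failure to the caller instead of silently dropping it.
func pingOnce(ch ssh.Channel) error {
    // The boolean reply is irrelevant for a fire-and-forget ping, but the
    // error signals a broken connection and must propagate.
    if _, err := ch.SendRequest("ping", false, nil); err != nil {
        return trace.Wrap(err) // annotate the failure with a stack trace
    }
    return nil
}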

View file

@@ -253,7 +253,9 @@ func (m *AgentPool) closeAgents(matchKey *agentKey) {
 func (m *AgentPool) pollAndSyncAgents() {
 	ticker := time.NewTicker(defaults.ResyncInterval)
 	defer ticker.Stop()
-	m.FetchAndSyncAgents()
+	if err := m.FetchAndSyncAgents(); err != nil {
+		m.Warningf("Failed to get reverse tunnels: %v.", err)
+	}
 	for {
 		select {
 		case <-m.ctx.Done():
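Note the different policy here: inside a periodic loop the error is logged, not returned, because the next tick retries the sync anyway. A sketch of the same shape, with hypothetical names:

package example

import (
    "context"
    "log"
    "time"
)

// pollLoop runs sync once up front and then on every tick; a failure is
// logged and retried on the next interval rather than killing the goroutine.
func pollLoop(ctx context.Context, interval time.Duration, sync func() error) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    if err := sync(); err != nil {
        log.Printf("initial sync failed (will retry): %v", err)
    }
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            if err := sync(); err != nil {
                log.Printf("sync failed (will retry): %v", err)
            }
        }
    }
}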

View file

@@ -204,15 +204,13 @@ func (c *remoteConn) openDiscoveryChannel() (ssh.Channel, error) {
 // updateProxies is a non-blocking call that puts the new proxies
 // list so that remote connection can notify the remote agent
 // about the list update
-func (c *remoteConn) updateProxies(proxies []services.Server) error {
+func (c *remoteConn) updateProxies(proxies []services.Server) {
 	select {
 	case c.newProxiesC <- proxies:
-		return nil
 	default:
 		// Missing proxies update is no longer critical with more permissive
 		// discovery protocol that tolerates conflicting, stale or missing updates
 		c.log.Warnf("Discovery channel overflow at %v.", len(c.newProxiesC))
-		return nil
 	}
 }
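Since updateProxies can no longer fail, its error return is removed outright, which lets the two fan-out call sites below shed their error handling. The select with a default case is Go's standard non-blocking channel send; a small illustrative sketch:

package example

import "log"

// tryPublish attempts a non-blocking send: when the buffered channel is
// full, the update is dropped with a warning instead of blocking the caller.
func tryPublish(updates chan<- []string, proxies []string) {
    select {
    case updates <- proxies:
    default:
        log.Printf("update channel full (capacity %d), dropping update", cap(updates))
    }
}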

View file

@@ -312,9 +312,7 @@ func (s *localSite) fanOutProxies(proxies []services.Server) {
 	s.Lock()
 	defer s.Unlock()
 	for _, conn := range s.remoteConns {
-		if err := conn.updateProxies(proxies); err != nil {
-			conn.markInvalid(err)
-		}
+		conn.updateProxies(proxies)
 	}
 }

View file

@@ -302,9 +302,7 @@ func (s *remoteSite) fanOutProxies(proxies []services.Server) {
 	s.Lock()
 	defer s.Unlock()
 	for _, conn := range s.connections {
-		if err := conn.updateProxies(proxies); err != nil {
-			conn.markInvalid(err)
-		}
+		conn.updateProxies(proxies)
 	}
 }

View file

@@ -554,7 +554,7 @@ func (s *server) HandleNewChan(ccx *sshutils.ConnectionContext, nch ssh.NewChannel) {
 			msg = "Cannot open new SSH session on reverse tunnel. Are you connecting to the right port?"
 		}
 		s.Warn(msg)
-		nch.Reject(ssh.ConnectionFailed, msg)
+		s.rejectRequest(nch, ssh.ConnectionFailed, msg)
 		return
 	}
 }
@@ -592,7 +592,7 @@ func (s *server) handleHeartbeat(conn net.Conn, sconn *ssh.ServerConn, nch ssh.NewChannel) {
 	val, ok := sconn.Permissions.Extensions[extCertRole]
 	if !ok {
 		log.Errorf("Failed to accept connection, unknown role: %v.", val)
-		nch.Reject(ssh.ConnectionFailed, "unknown role")
+		s.rejectRequest(nch, ssh.ConnectionFailed, "unknown role")
 	}
 	switch {
 	// Node is dialing back.
@@ -627,7 +627,7 @@ func (s *server) handleNewCluster(conn net.Conn, sshConn *ssh.ServerConn, nch ssh.NewChannel) {
 	site, remoteConn, err := s.upsertRemoteCluster(conn, sshConn)
 	if err != nil {
 		log.Error(trace.Wrap(err))
-		nch.Reject(ssh.ConnectionFailed, "failed to accept incoming cluster connection")
+		s.rejectRequest(nch, ssh.ConnectionFailed, "failed to accept incoming cluster connection")
 		return
 	}
 	// accept the request and start the heartbeat on it:
@@ -899,6 +899,12 @@ func (s *server) fanOutProxies(proxies []services.Server) {
 	}
 }
 
+func (s *server) rejectRequest(ch ssh.NewChannel, reason ssh.RejectionReason, msg string) {
+	if err := ch.Reject(reason, msg); err != nil {
+		s.Warnf("Failed rejecting new channel request: %v", err)
+	}
+}
+
 // newRemoteSite helper creates and initializes 'remoteSite' instance
 func newRemoteSite(srv *server, domainName string, sconn ssh.Conn) (*remoteSite, error) {
 	connInfo, err := services.NewTunnelConnection(
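Reject writes the rejection frame to the underlying connection, so it can itself fail, typically when the peer has already hung up. The handler has no caller to report that to, so the new helper logs and moves on. An equivalent standalone sketch (names illustrative):

package example

import (
    "log"

    "golang.org/x/crypto/ssh"
)

// rejectBestEffort declines a new SSH channel and logs, rather than
// propagates, any failure to deliver the rejection.
func rejectBestEffort(nch ssh.NewChannel, reason ssh.RejectionReason, msg string) {
    if err := nch.Reject(reason, msg); err != nil {
        log.Printf("failed to reject %q channel: %v", nch.ChannelType(), err)
    }
}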

View file

@@ -212,12 +212,12 @@ func (p *transport) start() {
 		authServers, err := p.authClient.GetAuthServers()
 		if err != nil {
 			p.log.Errorf("Transport request failed: unable to get list of Auth Servers: %v.", err)
-			req.Reply(false, []byte("connection rejected: failed to connect to auth server"))
+			p.reply(req, false, []byte("connection rejected: failed to connect to auth server"))
 			return
 		}
 		if len(authServers) == 0 {
 			p.log.Errorf("Transport request failed: no auth servers found.")
-			req.Reply(false, []byte("connection rejected: failed to connect to auth server"))
+			p.reply(req, false, []byte("connection rejected: failed to connect to auth server"))
 			return
 		}
 		for _, as := range authServers {
@@ -226,32 +226,35 @@ func (p *transport) start() {
 	// Connect to the Kubernetes proxy.
 	case RemoteKubeProxy:
 		if p.component == teleport.ComponentReverseTunnelServer {
-			req.Reply(false, []byte("connection rejected: no remote kubernetes proxy"))
+			p.reply(req, false, []byte("connection rejected: no remote kubernetes proxy"))
 			return
 		}
 		// If Kubernetes is not configured, reject the connection.
 		if p.kubeDialAddr.IsEmpty() {
-			req.Reply(false, []byte("connection rejected: configure kubernetes proxy for this cluster."))
+			p.reply(req, false, []byte("connection rejected: configure kubernetes proxy for this cluster."))
 			return
 		}
 		servers = append(servers, p.kubeDialAddr.Addr)
 	// LocalNode requests are for the single server running in the agent pool.
 	case LocalNode:
 		if p.component == teleport.ComponentReverseTunnelServer {
-			req.Reply(false, []byte("connection rejected: no local node"))
+			p.reply(req, false, []byte("connection rejected: no local node"))
 			return
 		}
 		if p.server == nil {
-			req.Reply(false, []byte("connection rejected: server missing"))
+			p.reply(req, false, []byte("connection rejected: server missing"))
 			return
 		}
 		if p.sconn == nil {
-			req.Reply(false, []byte("connection rejected: server connection missing"))
+			p.reply(req, false, []byte("connection rejected: server connection missing"))
 			return
 		}
-		req.Reply(true, []byte("Connected."))
+		if err := req.Reply(true, []byte("Connected.")); err != nil {
+			p.log.Errorf("Failed responding OK to %q request: %v", req.Type, err)
+			return
+		}
 		// Hand connection off to the SSH server.
 		p.server.HandleConnection(utils.NewChConn(p.sconn, p.channel))
@@ -267,12 +270,15 @@ func (p *transport) start() {
 		if err != nil {
 			errorMessage := fmt.Sprintf("connection rejected: %v", err)
 			fmt.Fprint(p.channel.Stderr(), errorMessage)
-			req.Reply(false, []byte(errorMessage))
+			p.reply(req, false, []byte(errorMessage))
 			return
 		}
 
 		// Dial was successful.
-		req.Reply(true, []byte("Connected."))
+		if err := req.Reply(true, []byte("Connected.")); err != nil {
+			p.log.Errorf("Failed responding OK to %q request: %v", req.Type, err)
+			return
+		}
 		p.log.Debugf("Successfully dialed to %v %v, start proxying.", dreq.Address, dreq.ServerID)
 
 		// Start processing channel requests. Pass in a context that wraps the passed
@@ -321,17 +327,9 @@ func (p *transport) handleChannelRequests(closeContext context.Context, useTunnel bool) {
 			}
 			switch req.Type {
 			case utils.ConnectionTypeRequest:
-				err := req.Reply(useTunnel, nil)
-				if err != nil {
-					p.log.Debugf("Failed to reply to %v request: %v.", req.Type, err)
-					continue
-				}
+				p.reply(req, useTunnel, nil)
 			default:
-				err := req.Reply(false, nil)
-				if err != nil {
-					p.log.Debugf("Failed to reply to %v request: %v.", req.Type, err)
-					continue
-				}
+				p.reply(req, false, nil)
 			}
 		case <-closeContext.Done():
 			return
@@ -394,6 +392,12 @@ func (p *transport) tunnelDial(serverID string) (net.Conn, error) {
 	return conn, nil
 }
 
+func (p *transport) reply(req *ssh.Request, ok bool, msg []byte) {
+	if err := req.Reply(ok, msg); err != nil {
+		p.log.Warnf("Failed sending reply to %q request on SSH channel: %v", req.Type, err)
+	}
+}
+
 // directDial attempts to directly dial to the target host.
 func directDial(servers []string) (net.Conn, error) {
 	var errors []error
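The reply helper closes out the transport changes with the same best-effort policy: a failed reply to an SSH request usually just means the other side is gone, so it is logged at warning level rather than propagated. A self-contained sketch of the pattern, with illustrative names:

package example

import (
    "log"

    "golang.org/x/crypto/ssh"
)

// replyBestEffort answers an SSH request, logging delivery failures
// instead of returning them.
func replyBestEffort(req *ssh.Request, ok bool, payload []byte) {
    if err := req.Reply(ok, payload); err != nil {
        log.Printf("failed to reply to %q request: %v", req.Type, err)
    }
}

// serveRequests drains a request channel, declining everything so the
// remote side never blocks waiting for an answer.
func serveRequests(reqs <-chan *ssh.Request) {
    for req := range reqs {
        replyBestEffort(req, false, nil)
    }
}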