Create context once either "session" or "direct-tcpip" channel has been opened in the forwarding server.
Russell Jones 2018-03-30 21:42:08 +00:00
parent 29190ed28b
commit 9454d0133a
3 changed files with 314 additions and 147 deletions


@@ -15,6 +15,7 @@ import (
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
@@ -1012,46 +1013,73 @@ func (s *discardServer) handleChannel(channel ssh.Channel, reqs <-chan *ssh.Requ
}
}
// commandOptions controls how the SSH command is built.
type commandOptions struct {
forwardAgent bool
forcePTY bool
controlPath string
socketPath string
proxyPort string
nodePort string
command string
}
// externalSSHCommand runs an external SSH command (if an external ssh binary
// exists) with the passed in parameters.
func externalSSHCommand(forwardAgent bool, socketPath string, proxyPort string, nodePort string, command string) (*exec.Cmd, error) {
func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {
var execArgs []string
// don't check the host certificate during tests
// Don't check the host certificate as part of testing an external SSH
// client; this is done elsewhere.
execArgs = append(execArgs, "-oStrictHostKeyChecking=no")
execArgs = append(execArgs, "-oUserKnownHostsFile=/dev/null")
// connect to node on the passed in port
execArgs = append(execArgs, "-p")
execArgs = append(execArgs, nodePort)
// build proxy command
var proxyCommand string
switch forwardAgent {
case true:
proxyCommand = fmt.Sprintf("ProxyCommand ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oForwardAgent=yes -p %v %%r@localhost -s proxy:%%h:%%p", proxyPort)
case false:
proxyCommand = fmt.Sprintf("ProxyCommand ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -p %v %%r@localhost -s proxy:%%h:%%p", proxyPort)
// ControlMaster is often used by applications like Ansible.
if o.controlPath != "" {
execArgs = append(execArgs, "-oControlMaster=auto")
execArgs = append(execArgs, "-oControlPersist=1s")
execArgs = append(execArgs, "-oConnectTimeout=2")
execArgs = append(execArgs, fmt.Sprintf("-oControlPath=%v", o.controlPath))
}
execArgs = append(execArgs, "-o")
execArgs = append(execArgs, proxyCommand)
// add in the host the connect to and the command to run when connected
// The -tt flag is used to force PTY allocation. It's often used by
// applications like Ansible.
if o.forcePTY {
execArgs = append(execArgs, "-tt")
}
// Connect to node on the passed in port.
execArgs = append(execArgs, fmt.Sprintf("-p %v", o.nodePort))
// Build proxy command.
proxyCommand := []string{"ssh"}
proxyCommand = append(proxyCommand, "-oStrictHostKeyChecking=no")
proxyCommand = append(proxyCommand, "-oUserKnownHostsFile=/dev/null")
if o.forwardAgent {
proxyCommand = append(proxyCommand, "-oForwardAgent=yes")
}
proxyCommand = append(proxyCommand, fmt.Sprintf("-p %v", o.proxyPort))
proxyCommand = append(proxyCommand, `%r@localhost -s proxy:%h:%p`)
// Add in ProxyCommand option, needed for all Teleport connections.
execArgs = append(execArgs, fmt.Sprintf("-oProxyCommand=%v", strings.Join(proxyCommand, " ")))
// Add in the host to connect to and the command to run when connected.
execArgs = append(execArgs, Host)
execArgs = append(execArgs, command)
execArgs = append(execArgs, o.command)
// find the ssh binary
// Find the OpenSSH binary.
sshpath, err := exec.LookPath("ssh")
if err != nil {
return nil, trace.Wrap(err)
}
// create exec command and tell it where to find the ssh agent
// Create an exec.Command and tell it where to find the SSH agent.
cmd, err := exec.Command(sshpath, execArgs...), nil
if err != nil {
return nil, trace.Wrap(err)
}
cmd.Env = []string{fmt.Sprintf("SSH_AUTH_SOCK=%v", socketPath)}
cmd.Env = []string{fmt.Sprintf("SSH_AUTH_SOCK=%v", o.socketPath)}
return cmd, nil
}
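For reference, the command line assembled above comes out roughly like the following self-contained sketch; the ports, host, and command are hypothetical stand-ins for o.proxyPort, o.nodePort, Host, and o.command:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Inner command OpenSSH runs (via ProxyCommand) to reach the Teleport
	// proxy subsystem.
	proxyCommand := []string{
		"ssh",
		"-oStrictHostKeyChecking=no",
		"-oUserKnownHostsFile=/dev/null",
		"-oForwardAgent=yes", // only present when forwardAgent is set
		"-p 3023",            // hypothetical proxy port
		`%r@localhost -s proxy:%h:%p`,
	}

	// Outer arguments passed to the external ssh binary.
	execArgs := []string{
		"-oStrictHostKeyChecking=no",
		"-oUserKnownHostsFile=/dev/null",
		"-p 3022", // hypothetical node port
		fmt.Sprintf("-oProxyCommand=%v", strings.Join(proxyCommand, " ")),
		"localhost",  // stand-in for Host
		"echo hello", // stand-in for o.command
	}
	fmt.Println("ssh", strings.Join(execArgs, " "))
}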


@@ -1510,7 +1510,7 @@ func (s *IntSuite) TestDiscovery(c *check.C) {
// TestExternalClient tests if we can connect to a node in a Teleport
// cluster. Both normal and recording proxies are tested.
func (s *IntSuite) TestExternalClient(c *check.C) {
// only run this test if we have access to the external ssh binary
// Only run this test if we have access to the external SSH binary.
_, err := exec.LookPath("ssh")
if err != nil {
c.Skip("Skipping TestExternalClient, no external SSH binary found.")
@@ -1524,46 +1524,46 @@ func (s *IntSuite) TestExternalClient(c *check.C) {
outError bool
outExecOutput string
}{
// record at the node, forward agent. will still work even though the agent
// Record at the node, forward agent. Will still work even though the agent
// will be rejected by the proxy (agent forwarding request rejection is a
// soft failure)
// soft failure).
{
services.RecordAtNode,
true,
"echo hello",
false,
"hello",
inRecordLocation: services.RecordAtNode,
inForwardAgent: true,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// record at the node, don't forward agent, will work. this is the normal
// teleport mode of operation.
// Record at the node, don't forward agent, will work. This is the normal
// Teleport mode of operation.
{
services.RecordAtNode,
false,
"echo hello",
false,
"hello",
inRecordLocation: services.RecordAtNode,
inForwardAgent: false,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// record at the proxy, forward agent. will work.
// Record at the proxy, forward agent. Will work.
{
services.RecordAtProxy,
true,
"echo hello",
false,
"hello",
inRecordLocation: services.RecordAtProxy,
inForwardAgent: true,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// record at the proxy, don't forward agent, request will fail because
// Record at the proxy, don't forward agent, request will fail because
// recording proxy requires an agent.
{
services.RecordAtProxy,
false,
"echo hello",
true,
"",
inRecordLocation: services.RecordAtProxy,
inForwardAgent: false,
inCommand: "echo hello",
outError: true,
outExecOutput: "",
},
}
for _, tt := range tests {
// create a teleport instance with auth, proxy, and node
// Create a Teleport instance with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
@@ -1586,7 +1586,7 @@ func (s *IntSuite) TestExternalClient(c *check.C) {
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// start (and defer close) a agent that runs during this integration test
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(
s.me,
t.Secrets.Users[s.me.Username].Key.Priv,
@@ -1594,26 +1594,126 @@ func (s *IntSuite) TestExternalClient(c *check.C) {
c.Assert(err, check.IsNil)
defer closeAgent(teleAgent, socketDirPath)
// create a *exec.Cmd that will execute the external ssh command
execCmd, err := externalSSHCommand(
tt.inForwardAgent,
socketPath,
t.GetPortProxy(),
t.GetPortSSH(),
tt.inCommand)
// Create a *exec.Cmd that will execute the external SSH command.
execCmd, err := externalSSHCommand(commandOptions{
forwardAgent: tt.inForwardAgent,
socketPath: socketPath,
proxyPort: t.GetPortProxy(),
nodePort: t.GetPortSSH(),
command: tt.inCommand,
})
c.Assert(err, check.IsNil)
// execute ssh command and check the output is what we expect
// Execute SSH command and check the output is what we expect.
output, err := execCmd.Output()
if tt.outError {
c.Assert(err, check.NotNil)
} else {
if err != nil {
// If an *exec.ExitError is returned, parse it and print stderr. If this
// is not done then c.Assert will just print a byte array for the error.
er, ok := err.(*exec.ExitError)
if ok {
c.Fatalf("Unexpected error: %v", string(er.Stderr))
}
}
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(string(output)), check.Equals, tt.outExecOutput)
}
}
}
// TestControlMaster checks if multiple SSH channels can be created over the
// same connection. This is frequently used by tools like Ansible.
func (s *IntSuite) TestControlMaster(c *check.C) {
// Only run this test if we have access to the external SSH binary.
_, err := exec.LookPath("ssh")
if err != nil {
c.Skip("Skipping TestControlMaster, no external SSH binary found.")
return
}
var tests = []struct {
inRecordLocation string
}{
// Run tests when Teleport is recording sessions at the node.
{
inRecordLocation: services.RecordAtNode,
},
// Run tests when Teleport is recording sessions at the proxy.
{
inRecordLocation: services.RecordAtProxy,
},
}
for _, tt := range tests {
controlDir, err := ioutil.TempDir("", "teleport-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(controlDir)
controlPath := filepath.Join(controlDir, "control-path")
// Create a Teleport instance with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(
s.me,
t.Secrets.Users[s.me.Username].Key.Priv,
t.Secrets.Users[s.me.Username].Key.Cert)
c.Assert(err, check.IsNil)
defer closeAgent(teleAgent, socketDirPath)
// Create and run an exec command twice with the passed in ControlPath. This
// will cause re-use of the connection and creation of two sessions within
// the connection.
for i := 0; i < 2; i++ {
execCmd, err := externalSSHCommand(commandOptions{
forcePTY: true,
forwardAgent: true,
controlPath: controlPath,
socketPath: socketPath,
proxyPort: t.GetPortProxy(),
nodePort: t.GetPortSSH(),
command: "echo hello",
})
c.Assert(err, check.IsNil)
// Execute SSH command and check the output is what we expect.
output, err := execCmd.Output()
if err != nil {
// If an *exec.ExitError is returned, parse it and print stderr. If this
// is not done then c.Assert will just print a byte array for the error.
er, ok := err.(*exec.ExitError)
if ok {
c.Fatalf("Unexpected error: %v", string(er.Stderr))
}
}
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(string(output)), check.Equals, "hello")
}
}
}
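As a side note, whether OpenSSH actually kept a multiplexing master alive between the two runs can be probed out of band with ssh's control commands. A hypothetical helper, assuming a reasonably recent OpenSSH and the same control path used in the test above:

package multiplexcheck

import "os/exec"

// masterAlive reports whether an OpenSSH multiplexing master is still
// running at controlPath: "ssh -O check" exits zero when the master is up.
// Both arguments are illustrative.
func masterAlive(controlPath, host string) bool {
	err := exec.Command("ssh", "-S", controlPath, "-O", "check", host).Run()
	return err == nil
}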
// TestProxyHostKeyCheck uses the forwarding proxy to connect to a server that
// presents a host key instead of a certificate in different configurations
// for the host key checking parameter in services.ClusterConfig.


@@ -68,14 +68,30 @@ type Server struct {
id string
// targetConn is the TCP connection to the remote host.
targetConn net.Conn
// clientConn is the client half of the pipe used to connect the client
// and server.
clientConn net.Conn
// serverConn is the server half of the pipe used to connect the client and
// server.
serverConn net.Conn
// sconn is an authenticated SSH connection from the server perspective.
sconn *ssh.ServerConn
// remoteClient exposes an API to SSH functionality like shells, port
// forwarding, subsystems.
remoteClient *ssh.Client
// identityContext holds identity information about the user that has
// authenticated on sconn (like system login, Teleport username, roles).
identityContext srv.IdentityContext
// userAgent is the SSH user agent that was forwarded to the proxy.
userAgent agent.Agent
// userAgentChannel is the channel over which communication with the agent occurs.
userAgentChannel ssh.Channel
// hostCertificate is the SSH host certificate this in-memory server presents
// to the client.
@@ -305,10 +321,10 @@ func (s *Server) Serve() {
s.log.Errorf("Unable to create server connection: %v.", err)
return
}
s.sconn = sconn
// take connection and build identity for the user from it to be passed
// along with context
identityContext, err := s.authHandlers.CreateIdentityContext(sconn)
// Take connection and extract identity information for the user from it.
s.identityContext, err = s.authHandlers.CreateIdentityContext(sconn)
if err != nil {
s.targetConn.Close()
s.clientConn.Close()
@@ -318,12 +334,12 @@ func (s *Server) Serve() {
return
}
// build a remote session to the remote node
// Connect and authenticate to the remote node.
s.log.Debugf("Creating remote connection to %v@%v", sconn.User(), s.clientConn.RemoteAddr().String())
remoteClient, remoteSession, err := s.newRemoteSession(sconn.User())
s.remoteClient, err = s.newRemoteClient(sconn.User())
if err != nil {
// reject the connection with an error so the client doesn't hang then
// close the connection
// Reject the connection with an error so the client doesn't hang then
// close the connection.
s.rejectChannel(chans, err)
sconn.Close()
@@ -335,39 +351,48 @@ func (s *Server) Serve() {
return
}
// create server context for this connection, it's closed when the
// connection is closed
ctx := srv.NewServerContext(s, sconn, identityContext)
ctx.RemoteClient = remoteClient
ctx.RemoteSession = remoteSession
ctx.SetAgent(s.userAgent, s.userAgentChannel)
ctx.AddCloser(sconn)
ctx.AddCloser(s.targetConn)
ctx.AddCloser(s.serverConn)
ctx.AddCloser(s.clientConn)
ctx.AddCloser(remoteSession)
ctx.AddCloser(remoteClient)
s.log.Debugf("Created connection context %v", ctx.ID())
// create a cancelable context and pass it to a keep alive loop. the keep
// Create a cancelable context and pass it to a keep alive loop. The keep
// alive loop will keep pinging the remote server and after it has missed a
// certain number of keep alive requests it will cancel the context which
// will close any listening goroutines.
heartbeatContext, cancel := context.WithCancel(context.Background())
go s.keepAliveLoop(ctx, sconn, cancel)
go s.handleConnection(ctx, heartbeatContext, sconn, chans, reqs)
go s.keepAliveLoop(cancel)
go s.handleConnection(heartbeatContext, chans, reqs)
}
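The keep alive wiring above follows a common Go pattern: a ticker-driven loop counts consecutive missed heartbeats and cancels a shared context, which unblocks every goroutine selecting on its Done channel. A minimal sketch of that pattern, with illustrative names and thresholds rather than the server's exact code:

package keepalive

import (
	"context"
	"time"
)

// run pings until too many consecutive replies are missed, then cancels
// the shared context so readers of ctx.Done() shut down.
func run(ping func() bool, maxMissed int, interval time.Duration, cancel context.CancelFunc) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	missed := 0
	for range ticker.C {
		if ping() {
			missed = 0
			continue
		}
		missed++
		if missed > maxMissed {
			cancel()
			return
		}
	}
}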
// Close will close all underlying connections that the forwarding server holds.
func (s *Server) Close() error {
conns := []io.Closer{
s.sconn,
s.clientConn,
s.serverConn,
s.targetConn,
s.remoteClient,
}
var errs []error
for _, c := range conns {
if c == nil {
continue
}
err := c.Close()
if err != nil {
errs = append(errs, err)
}
}
return trace.NewAggregate(errs...)
}
// newRemoteClient will create and return an *ssh.Client connected to the
// remote host.
func (s *Server) newRemoteSession(systemLogin string) (*ssh.Client, *ssh.Session, error) {
func (s *Server) newRemoteClient(systemLogin string) (*ssh.Client, error) {
// the proxy will use the agent that has been forwarded to it as the auth
// method when connecting to the remote host
if s.userAgent == nil {
return nil, nil, trace.AccessDenied("agent must be forwarded to proxy")
return nil, trace.AccessDenied("agent must be forwarded to proxy")
}
authMethod := ssh.PublicKeysCallback(s.userAgent.Signers)
@@ -393,43 +418,38 @@ func (s *Server) newRemoteSession(systemLogin string) (*ssh.Client, *ssh.Session
dstAddr := s.targetConn.RemoteAddr().String()
client, err := proxy.NewClientConnWithDeadline(s.targetConn, dstAddr, clientConfig)
if err != nil {
return nil, nil, trace.Wrap(err)
return nil, trace.Wrap(err)
}
session, err := client.NewSession()
if err != nil {
return nil, nil, trace.Wrap(err)
}
return client, session, nil
return client, nil
}
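The authentication approach in newRemoteClient, signing with keys held by the forwarded agent rather than a local private key, boils down to the following sketch against golang.org/x/crypto/ssh. The host key callback here is a placeholder; the real server supplies its own host key checking:

package forwardauth

import (
	"net"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// dialWithAgent connects to addr as user, authenticating with whatever
// keys the agent reachable over agentConn holds.
func dialWithAgent(agentConn net.Conn, user, addr string) (*ssh.Client, error) {
	keyring := agent.NewClient(agentConn)
	config := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeysCallback(keyring.Signers)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder only
	}
	return ssh.Dial("tcp", addr, config)
}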
func (s *Server) handleConnection(ctx *srv.ServerContext, heartbeatContext context.Context, sconn *ssh.ServerConn, chans <-chan ssh.NewChannel, reqs <-chan *ssh.Request) {
defer s.log.Debugf("Closing connection context %v and releasing resources.", ctx.ID())
defer ctx.Close()
func (s *Server) handleConnection(heartbeatContext context.Context, chans <-chan ssh.NewChannel, reqs <-chan *ssh.Request) {
defer s.log.Debugf("Closing forwarding server connected to %v and releasing resources.", s.sconn.LocalAddr())
defer s.Close()
for {
select {
// global out-of-band requests
// Global out-of-band requests.
case newRequest := <-reqs:
if newRequest == nil {
return
}
go s.handleGlobalRequest(ctx, newRequest)
// channel requests
go s.handleGlobalRequest(newRequest)
// Channel requests.
case newChannel := <-chans:
if newChannel == nil {
return
}
go s.handleChannel(ctx, sconn, newChannel)
// if the heartbeats failed, we close everything and cleanup
go s.handleChannel(newChannel)
// If the heartbeats failed, close everything and cleanup.
case <-heartbeatContext.Done():
return
}
}
}
func (s *Server) keepAliveLoop(ctx *srv.ServerContext, sconn *ssh.ServerConn, cancel context.CancelFunc) {
func (s *Server) keepAliveLoop(cancel context.CancelFunc) {
var missed int
// tick at 1/3 of the idle timeout duration
@@ -440,8 +460,8 @@ func (s *Server) keepAliveLoop(ctx *srv.ServerContext, sconn *ssh.ServerConn, ca
select {
case <-keepAliveTick.C:
// send a keep alive to the target node and the client to ensure both are alive.
proxyToNodeOk := s.sendKeepAliveWithTimeout(ctx.RemoteClient, defaults.ReadHeadersTimeout)
proxyToClientOk := s.sendKeepAliveWithTimeout(sconn, defaults.ReadHeadersTimeout)
proxyToNodeOk := s.sendKeepAliveWithTimeout(s.remoteClient, defaults.ReadHeadersTimeout)
proxyToClientOk := s.sendKeepAliveWithTimeout(s.sconn, defaults.ReadHeadersTimeout)
if proxyToNodeOk && proxyToClientOk {
missed = 0
continue
@@ -468,30 +488,32 @@ func (s *Server) rejectChannel(chans <-chan ssh.NewChannel, err error) {
}
}
func (s *Server) handleGlobalRequest(ctx *srv.ServerContext, req *ssh.Request) {
ok, err := ctx.RemoteSession.SendRequest(req.Type, req.WantReply, req.Payload)
func (s *Server) handleGlobalRequest(req *ssh.Request) {
ok, payload, err := s.remoteClient.SendRequest(req.Type, req.WantReply, req.Payload)
if err != nil {
s.log.Warnf("Failed to forward global request %v: %v", req.Type, err)
return
}
if req.WantReply {
err = req.Reply(ok, nil)
err = req.Reply(ok, payload)
if err != nil {
s.log.Warnf("Failed to reply to global request: %v: %v", req.Type, err)
}
}
}
func (s *Server) handleChannel(ctx *srv.ServerContext, sconn *ssh.ServerConn, nch ssh.NewChannel) {
func (s *Server) handleChannel(nch ssh.NewChannel) {
channelType := nch.ChannelType()
switch channelType {
// a client requested the terminal size to be sent along with every
// session message (Teleport-specific SSH channel for web-based terminals)
// A client requested the terminal size to be sent along with every
// session message (Teleport-specific SSH channel for web-based terminals).
case "x-teleport-request-resize-events":
ch, _, _ := nch.Accept()
go s.handleTerminalResize(sconn, ch)
// interactive sessions
go s.handleTerminalResize(ch)
// Channels of type "session" handle requests that are involved in running
// commands on a server.
case "session":
ch, requests, err := nch.Accept()
if err != nil {
@@ -499,8 +521,8 @@ func (s *Server) handleChannel(ctx *srv.ServerContext, sconn *ssh.ServerConn, nc
nch.Reject(ssh.ConnectionFailed, fmt.Sprintf("unable to accept channel: %v", err))
return
}
go s.handleSessionRequests(ctx, sconn, ch, requests)
// port forwarding
go s.handleSessionRequests(ch, requests)
// Channels of type "direct-tcpip" handle requests for port forwarding.
case "direct-tcpip":
req, err := sshutils.ParseDirectTCPIPReq(nch.ExtraData())
if err != nil {
@@ -514,42 +536,48 @@ func (s *Server) handleChannel(ctx *srv.ServerContext, sconn *ssh.ServerConn, nc
nch.Reject(ssh.ConnectionFailed, fmt.Sprintf("unable to accept channel: %v", err))
return
}
go s.handleDirectTCPIPRequest(ctx, sconn, ch, req)
go s.handleDirectTCPIPRequest(ch, req)
default:
nch.Reject(ssh.UnknownChannelType, fmt.Sprintf("unknown channel type: %v", channelType))
}
}
// handleDirectTCPIPRequest handles port forwarding requests.
func (s *Server) handleDirectTCPIPRequest(ctx *srv.ServerContext, sconn *ssh.ServerConn, ch ssh.Channel, req *sshutils.DirectTCPIPReq) {
func (s *Server) handleDirectTCPIPRequest(ch ssh.Channel, req *sshutils.DirectTCPIPReq) {
srcAddr := fmt.Sprintf("%v:%d", req.Orig, req.OrigPort)
dstAddr := fmt.Sprintf("%v:%d", req.Host, req.Port)
defer s.log.Debugf("Completing direct-tcpip request from %v to %v.", srcAddr, dstAddr)
// Create context for this channel. This context will be closed when
// forwarding is complete.
ctx := srv.NewServerContext(s, s.sconn, s.identityContext)
ctx.RemoteClient = s.remoteClient
defer ctx.Close()
// check if the role allows port forwarding for this user
// Check if the role allows port forwarding for this user.
err := s.authHandlers.CheckPortForward(dstAddr, ctx)
if err != nil {
ch.Stderr().Write([]byte(err.Error()))
return
}
s.log.Debugf("Opening direct-tcpip channel from %v to %v.", srcAddr, dstAddr)
s.log.Debugf("Opening direct-tcpip channel from %v to %v in context %v.", srcAddr, dstAddr, ctx.ID())
defer s.log.Debugf("Completing direct-tcpip request from %v to %v in context %v.", srcAddr, dstAddr, ctx.ID())
conn, err := ctx.RemoteClient.Dial("tcp", dstAddr)
// Create "direct-tcpip" channel from the remote host to the target host.
conn, err := s.remoteClient.Dial("tcp", dstAddr)
if err != nil {
ctx.Infof("Failed to connect to: %v: %v", dstAddr, err)
return
}
defer conn.Close()
// emit a port forwarding audit event
// Emit a port forwarding audit event.
s.EmitAuditEvent(events.PortForwardEvent, events.EventFields{
events.PortForwardAddr: dstAddr,
events.PortForwardSuccess: true,
events.EventLogin: ctx.Identity.Login,
events.LocalAddr: sconn.LocalAddr().String(),
events.RemoteAddr: sconn.RemoteAddr().String(),
events.EventLogin: s.identityContext.Login,
events.LocalAddr: s.sconn.LocalAddr().String(),
events.RemoteAddr: s.sconn.RemoteAddr().String(),
})
wg := &sync.WaitGroup{}
@@ -574,10 +602,10 @@ func (s *Server) handleDirectTCPIPRequest(ctx *srv.ServerContext, sconn *ssh.Ser
// by creating this new SSH channel, to start injecting the terminal size
// into every SSH write back to it.
//
// this is the only way to make web-based terminal UI not break apart
// when window changes its size
func (s *Server) handleTerminalResize(sconn *ssh.ServerConn, ch ssh.Channel) {
err := s.sessionRegistry.PushTermSizeToParty(sconn, ch)
// This is the only way to make web-based terminal UI not break apart
// when window changes its size.
func (s *Server) handleTerminalResize(channel ssh.Channel) {
err := s.sessionRegistry.PushTermSizeToParty(s.sconn, channel)
if err != nil {
s.log.Warnf("Unable to push terminal size to party: %v", err)
}
@@ -586,20 +614,33 @@ func (s *Server) handleTerminalResize(sconn *ssh.ServerConn, ch ssh.Channel) {
// handleSessionRequests handles out of band session requests once the session
// channel has been created. This function's loop handles all the "exec",
// "subsystem" and "shell" requests.
func (s *Server) handleSessionRequests(ctx *srv.ServerContext, sconn *ssh.ServerConn, ch ssh.Channel, in <-chan *ssh.Request) {
defer s.log.Debugf("Closing session request to %v.", sconn.RemoteAddr())
defer ch.Close()
func (s *Server) handleSessionRequests(ch ssh.Channel, in <-chan *ssh.Request) {
// Create context for this channel. This context will be closed when the
// session request is complete.
ctx := srv.NewServerContext(s, s.sconn, s.identityContext)
ctx.RemoteClient = s.remoteClient
ctx.AddCloser(ch)
defer ctx.Close()
s.log.Debugf("Opening session request to %v.", sconn.RemoteAddr())
// Create a "session" channel on the remote host.
remoteSession, err := s.remoteClient.NewSession()
if err != nil {
ch.Stderr().Write([]byte(err.Error()))
return
}
ctx.RemoteSession = remoteSession
s.log.Debugf("Opening session request to %v in context %v.", s.sconn.RemoteAddr(), ctx.ID())
defer s.log.Debugf("Closing session request to %v in context %v.", s.sconn.RemoteAddr(), ctx.ID())
for {
// update ctx with the session ID:
// Update the context with the session ID.
err := ctx.CreateOrJoinSession(s.sessionRegistry)
if err != nil {
errorMessage := fmt.Sprintf("unable to update context: %v", err)
ctx.Errorf("%v", errorMessage)
// write the error to channel and close it
// Write the error to channel and close it.
ch.Stderr().Write([]byte(errorMessage))
_, err := ch.SendRequest("exit-status", false, ssh.Marshal(struct{ C uint32 }{C: teleport.RemoteCommandFailure}))
if err != nil {
@@ -610,15 +651,13 @@ func (s *Server) handleSessionRequests(ctx *srv.ServerContext, sconn *ssh.Server
select {
case result := <-ctx.SubsystemResultCh:
// this means that subsystem has finished executing and
// want us to close session and the channel
// Subsystem has finished executing, close the channel and session.
ctx.Debugf("Subsystem execution result: %v", result.Err)
return
case req := <-in:
if req == nil {
// this will happen when the client closes/drops the connection
ctx.Debugf("Client %v disconnected", sconn.RemoteAddr())
// The client has closed or dropped the connection.
ctx.Debugf("Client %v disconnected", s.sconn.RemoteAddr())
return
}
if err := s.dispatch(ch, req, ctx); err != nil {
@@ -631,8 +670,8 @@ func (s *Server) handleSessionRequests(ctx *srv.ServerContext, sconn *ssh.Server
case result := <-ctx.ExecResultCh:
ctx.Debugf("Exec request (%q) complete: %v", result.Command, result.Code)
// this means that exec process has finished and delivered the execution result,
// we send it back and close the session
// The exec process has finished and delivered the execution result, send
// the result back to the client, and close the session and channel.
_, err := ch.SendRequest("exit-status", false, ssh.Marshal(struct{ C uint32 }{C: uint32(result.Code)}))
if err != nil {
ctx.Infof("Failed to send exit status for %v: %v", result.Command, err)
@@ -675,19 +714,19 @@ func (s *Server) dispatch(ch ssh.Channel, req *ssh.Request, ctx *srv.ServerConte
}
func (s *Server) handleAgentForward(ch ssh.Channel, req *ssh.Request, ctx *srv.ServerContext) error {
// check if the user's RBAC role allows agent forwarding
// Check if the user's RBAC role allows agent forwarding.
err := s.authHandlers.CheckAgentForward(ctx)
if err != nil {
return trace.Wrap(err)
}
// route authentication requests to the agent that was forwarded to the proxy
err = agent.ForwardToAgent(ctx.RemoteClient, ctx.GetAgent())
// Route authentication requests to the agent that was forwarded to the proxy.
err = agent.ForwardToAgent(ctx.RemoteClient, s.userAgent)
if err != nil {
return trace.Wrap(err)
}
// make an "auth-agent-req@openssh.com" request on the target node
// Make an "auth-agent-req@openssh.com" request on the remote host.
err = agent.RequestAgentForwarding(ctx.RemoteSession)
if err != nil {
return trace.Wrap(err)