2016-02-07 21:33:33 +00:00
|
|
|
/*
|
2017-02-14 02:29:27 +00:00
|
|
|
Copyright 2015 Gravitational, Inc.
|
2016-02-07 21:33:33 +00:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
2016-03-02 23:07:59 +00:00
|
|
|
|
2017-01-11 08:15:24 +00:00
|
|
|
// Package 'config' provides facilities for configuring Teleport daemons
|
|
|
|
// including
|
|
|
|
// - parsing YAML configuration
|
|
|
|
// - parsing CLI flags
|
2016-03-29 20:36:52 +00:00
|
|
|
package config
|
2016-02-07 21:33:33 +00:00
|
|
|
|
|
|
|
import (
|
2016-06-07 22:02:59 +00:00
|
|
|
"bufio"
|
2019-08-29 23:16:03 +00:00
|
|
|
"io"
|
2016-02-09 01:25:20 +00:00
|
|
|
"io/ioutil"
|
2016-02-07 21:33:33 +00:00
|
|
|
"net"
|
2016-06-07 22:02:59 +00:00
|
|
|
"net/url"
|
2016-02-07 21:33:33 +00:00
|
|
|
"os"
|
2017-11-22 01:35:58 +00:00
|
|
|
"path/filepath"
|
2016-02-09 04:55:13 +00:00
|
|
|
"strings"
|
2016-03-15 06:00:29 +00:00
|
|
|
"time"
|
|
|
|
"unicode"
|
2016-02-09 21:46:34 +00:00
|
|
|
|
2016-06-02 01:56:48 +00:00
|
|
|
"golang.org/x/crypto/ssh"
|
|
|
|
|
2017-02-23 23:05:23 +00:00
|
|
|
"github.com/gravitational/teleport"
|
2017-09-10 03:59:38 +00:00
|
|
|
"github.com/gravitational/teleport/lib"
|
2017-03-26 19:58:01 +00:00
|
|
|
"github.com/gravitational/teleport/lib/backend"
|
Events and GRPC API
This commit introduces several key changes to
Teleport backend and API infrastructure
in order to achieve scalability improvements
on 10K+ node deployments.
Events and plain keyspace
--------------------------
New backend interface supports events,
pagination and range queries
and moves away from buckets to
plain keyspace, what better aligns
with DynamoDB and Etcd featuring similar
interfaces.
All backend implementations are
exposing Events API, allowing
multiple subscribers to consume the same
event stream and avoid polling database.
Replacing BoltDB, Dir with SQLite
-------------------------------
BoltDB backend does not support
having two processes access the database at the
same time. This prevented Teleport
using BoltDB backend to be live reloaded.
SQLite supports reads/writes by multiple
processes and makes Dir backend obsolete
as SQLite is more efficient on larger collections,
supports transactions and can detect data
corruption.
Teleport automatically migrates data from
Bolt and Dir backends into SQLite.
GRPC API and protobuf resources
-------------------------------
GRPC API has been introduced for
the auth server. The auth server now serves both GRPC
and JSON-HTTP API on the same TLS socket and uses
the same client certificate authentication.
All future API methods should use GRPC and HTTP-JSON
API is considered obsolete.
In addition to that some resources like
Server and CertificateAuthority are now
generated from protobuf service specifications in
a way that is fully backward compatible with
original JSON spec and schema, so the same resource
can be encoded and decoded from JSON, YAML
and protobuf.
All models should be refactored
into new proto specification over time.
Streaming presence service
--------------------------
In order to cut bandwidth, nodes
are sending full updates only when changes
to labels or spec have occurred, otherwise
new light-weight GRPC keep alive updates are sent
over to the presence service, reducing
bandwidth usage on multi-node deployments.
In addition to that nodes are no longer polling
auth server for certificate authority rotation
updates, instead they subscribe to event updates
to detect updates as soon as they happen.
This is a new API, so the errors are inevitable,
that's why polling is still done, but
on a way slower rate.
2018-11-07 23:33:38 +00:00
|
|
|
"github.com/gravitational/teleport/lib/backend/lite"
|
2016-03-15 00:25:00 +00:00
|
|
|
"github.com/gravitational/teleport/lib/client"
|
2016-02-09 21:46:34 +00:00
|
|
|
"github.com/gravitational/teleport/lib/defaults"
|
2016-02-21 01:17:09 +00:00
|
|
|
"github.com/gravitational/teleport/lib/limiter"
|
2018-02-24 01:23:09 +00:00
|
|
|
"github.com/gravitational/teleport/lib/pam"
|
2016-02-09 21:46:34 +00:00
|
|
|
"github.com/gravitational/teleport/lib/service"
|
2016-02-21 01:17:09 +00:00
|
|
|
"github.com/gravitational/teleport/lib/services"
|
2016-02-09 21:46:34 +00:00
|
|
|
"github.com/gravitational/teleport/lib/utils"
|
2016-04-02 20:14:15 +00:00
|
|
|
"github.com/gravitational/trace"
|
2016-03-11 01:03:01 +00:00
|
|
|
|
2017-08-22 22:30:30 +00:00
|
|
|
log "github.com/sirupsen/logrus"
|
2019-08-29 23:16:03 +00:00
|
|
|
kyaml "k8s.io/apimachinery/pkg/util/yaml"
|
2016-02-07 21:33:33 +00:00
|
|
|
)
|
|
|
|
|
2016-02-16 21:18:58 +00:00
|
|
|
// CommandLineFlags stores command line flag values, it's a much simplified subset
// of Teleport configuration (which is fully expressed via YAML config file)
type CommandLineFlags struct {
	// NodeName is the name of this host, set by the --name flag.
	NodeName string
	// AuthServerAddr is the list of auth server addresses to connect to,
	// set by the --auth-server flag.
	AuthServerAddr []string
	// AuthToken is the token used to join a cluster, set by the --token flag.
	AuthToken string
	// CAPin is the hash of the SKPI of the root CA. Used to verify the cluster
	// being joined is the one expected.
	CAPin string
	// ListenIP is the IP address to bind listeners to, set by the --listen-ip flag.
	ListenIP net.IP
	// AdvertiseIP is the address advertised to the cluster,
	// set by the --advertise-ip flag.
	AdvertiseIP string
	// ConfigFile is the path to the YAML configuration file,
	// set by the --config flag.
	ConfigFile string
	// Bootstrap flag contains a YAML file that defines a set of resources to bootstrap
	// a cluster.
	BootstrapFile string
	// ConfigString is a base64 encoded configuration string
	// set by --config-string or TELEPORT_CONFIG environment variable
	ConfigString string
	// Roles is a comma-separated list of roles to start (proxy, auth, node),
	// set by the --roles flag.
	Roles string
	// Debug enables verbose logging, set by the -d flag.
	Debug bool

	// DisableTLS turns off TLS on the proxy web listener,
	// set by the --insecure-no-tls flag.
	DisableTLS bool

	// Labels is a comma-separated list of static labels for this node,
	// set by the --labels flag.
	Labels string
	// PIDFile is the full path to a PID file to write on startup,
	// set by the --pid-file flag.
	PIDFile string
	// DiagnosticAddr is listen address for diagnostic endpoint
	DiagnosticAddr string
	// PermitUserEnvironment enables reading of ~/.tsh/environment
	// when creating a new session.
	PermitUserEnvironment bool

	// Insecure mode is controlled by --insecure flag and in this mode
	// Teleport won't check certificates when connecting to trusted clusters
	// It's useful for learning Teleport (following quick starts, etc).
	InsecureMode bool

	// FIPS mode means Teleport starts in a FedRAMP/FIPS 140-2 compliant
	// configuration.
	FIPS bool
}
|
|
|
|
|
2016-02-16 22:18:45 +00:00
|
|
|
// readConfigFile reads /etc/teleport.yaml (or whatever is passed via --config flag)
|
|
|
|
// and overrides values in 'cfg' structure
|
2016-06-01 00:31:33 +00:00
|
|
|
func ReadConfigFile(cliConfigPath string) (*FileConfig, error) {
|
2016-02-21 01:17:09 +00:00
|
|
|
configFilePath := defaults.ConfigFilePath
|
|
|
|
// --config tells us to use a specific conf. file:
|
|
|
|
if cliConfigPath != "" {
|
2016-02-23 00:21:51 +00:00
|
|
|
configFilePath = cliConfigPath
|
2016-02-16 22:18:45 +00:00
|
|
|
if !fileExists(configFilePath) {
|
2016-02-21 01:17:09 +00:00
|
|
|
return nil, trace.Errorf("file not found: %s", configFilePath)
|
2016-02-16 22:18:45 +00:00
|
|
|
}
|
|
|
|
}
|
2016-02-21 01:17:09 +00:00
|
|
|
// default config doesn't exist? quietly return:
|
|
|
|
if !fileExists(configFilePath) {
|
|
|
|
log.Info("not using a config file")
|
|
|
|
return nil, nil
|
|
|
|
}
|
2016-02-23 00:21:51 +00:00
|
|
|
log.Debug("reading config file: ", configFilePath)
|
2016-03-29 20:36:52 +00:00
|
|
|
return ReadFromFile(configFilePath)
|
2016-02-17 02:19:21 +00:00
|
|
|
}
|
|
|
|
|
2019-08-29 23:16:03 +00:00
|
|
|
// ReadResources loads a set of resources from a file.
|
|
|
|
func ReadResources(filePath string) ([]services.Resource, error) {
|
|
|
|
reader, err := utils.OpenFile(filePath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
Fix remaining staticcheck findings in lib/...
Fixed findings:
```
lib/sshutils/server_test.go:163:2: SA4006: this value of `clt` is never used (staticcheck)
clt, err := ssh.Dial("tcp", srv.Addr(), &cc)
^
lib/sshutils/server_test.go:91:3: SA5001: should check returned error before deferring ch.Close() (staticcheck)
defer ch.Close()
^
lib/shell/shell_test.go:33:2: SA4006: this value of `shell` is never used (staticcheck)
shell, err = GetLoginShell("non-existent-user")
^
lib/cgroup/cgroup_test.go:111:2: SA9003: empty branch (staticcheck)
if err != nil {
^
lib/cgroup/cgroup_test.go:119:2: SA5001: should check returned error before deferring service.Close() (staticcheck)
defer service.Close()
^
lib/client/keystore_test.go:138:2: SA4006: this value of `keyCopy` is never used (staticcheck)
keyCopy, err = s.store.GetKey("host.a", "bob")
^
lib/client/api.go:1604:3: SA4004: the surrounding loop is unconditionally terminated (staticcheck)
return makeProxyClient(sshClient, m), nil
^
lib/backend/test/suite.go:156:2: SA4006: this value of `err` is never used (staticcheck)
result, err = s.B.GetRange(ctx, prefix("/prefix/c/c1"), backend.RangeEnd(prefix("/prefix/c/cz")), backend.NoLimit)
^
lib/utils/timeout_test.go:84:2: SA1019: t.Dial is deprecated: Use DialContext instead, which allows the transport to cancel dials as soon as they are no longer needed. If both are set, DialContext takes priority. (staticcheck)
t.Dial = func(network string, addr string) (net.Conn, error) {
^
lib/utils/websocketwriter.go:83:3: SA4006: this value of `err` is never used (staticcheck)
utf8, err = w.encoder.String(string(data))
^
lib/utils/loadbalancer_test.go:134:2: SA4006: this value of `out` is never used (staticcheck)
out, err = Roundtrip(frontend.String())
^
lib/utils/loadbalancer_test.go:209:2: SA4006: this value of `out` is never used (staticcheck)
out, err = RoundtripWithConn(conn)
^
lib/srv/forward/sshserver.go:582:3: SA4004: the surrounding loop is unconditionally terminated (staticcheck)
return
^
lib/service/service.go:347:4: SA4006: this value of `err` is never used (staticcheck)
i, err = auth.GenerateIdentity(process.localAuth, id, principals, dnsNames)
^
lib/service/signals.go:60:3: SA1016: syscall.SIGKILL cannot be trapped (did you mean syscall.SIGTERM?) (staticcheck)
syscall.SIGKILL, // fast shutdown
^
lib/config/configuration_test.go:184:2: SA4006: this value of `conf` is never used (staticcheck)
conf, err = ReadFromFile(s.configFileBadContent)
^
lib/config/configuration.go:129:2: SA5001: should check returned error before deferring reader.Close() (staticcheck)
defer reader.Close()
^
lib/kube/kubeconfig/kubeconfig_test.go:227:2: SA4006: this value of `err` is never used (staticcheck)
tlsCert, err := ca.GenerateCertificate(tlsca.CertificateRequest{
^
lib/srv/sess.go:720:3: SA4006: this value of `err` is never used (staticcheck)
result, err := s.term.Wait()
^
lib/multiplexer/multiplexer_test.go:169:11: SA1006: printf-style function with dynamic format string and no further arguments should use print-style function instead (staticcheck)
_, err = fmt.Fprintf(conn, proxyLine.String())
^
lib/multiplexer/multiplexer_test.go:221:11: SA1006: printf-style function with dynamic format string and no further arguments should use print-style function instead (staticcheck)
_, err = fmt.Fprintf(conn, proxyLine.String())
^
```
2020-04-27 21:32:59 +00:00
|
|
|
defer reader.Close()
|
2019-08-29 23:16:03 +00:00
|
|
|
decoder := kyaml.NewYAMLOrJSONDecoder(reader, defaults.LookaheadBufSize)
|
|
|
|
var resources []services.Resource
|
|
|
|
for {
|
|
|
|
var raw services.UnknownResource
|
|
|
|
err := decoder.Decode(&raw)
|
|
|
|
if err != nil {
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
rsc, err := services.UnmarshalResource(raw.Kind, raw.Raw)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
resources = append(resources, rsc)
|
|
|
|
}
|
|
|
|
return resources, nil
|
|
|
|
}
|
|
|
|
|
2017-02-14 02:29:27 +00:00
|
|
|
// ApplyFileConfig applies configuration from a YAML file to Teleport
|
2016-02-17 02:19:21 +00:00
|
|
|
// runtime config
|
2016-03-29 20:48:06 +00:00
|
|
|
func ApplyFileConfig(fc *FileConfig, cfg *service.Config) error {
|
2017-02-22 21:23:01 +00:00
|
|
|
var err error
|
|
|
|
|
2016-02-17 03:24:51 +00:00
|
|
|
// no config file? no problem
|
|
|
|
if fc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2016-02-17 02:19:21 +00:00
|
|
|
// merge file-based config with defaults in 'cfg'
|
|
|
|
if fc.Auth.Disabled() {
|
|
|
|
cfg.Auth.Enabled = false
|
|
|
|
}
|
|
|
|
if fc.SSH.Disabled() {
|
|
|
|
cfg.SSH.Enabled = false
|
2016-02-16 22:18:45 +00:00
|
|
|
}
|
2016-02-17 02:19:21 +00:00
|
|
|
if fc.Proxy.Disabled() {
|
|
|
|
cfg.Proxy.Enabled = false
|
|
|
|
}
|
|
|
|
applyString(fc.NodeName, &cfg.Hostname)
|
|
|
|
|
2016-03-12 04:09:40 +00:00
|
|
|
// apply "advertise_ip" setting:
|
|
|
|
advertiseIP := fc.AdvertiseIP
|
2018-05-02 22:45:31 +00:00
|
|
|
if advertiseIP != "" {
|
|
|
|
if _, _, err := utils.ParseAdvertiseAddr(advertiseIP); err != nil {
|
2016-03-12 04:09:40 +00:00
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.AdvertiseIP = advertiseIP
|
|
|
|
}
|
2016-04-02 01:03:57 +00:00
|
|
|
cfg.PIDFile = fc.PIDFile
|
2016-03-12 04:09:40 +00:00
|
|
|
|
2016-02-17 02:19:21 +00:00
|
|
|
// config file has auth servers in there?
|
2016-03-11 01:03:01 +00:00
|
|
|
if len(fc.AuthServers) > 0 {
|
2016-03-28 19:58:34 +00:00
|
|
|
cfg.AuthServers = make([]utils.NetAddr, 0, len(fc.AuthServers))
|
2016-03-11 01:03:01 +00:00
|
|
|
for _, as := range fc.AuthServers {
|
2016-06-12 23:24:38 +00:00
|
|
|
addr, err := utils.ParseHostPortAddr(as, defaults.AuthListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
2016-02-17 02:19:21 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Errorf("cannot parse auth server address: '%v'", as)
|
|
|
|
}
|
2016-10-26 21:14:59 +00:00
|
|
|
cfg.AuthServers = append(cfg.AuthServers, *addr)
|
2016-02-17 02:19:21 +00:00
|
|
|
}
|
|
|
|
}
|
2019-07-17 19:51:18 +00:00
|
|
|
if _, err := cfg.ApplyToken(fc.AuthToken); err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2016-12-08 03:37:22 +00:00
|
|
|
|
2016-06-01 00:31:33 +00:00
|
|
|
if fc.Global.DataDir != "" {
|
|
|
|
cfg.DataDir = fc.Global.DataDir
|
2017-01-16 00:27:19 +00:00
|
|
|
cfg.Auth.StorageConfig.Params["path"] = cfg.DataDir
|
2016-02-17 03:24:51 +00:00
|
|
|
}
|
|
|
|
|
2019-07-02 21:35:17 +00:00
|
|
|
// If a backend is specified, override the defaults.
|
2017-01-16 00:27:19 +00:00
|
|
|
if fc.Storage.Type != "" {
|
2019-07-02 21:35:17 +00:00
|
|
|
// If the alternative name "dir" is given, update it to "lite".
|
|
|
|
if fc.Storage.Type == lite.AlternativeName {
|
|
|
|
fc.Storage.Type = lite.GetName()
|
|
|
|
}
|
|
|
|
|
2017-01-16 00:27:19 +00:00
|
|
|
cfg.Auth.StorageConfig = fc.Storage
|
2018-05-03 17:58:22 +00:00
|
|
|
// backend is specified, but no path is set, set a reasonable default
|
|
|
|
_, pathSet := cfg.Auth.StorageConfig.Params[defaults.BackendPath]
|
2019-07-02 21:35:17 +00:00
|
|
|
if cfg.Auth.StorageConfig.Type == lite.GetName() && !pathSet {
|
2018-05-03 17:58:22 +00:00
|
|
|
if cfg.Auth.StorageConfig.Params == nil {
|
|
|
|
cfg.Auth.StorageConfig.Params = make(backend.Params)
|
|
|
|
}
|
|
|
|
cfg.Auth.StorageConfig.Params[defaults.BackendPath] = filepath.Join(cfg.DataDir, defaults.BackendDir)
|
|
|
|
}
|
|
|
|
} else {
|
2019-07-02 21:35:17 +00:00
|
|
|
// Set a reasonable default.
|
|
|
|
cfg.Auth.StorageConfig.Params[defaults.BackendPath] = filepath.Join(cfg.DataDir, defaults.BackendDir)
|
2017-01-16 00:27:19 +00:00
|
|
|
}
|
2017-01-13 00:04:00 +00:00
|
|
|
|
2016-02-17 03:24:51 +00:00
|
|
|
// apply logger settings
|
|
|
|
switch fc.Logger.Output {
|
|
|
|
case "":
|
|
|
|
break // not set
|
|
|
|
case "stderr", "error", "2":
|
|
|
|
log.SetOutput(os.Stderr)
|
|
|
|
case "stdout", "out", "1":
|
|
|
|
log.SetOutput(os.Stdout)
|
2018-04-03 22:41:12 +00:00
|
|
|
case teleport.Syslog:
|
2019-02-10 01:43:59 +00:00
|
|
|
err := utils.SwitchLoggingtoSyslog()
|
|
|
|
if err != nil {
|
|
|
|
// this error will go to stderr
|
|
|
|
log.Errorf("Failed to switch logging to syslog: %v.", err)
|
|
|
|
}
|
2016-02-17 03:24:51 +00:00
|
|
|
default:
|
|
|
|
// assume it's a file path:
|
|
|
|
logFile, err := os.Create(fc.Logger.Output)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err, "failed to create the log file")
|
|
|
|
}
|
|
|
|
log.SetOutput(logFile)
|
|
|
|
}
|
|
|
|
switch strings.ToLower(fc.Logger.Severity) {
|
|
|
|
case "":
|
|
|
|
break // not set
|
|
|
|
case "info":
|
|
|
|
log.SetLevel(log.InfoLevel)
|
|
|
|
case "err", "error":
|
|
|
|
log.SetLevel(log.ErrorLevel)
|
2019-02-10 01:43:59 +00:00
|
|
|
case teleport.DebugLevel:
|
2016-02-17 03:24:51 +00:00
|
|
|
log.SetLevel(log.DebugLevel)
|
|
|
|
case "warn", "warning":
|
|
|
|
log.SetLevel(log.WarnLevel)
|
|
|
|
default:
|
2017-04-07 23:51:31 +00:00
|
|
|
return trace.BadParameter("unsupported logger severity: '%v'", fc.Logger.Severity)
|
2016-02-17 03:24:51 +00:00
|
|
|
}
|
2017-04-07 23:51:31 +00:00
|
|
|
// apply cache policy for node and proxy
|
|
|
|
cachePolicy, err := fc.CachePolicy.Parse()
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.CachePolicy = *cachePolicy
|
|
|
|
|
2018-06-08 23:50:43 +00:00
|
|
|
// Apply (TLS) cipher suites and (SSH) ciphers, KEX algorithms, and MAC
|
|
|
|
// algorithms.
|
|
|
|
if len(fc.CipherSuites) > 0 {
|
|
|
|
cipherSuites, err := utils.CipherSuiteMapping(fc.CipherSuites)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.CipherSuites = cipherSuites
|
|
|
|
}
|
2017-06-10 02:32:31 +00:00
|
|
|
if fc.Ciphers != nil {
|
|
|
|
cfg.Ciphers = fc.Ciphers
|
|
|
|
}
|
|
|
|
if fc.KEXAlgorithms != nil {
|
|
|
|
cfg.KEXAlgorithms = fc.KEXAlgorithms
|
|
|
|
}
|
|
|
|
if fc.MACAlgorithms != nil {
|
|
|
|
cfg.MACAlgorithms = fc.MACAlgorithms
|
|
|
|
}
|
|
|
|
|
2018-10-03 19:35:57 +00:00
|
|
|
// Read in how nodes will validate the CA.
|
|
|
|
if fc.CAPin != "" {
|
|
|
|
cfg.CAPin = fc.CAPin
|
|
|
|
}
|
|
|
|
|
2016-02-21 01:17:09 +00:00
|
|
|
// apply connection throttling:
|
2018-09-24 21:15:47 +00:00
|
|
|
limiters := []*limiter.LimiterConfig{
|
|
|
|
&cfg.SSH.Limiter,
|
|
|
|
&cfg.Auth.Limiter,
|
|
|
|
&cfg.Proxy.Limiter,
|
2016-02-21 01:17:09 +00:00
|
|
|
}
|
|
|
|
for _, l := range limiters {
|
|
|
|
if fc.Limits.MaxConnections > 0 {
|
|
|
|
l.MaxConnections = fc.Limits.MaxConnections
|
|
|
|
}
|
|
|
|
if fc.Limits.MaxUsers > 0 {
|
|
|
|
l.MaxNumberOfUsers = fc.Limits.MaxUsers
|
|
|
|
}
|
|
|
|
for _, rate := range fc.Limits.Rates {
|
|
|
|
l.Rates = append(l.Rates, limiter.Rate{
|
|
|
|
Period: rate.Period,
|
|
|
|
Average: rate.Average,
|
|
|
|
Burst: rate.Burst,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2016-03-28 19:58:34 +00:00
|
|
|
|
2018-08-29 23:38:11 +00:00
|
|
|
// Apply configuration for "auth_service", "proxy_service", and
|
|
|
|
// "ssh_service" if it's enabled.
|
|
|
|
if fc.Auth.Enabled() {
|
|
|
|
err = applyAuthConfig(fc, cfg)
|
2016-02-21 01:17:09 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
}
|
2018-08-29 23:38:11 +00:00
|
|
|
if fc.Proxy.Enabled() {
|
|
|
|
err = applyProxyConfig(fc, cfg)
|
2016-02-21 01:17:09 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
}
|
2018-08-29 23:38:11 +00:00
|
|
|
if fc.SSH.Enabled() {
|
|
|
|
err = applySSHConfig(fc, cfg)
|
2016-06-12 02:05:50 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
}
|
2018-08-02 00:25:16 +00:00
|
|
|
|
2018-08-29 23:38:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
2016-02-21 01:17:09 +00:00
|
|
|
|
2018-08-29 23:38:11 +00:00
|
|
|
// applyAuthConfig applies file configuration for the "auth_service" section.
|
|
|
|
func applyAuthConfig(fc *FileConfig, cfg *service.Config) error {
|
|
|
|
var err error
|
2018-08-02 00:25:16 +00:00
|
|
|
|
2018-09-26 00:11:51 +00:00
|
|
|
if fc.Auth.KubeconfigFile != "" {
|
2019-03-11 03:25:43 +00:00
|
|
|
warningMessage := "The auth_service no longer needs kubeconfig_file. It has " +
|
|
|
|
"been moved to proxy_service section. This setting is ignored."
|
|
|
|
log.Warning(warningMessage)
|
2018-08-02 00:25:16 +00:00
|
|
|
}
|
2017-11-25 01:09:11 +00:00
|
|
|
cfg.Auth.EnableProxyProtocol, err = utils.ParseOnOff("proxy_protocol", fc.Auth.ProxyProtocol, true)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2016-02-21 01:17:09 +00:00
|
|
|
if fc.Auth.ListenAddress != "" {
|
|
|
|
addr, err := utils.ParseHostPortAddr(fc.Auth.ListenAddress, int(defaults.AuthListenPort))
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Auth.SSHAddr = *addr
|
2016-06-01 00:31:33 +00:00
|
|
|
cfg.AuthServers = append(cfg.AuthServers, *addr)
|
2016-02-21 01:17:09 +00:00
|
|
|
}
|
2018-08-02 00:25:16 +00:00
|
|
|
|
2017-07-28 18:37:12 +00:00
|
|
|
// INTERNAL: Authorities (plus Roles) and ReverseTunnels don't follow the
|
|
|
|
// same pattern as the rest of the configuration (they are not configuration
|
|
|
|
// singletons). However, we need to keep them around while Telekube uses them.
|
2016-04-02 19:57:44 +00:00
|
|
|
for _, authority := range fc.Auth.Authorities {
|
2017-01-02 20:49:05 +00:00
|
|
|
ca, role, err := authority.Parse()
|
2016-03-28 19:58:34 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2016-12-30 23:13:45 +00:00
|
|
|
cfg.Auth.Authorities = append(cfg.Auth.Authorities, ca)
|
2017-01-02 20:49:05 +00:00
|
|
|
cfg.Auth.Roles = append(cfg.Auth.Roles, role)
|
2016-03-28 19:58:34 +00:00
|
|
|
}
|
2017-07-28 18:37:12 +00:00
|
|
|
for _, t := range fc.Auth.ReverseTunnels {
|
|
|
|
tun, err := t.ConvertAndValidate()
|
2016-05-12 07:44:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2017-07-28 18:37:12 +00:00
|
|
|
cfg.ReverseTunnels = append(cfg.ReverseTunnels, tun)
|
|
|
|
}
|
2018-05-02 22:45:31 +00:00
|
|
|
if len(fc.Auth.PublicAddr) != 0 {
|
|
|
|
addrs, err := fc.Auth.PublicAddr.Addrs(defaults.AuthListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Auth.PublicAddrs = addrs
|
|
|
|
}
|
2017-07-28 18:37:12 +00:00
|
|
|
// read in cluster name from file configuration and create services.ClusterName
|
|
|
|
cfg.Auth.ClusterName, err = fc.Auth.ClusterName.Parse()
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
// read in static tokens from file configuration and create services.StaticTokens
|
2017-10-11 19:09:06 +00:00
|
|
|
if fc.Auth.StaticTokens != nil {
|
|
|
|
cfg.Auth.StaticTokens, err = fc.Auth.StaticTokens.Parse()
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2017-07-28 18:37:12 +00:00
|
|
|
}
|
|
|
|
// read in and set authentication preferences
|
|
|
|
if fc.Auth.Authentication != nil {
|
2019-07-02 21:35:17 +00:00
|
|
|
authPreference, err := fc.Auth.Authentication.Parse()
|
2017-07-28 18:37:12 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Auth.Preference = authPreference
|
2016-05-12 07:44:25 +00:00
|
|
|
}
|
2017-11-22 01:35:58 +00:00
|
|
|
|
2019-02-21 01:47:50 +00:00
|
|
|
var localAuth services.Bool
|
|
|
|
if fc.Auth.Authentication == nil || fc.Auth.Authentication.LocalAuth == nil {
|
|
|
|
localAuth = services.NewBool(true)
|
|
|
|
} else {
|
|
|
|
localAuth = *fc.Auth.Authentication.LocalAuth
|
|
|
|
}
|
|
|
|
|
|
|
|
if localAuth.Value() == false && fc.Auth.Authentication.SecondFactor != "" {
|
|
|
|
warningMessage := "Second factor settings will have no affect because local " +
|
|
|
|
"authentication is disabled. Update file configuration and remove " +
|
|
|
|
"\"second_factor\" field to get rid of this error message."
|
|
|
|
log.Warnf(warningMessage)
|
|
|
|
}
|
|
|
|
|
2018-03-04 02:26:44 +00:00
|
|
|
auditConfig, err := services.AuditConfigFromObject(fc.Storage.Params)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
auditConfig.Type = fc.Storage.Type
|
|
|
|
|
2018-11-07 01:21:44 +00:00
|
|
|
// Set cluster-wide configuration from file configuration.
|
2017-11-29 00:15:46 +00:00
|
|
|
cfg.Auth.ClusterConfig, err = services.NewClusterConfig(services.ClusterConfigSpecV3{
|
2018-07-08 17:52:15 +00:00
|
|
|
SessionRecording: fc.Auth.SessionRecording,
|
|
|
|
ProxyChecksHostKeys: fc.Auth.ProxyChecksHostKeys,
|
|
|
|
Audit: *auditConfig,
|
|
|
|
ClientIdleTimeout: fc.Auth.ClientIdleTimeout,
|
|
|
|
DisconnectExpiredCert: fc.Auth.DisconnectExpiredCert,
|
2018-11-07 01:21:44 +00:00
|
|
|
KeepAliveInterval: fc.Auth.KeepAliveInterval,
|
|
|
|
KeepAliveCountMax: fc.Auth.KeepAliveCountMax,
|
2019-02-21 01:47:50 +00:00
|
|
|
LocalAuth: localAuth,
|
2017-11-29 00:15:46 +00:00
|
|
|
})
|
2017-10-24 20:52:29 +00:00
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2016-03-28 19:58:34 +00:00
|
|
|
|
2017-11-22 01:35:58 +00:00
|
|
|
// read in and set the license file path (not used in open-source version)
|
|
|
|
licenseFile := fc.Auth.LicenseFile
|
|
|
|
if licenseFile != "" {
|
|
|
|
if filepath.IsAbs(licenseFile) {
|
|
|
|
cfg.Auth.LicenseFile = licenseFile
|
|
|
|
} else {
|
|
|
|
cfg.Auth.LicenseFile = filepath.Join(cfg.DataDir, licenseFile)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-29 23:38:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// applyProxyConfig applies file configuration for the "proxy_service" section.
|
|
|
|
func applyProxyConfig(fc *FileConfig, cfg *service.Config) error {
|
|
|
|
var err error
|
|
|
|
|
|
|
|
cfg.Proxy.EnableProxyProtocol, err = utils.ParseOnOff("proxy_protocol", fc.Proxy.ProxyProtocol, true)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
if fc.Proxy.ListenAddress != "" {
|
|
|
|
addr, err := utils.ParseHostPortAddr(fc.Proxy.ListenAddress, int(defaults.SSHProxyListenPort))
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.SSHAddr = *addr
|
|
|
|
}
|
|
|
|
if fc.Proxy.WebAddr != "" {
|
|
|
|
addr, err := utils.ParseHostPortAddr(fc.Proxy.WebAddr, int(defaults.HTTPListenPort))
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.WebAddr = *addr
|
|
|
|
}
|
|
|
|
if fc.Proxy.TunAddr != "" {
|
|
|
|
addr, err := utils.ParseHostPortAddr(fc.Proxy.TunAddr, int(defaults.SSHProxyTunnelListenPort))
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.ReverseTunnelListenAddr = *addr
|
|
|
|
}
|
|
|
|
|
|
|
|
if fc.Proxy.KeyFile != "" {
|
|
|
|
if !fileExists(fc.Proxy.KeyFile) {
|
|
|
|
return trace.Errorf("https key does not exist: %s", fc.Proxy.KeyFile)
|
|
|
|
}
|
|
|
|
cfg.Proxy.TLSKey = fc.Proxy.KeyFile
|
|
|
|
}
|
|
|
|
if fc.Proxy.CertFile != "" {
|
|
|
|
if !fileExists(fc.Proxy.CertFile) {
|
|
|
|
return trace.Errorf("https cert does not exist: %s", fc.Proxy.CertFile)
|
|
|
|
}
|
|
|
|
|
|
|
|
// read in certificate chain from disk
|
|
|
|
certificateChainBytes, err := utils.ReadPath(fc.Proxy.CertFile)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse certificate chain into []*x509.Certificate
|
|
|
|
certificateChain, err := utils.ReadCertificateChain(certificateChainBytes)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// if starting teleport with a self signed certificate, print a warning, and
|
|
|
|
// then take whatever was passed to us. otherwise verify the certificate
|
|
|
|
// chain from leaf to root so browsers don't complain.
|
|
|
|
if utils.IsSelfSigned(certificateChain) {
|
|
|
|
warningMessage := "Starting Teleport with a self-signed TLS certificate, this is " +
|
|
|
|
"not safe for production clusters. Using a self-signed certificate opens " +
|
|
|
|
"Teleport users to Man-in-the-Middle attacks."
|
|
|
|
log.Warnf(warningMessage)
|
|
|
|
} else {
|
|
|
|
if err := utils.VerifyCertificateChain(certificateChain); err != nil {
|
|
|
|
return trace.BadParameter("unable to verify HTTPS certificate chain in %v: %s",
|
|
|
|
fc.Proxy.CertFile, utils.UserMessageFromError(err))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg.Proxy.TLSCert = fc.Proxy.CertFile
|
|
|
|
}
|
|
|
|
|
|
|
|
// apply kubernetes proxy config, by default kube proxy is disabled
|
|
|
|
if fc.Proxy.Kube.Configured() {
|
|
|
|
cfg.Proxy.Kube.Enabled = fc.Proxy.Kube.Enabled()
|
|
|
|
}
|
2019-03-11 03:25:43 +00:00
|
|
|
if fc.Proxy.Kube.KubeconfigFile != "" {
|
|
|
|
cfg.Proxy.Kube.KubeconfigPath = fc.Proxy.Kube.KubeconfigFile
|
|
|
|
}
|
2018-08-29 23:38:11 +00:00
|
|
|
if fc.Proxy.Kube.ListenAddress != "" {
|
|
|
|
addr, err := utils.ParseHostPortAddr(fc.Proxy.Kube.ListenAddress, int(defaults.KubeProxyListenPort))
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.Kube.ListenAddr = *addr
|
|
|
|
}
|
|
|
|
if len(fc.Proxy.Kube.PublicAddr) != 0 {
|
|
|
|
addrs, err := fc.Proxy.Kube.PublicAddr.Addrs(defaults.KubeProxyListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.Kube.PublicAddrs = addrs
|
|
|
|
}
|
|
|
|
if len(fc.Proxy.PublicAddr) != 0 {
|
|
|
|
addrs, err := fc.Proxy.PublicAddr.Addrs(defaults.HTTPListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.PublicAddrs = addrs
|
|
|
|
}
|
|
|
|
if len(fc.Proxy.SSHPublicAddr) != 0 {
|
|
|
|
addrs, err := fc.Proxy.SSHPublicAddr.Addrs(defaults.SSHProxyListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.SSHPublicAddrs = addrs
|
|
|
|
}
|
Added support for nodes dialing back to cluster.
Updated services.ReverseTunnel to support type (proxy or node). For
proxy types, which represent trusted cluster connections, when a
services.ReverseTunnel is created, it's created on the remote side with
name /reverseTunnels/example.com. For node types, services.ReverseTunnel
is created on the main side as /reverseTunnels/{nodeUUID}.clusterName.
Updated services.TunnelConn to support type (proxy or node). For proxy
types, which represent trusted cluster connections, tunnel connections
are created on the main side under
/tunnelConnections/remote.example.com/{proxyUUID}-remote.example.com.
For nodes, tunnel connections are created on the main side under
/tunnelConnections/example.com/{proxyUUID}-example.com. This allows
searching for tunnel connections by cluster then allows easily creating
a set of proxies that are missing matching services.TunnelConn.
The reverse tunnel server has been updated to handle heartbeats from
proxies as well as nodes. Proxy heartbeat behavior has not changed.
Heartbeats from nodes now add remote connections to the matching local
site. In addition, the reverse tunnel server now proxies connection to
the Auth Server for requests that are already authenticated (a second
authentication to the Auth Server is required).
For registration, nodes try and connect to the Auth Server to fetch host
credentials. Upon failure, nodes now try and fallback to fetching host
credentials from the web proxy.
To establish a connection to an Auth Server, nodes first try and connect
directly, and if the connection fails, fallback to obtaining a
connection to the Auth Server through the reverse tunnel. If a
connection is established directly, node startup behavior has not
changed. If a node establishes a connection through the reverse tunnel,
it creates an AgentPool that attempts to dial back to the cluster and
establish a reverse tunnel.
When nodes heartbeat, they also heartbeat if they are connected directly
to the cluster or through a reverse tunnel. For nodes that are connected
through a reverse tunnel, the proxy subsystem now directs the reverse
tunnel server to establish a connection through the reverse tunnel
instead of directly.
When sending discovery requests, the domain field has been replaced with
tunnelID. The tunnelID field is either the cluster name (same as before)
for proxies, or {nodeUUID}.example.com for nodes.
2019-04-26 20:51:59 +00:00
|
|
|
if len(fc.Proxy.TunnelPublicAddr) != 0 {
|
|
|
|
addrs, err := fc.Proxy.TunnelPublicAddr.Addrs(defaults.SSHProxyTunnelListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
cfg.Proxy.TunnelPublicAddrs = addrs
|
|
|
|
}
|
2018-08-29 23:38:11 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// applySSHConfig applies file configuration for the "ssh_service" section.
// It copies each field of fc.SSH into cfg.SSH only when the field is set in
// the file, leaving existing cfg defaults untouched otherwise.
func applySSHConfig(fc *FileConfig, cfg *service.Config) error {
	// listen_addr: parsed as host:port, defaulting the port when omitted.
	if fc.SSH.ListenAddress != "" {
		addr, err := utils.ParseHostPortAddr(fc.SSH.ListenAddress, int(defaults.SSHServerListenPort))
		if err != nil {
			return trace.Wrap(err)
		}
		cfg.SSH.Addr = *addr
	}
	// labels: static key/value labels are copied into a fresh map so that
	// cfg does not alias the file-config map.
	if fc.SSH.Labels != nil {
		cfg.SSH.Labels = make(map[string]string)
		for k, v := range fc.SSH.Labels {
			cfg.SSH.Labels[k] = v
		}
	}
	// commands: dynamic (command-driven) labels; Result starts empty and is
	// populated later when the command is periodically executed.
	if fc.SSH.Commands != nil {
		cfg.SSH.CmdLabels = make(services.CommandLabels)
		for _, cmdLabel := range fc.SSH.Commands {
			cfg.SSH.CmdLabels[cmdLabel.Name] = &services.CommandLabelV2{
				Period:  services.NewDuration(cmdLabel.Period),
				Command: cmdLabel.Command,
				Result:  "",
			}
		}
	}
	// namespace: only overrides the default when explicitly set.
	if fc.SSH.Namespace != "" {
		cfg.SSH.Namespace = fc.SSH.Namespace
	}
	// permit_user_env: one-way switch — file config can only enable it.
	if fc.SSH.PermitUserEnvironment {
		cfg.SSH.PermitUserEnvironment = true
	}
	if fc.SSH.PAM != nil {
		cfg.SSH.PAM = fc.SSH.PAM.Parse()

		// If PAM is enabled, make sure that Teleport was built with PAM support
		// and the PAM library was found at runtime.
		if cfg.SSH.PAM.Enabled {
			if !pam.BuildHasPAM() {
				errorMessage := "Unable to start Teleport: PAM was enabled in file configuration but this \n" +
					"Teleport binary was built without PAM support. To continue either download a \n" +
					"Teleport binary build with PAM support from https://gravitational.com/teleport \n" +
					"or disable PAM in file configuration."
				return trace.BadParameter(errorMessage)
			}
			if !pam.SystemHasPAM() {
				errorMessage := "Unable to start Teleport: PAM was enabled in file configuration but this \n" +
					"system does not have the needed PAM library installed. To continue either \n" +
					"install libpam or disable PAM in file configuration."
				return trace.BadParameter(errorMessage)
			}
		}
	}
	// public_addr: may list multiple addresses; each gets the default SSH
	// port when none is specified.
	if len(fc.SSH.PublicAddr) != 0 {
		addrs, err := fc.SSH.PublicAddr.Addrs(defaults.SSHServerListenPort)
		if err != nil {
			return trace.Wrap(err)
		}
		cfg.SSH.PublicAddrs = addrs
	}
	// enhanced_recording (BPF) settings.
	if fc.SSH.BPF != nil {
		cfg.SSH.BPF = fc.SSH.BPF.Parse()
	}

	return nil
}
|
|
|
|
|
2017-04-05 21:43:42 +00:00
|
|
|
// parseAuthorizedKeys parses keys in the authorized_keys format and
|
|
|
|
// returns a services.CertAuthority.
|
|
|
|
func parseAuthorizedKeys(bytes []byte, allowedLogins []string) (services.CertAuthority, services.Role, error) {
|
|
|
|
pubkey, comment, _, _, err := ssh.ParseAuthorizedKey(bytes)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
comments, err := url.ParseQuery(comment)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
clusterName := comments.Get("clustername")
|
|
|
|
if clusterName == "" {
|
2017-04-07 17:55:48 +00:00
|
|
|
return nil, nil, trace.BadParameter("no clustername provided")
|
2017-04-05 21:43:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// create a new certificate authority
|
|
|
|
ca := services.NewCertAuthority(
|
|
|
|
services.UserCA,
|
|
|
|
clusterName,
|
|
|
|
nil,
|
|
|
|
[][]byte{ssh.MarshalAuthorizedKey(pubkey)},
|
2017-04-11 00:55:58 +00:00
|
|
|
nil)
|
2017-04-05 21:43:42 +00:00
|
|
|
|
|
|
|
// transform old allowed logins into roles
|
|
|
|
role := services.RoleForCertAuthority(ca)
|
2017-06-30 01:02:47 +00:00
|
|
|
role.SetLogins(services.Allow, allowedLogins)
|
2017-04-05 21:43:42 +00:00
|
|
|
ca.AddRole(role.GetName())
|
|
|
|
|
|
|
|
return ca, role, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseKnownHosts parses keys in known_hosts format and returns a
|
|
|
|
// services.CertAuthority.
|
|
|
|
func parseKnownHosts(bytes []byte, allowedLogins []string) (services.CertAuthority, services.Role, error) {
|
2016-06-14 00:12:36 +00:00
|
|
|
marker, options, pubKey, comment, _, err := ssh.ParseKnownHosts(bytes)
|
|
|
|
if marker != "cert-authority" {
|
2017-01-02 20:49:05 +00:00
|
|
|
return nil, nil, trace.BadParameter("invalid file format. expected '@cert-authority` marker")
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
2017-01-02 20:49:05 +00:00
|
|
|
return nil, nil, trace.BadParameter("invalid public key")
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
teleportOpts, err := url.ParseQuery(comment)
|
|
|
|
if err != nil {
|
2017-01-02 20:49:05 +00:00
|
|
|
return nil, nil, trace.BadParameter("invalid key comment: '%s'", comment)
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
authType := services.CertAuthType(teleportOpts.Get("type"))
|
|
|
|
if authType != services.HostCA && authType != services.UserCA {
|
2017-01-02 20:49:05 +00:00
|
|
|
return nil, nil, trace.BadParameter("unsupported CA type: '%s'", authType)
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
if len(options) == 0 {
|
2017-01-02 20:49:05 +00:00
|
|
|
return nil, nil, trace.BadParameter("key without cluster_name")
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
const prefix = "*."
|
|
|
|
domainName := strings.TrimPrefix(options[0], prefix)
|
2017-01-02 20:49:05 +00:00
|
|
|
|
|
|
|
v1 := &services.CertAuthorityV1{
|
|
|
|
AllowedLogins: utils.CopyStrings(allowedLogins),
|
|
|
|
DomainName: domainName,
|
|
|
|
Type: authType,
|
|
|
|
CheckingKeys: [][]byte{ssh.MarshalAuthorizedKey(pubKey)},
|
|
|
|
}
|
|
|
|
ca, role := services.ConvertV1CertAuthority(v1)
|
|
|
|
return ca, role, nil
|
2016-06-14 00:12:36 +00:00
|
|
|
}
|
|
|
|
|
2017-04-05 21:43:42 +00:00
|
|
|
// certificateAuthorityFormat parses bytes and determines if they are in
|
|
|
|
// known_hosts format or authorized_keys format.
|
|
|
|
func certificateAuthorityFormat(bytes []byte) (string, error) {
|
|
|
|
_, _, _, _, err := ssh.ParseAuthorizedKey(bytes)
|
|
|
|
if err != nil {
|
|
|
|
_, _, _, _, _, err := ssh.ParseKnownHosts(bytes)
|
|
|
|
if err != nil {
|
|
|
|
return "", trace.BadParameter("unknown ca format")
|
|
|
|
}
|
|
|
|
return teleport.KnownHosts, nil
|
|
|
|
}
|
|
|
|
return teleport.AuthorizedKeys, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseCAKey parses bytes either in known_hosts or authorized_keys format
|
|
|
|
// and returns a services.CertAuthority.
|
|
|
|
func parseCAKey(bytes []byte, allowedLogins []string) (services.CertAuthority, services.Role, error) {
|
|
|
|
caFormat, err := certificateAuthorityFormat(bytes)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, trace.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if caFormat == teleport.AuthorizedKeys {
|
|
|
|
return parseAuthorizedKeys(bytes, allowedLogins)
|
|
|
|
}
|
|
|
|
return parseKnownHosts(bytes, allowedLogins)
|
|
|
|
}
|
|
|
|
|
2016-06-11 06:02:42 +00:00
|
|
|
// readTrustedClusters parses the content of "trusted_clusters" YAML structure
|
|
|
|
// and modifies Teleport 'conf' by adding "authorities" and "reverse tunnels"
|
|
|
|
// to it
|
|
|
|
func readTrustedClusters(clusters []TrustedCluster, conf *service.Config) error {
|
|
|
|
if len(clusters) == 0 {
|
|
|
|
return nil
|
2016-06-07 22:02:59 +00:00
|
|
|
}
|
2016-06-11 06:02:42 +00:00
|
|
|
// go over all trusted clusters:
|
|
|
|
for i := range clusters {
|
|
|
|
tc := &clusters[i]
|
|
|
|
// parse "allow_logins"
|
|
|
|
var allowedLogins []string
|
|
|
|
for _, login := range strings.Split(tc.AllowedLogins, ",") {
|
|
|
|
login = strings.TrimSpace(login)
|
|
|
|
if login != "" {
|
|
|
|
allowedLogins = append(allowedLogins, login)
|
|
|
|
}
|
2016-06-02 01:56:48 +00:00
|
|
|
}
|
2016-06-11 06:02:42 +00:00
|
|
|
// open the key file for this cluster:
|
|
|
|
log.Debugf("reading trusted cluster key file %s", tc.KeyFile)
|
2016-06-12 22:54:59 +00:00
|
|
|
if tc.KeyFile == "" {
|
|
|
|
return trace.Errorf("key_file is missing for a trusted cluster")
|
|
|
|
}
|
2016-06-11 06:02:42 +00:00
|
|
|
f, err := os.Open(tc.KeyFile)
|
2016-06-02 01:56:48 +00:00
|
|
|
if err != nil {
|
2016-06-11 06:02:42 +00:00
|
|
|
return trace.Errorf("reading trusted cluster keys: %v", err)
|
2016-06-02 01:56:48 +00:00
|
|
|
}
|
2016-06-07 22:02:59 +00:00
|
|
|
defer f.Close()
|
2016-06-11 06:02:42 +00:00
|
|
|
// read the keyfile for this cluster and get trusted CA keys:
|
|
|
|
var authorities []services.CertAuthority
|
2016-12-30 23:13:45 +00:00
|
|
|
var roles []services.Role
|
2016-06-07 22:02:59 +00:00
|
|
|
scanner := bufio.NewScanner(f)
|
|
|
|
for line := 0; scanner.Scan(); {
|
2017-01-02 20:49:05 +00:00
|
|
|
ca, role, err := parseCAKey(scanner.Bytes(), allowedLogins)
|
2016-06-11 06:02:42 +00:00
|
|
|
if err != nil {
|
2016-12-30 23:13:45 +00:00
|
|
|
return trace.BadParameter("%s:L%d. %v", tc.KeyFile, line, err)
|
2016-06-07 22:02:59 +00:00
|
|
|
}
|
2016-12-30 23:13:45 +00:00
|
|
|
if ca.GetType() == services.UserCA && len(allowedLogins) == 0 && len(tc.TunnelAddr) > 0 {
|
|
|
|
return trace.BadParameter("trusted cluster '%s' needs allow_logins parameter",
|
|
|
|
ca.GetClusterName())
|
2016-06-11 06:02:42 +00:00
|
|
|
}
|
2016-12-30 23:13:45 +00:00
|
|
|
authorities = append(authorities, ca)
|
2017-04-05 21:43:42 +00:00
|
|
|
if role != nil {
|
|
|
|
roles = append(roles, role)
|
|
|
|
}
|
2016-06-11 06:02:42 +00:00
|
|
|
}
|
|
|
|
conf.Auth.Authorities = append(conf.Auth.Authorities, authorities...)
|
2016-12-30 23:13:45 +00:00
|
|
|
conf.Auth.Roles = append(conf.Auth.Roles, roles...)
|
|
|
|
clusterName := authorities[0].GetClusterName()
|
2016-06-11 06:02:42 +00:00
|
|
|
// parse "tunnel_addr"
|
|
|
|
var tunnelAddresses []string
|
|
|
|
for _, ta := range strings.Split(tc.TunnelAddr, ",") {
|
|
|
|
ta := strings.TrimSpace(ta)
|
|
|
|
if ta == "" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
addr, err := utils.ParseHostPortAddr(ta, defaults.SSHProxyTunnelListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err,
|
|
|
|
"Invalid tunnel address '%s' for cluster '%s'. Expect host:port format",
|
|
|
|
ta, clusterName)
|
|
|
|
}
|
|
|
|
tunnelAddresses = append(tunnelAddresses, addr.FullAddress())
|
|
|
|
}
|
|
|
|
if len(tunnelAddresses) > 0 {
|
2016-12-30 23:13:45 +00:00
|
|
|
conf.ReverseTunnels = append(conf.ReverseTunnels, services.NewReverseTunnel(clusterName, tunnelAddresses))
|
2016-06-02 01:56:48 +00:00
|
|
|
}
|
|
|
|
}
|
2016-06-11 06:02:42 +00:00
|
|
|
return nil
|
2016-06-02 01:56:48 +00:00
|
|
|
}
|
|
|
|
|
2016-02-17 02:19:21 +00:00
|
|
|
// applyString takes 'src' and overwrites target with it, unless 'src' is empty
// returns 'True' if 'src' was not empty
func applyString(src string, target *string) bool {
	if src == "" {
		return false
	}
	*target = src
	return true
}
|
|
|
|
|
2016-03-29 20:36:52 +00:00
|
|
|
// Configure merges command line arguments with what's in a configuration file
|
2016-02-07 21:33:33 +00:00
|
|
|
// with CLI commands taking precedence
|
2016-04-11 23:30:58 +00:00
|
|
|
func Configure(clf *CommandLineFlags, cfg *service.Config) error {
|
2017-09-10 03:59:38 +00:00
|
|
|
// pass the value of --insecure flag to the runtime
|
|
|
|
lib.SetInsecureDevMode(clf.InsecureMode)
|
|
|
|
|
2016-02-17 02:19:21 +00:00
|
|
|
// load /etc/teleport.yaml and apply it's values:
|
2016-06-01 00:31:33 +00:00
|
|
|
fileConf, err := ReadConfigFile(clf.ConfigFile)
|
2016-02-17 02:19:21 +00:00
|
|
|
if err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-02-17 02:19:21 +00:00
|
|
|
}
|
2016-03-28 19:58:34 +00:00
|
|
|
// if configuration is passed as an environment variable,
|
|
|
|
// try to decode it and override the config file
|
|
|
|
if clf.ConfigString != "" {
|
2016-03-29 20:36:52 +00:00
|
|
|
fileConf, err = ReadFromString(clf.ConfigString)
|
2016-03-28 19:58:34 +00:00
|
|
|
if err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-03-28 19:58:34 +00:00
|
|
|
}
|
|
|
|
}
|
2019-02-10 01:43:59 +00:00
|
|
|
|
2019-08-29 23:16:03 +00:00
|
|
|
if clf.BootstrapFile != "" {
|
|
|
|
resources, err := ReadResources(clf.BootstrapFile)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
if len(resources) < 1 {
|
|
|
|
return trace.BadParameter("no resources found: %q", clf.BootstrapFile)
|
|
|
|
}
|
|
|
|
cfg.Auth.Resources = resources
|
|
|
|
}
|
|
|
|
|
2019-06-03 18:39:41 +00:00
|
|
|
// Apply command line --debug flag to override logger severity.
|
|
|
|
if clf.Debug {
|
|
|
|
// If debug logging is requested and no file configuration exists, set the
|
|
|
|
// log level right away. Otherwise allow the command line flag to override
|
|
|
|
// logger severity in file configuration.
|
|
|
|
if fileConf == nil {
|
|
|
|
log.SetLevel(log.DebugLevel)
|
|
|
|
} else {
|
|
|
|
fileConf.Logger.Severity = teleport.DebugLevel
|
|
|
|
}
|
2019-02-10 01:43:59 +00:00
|
|
|
}
|
|
|
|
|
2016-03-29 20:48:06 +00:00
|
|
|
if err = ApplyFileConfig(fileConf, cfg); err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
2016-03-28 19:58:34 +00:00
|
|
|
|
2019-03-12 22:30:44 +00:00
|
|
|
// If FIPS mode is specified, validate Teleport configuration is FedRAMP/FIPS
|
|
|
|
// 140-2 compliant.
|
|
|
|
if clf.FIPS {
|
|
|
|
// Make sure all cryptographic primitives are FIPS compliant.
|
|
|
|
err = utils.UintSliceSubset(defaults.FIPSCipherSuites, cfg.CipherSuites)
|
|
|
|
if err != nil {
|
|
|
|
return trace.BadParameter("non-FIPS compliant TLS cipher suite selected: %v", err)
|
|
|
|
}
|
|
|
|
err = utils.StringSliceSubset(defaults.FIPSCiphers, cfg.Ciphers)
|
|
|
|
if err != nil {
|
|
|
|
return trace.BadParameter("non-FIPS compliant SSH cipher selected: %v", err)
|
|
|
|
}
|
|
|
|
err = utils.StringSliceSubset(defaults.FIPSKEXAlgorithms, cfg.KEXAlgorithms)
|
|
|
|
if err != nil {
|
|
|
|
return trace.BadParameter("non-FIPS compliant SSH kex algorithm selected: %v", err)
|
|
|
|
}
|
|
|
|
err = utils.StringSliceSubset(defaults.FIPSMACAlgorithms, cfg.MACAlgorithms)
|
|
|
|
if err != nil {
|
|
|
|
return trace.BadParameter("non-FIPS compliant SSH mac algorithm selected: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure cluster settings are also FedRAMP/FIPS 140-2 compliant.
|
|
|
|
if cfg.Auth.Enabled {
|
|
|
|
// Only SSO based authentication is supported. The SSO provider is where
|
|
|
|
// any FedRAMP/FIPS 140-2 compliance (like password complexity) should be
|
|
|
|
// enforced.
|
|
|
|
if cfg.Auth.ClusterConfig.GetLocalAuth() == true {
|
|
|
|
return trace.BadParameter("non-FIPS compliant authentication setting: \"local_auth\" must be false")
|
|
|
|
}
|
|
|
|
|
|
|
|
// If sessions are being recorded at the proxy host key checking must be
|
|
|
|
// enabled. This make sure the host certificate key algorithm is FIPS
|
|
|
|
// compliant.
|
|
|
|
if cfg.Auth.ClusterConfig.GetSessionRecording() == services.RecordAtProxy &&
|
|
|
|
cfg.Auth.ClusterConfig.GetProxyChecksHostKeys() == services.HostKeyCheckNo {
|
|
|
|
return trace.BadParameter("non-FIPS compliant proxy settings: \"proxy_checks_host_keys\" must be true")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-03 19:35:57 +00:00
|
|
|
// Apply diagnostic address flag.
|
2018-02-08 02:32:50 +00:00
|
|
|
if clf.DiagnosticAddr != "" {
|
|
|
|
addr, err := utils.ParseAddr(clf.DiagnosticAddr)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err, "failed to parse diag-addr")
|
|
|
|
}
|
|
|
|
cfg.DiagnosticAddr = *addr
|
|
|
|
}
|
|
|
|
|
2017-10-29 10:50:29 +00:00
|
|
|
// apply --insecure-no-tls flag:
|
2017-10-23 17:40:27 +00:00
|
|
|
if clf.DisableTLS {
|
2017-10-29 10:50:29 +00:00
|
|
|
cfg.Proxy.DisableTLS = clf.DisableTLS
|
2017-10-23 17:40:27 +00:00
|
|
|
}
|
|
|
|
|
2019-02-10 01:43:59 +00:00
|
|
|
// apply --debug flag to config:
|
2016-02-16 21:18:58 +00:00
|
|
|
if clf.Debug {
|
2016-02-09 01:25:20 +00:00
|
|
|
cfg.Console = ioutil.Discard
|
2018-02-17 23:51:57 +00:00
|
|
|
cfg.Debug = clf.Debug
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
2016-02-09 01:25:20 +00:00
|
|
|
|
2016-02-09 04:55:13 +00:00
|
|
|
// apply --roles flag:
|
2016-02-16 21:18:58 +00:00
|
|
|
if clf.Roles != "" {
|
|
|
|
if err := validateRoles(clf.Roles); err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-02-09 04:55:13 +00:00
|
|
|
}
|
2016-02-17 03:24:51 +00:00
|
|
|
cfg.SSH.Enabled = strings.Index(clf.Roles, defaults.RoleNode) != -1
|
|
|
|
cfg.Auth.Enabled = strings.Index(clf.Roles, defaults.RoleAuthService) != -1
|
|
|
|
cfg.Proxy.Enabled = strings.Index(clf.Roles, defaults.RoleProxy) != -1
|
2016-02-09 04:55:13 +00:00
|
|
|
}
|
|
|
|
|
2016-02-09 21:46:34 +00:00
|
|
|
// apply --auth-server flag:
|
2019-07-06 07:28:49 +00:00
|
|
|
if len(clf.AuthServerAddr) > 0 {
|
2016-02-09 21:46:34 +00:00
|
|
|
if cfg.Auth.Enabled {
|
|
|
|
log.Warnf("not starting the local auth service. --auth-server flag tells to connect to another auth server")
|
|
|
|
cfg.Auth.Enabled = false
|
|
|
|
}
|
2019-07-06 07:28:49 +00:00
|
|
|
cfg.AuthServers = make([]utils.NetAddr, 0, len(clf.AuthServerAddr))
|
|
|
|
for _, as := range clf.AuthServerAddr {
|
|
|
|
addr, err := utils.ParseHostPortAddr(as, defaults.AuthListenPort)
|
|
|
|
if err != nil {
|
|
|
|
return trace.BadParameter("cannot parse auth server address: '%v'", as)
|
|
|
|
}
|
|
|
|
cfg.AuthServers = append(cfg.AuthServers, *addr)
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
2016-02-09 21:46:34 +00:00
|
|
|
}
|
|
|
|
|
2016-02-17 02:19:21 +00:00
|
|
|
// apply --name flag:
|
2016-02-16 21:18:58 +00:00
|
|
|
if clf.NodeName != "" {
|
|
|
|
cfg.Hostname = clf.NodeName
|
2016-02-13 00:41:55 +00:00
|
|
|
}
|
|
|
|
|
2018-02-14 23:05:54 +00:00
|
|
|
// apply --pid-file flag
|
|
|
|
if clf.PIDFile != "" {
|
|
|
|
cfg.PIDFile = clf.PIDFile
|
|
|
|
}
|
|
|
|
|
2016-02-09 21:46:34 +00:00
|
|
|
// apply --token flag:
|
2019-07-17 19:51:18 +00:00
|
|
|
if _, err := cfg.ApplyToken(clf.AuthToken); err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
2016-02-09 21:46:34 +00:00
|
|
|
|
2018-10-03 19:35:57 +00:00
|
|
|
// Apply flags used for the node to validate the Auth Server.
|
|
|
|
if clf.CAPin != "" {
|
|
|
|
cfg.CAPin = clf.CAPin
|
|
|
|
}
|
|
|
|
|
2016-02-09 21:46:34 +00:00
|
|
|
// apply --listen-ip flag:
|
2016-02-16 21:18:58 +00:00
|
|
|
if clf.ListenIP != nil {
|
|
|
|
applyListenIP(clf.ListenIP, cfg)
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
2016-02-10 02:28:38 +00:00
|
|
|
|
2016-03-15 06:00:29 +00:00
|
|
|
// --advertise-ip flag
|
2018-05-02 22:45:31 +00:00
|
|
|
if clf.AdvertiseIP != "" {
|
|
|
|
if _, _, err := utils.ParseAdvertiseAddr(clf.AdvertiseIP); err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-03-12 04:09:40 +00:00
|
|
|
}
|
|
|
|
cfg.AdvertiseIP = clf.AdvertiseIP
|
|
|
|
}
|
|
|
|
|
2016-03-15 00:25:00 +00:00
|
|
|
// apply --labels flag
|
2016-03-15 06:00:29 +00:00
|
|
|
if err = parseLabels(clf.Labels, &cfg.SSH); err != nil {
|
2016-04-11 23:30:58 +00:00
|
|
|
return trace.Wrap(err)
|
2016-03-15 00:25:00 +00:00
|
|
|
}
|
|
|
|
|
2016-04-02 00:58:41 +00:00
|
|
|
// --pid-file:
|
2016-04-02 01:03:57 +00:00
|
|
|
if clf.PIDFile != "" {
|
|
|
|
cfg.PIDFile = clf.PIDFile
|
2016-04-02 00:58:41 +00:00
|
|
|
}
|
2016-06-01 07:20:58 +00:00
|
|
|
|
|
|
|
// auth_servers not configured, but the 'auth' is enabled (auth is on localhost)?
|
|
|
|
if len(cfg.AuthServers) == 0 && cfg.Auth.Enabled {
|
|
|
|
cfg.AuthServers = append(cfg.AuthServers, cfg.Auth.SSHAddr)
|
|
|
|
}
|
|
|
|
|
2017-03-26 19:58:01 +00:00
|
|
|
// add data_dir to the backend config:
|
|
|
|
if cfg.Auth.StorageConfig.Params == nil {
|
|
|
|
cfg.Auth.StorageConfig.Params = backend.Params{}
|
|
|
|
}
|
|
|
|
cfg.Auth.StorageConfig.Params["data_dir"] = cfg.DataDir
|
2017-05-26 19:28:46 +00:00
|
|
|
// command line flag takes precedence over file config
|
|
|
|
if clf.PermitUserEnvironment {
|
|
|
|
cfg.SSH.PermitUserEnvironment = true
|
|
|
|
}
|
|
|
|
|
2016-04-11 23:30:58 +00:00
|
|
|
return nil
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
|
|
|
|
2016-03-15 06:00:29 +00:00
|
|
|
// parseLabels takes the value of --labels flag and tries to correctly populate
|
|
|
|
// sshConf.Labels and sshConf.CmdLabels
|
|
|
|
func parseLabels(spec string, sshConf *service.SSHConfig) error {
|
|
|
|
if spec == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// base syntax parsing, the spec must be in the form of 'key=value,more="better"`
|
|
|
|
lmap, err := client.ParseLabelSpec(spec)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
if len(lmap) > 0 {
|
|
|
|
sshConf.CmdLabels = make(services.CommandLabels, 0)
|
|
|
|
sshConf.Labels = make(map[string]string, 0)
|
|
|
|
}
|
|
|
|
// see which labels are actually command labels:
|
|
|
|
for key, value := range lmap {
|
|
|
|
cmdLabel, err := isCmdLabelSpec(value)
|
|
|
|
if err != nil {
|
|
|
|
return trace.Wrap(err)
|
|
|
|
}
|
|
|
|
if cmdLabel != nil {
|
2016-12-30 23:13:45 +00:00
|
|
|
sshConf.CmdLabels[key] = cmdLabel
|
2016-03-15 06:00:29 +00:00
|
|
|
} else {
|
|
|
|
sshConf.Labels[key] = value
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// isCmdLabelSpec tries to interpret a given string as a "command label" spec.
|
|
|
|
// A command label spec looks like [time_duration:command param1 param2 ...] where
|
|
|
|
// time_duration is in "1h2m1s" form.
|
|
|
|
//
|
|
|
|
// Example of a valid spec: "[1h:/bin/uname -m]"
|
2016-12-30 23:13:45 +00:00
|
|
|
func isCmdLabelSpec(spec string) (services.CommandLabel, error) {
|
2016-03-15 06:00:29 +00:00
|
|
|
// command spec? (surrounded by brackets?)
|
|
|
|
if len(spec) > 5 && spec[0] == '[' && spec[len(spec)-1] == ']' {
|
2016-04-12 17:54:24 +00:00
|
|
|
invalidSpecError := trace.BadParameter(
|
|
|
|
"invalid command label spec: '%s'", spec)
|
2016-03-15 06:00:29 +00:00
|
|
|
spec = strings.Trim(spec, "[]")
|
|
|
|
idx := strings.IndexRune(spec, ':')
|
|
|
|
if idx < 0 {
|
|
|
|
return nil, trace.Wrap(invalidSpecError)
|
|
|
|
}
|
|
|
|
periodSpec := spec[:idx]
|
|
|
|
period, err := time.ParseDuration(periodSpec)
|
|
|
|
if err != nil {
|
|
|
|
return nil, trace.Wrap(invalidSpecError)
|
|
|
|
}
|
|
|
|
cmdSpec := spec[idx+1:]
|
|
|
|
if len(cmdSpec) < 1 {
|
|
|
|
return nil, trace.Wrap(invalidSpecError)
|
|
|
|
}
|
|
|
|
var openQuote bool = false
|
2016-12-30 23:13:45 +00:00
|
|
|
return &services.CommandLabelV2{
|
|
|
|
Period: services.NewDuration(period),
|
2016-03-15 06:00:29 +00:00
|
|
|
Command: strings.FieldsFunc(cmdSpec, func(c rune) bool {
|
|
|
|
if c == '"' {
|
|
|
|
openQuote = !openQuote
|
|
|
|
}
|
|
|
|
return unicode.IsSpace(c) && !openQuote
|
|
|
|
}),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
// not a valid spec
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2016-02-07 21:33:33 +00:00
|
|
|
// applyListenIP replaces all 'listen addr' settings for all services with
|
|
|
|
// a given IP
|
2016-02-09 21:46:34 +00:00
|
|
|
func applyListenIP(ip net.IP, cfg *service.Config) {
|
2016-02-07 21:33:33 +00:00
|
|
|
listeningAddresses := []*utils.NetAddr{
|
|
|
|
&cfg.Auth.SSHAddr,
|
|
|
|
&cfg.Auth.SSHAddr,
|
|
|
|
&cfg.Proxy.SSHAddr,
|
|
|
|
&cfg.Proxy.WebAddr,
|
|
|
|
&cfg.SSH.Addr,
|
2016-02-08 22:51:22 +00:00
|
|
|
&cfg.Proxy.ReverseTunnelListenAddr,
|
2016-02-07 21:33:33 +00:00
|
|
|
}
|
|
|
|
for _, addr := range listeningAddresses {
|
|
|
|
replaceHost(addr, ip.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// replaceHost takes utils.NetAddr and replaces the hostname in it, preserving
|
|
|
|
// the original port
|
|
|
|
func replaceHost(addr *utils.NetAddr, newHost string) {
|
|
|
|
_, port, err := net.SplitHostPort(addr.Addr)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("failed parsing address: '%v'", addr.Addr)
|
|
|
|
}
|
|
|
|
addr.Addr = net.JoinHostPort(newHost, port)
|
|
|
|
}
|
|
|
|
|
|
|
|
// fileExists reports whether a file exists at the given path. Any stat
// error other than "does not exist" (e.g. permission denied) is treated
// as the file being present, matching the original behavior.
func fileExists(fp string) bool {
	_, err := os.Stat(fp)
	return !os.IsNotExist(err)
}
|
2016-02-09 04:55:13 +00:00
|
|
|
|
|
|
|
// validateRoles makes sure that value upassed to --roles flag is valid
|
|
|
|
func validateRoles(roles string) error {
|
|
|
|
for _, role := range strings.Split(roles, ",") {
|
|
|
|
switch role {
|
|
|
|
case defaults.RoleAuthService,
|
|
|
|
defaults.RoleNode,
|
|
|
|
defaults.RoleProxy:
|
|
|
|
break
|
|
|
|
default:
|
2016-02-09 21:46:34 +00:00
|
|
|
return trace.Errorf("unknown role: '%s'", role)
|
2016-02-09 04:55:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|