Some minor improvements

- `tctl auth ls` lists all CAs by default
- Documented `advertise_ip` better
Ev Kontsevoy 2016-05-31 17:31:33 -07:00
parent 06fa66b574
commit cddaf6e5c8
15 changed files with 171 additions and 82 deletions


@ -51,6 +51,7 @@ docs: bbox
#
.PHONY:run-docs
run-docs: bbox
@echo -e "\n\n----> LIVE EDIT HERE: http://localhost:6600/admin-guide/\n"
docker run $(DOCKERFLAGS) -ti $(NOROOT) -e HOME=$(SRCDIR)/build.assets -p 6600:6600 -w $(SRCDIR) $(BBOX) mkdocs serve -a 0.0.0.0:6600
#


@ -132,6 +132,11 @@ Let's cover some of these flags in more detail:
* `--advertise-ip` flag can be used when Teleport nodes are running behind NAT and
their externally routable IP cannot be automatically determined.
For example, assume that a host "foo" can be reached via `10.0.0.10` but there is
no `A` DNS record for "foo", so you cannot connect to it via `tsh ssh foo`. If
you start Teleport on "foo" with `--advertise-ip=10.0.0.10`, it will automatically
tell the Teleport proxy to use that IP when someone tries to connect
to "foo" (see the sketch after this list). This is also useful when connecting to Teleport nodes using their labels.
* `--nodename` flag lets you assign an alternative name to the node, which can be
used by clients to log in. By default it's equal to the value returned by `hostname`
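A minimal Go sketch of the same setting applied programmatically (the `advertise_ip` key maps to the `Global.AdvertiseIP` field shown later in this diff; the IP address is illustrative):

	// Programmatic equivalent of `teleport start --advertise-ip=10.0.0.10`,
	// i.e. setting advertise_ip: in the teleport: section of the config file.
	fc := &config.FileConfig{}
	fc.Global.AdvertiseIP = net.ParseIP("10.0.0.10")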
@ -162,6 +167,10 @@ teleport:
# by default it's equal to hostname
nodename: graviton
# Data directory where Teleport keeps its data, like keys/users for
# authentication (if using the default BoltDB back-end)
data_dir: /var/lib/teleport
# one-time invitation token used to join a cluster. it is not used on
# subsequent starts
auth_token: xxxx-token-xxxx
@ -192,7 +201,6 @@ teleport:
# backend if you want to run Teleport in HA configuration.
storage:
type: bolt
data_dir: /var/lib/teleport
# This section configures the 'auth service':
auth_service:


@ -0,0 +1,16 @@
# Simple config file with just a few customizations (with comments)
teleport:
nodename: localhost
# auth token allows easy adding of other nodes. pass this value
# as --token when starting nodes.
auth_token: OhwiZ2ainushemith1oquiex
log:
output: stderr
severity: INFO
auth_service:
enabled: yes
cluster_name: teleport.local
ssh_service:
enabled: yes
proxy_service:
enabled: yes

fixtures/tmp-cluster.yaml (new file)

@ -0,0 +1,22 @@
# This config file creates a single node teleport cluster with all
# its data in /tmp
#
# All listening ports are changed as well
#
teleport:
nodename: tmp.localhost
auth_token: Tex7siequoo9eew4Eitoo1ni
data_dir: /tmp/teleport
log:
output: stderr
severity: INFO
auth_service:
enabled: yes
cluster_name: teleport.tmp
listen_addr: 0.0.0.0:5010
ssh_service:
enabled: yes
listen_addr: 0.0.0.0:5011
proxy_service:
listen_addr: 0.0.0.0:5012
enabled: yes


@ -197,6 +197,7 @@ func (i *TeleInstance) Create(trustedSecrets []*InstanceSecrets, enableSSH bool,
return err
}
tconf := service.MakeDefaultConfig()
tconf.DataDir = dataDir
tconf.Console = console
tconf.Auth.DomainName = i.Secrets.SiteName
tconf.Auth.Authorities = append(tconf.Auth.Authorities, i.Secrets.GetCAs()...)
@ -221,9 +222,8 @@ func (i *TeleInstance) Create(trustedSecrets []*InstanceSecrets, enableSSH bool,
tconf.Proxy.SSHAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortProxy())
tconf.Proxy.WebAddr.Addr = net.JoinHostPort(i.Hostname, i.GetPortWeb())
tconf.Proxy.DisableWebUI = true
tconf.AuthServers[0].Addr = tconf.Auth.SSHAddr.Addr
tconf.ConfigureBolt(dataDir)
tconf.DataDir = dataDir
tconf.AuthServers = append(tconf.AuthServers, tconf.Auth.SSHAddr)
tconf.ConfigureBolt()
tconf.Keygen = testauthority.New()
i.Config = tconf
i.Process, err = service.NewTeleport(tconf)
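Note the ordering here: `ConfigureBolt` no longer takes the data directory as an argument and reads `cfg.DataDir` instead, so the data dir must be assigned before the call. A minimal sketch of the new sequence (the path is illustrative):

	tconf := service.MakeDefaultConfig()
	tconf.DataDir = "/tmp/teleport-test" // must be set first
	tconf.ConfigureBolt()                // derives Bolt file paths from tconf.DataDir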


@ -689,6 +689,7 @@ func (s *APIServer) createUserWithToken(w http.ResponseWriter, r *http.Request,
}
sess, err := s.a.CreateUserWithToken(req.Token, req.Password, req.HOTPToken)
if err != nil {
log.Error(err)
return nil, trace.Wrap(err)
}
return sess, nil


@ -578,7 +578,7 @@ func NewTunClient(purpose string,
for _, o := range opts {
o(tc)
}
log.Infof("newTunClient(%s)", purpose)
log.Infof("newTunClient(%s) with auth: %v", purpose, authServers)
clt, err := NewClient("http://stub:0", tc.Dial)
if err != nil {
@ -645,7 +645,7 @@ func (c *TunClient) GetAgent() (AgentCloser, error) {
// Dial dials to Auth server's HTTP API over SSH tunnel
func (c *TunClient) Dial(network, address string) (net.Conn, error) {
log.Infof("TunClient[%s].Dial(%v, %v)", c.purpose, network, address)
log.Infof("TunClient[%s].Dial()", c.purpose)
client, err := c.getClient()
if err != nil {
return nil, trace.Wrap(err)
@ -762,6 +762,7 @@ func (c *TunClient) getClient() (client *ssh.Client, err error) {
if len(authServers) == 0 {
return nil, trace.Errorf("all auth servers are offline")
}
log.Infof("tunClient(%s).authServers: %v", c.purpose, authServers)
// try to connect to the 1st one who will pick up:
for _, authServer := range authServers {


@ -98,7 +98,7 @@ func (s *ConfigTestSuite) TestSampleConfig(c *check.C) {
// validate a couple of values:
c.Assert(fc.Limits.MaxUsers, check.Equals, defaults.LimiterMaxConcurrentUsers)
c.Assert(fc.Global.Storage.DirName, check.Equals, defaults.DataDir)
c.Assert(fc.Global.DataDir, check.Equals, defaults.DataDir)
c.Assert(fc.Logger.Severity, check.Equals, "INFO")
}
@ -134,7 +134,7 @@ func (s *ConfigTestSuite) TestConfigReading(c *check.C) {
c.Assert(conf.Logger.Output, check.Equals, "stderr")
c.Assert(conf.Logger.Severity, check.Equals, "INFO")
c.Assert(conf.Storage.Type, check.Equals, "bolt")
c.Assert(conf.Storage.DirName, check.Equals, "/var/lib/teleport")
c.Assert(conf.DataDir, check.Equals, "/path/to/data")
c.Assert(conf.Auth.Enabled(), check.Equals, true)
c.Assert(conf.Auth.ListenAddress, check.Equals, "tcp://auth")
c.Assert(conf.SSH.Configured(), check.Equals, true)
@ -330,6 +330,7 @@ func makeConfigFixture() string {
// common config:
conf.NodeName = NodeName
conf.DataDir = "/path/to/data"
conf.AuthServers = AuthServers
conf.Limits.MaxConnections = 100
conf.Limits.MaxUsers = 5
@ -337,7 +338,6 @@ func makeConfigFixture() string {
conf.Logger.Output = "stderr"
conf.Logger.Severity = "INFO"
conf.Storage.Type = "bolt"
conf.Storage.DirName = "/var/lib/teleport"
// auth service:
conf.Auth.EnabledFlag = "Yeah"


@ -71,7 +71,7 @@ type CommandLineFlags struct {
// ReadConfigFile reads /etc/teleport.yaml (or whatever is passed via --config flag)
// and returns the parsed configuration
func readConfigFile(cliConfigPath string) (*FileConfig, error) {
func ReadConfigFile(cliConfigPath string) (*FileConfig, error) {
configFilePath := defaults.ConfigFilePath
// --config tells us to use a specific conf. file:
if cliConfigPath != "" {
@ -126,19 +126,24 @@ func ApplyFileConfig(fc *FileConfig, cfg *service.Config) error {
if err != nil {
return trace.Errorf("cannot parse auth server address: '%v'", as)
}
cfg.AuthServers = append(cfg.AuthServers, *addr)
cfg.AuthServers = []utils.NetAddr{*addr}
}
}
cfg.ApplyToken(fc.AuthToken)
cfg.Auth.DomainName = fc.Auth.DomainName
if fc.Global.DataDir != "" {
cfg.DataDir = fc.Global.DataDir
}
if fc.Storage.Type == "" {
fc.Storage.Type = teleport.BoltBackendType
}
// configure storage:
switch fc.Storage.Type {
case teleport.BoltBackendType:
cfg.ConfigureBolt(fc.Storage.DirName)
cfg.ConfigureBolt()
case teleport.ETCDBackendType:
if err := cfg.ConfigureETCD(
fc.Storage.DirName, etcdbk.Config{
if err := cfg.ConfigureETCD(etcdbk.Config{
Nodes: fc.Storage.Peers,
Key: fc.Storage.Prefix,
TLSKeyFile: fc.Storage.TLSKeyFile,
@ -268,6 +273,7 @@ func ApplyFileConfig(fc *FileConfig, cfg *service.Config) error {
return trace.Wrap(err)
}
cfg.Auth.SSHAddr = *addr
cfg.AuthServers = append(cfg.AuthServers, *addr)
}
for _, authority := range fc.Auth.Authorities {
ca, err := authority.Parse()
@ -325,7 +331,7 @@ func applyString(src string, target *string) bool {
// with CLI commands taking precedence
func Configure(clf *CommandLineFlags, cfg *service.Config) error {
// load /etc/teleport.yaml and apply its values:
fileConf, err := readConfigFile(clf.ConfigFile)
fileConf, err := ReadConfigFile(clf.ConfigFile)
if err != nil {
return trace.Wrap(err)
}
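Exporting `ReadConfigFile` lets other binaries reuse the daemon's file-loading path; `tctl` below does exactly this. A minimal sketch of the intended call sequence (mirroring `applyConfig` later in this commit):

	fileConf, err := config.ReadConfigFile("") // "" falls back to the default config path
	if err != nil {
		utils.FatalError(err)
	}
	cfg := service.MakeDefaultConfig()
	if err := config.ApplyFileConfig(fileConf, cfg); err != nil {
		utils.FatalError(err)
	}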


@ -198,7 +198,7 @@ func MakeSampleFileConfig() (fc *FileConfig) {
g.AuthServers = []string{defaults.AuthListenAddr().Addr}
g.Limits.MaxConnections = defaults.LimiterMaxConnections
g.Limits.MaxUsers = defaults.LimiterMaxConcurrentUsers
g.Storage.DirName = defaults.DataDir
g.DataDir = defaults.DataDir
g.Storage.Type = conf.Auth.RecordsBackend.Type
g.PIDFile = "/var/run/teleport.pid"
@ -259,7 +259,6 @@ func MakeAuthPeerFileConfig(domainName string, token string) (fc *FileConfig) {
g.AuthServers = []string{"<insert auth server peer address here>"}
g.Limits.MaxConnections = defaults.LimiterMaxConnections
g.Limits.MaxUsers = defaults.LimiterMaxConcurrentUsers
g.Storage.DirName = defaults.DataDir
g.Storage.Type = teleport.ETCDBackendType
g.Storage.Prefix = defaults.ETCDPrefix
g.Storage.Peers = []string{"insert ETCD peers addresses here"}
@ -318,8 +317,6 @@ type Log struct {
type StorageBackend struct {
// Type can be "bolt" or "etcd"
Type string `yaml:"type,omitempty"`
// DirName is valid only for bolt
DirName string `yaml:"data_dir,omitempty"`
// Peers is a list of etcd peers, valid only for etcd
Peers []string `yaml:"peers,omitempty"`
// Prefix is etcd key prefix, valid only for etcd
@ -342,6 +339,7 @@ type Global struct {
Logger Log `yaml:"log,omitempty"`
Storage StorageBackend `yaml:"storage,omitempty"`
AdvertiseIP net.IP `yaml:"advertise_ip,omitempty"`
DataDir string `yaml:"data_dir,omitempty"`
// Keys holds the list of SSH key/cert pairs used by all services
// Each service (like proxy, auth, node) can find the key it needs


@ -122,22 +122,22 @@ func (cfg *Config) ApplyToken(token string) bool {
}
// ConfigureBolt configures Bolt back-ends using the config's data directory.
func (cfg *Config) ConfigureBolt(dataDir string) {
func (cfg *Config) ConfigureBolt() {
a := &cfg.Auth
if a.EventsBackend.Type == teleport.BoltBackendType {
a.EventsBackend.Params = boltParams(dataDir, defaults.EventsBoltFile)
a.EventsBackend.Params = boltParams(cfg.DataDir, defaults.EventsBoltFile)
}
if a.KeysBackend.Type == teleport.BoltBackendType {
a.KeysBackend.Params = boltParams(dataDir, defaults.KeysBoltFile)
a.KeysBackend.Params = boltParams(cfg.DataDir, defaults.KeysBoltFile)
}
if a.RecordsBackend.Type == teleport.BoltBackendType {
a.RecordsBackend.Params = boltParams(dataDir, defaults.RecordsBoltFile)
a.RecordsBackend.Params = boltParams(cfg.DataDir, defaults.RecordsBoltFile)
}
}
// ConfigureETCD configures ETCD backend (still uses BoltDB for some cases)
func (cfg *Config) ConfigureETCD(dataDir string, etcdCfg etcdbk.Config) error {
func (cfg *Config) ConfigureETCD(etcdCfg etcdbk.Config) error {
a := &cfg.Auth
params, err := etcdParams(etcdCfg)
@ -149,10 +149,10 @@ func (cfg *Config) ConfigureETCD(dataDir string, etcdCfg etcdbk.Config) error {
// We can't store records and events in ETCD
a.EventsBackend.Type = teleport.BoltBackendType
a.EventsBackend.Params = boltParams(dataDir, defaults.EventsBoltFile)
a.EventsBackend.Params = boltParams(cfg.DataDir, defaults.EventsBoltFile)
a.RecordsBackend.Type = teleport.BoltBackendType
a.RecordsBackend.Params = boltParams(dataDir, defaults.RecordsBoltFile)
a.RecordsBackend.Params = boltParams(cfg.DataDir, defaults.RecordsBoltFile)
return nil
}
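With the data directory moved onto the config object, an etcd-backed setup now looks roughly like this (the peer address and key prefix are illustrative; `Nodes` and `Key` are the fields used above):

	cfg := service.MakeDefaultConfig()
	cfg.DataDir = "/var/lib/teleport" // events and session records still go to BoltDB under this dir
	if err := cfg.ConfigureETCD(etcdbk.Config{
		Nodes: []string{"https://etcd0:2379"},
		Key:   "/teleport",
	}); err != nil {
		utils.FatalError(err)
	}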
@ -313,9 +313,6 @@ func ApplyDefaults(cfg *Config) {
// global defaults
cfg.Hostname = hostname
cfg.DataDir = defaults.DataDir
if cfg.Auth.Enabled {
cfg.AuthServers = []utils.NetAddr{cfg.Auth.SSHAddr}
}
cfg.Console = os.Stdout
}
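Because `ApplyDefaults` no longer pre-populates `cfg.AuthServers`, callers that need an auth server address must supply their own fallback, as `tctl`'s `connectToAuthService` does later in this commit:

	if len(cfg.AuthServers) == 0 {
		cfg.AuthServers = []utils.NetAddr{*defaults.AuthConnectAddr()}
	}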


@ -54,11 +54,9 @@ func (s *ConfigSuite) TestDefaultConfig(c *C) {
if len(config.Hostname) < 2 {
c.Error("default hostname wasn't properly set")
}
c.Assert(config.AuthServers, DeepEquals, []utils.NetAddr{localAuthAddr})
// auth section
auth := config.Auth
c.Assert(config.AuthServers, DeepEquals, []utils.NetAddr{auth.SSHAddr})
c.Assert(auth.SSHAddr, DeepEquals, localAuthAddr)
c.Assert(auth.Limiter.MaxConnections, Equals, int64(defaults.LimiterMaxConnections))
c.Assert(auth.Limiter.MaxNumberOfUsers, Equals, defaults.LimiterMaxConcurrentUsers)


@ -26,9 +26,11 @@ import (
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/native"
"github.com/gravitational/teleport/lib/config"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
@ -44,6 +46,7 @@ import (
type CLIConfig struct {
Debug bool
ConfigFile string
}
type UserCommand struct {
@ -113,6 +116,9 @@ func main() {
app.Flag("debug", "Enable verbose logging to stderr").
Short('d').
BoolVar(&ccf.Debug)
app.Flag("config", fmt.Sprintf("Path to a configuration file [%v]", defaults.ConfigFilePath)).
Short('c').
ExistingFileVar(&ccf.ConfigFile)
// commands:
ver := app.Command("version", "Print the version.")
@ -120,7 +126,7 @@ func main() {
// user add command:
users := app.Command("users", "Manage users logins")
userAdd := users.Command("add", "Generates an invitation token and prints the signup URL for setting up 2nd factor auth.")
userAdd := users.Command("add", "Generate an invitation token and print the signup URL")
userAdd.Arg("login", "Teleport user login").Required().StringVar(&cmdUsers.login)
userAdd.Arg("local-logins", "Local UNIX users this account can log in as [login]").
Default("").StringVar(&cmdUsers.allowedLogins)
@ -128,7 +134,7 @@ func main() {
userAdd.Alias(AddUserHelp)
// list users command
userList := users.Command("ls", "Lists all user accounts")
userList := users.Command("ls", "List all user accounts")
// delete user command
userDelete := users.Command("del", "Deletes user accounts")
@ -137,13 +143,13 @@ func main() {
// add node command
nodes := app.Command("nodes", "Issue invites for other nodes to join the cluster")
nodeAdd := nodes.Command("add", "Generates an invitation token. Use it to add a new node to the Teleport cluster")
nodeAdd := nodes.Command("add", "Generate an invitation token. Use it to add a new node to the Teleport cluster")
nodeAdd.Flag("roles", "Comma-separated list of roles for the new node to assume [node]").Default("node").StringVar(&cmdNodes.roles)
nodeAdd.Flag("ttl", "Time to live for a generated token").DurationVar(&cmdNodes.ttl)
nodeAdd.Flag("count", "add count tokens and output JSON with the list").Hidden().Default("1").IntVar(&cmdNodes.count)
nodeAdd.Flag("format", "output format, 'text' or 'json'").Hidden().Default("text").StringVar(&cmdNodes.format)
nodeAdd.Alias(AddNodeHelp)
nodeList := nodes.Command("ls", "Lists all active SSH nodes within the cluster")
nodeList := nodes.Command("ls", "List all active SSH nodes within the cluster")
nodeList.Alias(ListNodesHelp)
// operations on invitation tokens
@ -153,16 +159,16 @@ func main() {
tokenDel.Arg("token", "Token to delete").StringVar(&cmdTokens.token)
// operations with authorities
auth := app.Command("authorities", "Operations with user and host certificate authorities").Hidden()
auth.Flag("type", "authority type, 'user' or 'host'").Default(string(services.UserCA)).StringVar(&cmdAuth.authType)
authList := auth.Command("ls", "List trusted user certificate authorities")
authExport := auth.Command("export", "Export concatenated keys to standard output")
authExport.Flag("private-keys", "if set, will print private keys").BoolVar(&cmdAuth.exportPrivateKeys)
auth := app.Command("auth", "Operations with user and host certificate authorities").Hidden()
auth.Flag("type", "authority type, 'user' or 'host'").StringVar(&cmdAuth.authType)
authList := auth.Command("ls", "List trusted certificate authorities (CAs)")
authExport := auth.Command("export", "Export CA keys to standard output")
authExport.Flag("keys", "if set, will print private keys").BoolVar(&cmdAuth.exportPrivateKeys)
authExport.Flag("fingerprint", "filter authority by fingerprint").StringVar(&cmdAuth.exportAuthorityFingerprint)
authGenerate := auth.Command("gen", "Generate new OpenSSH keypair")
authGenerate.Flag("pub-key", "path to the public key to write").Required().StringVar(&cmdAuth.genPubPath)
authGenerate.Flag("priv-key", "path to the private key to write").Required().StringVar(&cmdAuth.genPrivPath)
authGenerate := auth.Command("gen", "Generate a new SSH keypair")
authGenerate.Flag("pub-key", "path to the public key").Required().StringVar(&cmdAuth.genPubPath)
authGenerate.Flag("priv-key", "path to the private key").Required().StringVar(&cmdAuth.genPrivPath)
authGenAndSign := auth.Command("gencert", "Generate OpenSSH keys and certificate for a joining teleport proxy, node or auth server").Hidden()
authGenAndSign.Flag("priv-key", "path to the private key to write").Required().StringVar(&cmdAuth.genPrivPath)
@ -172,17 +178,17 @@ func main() {
authGenAndSign.Flag("domain", "cluster certificate authority domain name").Required().StringVar(&cmdAuth.genAuthorityDomain)
// operations with reverse tunnels
reverseTunnels := app.Command("rts", "Operations with reverse tunnels").Hidden()
reverseTunnelsList := reverseTunnels.Command("ls", "List reverse tunnels").Hidden()
reverseTunnelsDelete := reverseTunnels.Command("del", "Deletes reverse tunnels").Hidden()
reverseTunnelsDelete.Arg("domain", "Comma-separated list of reverse tunnels to delete").
reverseTunnels := app.Command("tunnels", "Operations on reverse tunnel clusters").Hidden()
reverseTunnelsList := reverseTunnels.Command("ls", "List tunnels").Hidden()
reverseTunnelsDelete := reverseTunnels.Command("del", "Delete a tunnel").Hidden()
reverseTunnelsDelete.Arg("name", "Tunnels to delete").
Required().StringVar(&cmdReverseTunnel.domainNames)
reverseTunnelsUpsert := reverseTunnels.Command("upsert", "Update or add a new reverse tunnel").Hidden()
reverseTunnelsUpsert.Arg("domain", "Domain name of the reverse tunnel").
reverseTunnelsUpsert := reverseTunnels.Command("add", "Create a new reverse tunnel").Hidden()
reverseTunnelsUpsert.Arg("name", "Name of the tunnel").
Required().StringVar(&cmdReverseTunnel.domainNames)
reverseTunnelsUpsert.Arg("addrs", "Comma-separated list of dial addresses for reverse tunnels to dial").
reverseTunnelsUpsert.Arg("addrs", "Comma-separated list of tunnels").
Required().SetValue(&cmdReverseTunnel.dialAddrs)
reverseTunnelsUpsert.Flag("ttl", "Optional TTL (time to live) for reverse tunnel").DurationVar(&cmdReverseTunnel.ttl)
reverseTunnelsUpsert.Flag("ttl", "Optional TTL (time to live) for the tunnel").DurationVar(&cmdReverseTunnel.ttl)
// parse CLI commands+flags:
command, err := app.Parse(os.Args[1:])
@ -190,11 +196,7 @@ func main() {
utils.FatalError(err)
}
// --debug flag
if ccf.Debug {
utils.InitLoggerDebug()
}
applyConfig(&ccf, cfg)
validateConfig(cfg)
// some commands do not need a connection to client
@ -411,17 +413,29 @@ func (u *NodeCommand) ListActive(client *auth.TunClient) error {
// ListAuthorities shows the list of certificate authorities we trust
func (a *AuthCommand) ListAuthorities(client *auth.TunClient) error {
authType := services.CertAuthType(a.authType)
if err := authType.Check(); err != nil {
// by default show authorities of both types:
authTypes := []services.CertAuthType{
services.UserCA,
services.HostCA,
}
// but if there was a --type switch, only select those:
if a.authType != "" {
authTypes = []services.CertAuthType{services.CertAuthType(a.authType)}
if err := authTypes[0].Check(); err != nil {
return trace.Wrap(err)
}
authorities, err := client.GetCertAuthorities(authType, false)
}
authorities := make([]*services.CertAuthority, 0)
for _, t := range authTypes {
ats, err := client.GetCertAuthorities(t, false)
if err != nil {
return trace.Wrap(err)
}
authorities = append(authorities, ats...)
}
view := func() string {
t := goterm.NewTable(0, 10, 5, ' ', 0)
printHeader(t, []string{"Type", "Authority Domain", "Fingerprint", "Restricted to logins"})
printHeader(t, []string{"Type", "Cluster Name", "Fingerprint", "Allowed Logins"})
if len(authorities) == 0 {
return t.String()
}
@ -431,7 +445,11 @@ func (a *AuthCommand) ListAuthorities(client *auth.TunClient) error {
if err != nil {
fingerprint = fmt.Sprintf("<bad key: %v>", err)
}
fmt.Fprintf(t, "%v\t%v\t%v\t%v\n", a.Type, a.DomainName, fingerprint, strings.Join(a.AllowedLogins, ","))
logins := strings.Join(a.AllowedLogins, ",")
if logins == "" {
logins = "<any>"
}
fmt.Fprintf(t, "%v\t%v\t%v\t%v\n", a.Type, a.DomainName, fingerprint, logins)
}
}
return t.String()
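Taken together with the command definitions above: `tctl auth ls` with no `--type` flag now lists both user and host CAs, `--type user` or `--type host` narrows the listing to one type, and a CA with no login restrictions shows `<any>` in the Allowed Logins column instead of an empty cell.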
@ -578,9 +596,12 @@ func (r *ReverseTunnelCommand) Upsert(client *auth.TunClient) error {
func (r *ReverseTunnelCommand) Delete(client *auth.TunClient) error {
for _, domainName := range strings.Split(r.domainNames, ",") {
if err := client.DeleteReverseTunnel(domainName); err != nil {
if trace.IsNotFound(err) {
return trace.Errorf("'%v' is not found", domainName)
}
return trace.Wrap(err)
}
fmt.Printf("Reverse tunnel '%v' has been deleted\n", domainName)
fmt.Printf("Cluster '%v' has been disconnected\n", domainName)
}
return nil
}
@ -589,10 +610,11 @@ func (r *ReverseTunnelCommand) Delete(client *auth.TunClient) error {
func connectToAuthService(cfg *service.Config) (client *auth.TunClient, err error) {
// connect to the local auth server by default:
cfg.Auth.Enabled = true
if len(cfg.AuthServers) == 0 {
cfg.AuthServers = []utils.NetAddr{
*defaults.AuthConnectAddr(),
}
}
// read the host SSH keys and use them to open an SSH connection to the auth service
i, err := auth.ReadIdentity(cfg.DataDir, auth.IdentityID{Role: teleport.RoleAdmin, HostUUID: cfg.HostUUID})
if err != nil {
@ -627,6 +649,25 @@ func validateConfig(cfg *service.Config) {
}
}
// applyConfig takes configuration values from the config file and applies
// them to the 'service.Config' object
func applyConfig(ccf *CLIConfig, cfg *service.Config) error {
// load /etc/teleport.yaml and apply its values:
fileConf, err := config.ReadConfigFile(ccf.ConfigFile)
if err != nil {
return trace.Wrap(err)
}
if err = config.ApplyFileConfig(fileConf, cfg); err != nil {
return trace.Wrap(err)
}
// --debug flag
if ccf.Debug {
utils.InitLoggerDebug()
logrus.Debugf("DEBUG logging enabled")
}
return nil
}
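With the new `--config`/`-c` flag registered above, `tctl` can be pointed at the same YAML file the daemon uses, e.g. `tctl -c /etc/teleport.yaml nodes ls`.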
// onTokenList is called to execute "tokens ls" command
func (c *TokenCommand) List(client *auth.TunClient) error {
tokens, err := client.GetTokens()


@ -7,12 +7,12 @@ teleport:
auth_service:
enabled: yes
cluster_name: cluster-a.local.com
cluster_name: cluster-a
listen_addr: 0.0.0.0:3025
tokens: ["proxy:helloworld", "node:node"]
ssh_service:
enabled: no
enabled: yes
proxy_service:
enabled: no
enabled: yes


@ -7,11 +7,11 @@ teleport:
auth_service:
enabled: yes
cluster_name: cluster-a.local.com
cluster_name: cluster-b
listen_addr: 0.0.0.0:3025
ssh_service:
enabled: no
enabled: yes
proxy_service:
enabled: no
enabled: yes