Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

(6.2) Propagate external traits to leaf clusters #6649

Merged
merged 6 commits into from
Apr 30, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions integration/app_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -489,11 +489,6 @@ func setup(t *testing.T) *pack {
privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
require.NoError(t, err)

// Find AllocatePortsNum free listening ports to use.
startNumber := utils.PortStartingNumber + (AllocatePortsNum * 2) + 1
ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startNumber+1)
require.NoError(t, err)

// Create a new Teleport instance with passed in configuration.
p.rootCluster = NewInstance(InstanceConfig{
ClusterName: "example.com",
Expand Down
5 changes: 0 additions & 5 deletions integration/db_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -202,11 +202,6 @@ func setupDatabaseTest(t *testing.T) *databasePack {
SetTestTimeouts(100 * time.Millisecond)
log := testlog.FailureOnly(t)

// Create ports allocator.
startPort := utils.PortStartingNumber + (3 * AllocatePortsNum) + 1
ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startPort)
require.NoError(t, err)

// Generate keypair.
privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
require.NoError(t, err)
Expand Down
184 changes: 143 additions & 41 deletions integration/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,13 +73,22 @@ import (
const (
HostID = "00000000-0000-0000-0000-000000000000"
Site = "local-site"

AllocatePortsNum = 600
)

// ports contains tcp ports allocated for all integration tests.
var ports utils.PortList

func init() {
	// Reserve a pool of free tcp ports up front so that every integration
	// test can draw from it. 5000 should be plenty.
	allocated, err := utils.GetFreeTCPPorts(5000, utils.PortStartingNumber)
	if err != nil {
		panic(fmt.Sprintf("failed to allocate tcp ports for tests: %v", err))
	}
	ports = allocated
}

type IntSuite struct {
ports utils.PortList
me *user.User
me *user.User
// priv/pub pair to avoid re-generating it
priv []byte
pub []byte
Expand Down Expand Up @@ -118,10 +127,6 @@ func (s *IntSuite) SetUpSuite(c *check.C) {
c.Assert(err, check.IsNil)

// Find AllocatePortsNum free listening ports to use.
s.ports, err = utils.GetFreeTCPPorts(AllocatePortsNum)
if err != nil {
c.Fatal(err)
}
s.me, _ = user.Current()

// close & re-open stdin because 'go test' runs with os.stdin connected to /dev/null
Expand Down Expand Up @@ -324,7 +329,7 @@ func (s *IntSuite) TestAuditOn(c *check.C) {
defer t.StopAll()

// Start a node.
nodeSSHPort := s.getPorts(1)[0]
nodeSSHPort := ports.PopInt()
nodeConfig := func() *service.Config {
tconf := s.defaultServiceConfig()

Expand Down Expand Up @@ -692,7 +697,7 @@ func (s *IntSuite) TestUUIDBasedProxy(c *check.C) {
// addNode adds a node to the teleport instance, returning its uuid.
// All nodes added this way have the same hostname.
addNode := func() (string, error) {
nodeSSHPort := s.getPorts(1)[0]
nodeSSHPort := ports.PopInt()
tconf := s.defaultServiceConfig()
tconf.Hostname = Host

Expand Down Expand Up @@ -1603,7 +1608,7 @@ func (s *IntSuite) TestHA(c *check.C) {
c.Assert(b.Start(), check.IsNil)
c.Assert(a.Start(), check.IsNil)

nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(a.StartNodeAndProxy("cluster-a-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

Expand Down Expand Up @@ -1747,7 +1752,7 @@ func (s *IntSuite) TestMapRoles(c *check.C) {
// try and upsert a trusted cluster
tryCreateTrustedCluster(c, aux.Process.GetAuthServer(), trustedCluster)

nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

Expand Down Expand Up @@ -1978,7 +1983,7 @@ func (s *IntSuite) trustedClusters(c *check.C, test trustedClusterTest) {
ClusterName: clusterMain,
HostID: HostID,
NodeName: Host,
Ports: s.getPorts(6),
Ports: ports.PopIntSlice(6),
Priv: s.priv,
Pub: s.pub,
MultiplexProxy: test.multiplex,
Expand Down Expand Up @@ -2082,7 +2087,7 @@ func (s *IntSuite) trustedClusters(c *check.C, test trustedClusterTest) {
// try and upsert a trusted cluster
tryCreateTrustedCluster(c, aux.Process.GetAuthServer(), trustedCluster)

nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

Expand Down Expand Up @@ -2381,7 +2386,7 @@ func (s *IntSuite) TestDiscoveryRecovers(c *check.C) {
username := s.me.Username

// create load balancer for main cluster proxies
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
c.Assert(err, check.IsNil)
c.Assert(lb.Listen(), check.IsNil)
Expand Down Expand Up @@ -2417,7 +2422,7 @@ func (s *IntSuite) TestDiscoveryRecovers(c *check.C) {
// Helper function for adding a new proxy to "main".
addNewMainProxy := func(name string) (reversetunnel.Server, ProxyConfig) {
c.Logf("adding main proxy %q...", name)
nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
newConfig := ProxyConfig{
Name: name,
Expand Down Expand Up @@ -2520,7 +2525,7 @@ func (s *IntSuite) TestDiscovery(c *check.C) {
username := s.me.Username

// create load balancer for main cluster proxies
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
c.Assert(err, check.IsNil)
c.Assert(lb.Listen(), check.IsNil)
Expand Down Expand Up @@ -2554,7 +2559,7 @@ func (s *IntSuite) TestDiscovery(c *check.C) {
}

// start second proxy
nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
proxyConfig := ProxyConfig{
Name: "cluster-main-proxy",
Expand Down Expand Up @@ -2651,7 +2656,7 @@ func (s *IntSuite) TestDiscoveryNode(c *check.C) {
defer lib.SetInsecureDevMode(false)

// Create and start load balancer for proxies.
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
c.Assert(err, check.IsNil)
err = lb.Listen()
Expand Down Expand Up @@ -2683,7 +2688,7 @@ func (s *IntSuite) TestDiscoveryNode(c *check.C) {
defer main.StopAll()

// Create a Teleport instance with a Proxy.
nodePorts := s.getPorts(3)
nodePorts := ports.PopIntSlice(3)
proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
proxyConfig := ProxyConfig{
Name: "cluster-main-proxy",
Expand Down Expand Up @@ -3109,7 +3114,7 @@ func (s *IntSuite) TestProxyHostKeyCheck(c *check.C) {
c.Assert(err, check.IsNil)

// start a ssh server that presents a host key instead of a certificate
nodePort := s.getPorts(1)[0]
nodePort := ports.PopInt()
sshNode, err := newDiscardServer(Host, nodePort, hostSigner)
c.Assert(err, check.IsNil)
err = sshNode.Start()
Expand Down Expand Up @@ -4329,7 +4334,7 @@ func (s *IntSuite) TestList(c *check.C) {
defer t.StopAll()

// Create and start a Teleport node.
nodeSSHPort := s.getPorts(1)[0]
nodeSSHPort := ports.PopInt()
nodeConfig := func() *service.Config {
tconf := s.defaultServiceConfig()
tconf.Hostname = "server-02"
Expand Down Expand Up @@ -5097,19 +5102,6 @@ func runCommand(instance *TeleInstance, cmd []string, cfg ClientConfig, attempts
return output.String(), nil
}

// getPorts returns num unallocated ports available for listening on. It
// panics when the suite's pre-allocated pool cannot satisfy the request, or
// when the pool contains a non-numeric entry, since tests cannot proceed
// without valid ports.
func (s *IntSuite) getPorts(num int) []int {
	if len(s.ports) < num {
		panic("do not have enough ports! increase AllocatePortsNum constant")
	}
	ports := make([]int, num)
	for i := range ports {
		// Pop returns the port as a string; a non-numeric entry indicates a
		// corrupted pool, so fail loudly rather than silently returning 0
		// (the previous code discarded the Atoi error).
		p, err := strconv.Atoi(s.ports.Pop())
		if err != nil {
			panic(fmt.Sprintf("invalid port value in pool: %v", err))
		}
		ports[i] = p
	}
	return ports
}

// newTeleportInstance creates a new Teleport instance from the suite's
// default instance configuration.
// NOTE(review): the check.C parameter appears unused in this helper —
// presumably kept for signature consistency with sibling helpers; confirm.
func (s *IntSuite) newTeleportInstance(c *check.C) *TeleInstance {
return NewInstance(s.defaultInstanceConfig())
}
Expand All @@ -5119,7 +5111,7 @@ func (s *IntSuite) defaultInstanceConfig() InstanceConfig {
ClusterName: Site,
HostID: HostID,
NodeName: Host,
Ports: s.getPorts(6),
Ports: ports.PopIntSlice(6),
Priv: s.priv,
Pub: s.pub,
log: s.log,
Expand All @@ -5131,7 +5123,7 @@ func (s *IntSuite) newNamedTeleportInstance(c *check.C, clusterName string) *Tel
ClusterName: clusterName,
HostID: HostID,
NodeName: Host,
Ports: s.getPorts(6),
Ports: ports.PopIntSlice(6),
Priv: s.priv,
Pub: s.pub,
log: s.log,
Expand Down Expand Up @@ -5256,10 +5248,6 @@ func dumpGoroutineProfile() {

// TestWebProxyInsecure makes sure that proxy endpoint works when TLS is disabled.
func TestWebProxyInsecure(t *testing.T) {
startPort := utils.PortStartingNumber + (4 * AllocatePortsNum) + 1
ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startPort)
require.NoError(t, err)

privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
require.NoError(t, err)

Expand Down Expand Up @@ -5298,3 +5286,117 @@ func TestWebProxyInsecure(t *testing.T) {
require.Equal(t, http.StatusOK, resp.StatusCode)
require.NoError(t, resp.Body.Close())
}

// TestTraitsPropagation makes sure that user traits are applied properly to
// roles in root and leaf clusters. It builds a two-cluster setup (root and
// leaf), creates the same user/role in both, maps the root role onto the
// leaf role, and verifies that a node-label selector built from an external
// trait ("{{external.testing}}") grants SSH access in both clusters.
func TestTraitsPropagation(t *testing.T) {
log := testlog.FailureOnly(t)

// Both clusters share one keypair; they still establish separate CAs.
privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
require.NoError(t, err)

// Create root cluster.
rc := NewInstance(InstanceConfig{
ClusterName: "root.example.com",
HostID: uuid.New(),
NodeName: Host,
Ports: ports.PopIntSlice(6),
Priv: privateKey,
Pub: publicKey,
log: log,
})

// Create leaf cluster.
lc := NewInstance(InstanceConfig{
ClusterName: "leaf.example.com",
HostID: uuid.New(),
NodeName: Host,
Ports: ports.PopIntSlice(6),
Priv: privateKey,
Pub: publicKey,
log: log,
})

// Make root cluster config. Web UI is fully disabled; only the SSH
// service (labeled env=integration) is needed for the test.
rcConf := service.MakeDefaultConfig()
rcConf.DataDir = t.TempDir()
rcConf.Auth.Enabled = true
rcConf.Auth.Preference.SetSecondFactor("off")
rcConf.Proxy.Enabled = true
rcConf.Proxy.DisableWebService = true
rcConf.Proxy.DisableWebInterface = true
rcConf.SSH.Enabled = true
rcConf.SSH.Addr.Addr = net.JoinHostPort(rc.Hostname, rc.GetPortSSH())
rcConf.SSH.Labels = map[string]string{"env": "integration"}

// Make leaf cluster config. Mirrors the root config except the web
// service itself stays enabled (only the interface is disabled).
lcConf := service.MakeDefaultConfig()
lcConf.DataDir = t.TempDir()
lcConf.Auth.Enabled = true
lcConf.Auth.Preference.SetSecondFactor("off")
lcConf.Proxy.Enabled = true
lcConf.Proxy.DisableWebInterface = true
lcConf.SSH.Enabled = true
lcConf.SSH.Addr.Addr = net.JoinHostPort(lc.Hostname, lc.GetPortSSH())
lcConf.SSH.Labels = map[string]string{"env": "integration"}

// Create identical user/role in both clusters.
me, err := user.Current()
require.NoError(t, err)

role := services.NewAdminRole()
role.SetName("test")
role.SetLogins(services.Allow, []string{me.Username})
// Users created by CreateEx have "testing: integration" trait. The node
// label selector below only matches the env=integration nodes when the
// external trait is interpolated correctly — this is the core assertion
// of the test.
role.SetNodeLabels(services.Allow, map[string]utils.Strings{"env": []string{"{{external.testing}}"}})

rc.AddUserWithRole(me.Username, role)
lc.AddUserWithRole(me.Username, role)

// Establish trust b/w root and leaf by exchanging each other's secrets.
err = rc.CreateEx(lc.Secrets.AsSlice(), rcConf)
require.NoError(t, err)
err = lc.CreateEx(rc.Secrets.AsSlice(), lcConf)
require.NoError(t, err)

// Start both clusters.
require.NoError(t, rc.Start())
t.Cleanup(func() {
rc.StopAll()
})
require.NoError(t, lc.Start())
t.Cleanup(func() {
lc.StopAll()
})

// Update root's certificate authority on leaf to configure role mapping.
// The boolean argument presumably controls loading of signing keys —
// NOTE(review): confirm false means "public material only".
ca, err := lc.Process.GetAuthServer().GetCertAuthority(services.CertAuthID{
Type: services.UserCA,
DomainName: rc.Secrets.SiteName,
}, false)
require.NoError(t, err)
ca.SetRoles(nil) // Reset roles, otherwise they will take precedence.
ca.SetRoleMap(services.RoleMap{{Remote: role.GetName(), Local: []string{role.GetName()}}})
err = lc.Process.GetAuthServer().UpsertCertAuthority(ca)
require.NoError(t, err)

// Run command in root.
outputRoot, err := runCommand(rc, []string{"echo", "hello root"}, ClientConfig{
Login: me.Username,
Cluster: "root.example.com",
Host: Loopback,
Port: rc.GetPortSSHInt(),
}, 1)
require.NoError(t, err)
require.Equal(t, "hello root", strings.TrimSpace(outputRoot))

// Run command in leaf. Note the session is established through the root
// cluster's instance (rc) while targeting the leaf cluster, exercising
// trait propagation across the trust relationship.
outputLeaf, err := runCommand(rc, []string{"echo", "hello leaf"}, ClientConfig{
Login: me.Username,
Cluster: "leaf.example.com",
Host: Loopback,
Port: lc.GetPortSSHInt(),
}, 1)
require.NoError(t, err)
require.Equal(t, "hello leaf", strings.TrimSpace(outputLeaf))
}
Loading