diff --git a/integration/app_integration_test.go b/integration/app_integration_test.go
index 49014635735b8..5f012e076997c 100644
--- a/integration/app_integration_test.go
+++ b/integration/app_integration_test.go
@@ -489,11 +489,6 @@ func setup(t *testing.T) *pack {
 	privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
 	require.NoError(t, err)

-	// Find AllocatePortsNum free listening ports to use.
-	startNumber := utils.PortStartingNumber + (AllocatePortsNum * 2) + 1
-	ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startNumber+1)
-	require.NoError(t, err)
-
 	// Create a new Teleport instance with passed in configuration.
 	p.rootCluster = NewInstance(InstanceConfig{
 		ClusterName: "example.com",
diff --git a/integration/db_integration_test.go b/integration/db_integration_test.go
index 7de463dc0f261..b8ea4e9d65515 100644
--- a/integration/db_integration_test.go
+++ b/integration/db_integration_test.go
@@ -202,11 +202,6 @@ func setupDatabaseTest(t *testing.T) *databasePack {
 	SetTestTimeouts(100 * time.Millisecond)
 	log := testlog.FailureOnly(t)

-	// Create ports allocator.
-	startPort := utils.PortStartingNumber + (3 * AllocatePortsNum) + 1
-	ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startPort)
-	require.NoError(t, err)
-
 	// Generate keypair.
 	privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
 	require.NoError(t, err)
diff --git a/integration/integration_test.go b/integration/integration_test.go
index 1712617798909..73ec28ab89409 100644
--- a/integration/integration_test.go
+++ b/integration/integration_test.go
@@ -73,13 +73,22 @@ import (
 const (
 	HostID = "00000000-0000-0000-0000-000000000000"
 	Site   = "local-site"
-
-	AllocatePortsNum = 600
 )

+// ports contains tcp ports allocated for all integration tests.
+var ports utils.PortList
+
+func init() {
+	// Allocate tcp ports for all integration tests. 5000 should be plenty.
+	var err error
+	ports, err = utils.GetFreeTCPPorts(5000, utils.PortStartingNumber)
+	if err != nil {
+		panic(fmt.Sprintf("failed to allocate tcp ports for tests: %v", err))
+	}
+}
+
 type IntSuite struct {
-	ports utils.PortList
-	me    *user.User
+	me *user.User
 	// priv/pub pair to avoid re-generating it
 	priv []byte
 	pub  []byte
@@ -118,10 +127,6 @@ func (s *IntSuite) SetUpSuite(c *check.C) {
 	c.Assert(err, check.IsNil)

 	// Find AllocatePortsNum free listening ports to use.
-	s.ports, err = utils.GetFreeTCPPorts(AllocatePortsNum)
-	if err != nil {
-		c.Fatal(err)
-	}
 	s.me, _ = user.Current()

 	// close & re-open stdin because 'go test' runs with os.stdin connected to /dev/null
@@ -324,7 +329,7 @@ func (s *IntSuite) TestAuditOn(c *check.C) {
 	defer t.StopAll()

 	// Start a node.
-	nodeSSHPort := s.getPorts(1)[0]
+	nodeSSHPort := ports.PopInt()
 	nodeConfig := func() *service.Config {
 		tconf := s.defaultServiceConfig()

@@ -692,7 +697,7 @@ func (s *IntSuite) TestUUIDBasedProxy(c *check.C) {
 	// addNode adds a node to the teleport instance, returning its uuid.
 	// All nodes added this way have the same hostname.
 	addNode := func() (string, error) {
-		nodeSSHPort := s.getPorts(1)[0]
+		nodeSSHPort := ports.PopInt()
 		tconf := s.defaultServiceConfig()
 		tconf.Hostname = Host

@@ -1603,7 +1608,7 @@ func (s *IntSuite) TestHA(c *check.C) {
 	c.Assert(b.Start(), check.IsNil)
 	c.Assert(a.Start(), check.IsNil)

-	nodePorts := s.getPorts(3)
+	nodePorts := ports.PopIntSlice(3)
 	sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 	c.Assert(a.StartNodeAndProxy("cluster-a-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

@@ -1747,7 +1752,7 @@ func (s *IntSuite) TestMapRoles(c *check.C) {
 	// try and upsert a trusted cluster
 	tryCreateTrustedCluster(c, aux.Process.GetAuthServer(), trustedCluster)

-	nodePorts := s.getPorts(3)
+	nodePorts := ports.PopIntSlice(3)
 	sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 	c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

@@ -1978,7 +1983,7 @@ func (s *IntSuite) trustedClusters(c *check.C, test trustedClusterTest) {
 		ClusterName:    clusterMain,
 		HostID:         HostID,
 		NodeName:       Host,
-		Ports:          s.getPorts(6),
+		Ports:          ports.PopIntSlice(6),
 		Priv:           s.priv,
 		Pub:            s.pub,
 		MultiplexProxy: test.multiplex,
@@ -2082,7 +2087,7 @@ func (s *IntSuite) trustedClusters(c *check.C, test trustedClusterTest) {
 	// try and upsert a trusted cluster
 	tryCreateTrustedCluster(c, aux.Process.GetAuthServer(), trustedCluster)

-	nodePorts := s.getPorts(3)
+	nodePorts := ports.PopIntSlice(3)
 	sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 	c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)

@@ -2381,7 +2386,7 @@ func (s *IntSuite) TestDiscoveryRecovers(c *check.C) {
 	username := s.me.Username

 	// create load balancer for main cluster proxies
-	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
+	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
 	lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
 	c.Assert(err, check.IsNil)
 	c.Assert(lb.Listen(), check.IsNil)
@@ -2417,7 +2422,7 @@ func (s *IntSuite) TestDiscoveryRecovers(c *check.C) {
 	// Helper function for adding a new proxy to "main".
 	addNewMainProxy := func(name string) (reversetunnel.Server, ProxyConfig) {
 		c.Logf("adding main proxy %q...", name)
-		nodePorts := s.getPorts(3)
+		nodePorts := ports.PopIntSlice(3)
 		proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 		newConfig := ProxyConfig{
 			Name: name,
@@ -2520,7 +2525,7 @@ func (s *IntSuite) TestDiscovery(c *check.C) {
 	username := s.me.Username

 	// create load balancer for main cluster proxies
-	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
+	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
 	lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
 	c.Assert(err, check.IsNil)
 	c.Assert(lb.Listen(), check.IsNil)
@@ -2554,7 +2559,7 @@ func (s *IntSuite) TestDiscovery(c *check.C) {
 	}

 	// start second proxy
-	nodePorts := s.getPorts(3)
+	nodePorts := ports.PopIntSlice(3)
 	proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 	proxyConfig := ProxyConfig{
 		Name: "cluster-main-proxy",
@@ -2651,7 +2656,7 @@ func (s *IntSuite) TestDiscoveryNode(c *check.C) {
 	defer lib.SetInsecureDevMode(false)

 	// Create and start load balancer for proxies.
-	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
+	frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(ports.PopInt())))
 	lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
 	c.Assert(err, check.IsNil)
 	err = lb.Listen()
@@ -2683,7 +2688,7 @@ func (s *IntSuite) TestDiscoveryNode(c *check.C) {
 	defer main.StopAll()

 	// Create a Teleport instance with a Proxy.
-	nodePorts := s.getPorts(3)
+	nodePorts := ports.PopIntSlice(3)
 	proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
 	proxyConfig := ProxyConfig{
 		Name: "cluster-main-proxy",
@@ -3109,7 +3114,7 @@ func (s *IntSuite) TestProxyHostKeyCheck(c *check.C) {
 	c.Assert(err, check.IsNil)

 	// start a ssh server that presents a host key instead of a certificate
-	nodePort := s.getPorts(1)[0]
+	nodePort := ports.PopInt()
 	sshNode, err := newDiscardServer(Host, nodePort, hostSigner)
 	c.Assert(err, check.IsNil)
 	err = sshNode.Start()
@@ -4329,7 +4334,7 @@ func (s *IntSuite) TestList(c *check.C) {
 	defer t.StopAll()

 	// Create and start a Teleport node.
-	nodeSSHPort := s.getPorts(1)[0]
+	nodeSSHPort := ports.PopInt()
 	nodeConfig := func() *service.Config {
 		tconf := s.defaultServiceConfig()
 		tconf.Hostname = "server-02"
@@ -5097,19 +5102,6 @@ func runCommand(instance *TeleInstance, cmd []string, cfg ClientConfig, attempts
 	return output.String(), nil
 }

-// getPorts helper returns a range of unallocated ports available for listening on
-func (s *IntSuite) getPorts(num int) []int {
-	if len(s.ports) < num {
-		panic("do not have enough ports! increase AllocatePortsNum constant")
-	}
-	ports := make([]int, num)
-	for i := range ports {
-		p, _ := strconv.Atoi(s.ports.Pop())
-		ports[i] = p
-	}
-	return ports
-}
-
 func (s *IntSuite) newTeleportInstance(c *check.C) *TeleInstance {
 	return NewInstance(s.defaultInstanceConfig())
 }
@@ -5119,7 +5111,7 @@ func (s *IntSuite) defaultInstanceConfig() InstanceConfig {
 		ClusterName: Site,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.getPorts(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -5131,7 +5123,7 @@ func (s *IntSuite) newNamedTeleportInstance(c *check.C, clusterName string) *Tel
 		ClusterName: clusterName,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.getPorts(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -5256,10 +5248,6 @@ func dumpGoroutineProfile() {

 // TestWebProxyInsecure makes sure that proxy endpoint works when TLS is disabled.
 func TestWebProxyInsecure(t *testing.T) {
-	startPort := utils.PortStartingNumber + (4 * AllocatePortsNum) + 1
-	ports, err := utils.GetFreeTCPPorts(AllocatePortsNum, startPort)
-	require.NoError(t, err)
-
 	privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
 	require.NoError(t, err)

@@ -5298,3 +5286,117 @@ func TestWebProxyInsecure(t *testing.T) {
 	require.Equal(t, http.StatusOK, resp.StatusCode)
 	require.NoError(t, resp.Body.Close())
 }
+
+// TestTraitsPropagation makes sure that user traits are applied properly to
+// roles in root and leaf clusters.
+func TestTraitsPropagation(t *testing.T) {
+	log := testlog.FailureOnly(t)
+
+	privateKey, publicKey, err := testauthority.New().GenerateKeyPair("")
+	require.NoError(t, err)
+
+	// Create root cluster.
+	rc := NewInstance(InstanceConfig{
+		ClusterName: "root.example.com",
+		HostID:      uuid.New(),
+		NodeName:    Host,
+		Ports:       ports.PopIntSlice(6),
+		Priv:        privateKey,
+		Pub:         publicKey,
+		log:         log,
+	})
+
+	// Create leaf cluster.
+	lc := NewInstance(InstanceConfig{
+		ClusterName: "leaf.example.com",
+		HostID:      uuid.New(),
+		NodeName:    Host,
+		Ports:       ports.PopIntSlice(6),
+		Priv:        privateKey,
+		Pub:         publicKey,
+		log:         log,
+	})
+
+	// Make root cluster config.
+	rcConf := service.MakeDefaultConfig()
+	rcConf.DataDir = t.TempDir()
+	rcConf.Auth.Enabled = true
+	rcConf.Auth.Preference.SetSecondFactor("off")
+	rcConf.Proxy.Enabled = true
+	rcConf.Proxy.DisableWebService = true
+	rcConf.Proxy.DisableWebInterface = true
+	rcConf.SSH.Enabled = true
+	rcConf.SSH.Addr.Addr = net.JoinHostPort(rc.Hostname, rc.GetPortSSH())
+	rcConf.SSH.Labels = map[string]string{"env": "integration"}
+
+	// Make leaf cluster config.
+	lcConf := service.MakeDefaultConfig()
+	lcConf.DataDir = t.TempDir()
+	lcConf.Auth.Enabled = true
+	lcConf.Auth.Preference.SetSecondFactor("off")
+	lcConf.Proxy.Enabled = true
+	lcConf.Proxy.DisableWebInterface = true
+	lcConf.SSH.Enabled = true
+	lcConf.SSH.Addr.Addr = net.JoinHostPort(lc.Hostname, lc.GetPortSSH())
+	lcConf.SSH.Labels = map[string]string{"env": "integration"}
+
+	// Create identical user/role in both clusters.
+	me, err := user.Current()
+	require.NoError(t, err)
+
+	role := services.NewAdminRole()
+	role.SetName("test")
+	role.SetLogins(services.Allow, []string{me.Username})
+	// Users created by CreateEx have "testing: integration" trait.
+	role.SetNodeLabels(services.Allow, map[string]utils.Strings{"env": []string{"{{external.testing}}"}})
+
+	rc.AddUserWithRole(me.Username, role)
+	lc.AddUserWithRole(me.Username, role)
+
+	// Establish trust b/w root and leaf.
+	err = rc.CreateEx(lc.Secrets.AsSlice(), rcConf)
+	require.NoError(t, err)
+	err = lc.CreateEx(rc.Secrets.AsSlice(), lcConf)
+	require.NoError(t, err)
+
+	// Start both clusters.
+	require.NoError(t, rc.Start())
+	t.Cleanup(func() {
+		rc.StopAll()
+	})
+	require.NoError(t, lc.Start())
+	t.Cleanup(func() {
+		lc.StopAll()
+	})
+
+	// Update root's certificate authority on leaf to configure role mapping.
+	ca, err := lc.Process.GetAuthServer().GetCertAuthority(services.CertAuthID{
+		Type:       services.UserCA,
+		DomainName: rc.Secrets.SiteName,
+	}, false)
+	require.NoError(t, err)
+	ca.SetRoles(nil) // Reset roles, otherwise they will take precedence.
+	ca.SetRoleMap(services.RoleMap{{Remote: role.GetName(), Local: []string{role.GetName()}}})
+	err = lc.Process.GetAuthServer().UpsertCertAuthority(ca)
+	require.NoError(t, err)
+
+	// Run command in root.
+	outputRoot, err := runCommand(rc, []string{"echo", "hello root"}, ClientConfig{
+		Login:   me.Username,
+		Cluster: "root.example.com",
+		Host:    Loopback,
+		Port:    rc.GetPortSSHInt(),
+	}, 1)
+	require.NoError(t, err)
+	require.Equal(t, "hello root", strings.TrimSpace(outputRoot))
+
+	// Run command in leaf.
+	outputLeaf, err := runCommand(rc, []string{"echo", "hello leaf"}, ClientConfig{
+		Login:   me.Username,
+		Cluster: "leaf.example.com",
+		Host:    Loopback,
+		Port:    lc.GetPortSSHInt(),
+	}, 1)
+	require.NoError(t, err)
+	require.Equal(t, "hello leaf", strings.TrimSpace(outputLeaf))
+}
diff --git a/integration/kube_integration_test.go b/integration/kube_integration_test.go
index a3ddf6f7f760e..5c4e4adfd7270 100644
--- a/integration/kube_integration_test.go
+++ b/integration/kube_integration_test.go
@@ -68,8 +68,7 @@ var _ = check.Suite(&KubeSuite{})

 type KubeSuite struct {
 	*kubernetes.Clientset
-	ports utils.PortList
-	me    *user.User
+	me *user.User
 	// priv/pub pair to avoid re-generating it
 	priv []byte
 	pub  []byte
@@ -104,10 +103,6 @@ func (s *KubeSuite) SetUpSuite(c *check.C) {
 	s.priv, s.pub, err = testauthority.New().GenerateKeyPair("")
 	c.Assert(err, check.IsNil)

-	s.ports, err = utils.GetFreeTCPPorts(AllocatePortsNum, utils.PortStartingNumber+AllocatePortsNum+1)
-	if err != nil {
-		c.Fatal(err)
-	}
 	s.me, err = user.Current()
 	c.Assert(err, check.IsNil)

@@ -177,7 +172,7 @@ func (s *KubeSuite) TestKubeExec(c *check.C) {
 		ClusterName: Site,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -351,7 +346,7 @@ func (s *KubeSuite) TestKubeDeny(c *check.C) {
 		ClusterName: Site,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -407,7 +402,7 @@ func (s *KubeSuite) TestKubePortForward(c *check.C) {
 		ClusterName: Site,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -440,7 +435,7 @@ func (s *KubeSuite) TestKubePortForward(c *check.C) {
 	c.Assert(err, check.IsNil)

 	// forward local port to target port 80 of the nginx container
-	localPort := s.ports.Pop()
+	localPort := ports.Pop()
 	forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
 		ports:   []string{fmt.Sprintf("%v:80", localPort)},
 		podName: testPod,
@@ -476,7 +471,7 @@ func (s *KubeSuite) TestKubePortForward(c *check.C) {
 	})
 	c.Assert(err, check.IsNil)

-	localPort = s.ports.Pop()
+	localPort = ports.Pop()
 	impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
 		ports:   []string{fmt.Sprintf("%v:80", localPort)},
 		podName: testPod,
@@ -506,7 +501,7 @@ func (s *KubeSuite) TestKubeTrustedClustersClientCert(c *check.C) {
 		ClusterName: clusterMain,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -530,7 +525,7 @@ func (s *KubeSuite) TestKubeTrustedClustersClientCert(c *check.C) {
 		ClusterName: clusterAux,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -709,7 +704,7 @@ loop:
 	c.Assert(err.Error(), check.Matches, ".*impersonation request has been denied.*")

 	// forward local port to target port 80 of the nginx container
-	localPort := s.ports.Pop()
+	localPort := ports.Pop()
 	forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
 		ports: []string{fmt.Sprintf("%v:80", localPort)},
@@ -737,7 +732,7 @@ loop:
 	c.Assert(resp.Body.Close(), check.IsNil)

 	// impersonating client requests will be denied
-	localPort = s.ports.Pop()
+	localPort = ports.Pop()
 	impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
 		ports:   []string{fmt.Sprintf("%v:80", localPort)},
 		podName: pod.Name,
@@ -767,7 +762,7 @@ func (s *KubeSuite) TestKubeTrustedClustersSNI(c *check.C) {
 		ClusterName: clusterMain,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -791,7 +786,7 @@ func (s *KubeSuite) TestKubeTrustedClustersSNI(c *check.C) {
 		ClusterName: clusterAux,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -972,7 +967,7 @@ loop:
 	c.Assert(err.Error(), check.Matches, ".*impersonation request has been denied.*")

 	// forward local port to target port 80 of the nginx container
-	localPort := s.ports.Pop()
+	localPort := ports.Pop()
 	forwarder, err := newPortForwarder(proxyClientConfig, kubePortForwardArgs{
 		ports: []string{fmt.Sprintf("%v:80", localPort)},
@@ -1000,7 +995,7 @@ loop:
 	c.Assert(resp.Body.Close(), check.IsNil)

 	// impersonating client requests will be denied
-	localPort = s.ports.Pop()
+	localPort = ports.Pop()
 	impersonatingForwarder, err := newPortForwarder(impersonatingProxyClientConfig, kubePortForwardArgs{
 		ports:   []string{fmt.Sprintf("%v:80", localPort)},
 		podName: pod.Name,
@@ -1050,7 +1045,7 @@ func (s *KubeSuite) runKubeDisconnectTest(c *check.C, tc disconnectTestCase) {
 		ClusterName: Site,
 		HostID:      HostID,
 		NodeName:    Host,
-		Ports:       s.ports.PopIntSlice(6),
+		Ports:       ports.PopIntSlice(6),
 		Priv:        s.priv,
 		Pub:         s.pub,
 		log:         s.log,
@@ -1142,7 +1137,7 @@ func (s *KubeSuite) teleKubeConfig(hostname string) *service.Config {

 	// set kubernetes specific parameters
 	tconf.Proxy.Kube.Enabled = true
-	tconf.Proxy.Kube.ListenAddr.Addr = net.JoinHostPort(hostname, s.ports.Pop())
+	tconf.Proxy.Kube.ListenAddr.Addr = net.JoinHostPort(hostname, ports.Pop())
 	tconf.Proxy.Kube.KubeconfigPath = s.kubeConfigPath

 	return tconf
diff --git a/lib/auth/permissions.go b/lib/auth/permissions.go
index 16a76fabb7933..77ea196f56942 100644
--- a/lib/auth/permissions.go
+++ b/lib/auth/permissions.go
@@ -139,11 +139,9 @@ func (a *authorizer) authorizeRemoteUser(u RemoteUser) (*Context, error) {
 	if len(roleNames) == 0 {
 		return nil, trace.AccessDenied("no roles mapped for remote user %q from cluster %q with remote roles %v", u.Username, u.ClusterName, u.RemoteRoles)
 	}
-	// Set "logins" trait and "kubernetes_groups" for the remote user. This allows Teleport to work by
-	// passing exact logins, kubernetes groups and users to the remote cluster. Note that claims (OIDC/SAML)
-	// are not passed, but rather the exact logins, this is done to prevent
-	// leaking too much of identity to the remote cluster, and instead of focus
-	// on main cluster's interpretation of this identity
+	// Set internal traits for the remote user. This allows Teleport to work by
+	// passing exact logins, Kubernetes users/groups and database users/names
+	// to the remote cluster.
 	traits := map[string][]string{
 		teleport.TraitLogins:     u.Principals,
 		teleport.TraitKubeGroups: u.KubernetesGroups,
@@ -151,6 +149,20 @@ func (a *authorizer) authorizeRemoteUser(u RemoteUser) (*Context, error) {
 		teleport.TraitDBNames:    u.DatabaseNames,
 		teleport.TraitDBUsers:    u.DatabaseUsers,
 	}
+	// Prior to Teleport 6.2 no user traits were passed to remote clusters
+	// except for the internal ones specified above.
+	//
+	// To preserve backwards compatible behavior, when applying traits from user
+	// identity, make sure to filter out those already present in the map above.
+	//
+	// This ensures that if e.g. there's a "logins" trait in the root user's
+	// identity, it won't overwrite the internal "logins" trait set above
+	// causing behavior change.
+	for k, v := range u.Identity.Traits {
+		if _, ok := traits[k]; !ok {
+			traits[k] = v
+		}
+	}
 	log.Debugf("Mapped roles %v of remote user %q to local roles %v and traits %v.",
 		u.RemoteRoles, u.Username, roleNames, traits)
 	checker, err := services.FetchRoles(roleNames, a.access, traits)
diff --git a/lib/services/role.go b/lib/services/role.go
index 88210d34317b6..0525c97235cd1 100644
--- a/lib/services/role.go
+++ b/lib/services/role.go
@@ -906,11 +906,11 @@ func ExtractFromCertificate(access UserGetter, cert *ssh.Certificate) ([]string,
 	}

 	// Standard certificates have the roles and traits embedded in them.
-	roles, err := extractRolesFromCert(cert)
+	roles, err := ExtractRolesFromCert(cert)
 	if err != nil {
 		return nil, nil, trace.Wrap(err)
 	}
-	traits, err := extractTraitsFromCert(cert)
+	traits, err := ExtractTraitsFromCert(cert)
 	if err != nil {
 		return nil, nil, trace.Wrap(err)
 	}
@@ -988,8 +988,8 @@ func missingIdentity(identity tlsca.Identity) bool {
 	return false
 }

-// extractRolesFromCert extracts roles from certificate metadata extensions.
-func extractRolesFromCert(cert *ssh.Certificate) ([]string, error) {
+// ExtractRolesFromCert extracts roles from certificate metadata extensions.
+func ExtractRolesFromCert(cert *ssh.Certificate) ([]string, error) {
 	data, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]
 	if !ok {
 		return nil, trace.NotFound("no roles found")
@@ -997,8 +997,8 @@ func extractRolesFromCert(cert *ssh.Certificate) ([]string, error) {
 	return UnmarshalCertRoles(data)
 }

-// extractTraitsFromCert extracts traits from the certificate extensions.
-func extractTraitsFromCert(cert *ssh.Certificate) (wrappers.Traits, error) {
+// ExtractTraitsFromCert extracts traits from the certificate extensions.
+func ExtractTraitsFromCert(cert *ssh.Certificate) (wrappers.Traits, error) {
 	rawTraits, ok := cert.Extensions[teleport.CertExtensionTeleportTraits]
 	if !ok {
 		return nil, trace.NotFound("no traits found")
diff --git a/lib/srv/authhandlers.go b/lib/srv/authhandlers.go
index db1979d8b559d..65a54eb422b45 100644
--- a/lib/srv/authhandlers.go
+++ b/lib/srv/authhandlers.go
@@ -388,25 +388,34 @@ func (h *AuthHandlers) fetchRoleSet(cert *ssh.Certificate, ca services.CertAutho
 			return nil, trace.Wrap(err)
 		}
 	} else {
-		roles, err := extractRolesFromCert(cert)
-		if err != nil {
+		// Old-style SSH certificates don't have roles in metadata.
+		roles, err := services.ExtractRolesFromCert(cert)
+		if err != nil && !trace.IsNotFound(err) {
 			return nil, trace.AccessDenied("failed to parse certificate roles")
 		}
 		roleNames, err := services.MapRoles(ca.CombinedMapping(), roles)
 		if err != nil {
 			return nil, trace.AccessDenied("failed to map roles")
 		}
-		// Pass the principals on the certificate along as the login traits
-		// to the remote cluster.
-		traits := map[string][]string{
-			teleport.TraitLogins: cert.ValidPrincipals,
+		// Old-style SSH certificates don't have traits in metadata.
+		traits, err := services.ExtractTraitsFromCert(cert)
+		if err != nil && !trace.IsNotFound(err) {
+			return nil, trace.AccessDenied("failed to parse certificate traits")
+		}
+		if traits == nil {
+			traits = make(map[string][]string)
 		}
+		// Prior to Teleport 6.2 the only trait passed to the remote cluster
+		// was the "logins" trait set to the SSH certificate principals.
+		//
+		// Keep backwards-compatible behavior and set it in addition to the
+		// traits extracted from the certificate.
+		traits[teleport.TraitLogins] = cert.ValidPrincipals

 		roleset, err = services.FetchRoles(roleNames, h.AccessPoint, traits)
 		if err != nil {
 			return nil, trace.Wrap(err)
 		}
 	}
-
 	return roleset, nil
 }
@@ -460,13 +469,3 @@ func (h *AuthHandlers) authorityForCert(caType services.CertAuthType, key ssh.Pu
 func (h *AuthHandlers) isProxy() bool {
 	return h.Component == teleport.ComponentProxy
 }
-
-// extractRolesFromCert extracts roles from certificate metadata extensions.
-func extractRolesFromCert(cert *ssh.Certificate) ([]string, error) {
-	data, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]
-	if !ok {
-		// it's ok to not have any roles in the metadata
-		return nil, nil
-	}
-	return services.UnmarshalCertRoles(data)
-}
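
The integration test changes above drop the per-suite port pools and the getPorts helper in favor of a single package-level allocator that init() fills once for the whole test binary. For illustration only, here is a minimal sketch, not part of the patch, of how a later test would draw listeners from that pool; the helper name exampleNodeConfig is made up, and the sketch assumes utils.PortList behaves the way the diff uses it (Pop returns the next port as a string, PopInt as an int, PopIntSlice(n) as a slice of n ints). The fragment would live alongside the other tests in the integration package.

// exampleNodeConfig is a hypothetical helper sketched for illustration: every
// listener takes its port from the shared pool allocated in init(), so tests
// running in the same process never race for the same port.
func exampleNodeConfig() *service.Config {
	tconf := service.MakeDefaultConfig()
	tconf.SSH.Enabled = true
	// ports.Pop returns the port as a string, ready for net.JoinHostPort.
	tconf.SSH.Addr.Addr = net.JoinHostPort(Host, ports.Pop())
	return tconf
}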
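The lib/auth/permissions.go and lib/srv/authhandlers.go hunks apply the same backwards-compatibility rule from two directions: traits arriving with the remote user's identity or SSH certificate may add new keys, but they never overwrite the internal traits Teleport builds itself (logins, Kubernetes groups/users, database names/users). The self-contained sketch below restates that rule with made-up trait values; note the diff mutates the existing traits map in place, while the sketch returns a fresh map.

package main

import "fmt"

// mergeTraits mirrors the rule added in authorizeRemoteUser: identity traits
// only fill in keys that the internal traits do not already define.
func mergeTraits(internal, identity map[string][]string) map[string][]string {
	out := make(map[string][]string, len(internal))
	for k, v := range internal {
		out[k] = v
	}
	for k, v := range identity {
		if _, ok := out[k]; !ok {
			out[k] = v
		}
	}
	return out
}

func main() {
	internal := map[string][]string{"logins": {"alice", "root"}}        // built from the certificate
	identity := map[string][]string{"logins": {"bob"}, "team": {"dev"}} // carried in the user's identity
	// "logins" keeps the internal value; "team" is newly propagated.
	fmt.Println(mergeTraits(internal, identity)) // map[logins:[alice root] team:[dev]]
}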