Install Kibana per tenant #3348

Open · wants to merge 20 commits into base: master (showing changes from 16 commits)
2 changes: 1 addition & 1 deletion api/v1/kibana_types.go
@@ -60,7 +60,7 @@ type KibanaPodSpec struct {
type KibanaContainer struct {
// Name is an enum which identifies the Kibana Deployment container by name.
// Supported values are: kibana
// +kubebuilder:validation:Enum=kibana
// +kubebuilder:validation:Enum=kibana;challenger
Member commented:
I think we should have validation that challenger is not allowed in non-multi-tenant clusters.
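A rough sketch of the kind of guard this comment asks for (hypothetical: the package, function name, and wiring point are illustrative and not part of this PR):

```go
package validation

import "fmt"

// KibanaContainer mirrors the shape of the operator's API type, reduced for
// this sketch.
type KibanaContainer struct {
	Name string `json:"name"`
}

// validateKibanaContainers rejects the "challenger" container name unless the
// cluster runs in multi-tenant mode, which is the validation suggested above.
func validateKibanaContainers(containers []KibanaContainer, multiTenant bool) error {
	for _, c := range containers {
		if c.Name == "challenger" && !multiTenant {
			return fmt.Errorf("kibana container %q is only supported in multi-tenant clusters", c.Name)
		}
	}
	return nil
}
```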

Name string `json:"name"`

// Resources allows customization of limits and requests for compute resources such as cpu and memory.
29 changes: 27 additions & 2 deletions api/v1/tenant_types.go
@@ -76,10 +76,14 @@ type TenantSpec struct {
	// Indices defines how to store a tenant's data
	Indices []Index `json:"indices"`

	// Elastic configures per-tenant ElasticSearch and Kibana parameters.
	// Elastic configures per-tenant ElasticSearch parameters.
	// This field is required for clusters using external ES.
	Elastic *TenantElasticSpec `json:"elastic,omitempty"`

	// Kibana configures per-tenant Kibana parameters.
	// The presence of this field enables or disables Kibana for the tenant.
	Kibana *TenantKibanaSpec `json:"kibana,omitempty"`

	// ControlPlaneReplicas defines how many replicas of the control plane core components will be deployed
	// in the Tenant's namespace. Defaults to the controlPlaneReplicas in Installation CR.
	// +optional
@@ -103,9 +107,14 @@ type Index struct {
	DataType DataType `json:"dataType"`
}

type TenantKibanaSpec struct {
	URL       string `json:"url,omitempty"`
	MutualTLS bool   `json:"mutualTLS,omitempty"`
	BaseURL   string `json:"baseURL,omitempty"`
}

type TenantElasticSpec struct {
	URL       string `json:"url"`
	KibanaURL string `json:"kibanaURL,omitempty"`
	MutualTLS bool   `json:"mutualTLS"`
}

@@ -127,6 +136,22 @@ func (t *Tenant) ElasticMTLS() bool {
	return t != nil && t.Spec.Elastic != nil && t.Spec.Elastic.MutualTLS
}

func (t *Tenant) KibanaMTLS() bool {
	return t != nil && t.Spec.Kibana != nil && t.Spec.Kibana.MutualTLS
}

func (t *Tenant) IsKibanaEnabled() bool {
	return t != nil && t.Spec.Kibana != nil
}

func (t *Tenant) KibanaBaseURL() string {
	if t != nil && t.Spec.Kibana != nil {
		return t.Spec.Kibana.BaseURL
	}

	return ""
}

// MultiTenant returns true if this management cluster is configured to support multiple tenants, and false otherwise.
func (t *Tenant) MultiTenant() bool {
// In order to support multiple tenants, the tenant CR must not be nil, and it must be assigned to a namespace.
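Note that the new accessors follow the same nil-safe receiver convention as ElasticMTLS above: each can be called on a nil *Tenant, or on a Tenant without a Kibana section, without panicking, so call sites like tenant.KibanaMTLS() in the controllers need no surrounding nil checks. A self-contained illustration of the pattern (types trimmed down for the sketch):

```go
package main

import "fmt"

// Trimmed-down copies of the API types, just for this demonstration.
type TenantKibanaSpec struct {
	MutualTLS bool
}

type TenantSpec struct {
	Kibana *TenantKibanaSpec
}

type Tenant struct {
	Spec TenantSpec
}

// KibanaMTLS is safe on a nil receiver: Go invokes the method with t == nil,
// and the short-circuiting && stops before any dereference.
func (t *Tenant) KibanaMTLS() bool {
	return t != nil && t.Spec.Kibana != nil && t.Spec.Kibana.MutualTLS
}

func main() {
	var missing *Tenant
	fmt.Println(missing.KibanaMTLS()) // false, no panic

	enabled := &Tenant{Spec: TenantSpec{Kibana: &TenantKibanaSpec{MutualTLS: true}}}
	fmt.Println(enabled.KibanaMTLS()) // true
}
```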
20 changes: 20 additions & 0 deletions api/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

31 changes: 17 additions & 14 deletions pkg/controller/logstorage/dashboards/dashboards_controller.go
@@ -70,7 +70,7 @@ type DashboardsSubController struct {
}

func Add(mgr manager.Manager, opts options.AddOptions) error {
if !opts.EnterpriseCRDExists || opts.MultiTenant {
if !opts.EnterpriseCRDExists {
return nil
}

@@ -113,7 +113,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
return fmt.Errorf("logstorage-dashboards-controller failed to watch logstorage Tigerastatus: %w", err)
}
if opts.MultiTenant {
if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil {
if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch Tenant resource: %w", err)
}
}
@@ -122,6 +122,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
// For single-tenant, everything is installed in the tigera-manager namespace.
// Make a helper for determining which namespaces to use based on tenancy mode.
helper := utils.NewNamespaceHelper(opts.MultiTenant, render.ElasticsearchNamespace, "")
kibanaHelper := utils.NewNamespaceHelper(opts.MultiTenant, kibana.Namespace, "")

// Watch secrets this controller cares about.
secretsToWatch := []string{
@@ -142,10 +143,10 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
}

// Catch if something modifies the resources that this controller consumes.
if err := utils.AddServiceWatch(c, kibana.ServiceName, helper.InstallNamespace()); err != nil {
if err := utils.AddServiceWatch(c, kibana.ServiceName, kibanaHelper.InstallNamespace()); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err)
}
if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), &handler.EnqueueRequestForObject{}); err != nil {
if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), eventHandler); err != nil {
return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err)
}

@@ -260,13 +261,19 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil

d.status.OnCRFound()

if !render.KibanaEnabled(tenant, install) {
reqLogger.Info("Kibana is not enabled. Will skip installing dashboards job")
return reconcile.Result{}, nil
}

// Determine where to access Kibana.
kibanaHost := "tigera-secure-kb-http.tigera-kibana.svc"
kibanaPort := uint16(5601)
kibanaScheme := "https"

var externalKibanaSecret *corev1.Secret
if !d.elasticExternal {
// This is the configuration for zero tenant or single tenant with internal elastic
// Wait for Elasticsearch to be installed and available.
elasticsearch, err := utils.GetElasticsearch(ctx, d.client)
if err != nil {
@@ -278,15 +285,11 @@
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
}
} else {
// If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint.
if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" {
reqLogger.Error(nil, "Kibana URL must be specified for this tenant")
d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL must be specified for this tenant", nil, reqLogger)
return reconcile.Result{}, nil
}

// Determine the host and port from the URL.
url, err := url.Parse(tenant.Spec.Elastic.KibanaURL)
// This is the configuration for multi-tenant and single tenant with external elastic
// The Tenant resource must specify the Kibana endpoint in both cases. For multi-tenant
// it should be the service inside the tenant namespace. For single tenant it should be the
// URL that points to external Kibana. Determine the host and port from the URL.
url, err := url.Parse(tenant.Spec.Kibana.URL)
if err != nil {
reqLogger.Error(err, "Kibana URL is invalid")
d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL is invalid", err, reqLogger)
@@ -301,7 +304,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil
return reconcile.Result{}, nil
}

if tenant.ElasticMTLS() {
if tenant.KibanaMTLS() {
// If mTLS is enabled, get the secret containing the CA and client certificate.
externalKibanaSecret = &corev1.Secret{}
err = d.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, externalKibanaSecret)
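For readers following the Reconcile changes above: the endpoint resolution amounts to "use the in-cluster defaults unless the tenant supplies a Kibana URL, in which case derive scheme, host, and port from it". A simplified, self-contained sketch of that logic (the real controller additionally sets degraded status and requeues on failure):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// kibanaEndpoint resolves scheme, host, and port for Kibana. With no tenant
// URL it falls back to the operator-managed in-cluster service; otherwise it
// derives all three fields from the URL. Simplified from the controller code.
func kibanaEndpoint(tenantURL string) (scheme, host string, port uint16, err error) {
	if tenantURL == "" {
		return "https", "tigera-secure-kb-http.tigera-kibana.svc", 5601, nil
	}
	u, err := url.Parse(tenantURL)
	if err != nil {
		return "", "", 0, fmt.Errorf("Kibana URL is invalid: %w", err)
	}
	p, err := strconv.ParseUint(u.Port(), 10, 16)
	if err != nil {
		return "", "", 0, fmt.Errorf("Kibana URL must carry a valid port: %w", err)
	}
	return u.Scheme, u.Hostname(), uint16(p), nil
}

func main() {
	// Multi-tenant style URL, pointing at a service in the tenant namespace.
	s, h, p, err := kibanaEndpoint("https://kibana.tenant-ns-a.svc:5601")
	fmt.Println(s, h, p, err) // https kibana.tenant-ns-a.svc 5601 <nil>
}
```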
@@ -18,13 +18,13 @@ import (
"context"
"fmt"

"github.com/stretchr/testify/mock"
"github.com/tigera/operator/pkg/render/logstorage/dashboards"
"k8s.io/apimachinery/pkg/types"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/stretchr/testify/mock"

esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1"
v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
operatorv1 "github.com/tigera/operator/api/v1"
@@ -111,6 +111,18 @@ var _ = Describe("LogStorage Dashboards controller", func() {
ctx = context.Background()
cli = ctrlrfake.DefaultFakeClientBuilder(scheme).Build()

mockStatus = &status.MockStatus{}
mockStatus.On("Run").Return()
mockStatus.On("AddDaemonsets", mock.Anything)
mockStatus.On("AddDeployments", mock.Anything)
mockStatus.On("AddStatefulSets", mock.Anything)
mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return()
mockStatus.On("AddCronJobs", mock.Anything)
mockStatus.On("OnCRFound").Return()
mockStatus.On("ReadyToMonitor")
mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything)
mockStatus.On("ClearDegraded")

// Create a basic Installation.
var replicas int32 = 2
install = &operatorv1.Installation{
@@ -150,18 +162,6 @@

Context("Zero tenant", func() {
BeforeEach(func() {
mockStatus = &status.MockStatus{}
mockStatus.On("Run").Return()
mockStatus.On("AddDaemonsets", mock.Anything)
mockStatus.On("AddDeployments", mock.Anything)
mockStatus.On("AddStatefulSets", mock.Anything)
mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return()
mockStatus.On("AddCronJobs", mock.Anything)
mockStatus.On("OnCRFound").Return()
mockStatus.On("ReadyToMonitor")
mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything)
mockStatus.On("ClearDegraded")

// Create a CA secret for the test, and create its KeyPair.
cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, common.OperatorNamespace(), certificatemanager.AllowCACreation())
Expect(err).ShouldNot(HaveOccurred())
@@ -267,4 +267,68 @@
Expect(dashboardInstaller.Image).To(Equal(fmt.Sprintf("some.registry.org/%s@%s", components.ComponentElasticTseeInstaller.Image, "sha256:dashboardhash")))
})
})

Context("Multi-tenant", func() {
var tenant *operatorv1.Tenant
var tenantNS string

BeforeEach(func() {
tenantNS = "tenant-ns-a"
Expect(cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tenantNS}})).ShouldNot(HaveOccurred())

tenant = &operatorv1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "default",
Namespace: tenantNS,
},
Spec: operatorv1.TenantSpec{
ID: "tenant-a",
Kibana: &operatorv1.TenantKibanaSpec{
URL: fmt.Sprintf("https://kibana.%s.svc:5601", tenantNS),
},
},
}
Expect(cli.Create(ctx, tenant)).ShouldNot(HaveOccurred())

// Create a CA secret for the test, and create its KeyPair.
opts := []certificatemanager.Option{
certificatemanager.AllowCACreation(),
certificatemanager.WithTenant(tenant),
}
cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, tenantNS, opts...)
Expect(err).ShouldNot(HaveOccurred())
Expect(cli.Create(ctx, cm.KeyPair().Secret(tenantNS))).ShouldNot(HaveOccurred())
bundle := cm.CreateTrustedBundle()
Expect(cli.Create(ctx, bundle.ConfigMap(tenantNS))).ShouldNot(HaveOccurred())

// Create the ES user secret. Generally this is created by either es-kube-controllers or the user controller in this operator.
userSecret := &corev1.Secret{}
userSecret.Name = dashboards.ElasticCredentialsSecret
userSecret.Namespace = tenantNS
userSecret.Data = map[string][]byte{"username": []byte("test-username"), "password": []byte("test-password")}
Expect(cli.Create(ctx, userSecret)).ShouldNot(HaveOccurred())

// Create the reconciler for the tests.
r, err = NewDashboardsControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true, true)
Expect(err).ShouldNot(HaveOccurred())
})

It("should reconcile resources", func() {
// Run the reconciler.
result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}})
Expect(err).ShouldNot(HaveOccurred())
Expect(result).Should(Equal(successResult))

// Check that K8s Job was created as expected. We don't need to check every resource in detail, since
// the render package has its own tests which cover this in more detail.
dashboardJob := batchv1.Job{
TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: dashboards.Name,
Namespace: tenantNS,
},
}
Expect(test.GetResource(cli, &dashboardJob)).To(BeNil())
})
})
})
14 changes: 8 additions & 6 deletions pkg/controller/logstorage/elastic/elastic_controller.go
@@ -22,6 +22,7 @@ import (
cmnv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1"
esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1"
kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1"
"github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s"
"github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil"
"github.com/go-logr/logr"
apps "k8s.io/api/apps/v1"
@@ -453,7 +454,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile.

var kibanaCR *kbv1.Kibana
if kibanaEnabled {
kibanaCR, err = r.getKibana(ctx)
kibanaCR, err = getKibana(ctx, r.client, kibana.Namespace)
if err != nil {
r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger)
return reconcile.Result{}, err
@@ -504,7 +505,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile.
var kbService *corev1.Service
if kibanaEnabled {
// For now, Kibana is only supported in single tenant configurations.
kbService, err = r.getKibanaService(ctx)
kbService, err = getKibanaService(ctx, r.client, kibana.Namespace)
if err != nil {
r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger)
return reconcile.Result{}, err
@@ -563,6 +564,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile.
UnusedTLSSecret: unusedTLSSecret,
UsePSP: r.usePSP,
Enabled: kibanaEnabled,
Namespace: kibana.Namespace,
}),
}

@@ -708,9 +710,9 @@ func (r *ElasticSubController) getElasticsearchService(ctx context.Context) (*co
return &svc, nil
}

func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, error) {
func getKibana(ctx context.Context, cli k8s.Client, namespace string) (*kbv1.Kibana, error) {
kb := kbv1.Kibana{}
err := r.client.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: kibana.Namespace}, &kb)
err := cli.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: namespace}, &kb)
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
Expand All @@ -720,9 +722,9 @@ func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, err
return &kb, nil
}

func (r *ElasticSubController) getKibanaService(ctx context.Context) (*corev1.Service, error) {
func getKibanaService(ctx context.Context, cli k8s.Client, namespace string) (*corev1.Service, error) {
svc := corev1.Service{}
err := r.client.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: kibana.Namespace}, &svc)
err := cli.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: namespace}, &svc)
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
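The refactor of getKibana and getKibanaService from methods on ElasticSubController into package-level functions taking a client and a namespace is what lets other callers, including multi-tenant code paths, look Kibana up in a namespace other than the fixed tigera-kibana one. A minimal sketch of the same dependency-injection idea (the getter interface and names below are stand-ins for illustration, not the real cloud-on-k8s or controller-runtime types):

```go
package main

import (
	"context"
	"fmt"
)

// getter abstracts the Kubernetes client for this sketch; the real code takes
// the cloud-on-k8s k8s.Client instead.
type getter interface {
	Get(ctx context.Context, name, namespace string) (string, error)
}

// fakeClient echoes the namespace it was asked for, to show the routing.
type fakeClient struct{}

func (fakeClient) Get(_ context.Context, name, namespace string) (string, error) {
	return namespace + "/" + name, nil
}

// getKibana is parameterized by namespace, so the same helper serves both the
// operator-managed namespace and a per-tenant namespace.
func getKibana(ctx context.Context, cli getter, namespace string) (string, error) {
	return cli.Get(ctx, "tigera-secure", namespace)
}

func main() {
	ctx := context.Background()
	cli := fakeClient{}
	managed, _ := getKibana(ctx, cli, "tigera-kibana") // single-tenant default
	tenant, _ := getKibana(ctx, cli, "tenant-ns-a")    // hypothetical tenant namespace
	fmt.Println(managed, tenant)
}
```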