From 5717073b082c14d125884f12db947d68f9cda543 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Mon, 6 May 2024 14:05:28 -0700 Subject: [PATCH 01/20] Install Kibana per tenant --- api/v1/kibana_types.go | 2 +- .../dashboards/dashboards_controller.go | 15 +- .../logstorage/elastic/elastic_controller.go | 14 +- .../elastic/external_elastic_controller.go | 328 +++++++++++++++++- .../linseed/linseed_controller_test.go | 1 + pkg/controller/manager/manager_controller.go | 12 +- pkg/render/logstorage/eck/eck.go | 7 +- pkg/render/logstorage/kibana/kibana.go | 165 ++++++--- pkg/render/logstorage/kibana/kibana_test.go | 1 + pkg/render/logstorage/linseed/linseed.go | 12 +- pkg/render/manager.go | 2 + 11 files changed, 474 insertions(+), 85 deletions(-) diff --git a/api/v1/kibana_types.go b/api/v1/kibana_types.go index 5c844d34b9..6809445f31 100644 --- a/api/v1/kibana_types.go +++ b/api/v1/kibana_types.go @@ -60,7 +60,7 @@ type KibanaPodSpec struct { type KibanaContainer struct { // Name is an enum which identifies the Kibana Deployment container by name. // Supported values are: kibana - // +kubebuilder:validation:Enum=kibana + // +kubebuilder:validation:Enum=kibana;challenger Name string `json:"name"` // Resources allows customization of limits and requests for compute resources such as cpu and memory. 
diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go index b6ab6f86e8..f02c2a7849 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go @@ -113,7 +113,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("logstorage-dashboards-controller failed to watch logstorage Tigerastatus: %w", err) } if opts.MultiTenant { - if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil { return fmt.Errorf("log-storage-dashboards-controller failed to watch Tenant resource: %w", err) } } @@ -122,6 +122,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { // For single-tenant, everything is installed in the tigera-manager namespace. // Make a helper for determining which namespaces to use based on tenancy mode. helper := utils.NewNamespaceHelper(opts.MultiTenant, render.ElasticsearchNamespace, "") + kibanaHelper := utils.NewNamespaceHelper(opts.MultiTenant, kibana.Namespace, "") // Watch secrets this controller cares about. secretsToWatch := []string{ @@ -142,10 +143,10 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } // Catch if something modifies the resources that this controller consumes. 
- if err := utils.AddServiceWatch(c, kibana.ServiceName, helper.InstallNamespace()); err != nil { + if err := utils.AddServiceWatch(c, kibana.ServiceName, kibanaHelper.InstallNamespace()); err != nil { return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err) } - if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), &handler.EnqueueRequestForObject{}); err != nil { + if err := utils.AddConfigMapWatch(c, certificatemanagement.TrustedCertConfigMapName, helper.InstallNamespace(), eventHandler); err != nil { return fmt.Errorf("log-storage-dashboards-controller failed to watch the Service resource: %w", err) } @@ -267,6 +268,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil var externalKibanaSecret *corev1.Secret if !d.elasticExternal { + // This is the configuration for zero tenant or single tenant with internal elastic // Wait for Elasticsearch to be installed and available. elasticsearch, err := utils.GetElasticsearch(ctx, d.client) if err != nil { @@ -277,7 +279,8 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", nil, reqLogger) return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil } - } else { + } else if !d.multiTenant { + // This is the configuration for single tenant with external elastic // If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint. 
if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" { reqLogger.Error(nil, "Kibana URL must be specified for this tenant") @@ -311,6 +314,10 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, err } } + } else { + // This is the configuration for multi-tenant + // We connect to a kibana service deployed in the tenant namespace + kibanaHost = fmt.Sprintf("tigera-secure-kb-http.%s.svc", helper.InstallNamespace()) } // Query the username and password this Dashboards Installer instance should use to authenticate with Elasticsearch. diff --git a/pkg/controller/logstorage/elastic/elastic_controller.go b/pkg/controller/logstorage/elastic/elastic_controller.go index ada5a2e0a0..341f609623 100644 --- a/pkg/controller/logstorage/elastic/elastic_controller.go +++ b/pkg/controller/logstorage/elastic/elastic_controller.go @@ -22,6 +22,7 @@ import ( cmnv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/k8s" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil" "github.com/go-logr/logr" apps "k8s.io/api/apps/v1" @@ -453,7 +454,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. var kibanaCR *kbv1.Kibana if kibanaEnabled { - kibanaCR, err = r.getKibana(ctx) + kibanaCR, err = getKibana(ctx, r.client, kibana.Namespace) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) return reconcile.Result{}, err @@ -504,7 +505,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. var kbService *corev1.Service if kibanaEnabled { // For now, Kibana is only supported in single tenant configurations. 
- kbService, err = r.getKibanaService(ctx) + kbService, err = getKibanaService(ctx, r.client, kibana.Namespace) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) return reconcile.Result{}, err @@ -563,6 +564,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. UnusedTLSSecret: unusedTLSSecret, UsePSP: r.usePSP, Enabled: kibanaEnabled, + Namespace: kibana.Namespace, }), } @@ -708,9 +710,9 @@ func (r *ElasticSubController) getElasticsearchService(ctx context.Context) (*co return &svc, nil } -func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, error) { +func getKibana(ctx context.Context, cli k8s.Client, namespace string) (*kbv1.Kibana, error) { kb := kbv1.Kibana{} - err := r.client.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: kibana.Namespace}, &kb) + err := cli.Get(ctx, client.ObjectKey{Name: kibana.CRName, Namespace: namespace}, &kb) if err != nil { if errors.IsNotFound(err) { return nil, nil @@ -720,9 +722,9 @@ func (r *ElasticSubController) getKibana(ctx context.Context) (*kbv1.Kibana, err return &kb, nil } -func (r *ElasticSubController) getKibanaService(ctx context.Context) (*corev1.Service, error) { +func getKibanaService(ctx context.Context, cli k8s.Client, namespace string) (*corev1.Service, error) { svc := corev1.Service{} - err := r.client.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: kibana.Namespace}, &svc) + err := cli.Get(ctx, client.ObjectKey{Name: kibana.ServiceName, Namespace: namespace}, &svc) if err != nil { if errors.IsNotFound(err) { return nil, nil diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index c0006afb0f..a6bcb2b2ba 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -17,6 +17,22 @@ 
package elastic import ( "context" "fmt" + "net/url" + + kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/render/common/networkpolicy" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/render/logstorage/eck" + "github.com/tigera/operator/pkg/render/logstorage/kibana" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "github.com/tigera/operator/pkg/controller/logstorage/initializer" @@ -43,12 +59,14 @@ import ( type ExternalESController struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver - client client.Client - scheme *runtime.Scheme - status status.StatusManager - provider operatorv1.Provider - clusterDomain string - usePSP bool + client client.Client + scheme *runtime.Scheme + status status.StatusManager + provider operatorv1.Provider + tierWatchReady *utils.ReadyFlag + clusterDomain string + usePSP bool + multiTenant bool } func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { @@ -67,17 +85,25 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { usePSP: opts.UsePSP, clusterDomain: opts.ClusterDomain, provider: opts.DetectedProvider, + multiTenant: opts.MultiTenant, } r.status.Run(opts.ShutdownContext) // Create a controller using the reconciler and register it with the manager to receive reconcile calls. 
- c, err := ctrlruntime.NewController("log-storage-external-es-controller", mgr, controller.Options{Reconciler: r}) + c, err := ctrlruntime.NewController("log-storage-external-es-controller", mgr, controller.Options{Reconciler: r}) if err != nil { return err } + // Determine how to handle watch events for cluster-scoped resources. For multi-tenant clusters, + // we should update all tenants whenever one changes. For single-tenant clusters, we can just queue the object. + var eventHandler handler.EventHandler = &handler.EnqueueRequestForObject{} + if opts.MultiTenant { + eventHandler = utils.EnqueueAllTenants(mgr.GetClient()) + } + // Configure watches for operator.tigera.io APIs this controller cares about. - if err = c.WatchObject(&operatorv1.LogStorage{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.LogStorage{}, eventHandler); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch LogStorage resource: %w", err) } if err = utils.AddInstallationWatch(c); err != nil { @@ -86,28 +112,108 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { if err = imageset.AddImageSetWatch(c); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch ImageSet: %w", err) } - if err = c.WatchObject(&operatorv1.ManagementCluster{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.ManagementCluster{}, eventHandler); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch ManagementCluster resource: %w", err) } - if err = c.WatchObject(&operatorv1.ManagementClusterConnection{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.ManagementClusterConnection{}, eventHandler); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch ManagementClusterConnection resource: %w", err) } if err = utils.AddTigeraStatusWatch(c, 
initializer.TigeraStatusLogStorageElastic); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch logstorage Tigerastatus: %w", err) } - if err = utils.AddConfigMapWatch(c, "cloud-kibana-config", common.OperatorNamespace(), &handler.EnqueueRequestForObject{}); err != nil { + if err = utils.AddConfigMapWatch(c, "cloud-kibana-config", common.OperatorNamespace(), eventHandler); err != nil { return fmt.Errorf("log-storage-external-es-controller failed to watch the ConfigMap resource: %w", err) } + + if opts.MultiTenant { + k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to establish a connection to k8s: %w", err) + } + + // Establish a watch for any tenant related changes + if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil { + return fmt.Errorf("log-storage-access-controller failed to watch Tenant resource: %w", err) + } + // Establish a watch on the tenant CA secret across all namespaces if multi-tenancy is enabled. + if err = utils.AddSecretsWatch(c, certificatemanagement.TenantCASecretName, ""); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch Secret resource: %w", err) + } + + // The namespace(s) we need to monitor depend upon what tenancy mode we're running in. + // For single-tenant, everything is installed in the tigera-manager namespace. + // Make a helper for determining which namespaces to use based on tenancy mode. + kibanaNamespaceHelper := utils.NewNamespaceHelper(opts.MultiTenant, kibana.Namespace, "") + + // Start goroutines to establish watches against projectcalico.org/v3 resources. 
+ go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, c, k8sClient, log, r.tierWatchReady) + go utils.WaitToAddNetworkPolicyWatches(c, k8sClient, log, []types.NamespacedName{ + {Name: kibana.PolicyName, Namespace: kibanaNamespaceHelper.InstallNamespace()}, + {Name: eck.OperatorPolicyName, Namespace: eck.OperatorNamespace}, + {Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: kibanaNamespaceHelper.InstallNamespace()}, + }) + + if err = c.WatchObject(&apps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Namespace: eck.OperatorNamespace, Name: eck.OperatorName}, + }, eventHandler); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch StatefulSet resource: %w", err) + } + + if err = utils.AddConfigMapWatch(c, eck.LicenseConfigMapName, eck.OperatorNamespace, eventHandler); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch ConfigMap resource: %w", err) + } + + if err = c.WatchObject(&kbv1.Kibana{ + ObjectMeta: metav1.ObjectMeta{Namespace: kibanaNamespaceHelper.InstallNamespace(), Name: kibana.CRName}, + }, eventHandler); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch Kibana resource: %w", err) + } + + for _, secretName := range []string{ + kibana.TigeraKibanaCertSecret, + } { + if err = utils.AddSecretsWatch(c, secretName, kibanaNamespaceHelper.TruthNamespace()); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch Secret resource: %w", err) + } + } + // TODO: ALINA - We need a user for kibana + } + + // Perform periodic reconciliation. This acts as a backstop to catch reconcile issues, + // and also makes sure we spot when things change that might not trigger a reconciliation. 
+ err = utils.AddPeriodicReconcile(c, utils.PeriodicReconcileTime, eventHandler) + if err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to create periodic reconcile watch: %w", err) + } + return nil } func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + kibanaHelper := utils.NewNamespaceHelper(r.multiTenant, kibana.Namespace, request.Namespace) reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) reqLogger.Info("Reconciling LogStorage") + // We skip requests without a namespace specified in multi-tenant setups. + if r.multiTenant && request.Namespace == "" { + return reconcile.Result{}, nil + } + + // When running in multi-tenant mode, we need to install Kibana in tenant Namespaces. However, the LogStorage + // resource is still cluster-scoped (since ES is a cluster-wide resource), so we need to look elsewhere to determine + // which tenant namespaces require a Kibana instance. We use the tenant API to determine the set of namespaces that should have Kibana. + tenant, _, err := utils.GetTenant(ctx, r.multiTenant, r.client, request.Namespace) + if errors.IsNotFound(err) { + reqLogger.Info("No Tenant in this Namespace, skip") + return reconcile.Result{}, nil + } else if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Tenant", err, reqLogger) + return reconcile.Result{}, err + } + ls := &operatorv1.LogStorage{} - err := r.client.Get(ctx, utils.DefaultTSEEInstanceKey, ls) + err = r.client.Get(ctx, utils.DefaultTSEEInstanceKey, ls) if err != nil { if !errors.IsNotFound(err) { return reconcile.Result{}, err @@ -117,7 +223,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
} r.status.OnCRFound() - _, install, err := utils.GetInstallation(context.Background(), r.client) + variant, install, err := utils.GetInstallation(context.Background(), r.client) if err != nil { if errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) @@ -132,22 +238,216 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } + managementCluster, err := utils.GetManagementCluster(ctx, r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) + return reconcile.Result{}, err + } + + managementClusterConnection, err := utils.GetManagementClusterConnection(ctx, r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementClusterConnection", err, reqLogger) + return reconcile.Result{}, err + } + if managementClusterConnection != nil { + // LogStorage is not supported on a managed cluster. + r.status.SetDegraded(operatorv1.ResourceNotReady, "LogStorage is not supported on a managed cluster", nil, reqLogger) + return reconcile.Result{}, nil + } + pullSecrets, err := utils.GetNetworkingPullSecrets(install, r.client) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurring while retrieving pull secrets", err, reqLogger) return reconcile.Result{}, err } + var multiTenantComponents []render.Component + if r.multiTenant { + // Ensure the allow-tigera tier exists, before rendering any network policies within it. 
+ if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created, see the 'tiers' TigeraStatus for more information", err, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } + + esLicenseType, err := utils.GetElasticLicenseType(ctx, r.client, reqLogger) + if err != nil { + // If LicenseConfigMapName is not found, it means ECK operator is not running yet, log the information and proceed + if errors.IsNotFound(err) { + reqLogger.Info("ConfigMap not found yet", "name", eck.LicenseConfigMapName) + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get elastic license", err, reqLogger) + return reconcile.Result{}, err + } + } + + // ECK will be deployed per management cluster and it will be configured + // to watch all namespaces in order to create a Kibana deployment + multiTenantComponents = append(multiTenantComponents, + eck.ECK(&eck.Configuration{ + LogStorage: ls, + Installation: install, + ManagementCluster: managementCluster, + PullSecrets: pullSecrets, + Provider: r.provider, + ElasticLicenseType: esLicenseType, + UsePSP: r.usePSP, + // TODO: Alina check if false is the correct value for multi-tenant + ApplyTrial: false, + Tenant: tenant, + }), + ) + + // TODO: Retrieve from tenant CR + var kibanaEnabled = true + if kibanaEnabled { + // Collect the certificates we need to provision Kibana. + // These will have been provisioned already by the ES secrets controller. 
+ opts := []certificatemanager.Option{ + certificatemanager.WithLogger(reqLogger), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(r.client, install, r.clusterDomain, kibanaHelper.TruthNamespace(), opts...) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) + return reconcile.Result{}, err + } + + // We want to retrieve Kibana certificate for all supported configurations + kbDNSNames := dns.GetServiceDNSNames(kibana.ServiceName, kibanaHelper.InstallNamespace(), r.clusterDomain) + kibanaKeyPair, err := cm.GetKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) + if err != nil { + log.Error(err, err.Error()) + r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to create Kibana secrets", err, reqLogger) + return reconcile.Result{}, err + } + + if kibanaKeyPair == nil { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Waiting for kibana key pair to be available", err, reqLogger) + return reconcile.Result{}, nil + } + + kbService, err := getKibanaService(ctx, r.client, kibanaHelper.InstallNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) + return reconcile.Result{}, err + } + kibanaCR, err := getKibana(ctx, r.client, kibanaHelper.InstallNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) + return reconcile.Result{}, err + } + + var unusedTLSSecret *corev1.Secret + if install.CertificateManagement != nil { + // Eck requires us to provide a TLS secret for Kibana. It will also inspect that it has a + // certificate and private key. However, when certificate management is enabled, we do not want to use a + // private key stored in a secret. 
For this reason, we mount a dummy that the actual Elasticsearch and Kibana + // pods are never using. + unusedTLSSecret, err = utils.GetSecret(ctx, r.client, relasticsearch.UnusedCertSecret, common.OperatorNamespace()) + if unusedTLSSecret == nil { + unusedTLSSecret, err = certificatemanagement.CreateSelfSignedSecret(relasticsearch.UnusedCertSecret, common.OperatorNamespace(), relasticsearch.UnusedCertSecret, []string{}) + unusedTLSSecret.Data[corev1.TLSCertKey] = install.CertificateManagement.CACert + } + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Failed to retrieve secret %s/%s", common.OperatorNamespace(), relasticsearch.UnusedCertSecret), err, reqLogger) + return reconcile.Result{}, nil + } + } + + // Query the trusted bundle from the namespace. + trustedBundle, err := cm.LoadTrustedBundle(ctx, r.client, tenant.Namespace) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger) + return reconcile.Result{}, err + } + + // If we're using an external ES, the Tenant resource must specify the ES endpoint. + if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.URL == "" { + reqLogger.Error(nil, "Elasticsearch URL must be specified for this tenant") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL must be specified for this tenant", nil, reqLogger) + return reconcile.Result{}, nil + } + + // Determine the host and port from the URL. + url, err := url.Parse(tenant.Spec.Elastic.URL) + if err != nil { + reqLogger.Error(err, "Elasticsearch URL is invalid") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL is invalid", err, reqLogger) + return reconcile.Result{}, nil + } + + var esClientSecret *corev1.Secret + if tenant.ElasticMTLS() { + // If mTLS is enabled, get the secret containing the CA and client certificate. 
+ esClientSecret = &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, esClientSecret) + if err != nil { + reqLogger.Error(err, "Failed to read external Elasticsearch client certificate secret") + r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch client certificate secret to be available", err, reqLogger) + return reconcile.Result{}, err + } + } + + // TODO: Alina - Copy user to tenant namespace + // TODO: Alina Retrieve it from tenant CR + baseURL := "tigera-kibana" + multiTenantComponents = append(multiTenantComponents, + kibana.Kibana(&kibana.Configuration{ + LogStorage: ls, + Installation: install, + Kibana: kibanaCR, + KibanaKeyPair: kibanaKeyPair, + PullSecrets: pullSecrets, + Provider: r.provider, + KbService: kbService, + ClusterDomain: r.clusterDomain, + BaseURL: baseURL, + TrustedBundle: trustedBundle, + UnusedTLSSecret: unusedTLSSecret, + UsePSP: r.usePSP, + Enabled: kibanaEnabled, + Tenant: tenant, + Namespace: kibanaHelper.InstallNamespace(), + ElasticClientSecret: esClientSecret, + ExternalElasticEndpoint: url.String(), + }), + ) + } + } + flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) clusterConfig := relasticsearch.NewClusterConfig(render.DefaultElasticsearchClusterName, ls.Replicas(), logstoragecommon.DefaultElasticsearchShards, flowShards) - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. 
+ var hdler utils.ComponentHandler + if r.multiTenant { + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + } else { + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + } + externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets) if err := hdler.CreateOrUpdateOrDelete(ctx, externalElasticsearch, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err } + for _, component := range multiTenantComponents { + if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + return reconcile.Result{}, err + } + if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } + } + r.status.ReadyToMonitor() r.status.ClearDegraded() return reconcile.Result{}, nil diff --git a/pkg/controller/logstorage/linseed/linseed_controller_test.go b/pkg/controller/logstorage/linseed/linseed_controller_test.go index 355b4c5cd2..b176d95560 100644 --- a/pkg/controller/logstorage/linseed/linseed_controller_test.go +++ b/pkg/controller/logstorage/linseed/linseed_controller_test.go @@ -226,6 +226,7 @@ var _ = Describe("LogStorage Linseed controller", func() { ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, Spec: operatorv1.ImageSetSpec{ Images: []operatorv1.Image{ + // TODO: Alina are all needed ? 
{Image: "tigera/elasticsearch", Digest: "sha256:elasticsearchhash"}, {Image: "tigera/kube-controllers", Digest: "sha256:kubecontrollershash"}, {Image: "tigera/kibana", Digest: "sha256:kibanahash"}, diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index c97ce0247e..33489137de 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -105,17 +105,17 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { }) // Watch for changes to primary resource Manager - err = c.WatchObject(&operatorv1.Manager{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.Manager{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch primary resource: %w", err) } - err = c.WatchObject(&operatorv1.TLSTerminatedRoute{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.TLSTerminatedRoute{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch TLSTerminatedRoutes: %w", err) } - err = c.WatchObject(&operatorv1.TLSPassThroughRoute{}, &handler.EnqueueRequestForObject{}) + err = c.WatchObject(&operatorv1.TLSPassThroughRoute{}, eventHandler) if err != nil { return fmt.Errorf("manager-controller failed to watch TLSPassThroughRoutes: %w", err) } @@ -146,7 +146,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("manager-controller failed to watch ImageSet: %w", err) } if opts.MultiTenant { - if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil { return fmt.Errorf("manager-controller failed to watch Tenant resource: %w", err) } } @@ -158,8 +158,8 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } for _, namespace := range namespacesToWatch { for _, secretName := range []string{ - // We need to watch for es-gateway 
certificate because es-proxy still creates a - // client to talk to elastic via es-gateway + // TODO: ALINA - Do we need to add back esgateway here ? + // TODO: ALINA - Do we need to add Kibana for multi-tenant ? render.ManagerTLSSecretName, relasticsearch.PublicCertSecret, render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, render.ManagerInternalTLSSecretName, monitor.PrometheusServerTLSSecretName, certificatemanagement.CASecretName, diff --git a/pkg/render/logstorage/eck/eck.go b/pkg/render/logstorage/eck/eck.go index 330d61954f..1609e5d836 100644 --- a/pkg/render/logstorage/eck/eck.go +++ b/pkg/render/logstorage/eck/eck.go @@ -62,6 +62,7 @@ type Configuration struct { Provider operatorv1.Provider ElasticLicenseType render.ElasticsearchLicenseType ApplyTrial bool + Tenant *operatorv1.Tenant // Whether the cluster supports pod security policies. UsePSP bool @@ -309,6 +310,10 @@ func (e *eck) operatorStatefulSet() *appsv1.StatefulSet { memoryRequest = c.ResourceRequirements.Requests[corev1.ResourceMemory] } } + var namespacesToWatch string + if e.cfg.Tenant.MultiTenant() { + namespacesToWatch = "tigera-elasticsearch,tigera-kibana" + } s := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ @@ -353,7 +358,7 @@ func (e *eck) operatorStatefulSet() *appsv1.StatefulSet { // Verbosity level of logs. 
-2=Error, -1=Warn, 0=Info, 0 and above=Debug Args: []string{ "manager", - "--namespaces=tigera-elasticsearch,tigera-kibana", + fmt.Sprintf("--namespaces=%s", namespacesToWatch), "--log-verbosity=0", "--metrics-port=0", "--container-registry=" + e.cfg.Installation.Registry, diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index 439dbccc95..4fcf59c965 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -93,16 +93,26 @@ type Configuration struct { TrustedBundle certificatemanagement.TrustedBundleRO UnusedTLSSecret *corev1.Secret Enabled bool + Tenant *operatorv1.Tenant + Namespace string + + // Secret containing client certificate and key for connecting to the Elastic cluster. If configured, + // mTLS is used between Challenger and the external Elastic cluster. + // TODO: Alina Mount volume + ElasticClientSecret *corev1.Secret + ElasticChallengerUser *corev1.Secret + ExternalElasticEndpoint string // Whether the cluster supports pod security policies. 
UsePSP bool } type kibana struct { - cfg *Configuration - kibanaSecrets []*corev1.Secret - kibanaImage string - csrImage string + cfg *Configuration + kibanaSecrets []*corev1.Secret + kibanaImage string + challengerImage string + csrImage string } func (k *kibana) ResolveImages(is *operatorv1.ImageSet) error { @@ -121,6 +131,13 @@ func (k *kibana) ResolveImages(is *operatorv1.ImageSet) error { errMsgs = append(errMsgs, err.Error()) } + if k.cfg.Tenant.MultiTenant() { + k.challengerImage, err = components.GetReference(components.ComponentESGateway, reg, path, prefix, is) + if err != nil { + errMsgs = append(errMsgs, err.Error()) + } + } + if k.cfg.Installation.CertificateManagement != nil { k.csrImage, err = certificatemanagement.ResolveCSRInitImage(k.cfg.Installation, is) if err != nil { @@ -168,20 +185,28 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { // - securityContext.capabilities.drop=["ALL"] // - securityContext.runAsNonRoot=true // - securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost" - toCreate = append(toCreate, render.CreateNamespace(Namespace, k.cfg.Installation.KubernetesProvider, render.PSSBaseline)) + // We only create the certain objects in a zero tenant or single tenant installation + // For example, tigera-kibana namespace, pull secrets and default deny + // For multi-tenancy, these are already created by other renderers + if !k.cfg.Tenant.MultiTenant() { + toCreate = append(toCreate, render.CreateNamespace(Namespace, k.cfg.Installation.KubernetesProvider, render.PSSBaseline)) + if len(k.cfg.PullSecrets) > 0 { + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(Namespace, k.cfg.PullSecrets...)...)...) 
+ } + toCreate = append(toCreate, networkpolicy.AllowTigeraDefaultDeny(Namespace)) + } toCreate = append(toCreate, k.allowTigeraPolicy()) - toCreate = append(toCreate, networkpolicy.AllowTigeraDefaultDeny(Namespace)) toCreate = append(toCreate, k.serviceAccount()) - - if len(k.cfg.PullSecrets) > 0 { - toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(Namespace, k.cfg.PullSecrets...)...)...) - } - if len(k.kibanaSecrets) > 0 { toCreate = append(toCreate, secret.ToRuntimeObjects(k.kibanaSecrets...)...) } - toCreate = append(toCreate, k.kibanaCR()) + // TODO: ALINA: I think we do the same in Linseed + if k.cfg.ElasticClientSecret != nil { + // If using External ES, we need to copy the client certificates into the tenant namespace to be mounted. + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticClientSecret)...)...) + } + } else { toDelete = append(toDelete, k.kibanaCR()) } @@ -194,7 +219,7 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { if k.cfg.KibanaKeyPair != nil && k.cfg.KibanaKeyPair.UseCertificateManagement() { // We need to render a secret. It won't ever be used by Kibana for TLS, but is needed to pass ECK's checks. // If the secret changes / gets reconciled, it will not trigger a re-render of Kibana. 
- unusedSecret := k.cfg.KibanaKeyPair.Secret(Namespace) + unusedSecret := k.cfg.KibanaKeyPair.Secret(k.cfg.Namespace) unusedSecret.Data = k.cfg.UnusedTLSSecret.Data toCreate = append(toCreate, unusedSecret) } @@ -213,7 +238,7 @@ func (k *kibana) serviceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: ObjectName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, } } @@ -230,7 +255,6 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { } config := map[string]interface{}{ - "elasticsearch.ssl.certificateAuthorities": []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"}, "server": server, "xpack.security.session.lifespan": "8h", "xpack.security.session.idleTimeout": "30m", @@ -243,6 +267,13 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { "telemetry.optIn": false, } + if k.cfg.Tenant.MultiTenant() { + config["elasticsearch.host"] = "http://localhost:8080" + config["elasticsearch.ssl.verificationMode"] = "none" + } else { + config["elasticsearch.ssl.certificateAuthorities"] = []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"} + } + var initContainers []corev1.Container var volumes []corev1.Volume var automountToken bool @@ -279,7 +310,8 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { VolumeSource: corev1.VolumeSource{ EmptyDir: &corev1.EmptyDirVolumeSource{}, }, - }) + }, + ) } count := int32(1) @@ -287,11 +319,68 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { count = *k.cfg.Installation.ControlPlaneReplicas } + containers := []corev1.Container{ + { + Name: "kibana", + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: fmt.Sprintf("/%s/login", BasePath), + Port: intstr.IntOrString{ + IntVal: Port, + }, + Scheme: corev1.URISchemeHTTPS, + }, + }, + }, + SecurityContext: securitycontext.NewNonRootContext(), + VolumeMounts: volumeMounts, + }, + } + + if k.cfg.Tenant.MultiTenant() { + volumes = append(volumes, k.cfg.TrustedBundle.Volume()) + 
containers = append(containers, corev1.Container{ + Name: "challenger", + Env: []corev1.EnvVar{ + { + Name: "ES_GATEWAY_LOG_LEVEL", + Value: "INFO", + }, + { + Name: "ES_GATEWAY_KIBANA_CATCH_ALL_ROUTE", + Value: "/", + }, + { + Name: "ES_GATEWAY_ELASTIC_ENDPOINT", + Value: k.cfg.ExternalElasticEndpoint, + }, + {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "elastic"}, + {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + // TODO: Alina change user + Name: render.ElasticsearchAdminUserSecret, + }, + Key: "elastic", + }, + }}, + {Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", Value: k.cfg.TrustedBundle.MountPath()}, + }, + Command: []string{ + "/usr/bin/es-gateway", "-run-as-challenger", + }, + Image: k.challengerImage, + SecurityContext: securitycontext.NewNonRootContext(), + VolumeMounts: k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType()), + }) + } + kibana := &kbv1.Kibana{ TypeMeta: metav1.TypeMeta{Kind: "Kibana", APIVersion: "kibana.k8s.elastic.co/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: CRName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, Labels: map[string]string{ "k8s-app": CRName, }, @@ -310,13 +399,9 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { }, }, }, - ElasticsearchRef: cmnv1.ObjectSelector{ - Name: render.ElasticsearchName, - Namespace: render.ElasticsearchNamespace, - }, PodTemplate: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Namespace: Namespace, + Namespace: k.cfg.Namespace, Annotations: map[string]string{ TLSAnnotationHash: rmeta.SecretsAnnotationHash(k.kibanaSecrets...), }, @@ -332,28 +417,20 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Tolerations: k.cfg.Installation.ControlPlaneTolerations, InitContainers: initContainers, AutomountServiceAccountToken: &automountToken, - Containers: []corev1.Container{{ - Name: "kibana", - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: 
&corev1.HTTPGetAction{ - Path: fmt.Sprintf("/%s/login", BasePath), - Port: intstr.IntOrString{ - IntVal: Port, - }, - Scheme: corev1.URISchemeHTTPS, - }, - }, - }, - SecurityContext: securitycontext.NewNonRootContext(), - VolumeMounts: volumeMounts, - }}, - Volumes: volumes, + Containers: containers, + Volumes: volumes, }, }, }, } + if !k.cfg.Tenant.MultiTenant() { + kibana.Spec.ElasticsearchRef = cmnv1.ObjectSelector{ + Name: render.ElasticsearchName, + Namespace: render.ElasticsearchNamespace, + } + } + if k.cfg.Installation.ControlPlaneReplicas != nil && *k.cfg.Installation.ControlPlaneReplicas > 1 { kibana.Spec.PodTemplate.Spec.Affinity = podaffinity.NewPodAntiAffinity(CRName, Namespace) } @@ -398,7 +475,7 @@ func (k *kibana) clusterRoleBinding() *rbacv1.ClusterRoleBinding { { Kind: "ServiceAccount", Name: ObjectName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, }, } @@ -410,6 +487,7 @@ func (k *kibana) kibanaPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { // Allow access to Kibana func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { + networkPolicyHelper := networkpolicy.Helper(k.cfg.Tenant.MultiTenant(), k.cfg.Namespace) egressRules := []v3.Rule{ { Action: v3.Allow, @@ -425,6 +503,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { Protocol: &networkpolicy.TCPProtocol, Destination: networkpolicy.KubeAPIServerServiceSelectorEntityRule, }, + // TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, @@ -439,7 +518,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: PolicyName, - Namespace: Namespace, + Namespace: k.cfg.Namespace, }, Spec: v3.NetworkPolicySpec{ Order: &networkpolicy.HighPrecedenceOrder, @@ -465,6 +544,7 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { }, Destination: kibanaPortIngressDestination, }, + // 
TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, @@ -474,9 +554,10 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, - Source: networkpolicy.DefaultHelper().DashboardInstallerSourceEntityRule(), + Source: networkPolicyHelper.DashboardInstallerSourceEntityRule(), Destination: kibanaPortIngressDestination, }, + // TODO: ALINA - DO WE NEED TO ADD MANAGER? { Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, diff --git a/pkg/render/logstorage/kibana/kibana_test.go b/pkg/render/logstorage/kibana/kibana_test.go index ebf20789bb..7aef738c8d 100644 --- a/pkg/render/logstorage/kibana/kibana_test.go +++ b/pkg/render/logstorage/kibana/kibana_test.go @@ -104,6 +104,7 @@ var _ = Describe("Kibana rendering tests", func() { TrustedBundle: bundle, UsePSP: true, Enabled: true, + Namespace: kibana.Namespace, } }) diff --git a/pkg/render/logstorage/linseed/linseed.go b/pkg/render/logstorage/linseed/linseed.go index c44c9329a5..62710d07a2 100644 --- a/pkg/render/logstorage/linseed/linseed.go +++ b/pkg/render/logstorage/linseed/linseed.go @@ -168,7 +168,7 @@ func (l *linseed) Objects() (toCreate, toDelete []client.Object) { toCreate = append(toCreate, l.linseedPodSecurityPolicy()) } if l.cfg.ElasticClientSecret != nil { - // If using External ES, we need to copy the client certificates into Linseed's naespace to be mounted. + // If using External ES, we need to copy the client certificates into Linseed's namespace to be mounted. toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(l.cfg.Namespace, l.cfg.ElasticClientSecret)...)...) } return toCreate, toDelete @@ -691,13 +691,3 @@ func (l *linseed) linseedAllowTigeraPolicy() *v3.NetworkPolicy { }, } } - -// LinseedNamespace determine the namespace in which Linseed is running. 
-// For management and standalone clusters, this is always the tigera-elasticsearch -// namespace. For multi-tenant management clusters, this is the tenant namespace -func LinseedNamespace(tenant *operatorv1.Tenant) string { - if tenant.MultiTenant() { - return tenant.Namespace - } - return "tigera-elasticsearch" -} diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 6ac4489114..770e16db12 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -429,6 +429,7 @@ func (c *managerComponent) managerProxyProbe() *corev1.Probe { func KibanaEnabled(tenant *operatorv1.Tenant, installation *operatorv1.InstallationSpec) bool { enableKibana := !operatorv1.IsFIPSModeEnabled(installation.FIPSMode) if tenant.MultiTenant() { + // TODO: Alina Extract from CR enableKibana = false } return enableKibana @@ -613,6 +614,7 @@ func (c *managerComponent) managerEsProxyContainer() corev1.Container { env := []corev1.EnvVar{ {Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)}, + // TODO: ALINA - For multi-tenancy this needs to be in the tenant namespace {Name: "ELASTIC_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, {Name: "FIPS_MODE_ENABLED", Value: operatorv1.IsFIPSModeEnabledString(c.cfg.Installation.FIPSMode)}, {Name: "LINSEED_CLIENT_CERT", Value: certPath}, From fb8defa95446a09eaa625e2343cb7f0feddaf052 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 8 May 2024 13:55:21 -0700 Subject: [PATCH 02/20] Make changes to Kibana CR and create kibana cert --- api/v1/tenant_types.go | 29 +++++++- .../dashboards/dashboards_controller.go | 29 ++++---- .../elastic/external_elastic_controller.go | 72 +++++++++---------- .../logstorage/secrets/secret_controller.go | 16 +++-- .../secrets/secret_controller_test.go | 3 - pkg/controller/manager/manager_controller.go | 3 +- .../logstorage/dashboards/dashboards_test.go | 62 +++------------- 
pkg/render/logstorage/kibana/kibana.go | 10 +-- pkg/render/manager.go | 3 +- 9 files changed, 105 insertions(+), 122 deletions(-) diff --git a/api/v1/tenant_types.go b/api/v1/tenant_types.go index 0d36aa0508..279d7e37a1 100644 --- a/api/v1/tenant_types.go +++ b/api/v1/tenant_types.go @@ -76,10 +76,14 @@ type TenantSpec struct { // Indices defines the how to store a tenant's data Indices []Index `json:"indices"` - // Elastic configures per-tenant ElasticSearch and Kibana parameters. + // Elastic configures per-tenant ElasticSearch parameters. // This field is required for clusters using external ES. Elastic *TenantElasticSpec `json:"elastic,omitempty"` + // Kibana configures per-tenant Kibana parameters + // This field will enable or disable Kibana + Kibana *TenantKibanaSpec `json:"kibana,omitempty"` + // ControlPlaneReplicas defines how many replicas of the control plane core components will be deployed // in the Tenant's namespace. Defaults to the controlPlaneReplicas in Installation CR // +optional @@ -103,9 +107,14 @@ type Index struct { DataType DataType `json:"dataType"` } +type TenantKibanaSpec struct { + URL string `json:"url,omitempty"` + MutualTLS bool `json:"mutualTLS"` + BaseURL string `json:"baseURL,omitempty"` +} + type TenantElasticSpec struct { URL string `json:"url"` - KibanaURL string `json:"kibanaURL,omitempty"` MutualTLS bool `json:"mutualTLS"` } @@ -127,6 +136,22 @@ func (t *Tenant) ElasticMTLS() bool { return t != nil && t.Spec.Elastic != nil && t.Spec.Elastic.MutualTLS } +func (t *Tenant) KibanaMTLS() bool { + return t != nil && t.Spec.Kibana != nil && t.Spec.Kibana.MutualTLS +} + +func (t *Tenant) IsKibanaEnabled() bool { + return t != nil && t.Spec.Kibana != nil +} + +func (t *Tenant) KibanaBaseURL() string { + if t != nil && t.Spec.Kibana != nil { + return t.Spec.Kibana.BaseURL + } + + return "" +} + // MultiTenant returns true if this management cluster is configured to support multiple tenants, and false otherwise. 
func (t *Tenant) MultiTenant() bool { // In order to support multiple tenants, the tenant CR must not be nil, and it must be assigned to a namespace. diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go index f02c2a7849..9b91ae97f5 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go @@ -70,7 +70,7 @@ type DashboardsSubController struct { } func Add(mgr manager.Manager, opts options.AddOptions) error { - if !opts.EnterpriseCRDExists || opts.MultiTenant { + if !opts.EnterpriseCRDExists { return nil } @@ -261,6 +261,11 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil d.status.OnCRFound() + if !render.KibanaEnabled(tenant, install) { + reqLogger.Info("Kibana is not enabled. Will skip installing dashboards job") + return reconcile.Result{}, nil + } + // Determine where to access Kibana. kibanaHost := "tigera-secure-kb-http.tigera-kibana.svc" kibanaPort := uint16(5601) @@ -279,17 +284,13 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil d.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Elasticsearch cluster to be operational", nil, reqLogger) return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil } - } else if !d.multiTenant { - // This is the configuration for single tenant with external elastic - // If we're using an external ES and Kibana, the Tenant resource must specify the Kibana endpoint. 
- if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.KibanaURL == "" { - reqLogger.Error(nil, "Kibana URL must be specified for this tenant") - d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL must be specified for this tenant", nil, reqLogger) - return reconcile.Result{}, nil - } - + } else { + // This is the configuration for multi-tenant and single tenant with external elastic + // The Tenant resource must specify the Kibana endpoint in both cases. For multi-tenant + // it should be the service inside the tenant namespace. For single tenant it should be the + // an URL that points to external Kibana // Determine the host and port from the URL. - url, err := url.Parse(tenant.Spec.Elastic.KibanaURL) + url, err := url.Parse(tenant.Spec.Kibana.URL) if err != nil { reqLogger.Error(err, "Kibana URL is invalid") d.status.SetDegraded(operatorv1.ResourceValidationError, "Kibana URL is invalid", err, reqLogger) @@ -304,7 +305,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, nil } - if tenant.ElasticMTLS() { + if tenant.KibanaMTLS() { // If mTLS is enabled, get the secret containing the CA and client certificate. externalKibanaSecret = &corev1.Secret{} err = d.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, externalKibanaSecret) @@ -314,10 +315,6 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, err } } - } else { - // This is the configuration for multi-tenant - // We connect to a kibana service deployed in the tenant namespace - kibanaHost = fmt.Sprintf("tigera-secure-kb-http.%s.svc", helper.InstallNamespace()) } // Query the username and password this Dashboards Installer instance should use to authenticate with Elasticsearch. 
diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index a6bcb2b2ba..36dd47c6bc 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -34,17 +34,17 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" - "github.com/tigera/operator/pkg/controller/logstorage/initializer" - operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/common" logstoragecommon "github.com/tigera/operator/pkg/controller/logstorage/common" + "github.com/tigera/operator/pkg/controller/logstorage/initializer" "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" "github.com/tigera/operator/pkg/controller/utils/imageset" "github.com/tigera/operator/pkg/ctrlruntime" "github.com/tigera/operator/pkg/render" + rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" "github.com/tigera/operator/pkg/render/logstorage/externalelasticsearch" "k8s.io/apimachinery/pkg/api/errors" @@ -296,14 +296,11 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. Provider: r.provider, ElasticLicenseType: esLicenseType, UsePSP: r.usePSP, - // TODO: Alina check if false is the correct value for multi-tenant - ApplyTrial: false, - Tenant: tenant, + Tenant: tenant, }), ) - // TODO: Retrieve from tenant CR - var kibanaEnabled = true + var kibanaEnabled = render.KibanaEnabled(tenant, install) if kibanaEnabled { // Collect the certificates we need to provision Kibana. // These will have been provisioned already by the ES secrets controller. @@ -319,18 +316,13 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
// We want to retrieve Kibana certificate for all supported configurations kbDNSNames := dns.GetServiceDNSNames(kibana.ServiceName, kibanaHelper.InstallNamespace(), r.clusterDomain) - kibanaKeyPair, err := cm.GetKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) + kibanaKeyPair, err := cm.GetOrCreateKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) if err != nil { log.Error(err, err.Error()) r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to create Kibana secrets", err, reqLogger) return reconcile.Result{}, err } - if kibanaKeyPair == nil { - r.status.SetDegraded(operatorv1.ResourceNotFound, "Waiting for kibana key pair to be available", err, reqLogger) - return reconcile.Result{}, nil - } - kbService, err := getKibanaService(ctx, r.client, kibanaHelper.InstallNamespace()) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) @@ -374,18 +366,18 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. } // Determine the host and port from the URL. - url, err := url.Parse(tenant.Spec.Elastic.URL) + elasticURL, err := url.Parse(tenant.Spec.Elastic.URL) if err != nil { reqLogger.Error(err, "Elasticsearch URL is invalid") r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL is invalid", err, reqLogger) return reconcile.Result{}, nil } - var esClientSecret *corev1.Secret + var challengerClientCertificate *corev1.Secret if tenant.ElasticMTLS() { // If mTLS is enabled, get the secret containing the CA and client certificate. 
- esClientSecret = &corev1.Secret{} - err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, esClientSecret) + challengerClientCertificate = &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) if err != nil { reqLogger.Error(err, "Failed to read external Elasticsearch client certificate secret") r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch client certificate secret to be available", err, reqLogger) @@ -394,27 +386,35 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. } // TODO: Alina - Copy user to tenant namespace - // TODO: Alina Retrieve it from tenant CR - baseURL := "tigera-kibana" + multiTenantComponents = append(multiTenantComponents, + rcertificatemanagement.CertificateManagement(&rcertificatemanagement.Config{ + Namespace: kibanaHelper.InstallNamespace(), + TruthNamespace: kibanaHelper.TruthNamespace(), + ServiceAccounts: []string{kibana.ObjectName}, + KeyPairOptions: []rcertificatemanagement.KeyPairOption{ + rcertificatemanagement.NewKeyPairOption(kibanaKeyPair, true, true), + }, + TrustedBundle: nil, + }), kibana.Kibana(&kibana.Configuration{ - LogStorage: ls, - Installation: install, - Kibana: kibanaCR, - KibanaKeyPair: kibanaKeyPair, - PullSecrets: pullSecrets, - Provider: r.provider, - KbService: kbService, - ClusterDomain: r.clusterDomain, - BaseURL: baseURL, - TrustedBundle: trustedBundle, - UnusedTLSSecret: unusedTLSSecret, - UsePSP: r.usePSP, - Enabled: kibanaEnabled, - Tenant: tenant, - Namespace: kibanaHelper.InstallNamespace(), - ElasticClientSecret: esClientSecret, - ExternalElasticEndpoint: url.String(), + LogStorage: ls, + Installation: install, + Kibana: kibanaCR, + KibanaKeyPair: kibanaKeyPair, + PullSecrets: pullSecrets, + Provider: r.provider, + KbService: kbService, + ClusterDomain: 
r.clusterDomain, + BaseURL: tenant.KibanaBaseURL(), + TrustedBundle: trustedBundle, + UnusedTLSSecret: unusedTLSSecret, + UsePSP: r.usePSP, + Enabled: kibanaEnabled, + Tenant: tenant, + Namespace: kibanaHelper.InstallNamespace(), + ChallengerClientCertificate: challengerClientCertificate, + ExternalElasticEndpoint: elasticURL.String(), }), ) } diff --git a/pkg/controller/logstorage/secrets/secret_controller.go b/pkg/controller/logstorage/secrets/secret_controller.go index 7b10177f23..a907a00645 100644 --- a/pkg/controller/logstorage/secrets/secret_controller.go +++ b/pkg/controller/logstorage/secrets/secret_controller.go @@ -111,7 +111,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("logstorage-controller failed to watch logstorage Tigerastatus: %w", err) } if opts.MultiTenant { - if err = c.WatchObject(&operatorv1.Tenant{}, &handler.EnqueueRequestForObject{}); err != nil { + if err = c.WatchObject(&operatorv1.Tenant{}, eventHandler); err != nil { return fmt.Errorf("log-storage-secrets-controller failed to watch Tenant resource: %w", err) } } @@ -262,7 +262,7 @@ func (r *SecretSubController) Reconcile(ctx context.Context, request reconcile.R hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) // Determine if Kibana should be enabled for this cluster. - kibanaEnabled := !operatorv1.IsFIPSModeEnabled(install.FIPSMode) && !r.multiTenant + kibanaEnabled := render.KibanaEnabled(tenant, install) // Internal ES modes: // - Zero-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a single trusted bundle in each. @@ -270,7 +270,7 @@ func (r *SecretSubController) Reconcile(ctx context.Context, request reconcile.R // // External ES modes: // - Single-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a single trusted bundle in each. - // - Multi-tenant: nothing installed in tigera-elasticsearch Namespace. 
The trusted bundle isn't created by this controller, but per-tenant keypairs are. + // - Multi-tenant: nothing installed in tigera-elasticsearch and tigera-kibana Namespace. The trusted bundle isn't created by this controller, but per-tenant keypairs are. if !r.elasticExternal { // This branch provisions the necessary KeyPairs for the internal ES cluster and Kibana, and installs a trusted bundle into tigera-kibana. // The trusted bundle for the tigera-elasticsearch namespace will be created further below as part of generateTigeraSecrets(), as it @@ -499,10 +499,16 @@ func (r *SecretSubController) collectUpstreamCerts(log logr.Logger, helper utils if r.elasticExternal { // For external ES, we don't need to generate a keypair for ES itself. Instead, a public certificate - // for the external ES and Kibana instances must be provided. Load and include in these into + // for the external ES instances must be provided. Load and include in these into // the trusted bundle for Linseed and es-gateway. certs[logstorage.ExternalESPublicCertName] = common.OperatorNamespace() - certs[logstorage.ExternalKBPublicCertName] = common.OperatorNamespace() + if r.multiTenant { + // A multi-tenant setup will have an external Elastic, but an internal Kibana + certs[kibana.TigeraKibanaCertSecret] = helper.TruthNamespace() + } else { + // Single tenant with external Elastic uses an external Kibana instance + certs[logstorage.ExternalKBPublicCertName] = common.OperatorNamespace() + } } else { // For internal ES, the operator creates a keypair for ES and Kibana itself earlier in the execution of this controller. // Include these in the trusted bundle as well, so that Linseed and es-gateway can trust them. 
diff --git a/pkg/controller/logstorage/secrets/secret_controller_test.go b/pkg/controller/logstorage/secrets/secret_controller_test.go index 1a31dd9c0d..7dfc9cd83a 100644 --- a/pkg/controller/logstorage/secrets/secret_controller_test.go +++ b/pkg/controller/logstorage/secrets/secret_controller_test.go @@ -576,8 +576,6 @@ var _ = Describe("LogStorage Secrets controller", func() { // Create the external ES and Kibana public certificates, used for external ES. externalESSecret := rtest.CreateCertSecret(logstorage.ExternalESPublicCertName, common.OperatorNamespace(), "external.es.com") Expect(cli.Create(ctx, externalESSecret)).ShouldNot(HaveOccurred()) - externalKibanaSecret := rtest.CreateCertSecret(logstorage.ExternalKBPublicCertName, common.OperatorNamespace(), "external.kb.com") - Expect(cli.Create(ctx, externalKibanaSecret)).ShouldNot(HaveOccurred()) // Create a per-tenant CA secret for the test, and create its KeyPair. cm, err := certificatemanager.Create(cli, @@ -609,7 +607,6 @@ var _ = Describe("LogStorage Secrets controller", func() { {Name: certificatemanagement.CASecretName, Namespace: common.OperatorNamespace()}, {Name: certificatemanagement.TenantCASecretName, Namespace: tenantNS}, {Name: logstorage.ExternalESPublicCertName, Namespace: common.OperatorNamespace()}, - {Name: logstorage.ExternalKBPublicCertName, Namespace: common.OperatorNamespace()}, // These are created by the controller. {Name: render.TigeraLinseedSecret, Namespace: tenantNS}, diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index 33489137de..2ce45d1f96 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -158,7 +158,8 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } for _, namespace := range namespacesToWatch { for _, secretName := range []string{ - // TODO: ALINA - Do we need to add back esgateway here ? 
+ // We need to watch for es-gateway certificate because es-proxy still creates a + // client to talk to kibana via es-gateway // TODO: ALINA - Do we need to add Kibana for multi-tenant ? render.ManagerTLSSecretName, relasticsearch.PublicCertSecret, render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, diff --git a/pkg/render/logstorage/dashboards/dashboards_test.go b/pkg/render/logstorage/dashboards/dashboards_test.go index cb5fb604c8..9dcd7e1383 100644 --- a/pkg/render/logstorage/dashboards/dashboards_test.go +++ b/pkg/render/logstorage/dashboards/dashboards_test.go @@ -226,9 +226,8 @@ var _ = Describe("Dashboards rendering tests", func() { }, Spec: operatorv1.TenantSpec{ ID: "test-tenant", - Elastic: &operatorv1.TenantElasticSpec{ - KibanaURL: "https://external-kibana:443", - MutualTLS: true, + Kibana: &operatorv1.TenantKibanaSpec{ + URL: "https://tigera-secure-kb-http.test-tenant-ns.svc:5601", }, }, } @@ -241,55 +240,10 @@ var _ = Describe("Dashboards rendering tests", func() { TrustedBundle: bundle, Namespace: "tenant-test-tenant", Tenant: tenant, - KibanaHost: "external-kibana", + KibanaHost: "tigera-secure-kb-http.test-tenant-ns.svc", KibanaScheme: "https", - KibanaPort: 443, - } - }) - - It("should support an external kibana endpoint", func() { - cfg.ExternalKibanaClientSecret = &corev1.Secret{ - TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: logstorage.ExternalCertsSecret, - Namespace: cfg.Namespace, - }, - Data: map[string][]byte{ - "client.crt": {1, 2, 3}, - "client.key": {4, 5, 6}, - }, + KibanaPort: 5601, } - component := Dashboards(cfg) - createResources, _ := component.Objects() - d, ok := rtest.GetResource(createResources, Name, cfg.Namespace, "batch", "v1", "Job").(*batchv1.Job) - Expect(ok).To(BeTrue(), "Job not found") - - // The deployment should have the hash annotation set, as well as a volume and volume mount for the client secret. 
- Expect(d.Spec.Template.Annotations["hash.operator.tigera.io/kibana-client-secret"]).To(Equal("ae1a6776a81bf1fc0ee4aac936a90bd61a07aea7")) - Expect(d.Spec.Template.Spec.Volumes).To(ContainElement(corev1.Volume{ - Name: logstorage.ExternalCertsVolumeName, - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: logstorage.ExternalCertsSecret, - }, - }, - })) - Expect(d.Spec.Template.Spec.Containers[0].VolumeMounts).To(ContainElement(corev1.VolumeMount{ - Name: logstorage.ExternalCertsVolumeName, - MountPath: "/certs/kibana/mtls", - ReadOnly: true, - })) - - // Should expect mTLS env vars set. - Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ - Name: "KIBANA_CLIENT_KEY", Value: "/certs/kibana/mtls/client.key", - })) - Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ - Name: "KIBANA_CLIENT_CERT", Value: "/certs/kibana/mtls/client.crt", - })) - Expect(d.Spec.Template.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{ - Name: "KIBANA_MTLS_ENABLED", Value: "true", - })) }) It("should render resources in the tenant namespace", func() { @@ -312,8 +266,8 @@ var _ = Describe("Dashboards rendering tests", func() { envs := job.Spec.Template.Spec.Containers[0].Env Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SPACE_ID", Value: cfg.Tenant.Spec.ID})) Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_SCHEME", Value: "https"})) - Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "external-kibana"})) - Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "443"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_HOST", Value: "tigera-secure-kb-http.test-tenant-ns.svc"})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "KIBANA_PORT", Value: "5601"})) }) It("should override resource request with the value from TenantSpec's dashboardsJob when available", func() { @@ -372,6 +326,10 @@ var _ = 
Describe("Dashboards rendering tests", func() { }, Spec: operatorv1.TenantSpec{ ID: "test-tenant", + Kibana: &operatorv1.TenantKibanaSpec{ + URL: "https://external-kibana:443", + MutualTLS: true, + }, }, } bundle := getBundle(installation) diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index 4fcf59c965..5f11f7c4ce 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -99,9 +99,9 @@ type Configuration struct { // Secret containing client certificate and key for connecting to the Elastic cluster. If configured, // mTLS is used between Challenger and the external Elastic cluster. // TODO: Alina Mount volume - ElasticClientSecret *corev1.Secret - ElasticChallengerUser *corev1.Secret - ExternalElasticEndpoint string + ChallengerClientCertificate *corev1.Secret + ElasticChallengerUser *corev1.Secret + ExternalElasticEndpoint string // Whether the cluster supports pod security policies. UsePSP bool @@ -202,9 +202,9 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { } toCreate = append(toCreate, k.kibanaCR()) // TODO: ALINA: I think we do the same in Linseed - if k.cfg.ElasticClientSecret != nil { + if k.cfg.ChallengerClientCertificate != nil { // If using External ES, we need to copy the client certificates into the tenant namespace to be mounted. - toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticClientSecret)...)...) + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ChallengerClientCertificate)...)...) 
} } else { diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 770e16db12..14f48d063c 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -429,8 +429,7 @@ func (c *managerComponent) managerProxyProbe() *corev1.Probe { func KibanaEnabled(tenant *operatorv1.Tenant, installation *operatorv1.InstallationSpec) bool { enableKibana := !operatorv1.IsFIPSModeEnabled(installation.FIPSMode) if tenant.MultiTenant() { - // TODO: Alina Extract from CR - enableKibana = false + return tenant.IsKibanaEnabled() } return enableKibana } From 93a18a061ef19f6866dd14f1b2b0841a387f0cd2 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 8 May 2024 14:31:13 -0700 Subject: [PATCH 03/20] Enable mTLS --- .../dashboards/dashboards_controller.go | 3 +- .../elastic/external_elastic_controller.go | 9 ++++- pkg/render/logstorage/kibana/kibana.go | 34 ++++++++++++++++--- 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go index 9b91ae97f5..5c33cb7c72 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go @@ -288,8 +288,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil // This is the configuration for multi-tenant and single tenant with external elastic // The Tenant resource must specify the Kibana endpoint in both cases. For multi-tenant // it should be the service inside the tenant namespace. For single tenant it should be the - // an URL that points to external Kibana - // Determine the host and port from the URL. + // URL that points to external Kibana Determine the host and port from the URL. 
url, err := url.Parse(tenant.Spec.Kibana.URL) if err != nil { reqLogger.Error(err, "Kibana URL is invalid") diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 36dd47c6bc..764996791f 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -385,7 +385,13 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. } } - // TODO: Alina - Copy user to tenant namespace + elasticChallengerUser := &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) + if err != nil { + reqLogger.Error(err, "Failed to read external user secret") + r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) + return reconcile.Result{}, err + } multiTenantComponents = append(multiTenantComponents, rcertificatemanagement.CertificateManagement(&rcertificatemanagement.Config{ @@ -415,6 +421,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
Namespace: kibanaHelper.InstallNamespace(), ChallengerClientCertificate: challengerClientCertificate, ExternalElasticEndpoint: elasticURL.String(), + ElasticChallengerUser: elasticChallengerUser, }), ) } diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index 5f11f7c4ce..62d7b6d7be 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -40,6 +40,7 @@ import ( "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/common/securitycontext" + "github.com/tigera/operator/pkg/render/logstorage" "github.com/tigera/operator/pkg/tls/certificatemanagement" ) @@ -98,7 +99,6 @@ type Configuration struct { // Secret containing client certificate and key for connecting to the Elastic cluster. If configured, // mTLS is used between Challenger and the external Elastic cluster. - // TODO: Alina Mount volume ChallengerClientCertificate *corev1.Secret ElasticChallengerUser *corev1.Secret ExternalElasticEndpoint string @@ -206,7 +206,10 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { // If using External ES, we need to copy the client certificates into the tenant namespace to be mounted. toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ChallengerClientCertificate)...)...) } - + if k.cfg.ElasticChallengerUser != nil { + // If using External ES, we need to copy the elastic user into the tenant namespace + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticChallengerUser)...)...) 
+ } } else { toDelete = append(toDelete, k.kibanaCR()) } @@ -340,6 +343,24 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { if k.cfg.Tenant.MultiTenant() { volumes = append(volumes, k.cfg.TrustedBundle.Volume()) + volumeMounts = k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType()) + if k.cfg.ChallengerClientCertificate != nil { + // Add a volume for the required client certificate and key. + volumes = append(volumes, corev1.Volume{ + Name: logstorage.ExternalCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: logstorage.ExternalCertsSecret, + }, + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: logstorage.ExternalCertsVolumeName, + MountPath: "/certs/elasticsearch", + ReadOnly: true, + }) + } + containers = append(containers, corev1.Container{ Name: "challenger", Env: []corev1.EnvVar{ @@ -355,17 +376,20 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Name: "ES_GATEWAY_ELASTIC_ENDPOINT", Value: k.cfg.ExternalElasticEndpoint, }, + { + Name: "ES_GATEWAY_ELASTIC_CA_PATH", + Value: k.cfg.TrustedBundle.MountPath(), + }, {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "elastic"}, {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ - // TODO: Alina change user Name: render.ElasticsearchAdminUserSecret, }, - Key: "elastic", + // TODO: ALINA - IS THIS THE correct user or do we need to create a new one ? 
+ Key: "tigera-mgmt", }, }}, - {Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", Value: k.cfg.TrustedBundle.MountPath()}, }, Command: []string{ "/usr/bin/es-gateway", "-run-as-challenger", From e9a493a97da84de7e4e800f8a0b037b0d95b8137 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 8 May 2024 15:23:53 -0700 Subject: [PATCH 04/20] Fix some stuff --- api/v1/kibana_types.go | 2 +- api/v1/tenant_types.go | 2 +- api/v1/zz_generated.deepcopy.go | 20 +++++++++++++ .../elastic/external_elastic_controller.go | 29 +++++++++---------- .../operator.tigera.io_logstorages.yaml | 1 + .../operator/operator.tigera.io_tenants.yaml | 18 ++++++++++-- 6 files changed, 51 insertions(+), 21 deletions(-) diff --git a/api/v1/kibana_types.go b/api/v1/kibana_types.go index 6809445f31..d03afff981 100644 --- a/api/v1/kibana_types.go +++ b/api/v1/kibana_types.go @@ -60,7 +60,7 @@ type KibanaPodSpec struct { type KibanaContainer struct { // Name is an enum which identifies the Kibana Deployment container by name. // Supported values are: kibana - // +kubebuilder:validation:Enum=kibana,challenger + // +kubebuilder:validation:Enum=kibana;challenger Name string `json:"name"` // Resources allows customization of limits and requests for compute resources such as cpu and memory. 
diff --git a/api/v1/tenant_types.go b/api/v1/tenant_types.go index 279d7e37a1..0b66b14b2a 100644 --- a/api/v1/tenant_types.go +++ b/api/v1/tenant_types.go @@ -109,7 +109,7 @@ type Index struct { type TenantKibanaSpec struct { URL string `json:"url,omitempty"` - MutualTLS bool `json:"mutualTLS"` + MutualTLS bool `json:"mutualTLS,omitempty"` BaseURL string `json:"baseURL,omitempty"` } diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 1f2f4a3eb9..093ad928dc 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -6797,6 +6797,21 @@ func (in *TenantElasticSpec) DeepCopy() *TenantElasticSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TenantKibanaSpec) DeepCopyInto(out *TenantKibanaSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantKibanaSpec. +func (in *TenantKibanaSpec) DeepCopy() *TenantKibanaSpec { + if in == nil { + return nil + } + out := new(TenantKibanaSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TenantList) DeepCopyInto(out *TenantList) { *out = *in @@ -6842,6 +6857,11 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) { *out = new(TenantElasticSpec) **out = **in } + if in.Kibana != nil { + in, out := &in.Kibana, &out.Kibana + *out = new(TenantKibanaSpec) + **out = **in + } if in.ControlPlaneReplicas != nil { in, out := &in.ControlPlaneReplicas, &out.ControlPlaneReplicas *out = new(int32) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 764996791f..4e7d027e81 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -430,28 +430,25 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) clusterConfig := relasticsearch.NewClusterConfig(render.DefaultElasticsearchClusterName, ls.Replicas(), logstoragecommon.DefaultElasticsearchShards, flowShards) - // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. 
- var hdler utils.ComponentHandler - if r.multiTenant { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) - } else { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) - } - + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets) if err := hdler.CreateOrUpdateOrDelete(ctx, externalElasticsearch, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err } - for _, component := range multiTenantComponents { - if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) - return reconcile.Result{}, err - } - if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) - return reconcile.Result{}, err + // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. 
+ if r.multiTenant { + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + for _, component := range multiTenantComponents { + if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + return reconcile.Result{}, err + } + if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } } } diff --git a/pkg/crds/operator/operator.tigera.io_logstorages.yaml b/pkg/crds/operator/operator.tigera.io_logstorages.yaml index 6750dd766a..d476022d8b 100644 --- a/pkg/crds/operator/operator.tigera.io_logstorages.yaml +++ b/pkg/crds/operator/operator.tigera.io_logstorages.yaml @@ -495,6 +495,7 @@ spec: Supported values are: kibana enum: - kibana + - challenger type: string resources: description: |- diff --git a/pkg/crds/operator/operator.tigera.io_tenants.yaml b/pkg/crds/operator/operator.tigera.io_tenants.yaml index 1cda632fb2..fdf9c0bcb5 100644 --- a/pkg/crds/operator/operator.tigera.io_tenants.yaml +++ b/pkg/crds/operator/operator.tigera.io_tenants.yaml @@ -137,11 +137,9 @@ spec: type: object elastic: description: |- - Elastic configures per-tenant ElasticSearch and Kibana parameters. + Elastic configures per-tenant ElasticSearch parameters. This field is required for clusters using external ES. properties: - kibanaURL: - type: string mutualTLS: type: boolean url: @@ -187,6 +185,20 @@ spec: - dataType type: object type: array + kibana: + description: |- + Kibana configures per-tenant Kibana parameters + This field will enable or disable Kibana + properties: + baseURL: + type: string + mutualTLS: + type: boolean + url: + type: string + required: + - mutualTLS + type: object linseedDeployment: description: LinseedDeployment configures the linseed Deployment. 
properties: From fb859a20cbdf7994e68e00189e16f49d8fd5509b Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 8 May 2024 15:43:35 -0700 Subject: [PATCH 05/20] ECK operator will be owned by LogStorage --- .../elastic/external_elastic_controller.go | 292 +++++++++--------- 1 file changed, 146 insertions(+), 146 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 4e7d027e81..23320ecf29 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -261,170 +261,153 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } - var multiTenantComponents []render.Component - if r.multiTenant { - // Ensure the allow-tigera tier exists, before rendering any network policies within it. - if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created, see the 'tiers' TigeraStatus for more information", err, reqLogger) - return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil - } else { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) - return reconcile.Result{}, err - } + // Ensure the allow-tigera tier exists, before rendering any network policies within it. 
+ if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created, see the 'tiers' TigeraStatus for more information", err, reqLogger) + return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err } + } - esLicenseType, err := utils.GetElasticLicenseType(ctx, r.client, reqLogger) - if err != nil { - // If LicenseConfigMapName is not found, it means ECK operator is not running yet, log the information and proceed - if errors.IsNotFound(err) { - reqLogger.Info("ConfigMap not found yet", "name", eck.LicenseConfigMapName) - } else { - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get elastic license", err, reqLogger) - return reconcile.Result{}, err - } + esLicenseType, err := utils.GetElasticLicenseType(ctx, r.client, reqLogger) + if err != nil { + // If LicenseConfigMapName is not found, it means ECK operator is not running yet, log the information and proceed + if errors.IsNotFound(err) { + reqLogger.Info("ConfigMap not found yet", "name", eck.LicenseConfigMapName) + } else { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get elastic license", err, reqLogger) + return reconcile.Result{}, err } + } - // ECK will be deployed per management cluster and it will be configured - // to watch all namespaces in order to create a Kibana deployment - multiTenantComponents = append(multiTenantComponents, - eck.ECK(&eck.Configuration{ - LogStorage: ls, - Installation: install, - ManagementCluster: managementCluster, - PullSecrets: pullSecrets, - Provider: r.provider, - ElasticLicenseType: esLicenseType, - UsePSP: r.usePSP, - Tenant: tenant, - }), - ) - - var kibanaEnabled = render.KibanaEnabled(tenant, install) - if 
kibanaEnabled { - // Collect the certificates we need to provision Kibana. - // These will have been provisioned already by the ES secrets controller. - opts := []certificatemanager.Option{ - certificatemanager.WithLogger(reqLogger), - certificatemanager.WithTenant(tenant), - } - cm, err := certificatemanager.Create(r.client, install, r.clusterDomain, kibanaHelper.TruthNamespace(), opts...) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) - return reconcile.Result{}, err - } + var kibanaComponents []render.Component + var kibanaEnabled = render.KibanaEnabled(tenant, install) + if r.multiTenant && kibanaEnabled { + // Collect the certificates we need to provision Kibana. + // These will have been provisioned already by the ES secrets controller. + opts := []certificatemanager.Option{ + certificatemanager.WithLogger(reqLogger), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(r.client, install, r.clusterDomain, kibanaHelper.TruthNamespace(), opts...) 
+ if err != nil { + r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) + return reconcile.Result{}, err + } - // We want to retrieve Kibana certificate for all supported configurations - kbDNSNames := dns.GetServiceDNSNames(kibana.ServiceName, kibanaHelper.InstallNamespace(), r.clusterDomain) - kibanaKeyPair, err := cm.GetOrCreateKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) - if err != nil { - log.Error(err, err.Error()) - r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to create Kibana secrets", err, reqLogger) - return reconcile.Result{}, err - } + // We want to retrieve Kibana certificate for all supported configurations + kbDNSNames := dns.GetServiceDNSNames(kibana.ServiceName, kibanaHelper.InstallNamespace(), r.clusterDomain) + kibanaKeyPair, err := cm.GetOrCreateKeyPair(r.client, kibana.TigeraKibanaCertSecret, kibanaHelper.TruthNamespace(), kbDNSNames) + if err != nil { + log.Error(err, err.Error()) + r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to create Kibana secrets", err, reqLogger) + return reconcile.Result{}, err + } - kbService, err := getKibanaService(ctx, r.client, kibanaHelper.InstallNamespace()) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) - return reconcile.Result{}, err - } - kibanaCR, err := getKibana(ctx, r.client, kibanaHelper.InstallNamespace()) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) - return reconcile.Result{}, err - } + kbService, err := getKibanaService(ctx, r.client, kibanaHelper.InstallNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to retrieve the Kibana service", err, reqLogger) + return reconcile.Result{}, err + } + kibanaCR, err := getKibana(ctx, r.client, kibanaHelper.InstallNamespace()) 
+ if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred trying to retrieve Kibana", err, reqLogger) + return reconcile.Result{}, err + } - var unusedTLSSecret *corev1.Secret - if install.CertificateManagement != nil { - // Eck requires us to provide a TLS secret for Kibana. It will also inspect that it has a - // certificate and private key. However, when certificate management is enabled, we do not want to use a - // private key stored in a secret. For this reason, we mount a dummy that the actual Elasticsearch and Kibana - // pods are never using. - unusedTLSSecret, err = utils.GetSecret(ctx, r.client, relasticsearch.UnusedCertSecret, common.OperatorNamespace()) - if unusedTLSSecret == nil { - unusedTLSSecret, err = certificatemanagement.CreateSelfSignedSecret(relasticsearch.UnusedCertSecret, common.OperatorNamespace(), relasticsearch.UnusedCertSecret, []string{}) - unusedTLSSecret.Data[corev1.TLSCertKey] = install.CertificateManagement.CACert - } - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Failed to retrieve secret %s/%s", common.OperatorNamespace(), relasticsearch.UnusedCertSecret), err, reqLogger) - return reconcile.Result{}, nil - } + var unusedTLSSecret *corev1.Secret + if install.CertificateManagement != nil { + // Eck requires us to provide a TLS secret for Kibana. It will also inspect that it has a + // certificate and private key. However, when certificate management is enabled, we do not want to use a + // private key stored in a secret. For this reason, we mount a dummy that the actual Elasticsearch and Kibana + // pods are never using. 
+ unusedTLSSecret, err = utils.GetSecret(ctx, r.client, relasticsearch.UnusedCertSecret, common.OperatorNamespace()) + if unusedTLSSecret == nil { + unusedTLSSecret, err = certificatemanagement.CreateSelfSignedSecret(relasticsearch.UnusedCertSecret, common.OperatorNamespace(), relasticsearch.UnusedCertSecret, []string{}) + unusedTLSSecret.Data[corev1.TLSCertKey] = install.CertificateManagement.CACert } - - // Query the trusted bundle from the namespace. - trustedBundle, err := cm.LoadTrustedBundle(ctx, r.client, tenant.Namespace) if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger) - return reconcile.Result{}, err - } - - // If we're using an external ES, the Tenant resource must specify the ES endpoint. - if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.URL == "" { - reqLogger.Error(nil, "Elasticsearch URL must be specified for this tenant") - r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL must be specified for this tenant", nil, reqLogger) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Failed to retrieve secret %s/%s", common.OperatorNamespace(), relasticsearch.UnusedCertSecret), err, reqLogger) return reconcile.Result{}, nil } + } - // Determine the host and port from the URL. - elasticURL, err := url.Parse(tenant.Spec.Elastic.URL) - if err != nil { - reqLogger.Error(err, "Elasticsearch URL is invalid") - r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL is invalid", err, reqLogger) - return reconcile.Result{}, nil - } + // Query the trusted bundle from the namespace. 
+ trustedBundle, err := cm.LoadTrustedBundle(ctx, r.client, tenant.Namespace) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error getting trusted bundle", err, reqLogger) + return reconcile.Result{}, err + } - var challengerClientCertificate *corev1.Secret - if tenant.ElasticMTLS() { - // If mTLS is enabled, get the secret containing the CA and client certificate. - challengerClientCertificate = &corev1.Secret{} - err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) - if err != nil { - reqLogger.Error(err, "Failed to read external Elasticsearch client certificate secret") - r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch client certificate secret to be available", err, reqLogger) - return reconcile.Result{}, err - } - } + // If we're using an external ES, the Tenant resource must specify the ES endpoint. + if tenant == nil || tenant.Spec.Elastic == nil || tenant.Spec.Elastic.URL == "" { + reqLogger.Error(nil, "Elasticsearch URL must be specified for this tenant") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL must be specified for this tenant", nil, reqLogger) + return reconcile.Result{}, nil + } - elasticChallengerUser := &corev1.Secret{} - err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) + // Determine the host and port from the URL. + elasticURL, err := url.Parse(tenant.Spec.Elastic.URL) + if err != nil { + reqLogger.Error(err, "Elasticsearch URL is invalid") + r.status.SetDegraded(operatorv1.ResourceValidationError, "Elasticsearch URL is invalid", err, reqLogger) + return reconcile.Result{}, nil + } + + var challengerClientCertificate *corev1.Secret + if tenant.ElasticMTLS() { + // If mTLS is enabled, get the secret containing the CA and client certificate. 
+ challengerClientCertificate = &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) if err != nil { - reqLogger.Error(err, "Failed to read external user secret") - r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) + reqLogger.Error(err, "Failed to read external Elasticsearch client certificate secret") + r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch client certificate secret to be available", err, reqLogger) return reconcile.Result{}, err } + } - multiTenantComponents = append(multiTenantComponents, - rcertificatemanagement.CertificateManagement(&rcertificatemanagement.Config{ - Namespace: kibanaHelper.InstallNamespace(), - TruthNamespace: kibanaHelper.TruthNamespace(), - ServiceAccounts: []string{kibana.ObjectName}, - KeyPairOptions: []rcertificatemanagement.KeyPairOption{ - rcertificatemanagement.NewKeyPairOption(kibanaKeyPair, true, true), - }, - TrustedBundle: nil, - }), - kibana.Kibana(&kibana.Configuration{ - LogStorage: ls, - Installation: install, - Kibana: kibanaCR, - KibanaKeyPair: kibanaKeyPair, - PullSecrets: pullSecrets, - Provider: r.provider, - KbService: kbService, - ClusterDomain: r.clusterDomain, - BaseURL: tenant.KibanaBaseURL(), - TrustedBundle: trustedBundle, - UnusedTLSSecret: unusedTLSSecret, - UsePSP: r.usePSP, - Enabled: kibanaEnabled, - Tenant: tenant, - Namespace: kibanaHelper.InstallNamespace(), - ChallengerClientCertificate: challengerClientCertificate, - ExternalElasticEndpoint: elasticURL.String(), - ElasticChallengerUser: elasticChallengerUser, - }), - ) + elasticChallengerUser := &corev1.Secret{} + err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) + if err != nil { + reqLogger.Error(err, "Failed to read 
external user secret") + r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) + return reconcile.Result{}, err } + + kibanaComponents = append(kibanaComponents, + rcertificatemanagement.CertificateManagement(&rcertificatemanagement.Config{ + Namespace: kibanaHelper.InstallNamespace(), + TruthNamespace: kibanaHelper.TruthNamespace(), + ServiceAccounts: []string{kibana.ObjectName}, + KeyPairOptions: []rcertificatemanagement.KeyPairOption{ + rcertificatemanagement.NewKeyPairOption(kibanaKeyPair, true, true), + }, + TrustedBundle: nil, + }), + kibana.Kibana(&kibana.Configuration{ + LogStorage: ls, + Installation: install, + Kibana: kibanaCR, + KibanaKeyPair: kibanaKeyPair, + PullSecrets: pullSecrets, + Provider: r.provider, + KbService: kbService, + ClusterDomain: r.clusterDomain, + BaseURL: tenant.KibanaBaseURL(), + TrustedBundle: trustedBundle, + UnusedTLSSecret: unusedTLSSecret, + UsePSP: r.usePSP, + Enabled: kibanaEnabled, + Tenant: tenant, + Namespace: kibanaHelper.InstallNamespace(), + ChallengerClientCertificate: challengerClientCertificate, + ExternalElasticEndpoint: elasticURL.String(), + ElasticChallengerUser: elasticChallengerUser, + }), + ) } flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) @@ -437,10 +420,27 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
return reconcile.Result{}, err } + // ECK will be deployed per management cluster and will be configured + // to watch all namespaces in order to create a Kibana deployment + eck := eck.ECK(&eck.Configuration{ + LogStorage: ls, + Installation: install, + ManagementCluster: managementCluster, + PullSecrets: pullSecrets, + Provider: r.provider, + ElasticLicenseType: esLicenseType, + UsePSP: r.usePSP, + Tenant: tenant, + }) + if err := hdler.CreateOrUpdateOrDelete(ctx, eck, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } + // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. if r.multiTenant { hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) - for _, component := range multiTenantComponents { + for _, component := range kibanaComponents { if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) return reconcile.Result{}, err From 62b860f030c13380294c63b1b7e57a1afc6bf1ba Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Thu, 9 May 2024 08:22:38 -0700 Subject: [PATCH 06/20] Some fixes --- .../elastic/external_elastic_controller.go | 48 +++++++++---------- pkg/render/logstorage/eck/eck.go | 2 +- pkg/render/logstorage/kibana/kibana.go | 8 +++- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 23320ecf29..5a6037722f 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -369,9 +369,8 @@ func (r *ExternalESController) Reconcile(ctx context.Context, 
request reconcile. } } - elasticChallengerUser := &corev1.Secret{} - err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, challengerClientCertificate) - if err != nil { + var elasticChallengerUser corev1.Secret + err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, &elasticChallengerUser) if err != nil { reqLogger.Error(err, "Failed to read external user secret") r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) return reconcile.Result{}, err @@ -405,7 +404,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. Namespace: kibanaHelper.InstallNamespace(), ChallengerClientCertificate: challengerClientCertificate, ExternalElasticEndpoint: elasticURL.String(), - ElasticChallengerUser: elasticChallengerUser, + ElasticChallengerUser: &elasticChallengerUser, }), ) } @@ -420,32 +419,33 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } - // ECK will be deployed per management cluster and will be configured - // to watch all namespaces in order to create a Kibana deployment - eck := eck.ECK(&eck.Configuration{ - LogStorage: ls, - Installation: install, - ManagementCluster: managementCluster, - PullSecrets: pullSecrets, - Provider: r.provider, - ElasticLicenseType: esLicenseType, - UsePSP: r.usePSP, - Tenant: tenant, - }) - if err := hdler.CreateOrUpdateOrDelete(ctx, eck, r.status); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) - return reconcile.Result{}, err - } - // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. 
- if r.multiTenant { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + tenantHandler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + + if r.multiTenant && kibanaEnabled { + // ECK will be deployed per management cluster and will be configured + // to watch all namespaces in order to create a Kibana deployment + eck := eck.ECK(&eck.Configuration{ + LogStorage: ls, + Installation: install, + ManagementCluster: managementCluster, + PullSecrets: pullSecrets, + Provider: r.provider, + ElasticLicenseType: esLicenseType, + UsePSP: r.usePSP, + Tenant: tenant, + }) + if err := hdler.CreateOrUpdateOrDelete(ctx, eck, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } + for _, component := range kibanaComponents { if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) return reconcile.Result{}, err } - if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { + if err := tenantHandler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err } diff --git a/pkg/render/logstorage/eck/eck.go b/pkg/render/logstorage/eck/eck.go index 1609e5d836..fe184bfdd0 100644 --- a/pkg/render/logstorage/eck/eck.go +++ b/pkg/render/logstorage/eck/eck.go @@ -311,7 +311,7 @@ func (e *eck) operatorStatefulSet() *appsv1.StatefulSet { } } var namespacesToWatch string - if e.cfg.Tenant.MultiTenant() { + if !e.cfg.Tenant.MultiTenant() { namespacesToWatch = "tigera-elasticsearch,tigera-kibana" } s := &appsv1.StatefulSet{ diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index 62d7b6d7be..c0f6a6d22c 100644 
--- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -343,7 +343,7 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { if k.cfg.Tenant.MultiTenant() { volumes = append(volumes, k.cfg.TrustedBundle.Volume()) - volumeMounts = k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType()) + volumeMounts = append(volumeMounts, k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType())...) if k.cfg.ChallengerClientCertificate != nil { // Add a volume for the required client certificate and key. volumes = append(volumes, corev1.Volume{ @@ -380,6 +380,10 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Name: "ES_GATEWAY_ELASTIC_CA_PATH", Value: k.cfg.TrustedBundle.MountPath(), }, + { + Name: "TENANT_ID", + Value: k.cfg.Tenant.Spec.ID, + }, {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "elastic"}, {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ @@ -396,7 +400,7 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { }, Image: k.challengerImage, SecurityContext: securitycontext.NewNonRootContext(), - VolumeMounts: k.cfg.TrustedBundle.VolumeMounts(k.SupportedOSType()), + VolumeMounts: volumeMounts, }) } From 3c685d87227b5d82972a459446786c9d9f4067e6 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Thu, 9 May 2024 08:23:11 -0700 Subject: [PATCH 07/20] Some fixes --- .../logstorage/elastic/external_elastic_controller.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 5a6037722f..bab112eb01 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -370,7 +370,8 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
} var elasticChallengerUser corev1.Secret - err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, &elasticChallengerUser) if err != nil { + err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, &elasticChallengerUser) + if err != nil { reqLogger.Error(err, "Failed to read external user secret") r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) return reconcile.Result{}, err From ebce741d4eaa3a65d5744129460386ef2eb55f3c Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Thu, 9 May 2024 08:35:40 -0700 Subject: [PATCH 08/20] Clean up kibana usage in linseed tests --- .../logstorage/linseed/linseed_controller_test.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/pkg/controller/logstorage/linseed/linseed_controller_test.go b/pkg/controller/logstorage/linseed/linseed_controller_test.go index b176d95560..88837d864e 100644 --- a/pkg/controller/logstorage/linseed/linseed_controller_test.go +++ b/pkg/controller/logstorage/linseed/linseed_controller_test.go @@ -226,13 +226,6 @@ var _ = Describe("LogStorage Linseed controller", func() { ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, Spec: operatorv1.ImageSetSpec{ Images: []operatorv1.Image{ - // TODO: Alina are all needed ? 
- {Image: "tigera/elasticsearch", Digest: "sha256:elasticsearchhash"}, - {Image: "tigera/kube-controllers", Digest: "sha256:kubecontrollershash"}, - {Image: "tigera/kibana", Digest: "sha256:kibanahash"}, - {Image: "tigera/eck-operator", Digest: "sha256:eckoperatorhash"}, - {Image: "tigera/elasticsearch-metrics", Digest: "sha256:esmetricshash"}, - {Image: "tigera/es-gateway", Digest: "sha256:esgatewayhash"}, {Image: "tigera/linseed", Digest: "sha256:linseedhash"}, {Image: "tigera/key-cert-provisioner", Digest: "sha256:deadbeef0123456789"}, }, @@ -425,12 +418,6 @@ var _ = Describe("LogStorage Linseed controller", func() { ObjectMeta: metav1.ObjectMeta{Name: "enterprise-" + components.EnterpriseRelease}, Spec: operatorv1.ImageSetSpec{ Images: []operatorv1.Image{ - {Image: "tigera/elasticsearch", Digest: "sha256:elasticsearchhash"}, - {Image: "tigera/kube-controllers", Digest: "sha256:kubecontrollershash"}, - {Image: "tigera/kibana", Digest: "sha256:kibanahash"}, - {Image: "tigera/eck-operator", Digest: "sha256:eckoperatorhash"}, - {Image: "tigera/elasticsearch-metrics", Digest: "sha256:esmetricshash"}, - {Image: "tigera/es-gateway", Digest: "sha256:esgatewayhash"}, {Image: "tigera/linseed", Digest: "sha256:linseedhash"}, {Image: "tigera/key-cert-provisioner", Digest: "sha256:deadbeef0123456789"}, }, From 8b86809a368d6e3372d9d3240723df60bd4ed2a0 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Thu, 9 May 2024 10:45:02 -0700 Subject: [PATCH 09/20] Add more tests --- .../logstorage/secrets/secret_controller.go | 2 +- .../secrets/secret_controller_test.go | 90 ++++++++++++++++--- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/pkg/controller/logstorage/secrets/secret_controller.go b/pkg/controller/logstorage/secrets/secret_controller.go index a907a00645..bf676a859b 100644 --- a/pkg/controller/logstorage/secrets/secret_controller.go +++ b/pkg/controller/logstorage/secrets/secret_controller.go @@ -269,7 +269,7 @@ 
func (r *SecretSubController) Reconcile(ctx context.Context, request reconcile.R // - Single-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a single trusted bundle in each. // // External ES modes: - // - Single-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a single trusted bundle in each. + // - Single-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a trusted bundle in tigera-elasticsearch. // - Multi-tenant: nothing installed in tigera-elasticsearch and tigera-kibana Namespace. The trusted bundle isn't created by this controller, but per-tenant keypairs are. if !r.elasticExternal { // This branch provisions the necessary KeyPairs for the internal ES cluster and Kibana, and installs a trusted bundle into tigera-kibana. diff --git a/pkg/controller/logstorage/secrets/secret_controller_test.go b/pkg/controller/logstorage/secrets/secret_controller_test.go index 7dfc9cd83a..bef08bf9ce 100644 --- a/pkg/controller/logstorage/secrets/secret_controller_test.go +++ b/pkg/controller/logstorage/secrets/secret_controller_test.go @@ -78,19 +78,22 @@ func NewSecretControllerWithShims( status status.StatusManager, provider operatorv1.Provider, clusterDomain string, + externalElastic bool, ) (*SecretSubController, error) { opts := options.AddOptions{ DetectedProvider: provider, ClusterDomain: clusterDomain, ShutdownContext: context.TODO(), + ElasticExternal: externalElastic, } r := &SecretSubController{ - client: cli, - scheme: scheme, - status: status, - clusterDomain: opts.ClusterDomain, - multiTenant: opts.MultiTenant, + client: cli, + scheme: scheme, + status: status, + clusterDomain: opts.ClusterDomain, + multiTenant: opts.MultiTenant, + elasticExternal: opts.ElasticExternal, } r.status.Run(opts.ShutdownContext) return r, nil @@ -198,7 +201,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Expect(cli.Delete(ctx, 
caSecret)).ShouldNot(HaveOccurred()) // Run the reconciler. - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) _, err = r.Reconcile(ctx, reconcile.Request{}) Expect(err).Should(HaveOccurred()) @@ -213,7 +216,7 @@ var _ = Describe("LogStorage Secrets controller", func() { CreateLogStorage(cli, ls) // Run the reconciler. - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) _, err = r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -304,7 +307,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) // Elasticsearch and kibana secrets are good. 
@@ -347,7 +350,7 @@ var _ = Describe("LogStorage Secrets controller", func() { }, }) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) result, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -409,7 +412,7 @@ var _ = Describe("LogStorage Secrets controller", func() { }, }) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) result, err := r.Reconcile(ctx, reconcile.Request{}) @@ -450,7 +453,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Expect(err).ShouldNot(HaveOccurred()) Expect(cli.Create(ctx, kbSecret)).ShouldNot(HaveOccurred()) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) // Reconcile - the secret should be unchanged. 
@@ -492,7 +495,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Expect(err).ShouldNot(HaveOccurred()) Expect(cli.Create(ctx, gwSecret)).ShouldNot(HaveOccurred()) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) result, err := r.Reconcile(ctx, reconcile.Request{}) @@ -524,7 +527,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Data: map[string]string{"eck_license_level": string(render.ElasticsearchLicenseTypeEnterprise)}, })).ShouldNot(HaveOccurred()) - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) result, err := r.Reconcile(ctx, reconcile.Request{}) @@ -545,7 +548,7 @@ var _ = Describe("LogStorage Secrets controller", func() { Expect(cli.Create(ctx, mcc)).ShouldNot(HaveOccurred()) // Run the reconciler. - r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) Expect(err).ShouldNot(HaveOccurred()) _, err = r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -619,6 +622,65 @@ var _ = Describe("LogStorage Secrets controller", func() { Expect(cli.Get(ctx, bundleKey, bundle)).Should(HaveOccurred()) }) }) + + Context("External elastic secret rendering", func() { + + BeforeEach(func() { + // Create the external ES and Kibana public certificates, used for external ES. 
+ externalESSecret := rtest.CreateCertSecret(logstorage.ExternalESPublicCertName, common.OperatorNamespace(), "external.es.com") + Expect(cli.Create(ctx, externalESSecret)).ShouldNot(HaveOccurred()) + + // Create the external Kibana public certificate, used for external Kibana. + externalKBSecret := rtest.CreateCertSecret(logstorage.ExternalKBPublicCertName, common.OperatorNamespace(), "external.es.com") + Expect(cli.Create(ctx, externalKBSecret)).ShouldNot(HaveOccurred()) + }) + + It("should render all necessary secrets for a single-tenant management cluster with external elastic", func() { + // Create a LogStorage instance with a default configuration. + ls := &operatorv1.LogStorage{} + ls.Name = "tigera-secure" + ls.Status.State = operatorv1.TigeraStatusReady + CreateLogStorage(cli, ls) + + // Run the reconciler. + r, err := NewSecretControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true) + Expect(err).ShouldNot(HaveOccurred()) + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + + // Expect secrets to have been created. + expected := []types.NamespacedName{ + // These are created in BeforeEach. + {Name: certificatemanagement.CASecretName, Namespace: common.OperatorNamespace()}, + {Name: logstorage.ExternalESPublicCertName, Namespace: common.OperatorNamespace()}, + {Name: logstorage.ExternalKBPublicCertName, Namespace: common.OperatorNamespace()}, + + // These are created by the controller.
+ {Name: esmetrics.ElasticsearchMetricsServerTLSSecret, Namespace: common.OperatorNamespace()}, + {Name: esmetrics.ElasticsearchMetricsServerTLSSecret, Namespace: render.ElasticsearchNamespace}, + + {Name: render.TigeraElasticsearchGatewaySecret, Namespace: common.OperatorNamespace()}, + {Name: render.TigeraElasticsearchGatewaySecret, Namespace: render.ElasticsearchNamespace}, + + {Name: render.TigeraLinseedSecret, Namespace: common.OperatorNamespace()}, + {Name: render.TigeraLinseedSecret, Namespace: render.ElasticsearchNamespace}, + } + ExpectSecrets(ctx, cli, expected) + + // Expect the trusted bundle to be provisioned in the tigera-elasticsearch namespace. + // The bundle should include the tigera-operator CA and external certificates + bundle := &corev1.ConfigMap{} + bundleKey := types.NamespacedName{Name: certificatemanagement.TrustedCertConfigMapName, Namespace: render.ElasticsearchNamespace} + Expect(cli.Get(ctx, bundleKey, bundle)).ShouldNot(HaveOccurred()) + + // For this test, we expect both the tigera-operator CA to be included in the bundle and the external certificates + rtest.ExpectBundleContents(bundle, + types.NamespacedName{Name: certificatemanagement.CASecretName, Namespace: common.OperatorNamespace()}, + types.NamespacedName{Name: logstorage.ExternalESPublicCertName, Namespace: common.OperatorNamespace()}, + types.NamespacedName{Name: logstorage.ExternalKBPublicCertName, Namespace: common.OperatorNamespace()}, + ) + }) + }) }) // CreateLogStorage creates a LogStorage object with the given parameters after filling in defaults, From 9a32f96a3dd1ccd19b69d4bd4cb18ed1935526de Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Thu, 9 May 2024 14:48:22 -0700 Subject: [PATCH 10/20] Add more tests --- pkg/render/logstorage/eck/eck_test.go | 132 +++++++---- pkg/render/logstorage/kibana/kibana.go | 2 +- pkg/render/logstorage/kibana/kibana_test.go | 234 +++++++++++++++++++- 3 files changed,
321 insertions(+), 47 deletions(-) diff --git a/pkg/render/logstorage/eck/eck_test.go b/pkg/render/logstorage/eck/eck_test.go index 1b15e633a8..791dd67e04 100644 --- a/pkg/render/logstorage/eck/eck_test.go +++ b/pkg/render/logstorage/eck/eck_test.go @@ -35,56 +35,57 @@ import ( ) var _ = Describe("ECK rendering tests", func() { - Context("zero-tenant rendering", func() { - var installation *operatorv1.InstallationSpec - var replicas int32 - var cfg *eck.Configuration - eckPolicy := testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/elastic-operator.json") - eckPolicyForOpenshift := testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/elastic-operator_ocp.json") + var installation *operatorv1.InstallationSpec + var replicas int32 + var cfg *eck.Configuration - expectedResources := []client.Object{ - &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorNamespace}}, - &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorPolicyName, Namespace: eck.OperatorNamespace}}, - &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret", Namespace: eck.OperatorNamespace}}, - &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator"}}, - &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator"}}, - &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator", Namespace: eck.OperatorNamespace}}, - &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorName}}, - &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorName, Namespace: eck.OperatorNamespace}}, - } + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorNamespace}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorPolicyName, Namespace: eck.OperatorNamespace}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret", Namespace: eck.OperatorNamespace}}, + 
&rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "elastic-operator", Namespace: eck.OperatorNamespace}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorName}}, + &appsv1.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: eck.OperatorName, Namespace: eck.OperatorNamespace}}, + } - BeforeEach(func() { - logStorage := &operatorv1.LogStorage{ - ObjectMeta: metav1.ObjectMeta{ - Name: "tigera-secure", - }, - Spec: operatorv1.LogStorageSpec{ - Nodes: &operatorv1.Nodes{ - Count: 1, - ResourceRequirements: nil, - }, - }, - Status: operatorv1.LogStorageStatus{ - State: "", + BeforeEach(func() { + logStorage := &operatorv1.LogStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tigera-secure", + }, + Spec: operatorv1.LogStorageSpec{ + Nodes: &operatorv1.Nodes{ + Count: 1, + ResourceRequirements: nil, }, - } + }, + Status: operatorv1.LogStorageStatus{ + State: "", + }, + } - installation = &operatorv1.InstallationSpec{ - ControlPlaneReplicas: &replicas, - KubernetesProvider: operatorv1.ProviderNone, - Registry: "testregistry.com/", - } + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } - cfg = &eck.Configuration{ - LogStorage: logStorage, - Installation: installation, - PullSecrets: []*corev1.Secret{ - {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, - }, - Provider: operatorv1.ProviderNone, - UsePSP: true, - } - }) + cfg = &eck.Configuration{ + LogStorage: logStorage, + Installation: installation, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + Provider: operatorv1.ProviderNone, + UsePSP: true, + } + }) + + Context("zero-tenant rendering", func() { + eckPolicy := 
testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/elastic-operator.json") + eckPolicyForOpenshift := testutils.GetExpectedPolicyFromFile("../../testutils/expected_policies/elastic-operator_ocp.json") It("should render all supporting resources for ECK Operator", func() { component := eck.ECK(cfg) @@ -327,4 +328,45 @@ var _ = Describe("ECK rendering tests", func() { }) }) }) + + Context("multi-tenant rendering", func() { + + It("should render all supporting resources for ECK Operator", func() { + cfg.Tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tenant", + Namespace: "test-tenant-ns", + }, + Spec: operatorv1.TenantSpec{ + ID: "test-tenant", + }, + } + component := eck.ECK(cfg) + createResources, _ := component.Objects() + rtest.ExpectResources(createResources, expectedResources) + + // Check the namespaces. + namespace := rtest.GetResource(createResources, "tigera-eck-operator", "", "", "v1", "Namespace").(*corev1.Namespace) + Expect(namespace.Labels["pod-security.kubernetes.io/enforce"]).To(Equal("restricted")) + Expect(namespace.Labels["pod-security.kubernetes.io/enforce-version"]).To(Equal("latest")) + + resultECK := rtest.GetResource(createResources, eck.OperatorName, eck.OperatorNamespace, + "apps", "v1", "StatefulSet").(*appsv1.StatefulSet) + Expect(resultECK.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(resultECK.Spec.Template.Spec.Containers[0].Args).To(ConsistOf([]string{ + "manager", + "--namespaces=", + "--log-verbosity=0", + "--metrics-port=0", + "--container-registry=testregistry.com/", + "--max-concurrent-reconciles=3", + "--ca-cert-validity=8760h", + "--ca-cert-rotate-before=24h", + "--cert-validity=8760h", + "--cert-rotate-before=24h", + "--enable-webhook=false", + "--manage-webhook-certs=false", + })) + }) + }) }) diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index c0f6a6d22c..b4801541f8 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ 
b/pkg/render/logstorage/kibana/kibana.go @@ -384,7 +384,7 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Name: "TENANT_ID", Value: k.cfg.Tenant.Spec.ID, }, - {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "elastic"}, + {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "tigera-mgmt"}, {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ diff --git a/pkg/render/logstorage/kibana/kibana_test.go b/pkg/render/logstorage/kibana/kibana_test.go index 7aef738c8d..ab6dd06443 100644 --- a/pkg/render/logstorage/kibana/kibana_test.go +++ b/pkg/render/logstorage/kibana/kibana_test.go @@ -37,10 +37,12 @@ import ( "github.com/tigera/operator/pkg/controller/certificatemanager" ctrlrfake "github.com/tigera/operator/pkg/ctrlruntime/client/fake" "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/render" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" "github.com/tigera/operator/pkg/render/common/networkpolicy" "github.com/tigera/operator/pkg/render/common/podaffinity" rtest "github.com/tigera/operator/pkg/render/common/test" + "github.com/tigera/operator/pkg/render/logstorage" "github.com/tigera/operator/pkg/render/logstorage/kibana" "github.com/tigera/operator/pkg/render/testutils" "github.com/tigera/operator/pkg/tls/certificatemanagement" @@ -356,6 +358,7 @@ var _ = Describe("Kibana rendering tests", func() { TrustedBundle: bundle, Enabled: true, UsePSP: true, + Namespace: kibana.Namespace, } }) @@ -452,10 +455,239 @@ var _ = Describe("Kibana rendering tests", func() { initcontainer := test.GetContainer(kibana.Spec.PodTemplate.Spec.InitContainers, "key-cert-provisioner") Expect(initcontainer).NotTo(BeNil()) Expect(initcontainer.Resources).To(Equal(expectedResourcesRequirements)) - }) }) + }) + + Context("multi-tenant rendering", func() { + var tenant *operatorv1.Tenant + var replicas int32 + var cfg *kibana.Configuration + var 
installation *operatorv1.InstallationSpec + + BeforeEach(func() { + logStorage := &operatorv1.LogStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tigera-secure", + }, + Spec: operatorv1.LogStorageSpec{ + Nodes: &operatorv1.Nodes{ + Count: 1, + ResourceRequirements: nil, + }, + }, + Status: operatorv1.LogStorageStatus{ + State: "", + }, + } + + installation = &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + } + + replicas = 2 + kibanaKeyPair, bundle := getX509Certs(installation) + + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-tenant", + Namespace: "test-tenant-ns", + }, + Spec: operatorv1.TenantSpec{ + ID: "test-tenant", + }, + } + + cfg = &kibana.Configuration{ + LogStorage: logStorage, + Installation: installation, + KibanaKeyPair: kibanaKeyPair, + PullSecrets: []*corev1.Secret{ + {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, + }, + Provider: operatorv1.ProviderNone, + ClusterDomain: dns.DefaultClusterDomain, + TrustedBundle: bundle, + UsePSP: true, + Enabled: true, + Namespace: tenant.Namespace, + Tenant: tenant, + ExternalElasticEndpoint: "https://external-elastic-endpoint:443", + ChallengerClientCertificate: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: logstorage.ExternalCertsSecret, + Namespace: common.OperatorNamespace(), + }, + Data: map[string][]byte{ + "client.crt": []byte(``), + "client.key": []byte(``), + }, + }, + ElasticChallengerUser: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: render.ElasticsearchAdminUserSecret, + Namespace: common.OperatorNamespace(), + }, + Data: map[string][]byte{ + "tigera-mgmt": []byte(``), + }, + }, + } + }) + It("should render all supporting resources for Kibana", func() { + component := kibana.Kibana(cfg) + createResources, _ := component.Objects() + + expectedResources := []client.Object{ + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: 
"tigera-kibana"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-kibana"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-kibana"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: kibana.PolicyName, Namespace: tenant.Namespace}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "tigera-kibana", Namespace: tenant.Namespace}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: logstorage.ExternalCertsSecret, Namespace: tenant.Namespace}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: render.ElasticsearchAdminUserSecret, Namespace: tenant.Namespace}}, + &kbv1.Kibana{ObjectMeta: metav1.ObjectMeta{Name: kibana.CRName, Namespace: tenant.Namespace}}, + } + rtest.ExpectResources(createResources, expectedResources) + + resultKB := rtest.GetResource(createResources, kibana.CRName, tenant.Namespace, + "kibana.k8s.elastic.co", "v1", "Kibana").(*kbv1.Kibana) + Expect(resultKB.Spec.Config.Data["xpack.security.session.lifespan"]).To(Equal("8h")) + Expect(resultKB.Spec.Config.Data["xpack.security.session.idleTimeout"]).To(Equal("30m")) + }) + + It("should render challenger container in addition to kibana", func() { + component := kibana.Kibana(cfg) + createResources, _ := component.Objects() + kb := rtest.GetResource(createResources, "tigera-secure", tenant.Namespace, "kibana.k8s.elastic.co", "v1", "Kibana") + Expect(kb).NotTo(BeNil()) + kibanaCR := kb.(*kbv1.Kibana) + Expect(kibanaCR.Spec.PodTemplate.Spec.Containers).To(HaveLen(2)) + + kibanaContainer := kibanaCR.Spec.PodTemplate.Spec.Containers[0] + challengerContainer := kibanaCR.Spec.PodTemplate.Spec.Containers[1] + + Expect(kibanaContainer.Name).To(Equal("kibana")) + Expect(*kibanaContainer.SecurityContext.AllowPrivilegeEscalation).To(BeFalse()) + Expect(*kibanaContainer.SecurityContext.Privileged).To(BeFalse()) + Expect(*kibanaContainer.SecurityContext.RunAsGroup).To(BeEquivalentTo(10001)) + 
Expect(*kibanaContainer.SecurityContext.RunAsNonRoot).To(BeTrue()) + Expect(*kibanaContainer.SecurityContext.RunAsUser).To(BeEquivalentTo(10001)) + Expect(kibanaContainer.SecurityContext.Capabilities).To(Equal( + &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + )) + Expect(kibanaContainer.SecurityContext.SeccompProfile).To(Equal( + &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + })) + + Expect(challengerContainer.Name).To(Equal("challenger")) + Expect(*challengerContainer.SecurityContext.AllowPrivilegeEscalation).To(BeFalse()) + Expect(*challengerContainer.SecurityContext.Privileged).To(BeFalse()) + Expect(*challengerContainer.SecurityContext.RunAsGroup).To(BeEquivalentTo(10001)) + Expect(*challengerContainer.SecurityContext.RunAsNonRoot).To(BeTrue()) + Expect(*challengerContainer.SecurityContext.RunAsUser).To(BeEquivalentTo(10001)) + Expect(challengerContainer.SecurityContext.Capabilities).To(Equal( + &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + )) + Expect(challengerContainer.SecurityContext.SeccompProfile).To(Equal( + &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + })) + + }) + + It("should configure challenger", func() { + component := kibana.Kibana(cfg) + createResources, _ := component.Objects() + kb := rtest.GetResource(createResources, "tigera-secure", tenant.Namespace, "kibana.k8s.elastic.co", "v1", "Kibana") + Expect(kb).NotTo(BeNil()) + kibanaCR := kb.(*kbv1.Kibana) + Expect(kibanaCR.Spec.PodTemplate.Spec.Containers).To(HaveLen(2)) + + challengerContainer := kibanaCR.Spec.PodTemplate.Spec.Containers[1] + Expect(challengerContainer.Name).To(Equal("challenger")) + + Expect(challengerContainer.Env).To(ContainElements( + corev1.EnvVar{ + Name: "TENANT_ID", + Value: tenant.Spec.ID, + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_KIBANA_CATCH_ALL_ROUTE", + Value: "/", + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_ENDPOINT", + Value: cfg.ExternalElasticEndpoint, + }, + 
corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_CA_PATH", + Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt", + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_USERNAME", + Value: "tigera-mgmt", + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: render.ElasticsearchAdminUserSecret, + }, + Key: "tigera-mgmt", + }, + }, + }, + )) + + Expect(challengerContainer.VolumeMounts).To(ContainElements( + corev1.VolumeMount{ + Name: "tigera-ca-bundle", + MountPath: "/etc/pki/tls/certs", + ReadOnly: true, + }, + corev1.VolumeMount{ + Name: logstorage.ExternalCertsVolumeName, + MountPath: "/certs/elasticsearch", + ReadOnly: true, + }, + )) + + Expect(kibanaCR.Spec.PodTemplate.Spec.Volumes).To(ContainElements( + corev1.Volume{ + Name: "tigera-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "tigera-ca-bundle", + }, + }, + }, + }, + corev1.Volume{ + Name: logstorage.ExternalCertsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: logstorage.ExternalCertsSecret, + }, + }, + }, + )) + + Expect(kibanaCR.Spec.Config).NotTo(BeNil()) + Expect(kibanaCR.Spec.Config.Data).NotTo(BeEmpty()) + Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.host", "http://localhost:8080")) + Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.ssl.verificationMode", "none")) + }) }) }) From 2c98d440b83cbdd08c13590bdd9c65a1039b4509 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Mon, 13 May 2024 23:30:39 -0700 Subject: [PATCH 11/20] Allow access to external elastic --- .../elastic/external_elastic_controller.go | 13 +++- pkg/render/logstorage/kibana/kibana.go | 76 +++++++++++++++---- pkg/render/logstorage/kibana/kibana_test.go | 22 
+++++- 3 files changed, 91 insertions(+), 20 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index bab112eb01..f4cb031e53 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -96,7 +96,7 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { } // Determine how to handle watch events for cluster-scoped resources. For multi-tenant clusters, - // we should update all tenants whenever one changes. For single-tenatn clusters, we can just queue the object. + // we should update all tenants whenever one changes. For single-tenant clusters, we can just queue the object. var eventHandler handler.EventHandler = &handler.EnqueueRequestForObject{} if opts.MultiTenant { eventHandler = utils.EnqueueAllTenants(mgr.GetClient()) @@ -177,7 +177,16 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("log-storage-elastic-controller failed to watch Secret resource: %w", err) } } - // TODO: ALINA - We need a user for kibana + + if r.multiTenant { + for _, secretName := range []string{ + kibana.MultiTenantCredentialsSecretName, + } { + if err = utils.AddSecretsWatch(c, secretName, kibanaNamespaceHelper.TruthNamespace()); err != nil { + return fmt.Errorf("log-storage-elastic-controller failed to watch Secret resource: %w", err) + } + } + } } // Perform periodic reconciliation. 
This acts as a backstop to catch reconcile issues, diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index b4801541f8..bddca8b928 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + "github.com/tigera/api/pkg/lib/numorstring" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/components" "github.com/tigera/operator/pkg/dns" @@ -58,10 +59,13 @@ const ( PolicyName = networkpolicy.TigeraComponentPolicyPrefix + "kibana-access" Port = 5601 + // TODO: ALINA Check annotations TLSAnnotationHash = "hash.operator.tigera.io/kb-secrets" - TimeFilter = "_g=(time:(from:now-24h,to:now))" - FlowsDashboardName = "Calico Enterprise Flow Logs" + TimeFilter = "_g=(time:(from:now-24h,to:now))" + FlowsDashboardName = "Calico Enterprise Flow Logs" + MultiTenantCredentialsSecretName = "kibana-elasticsearch-credentials" + MultiTenantKibanaUser = "tigera-mgmt" ) var ( @@ -271,8 +275,9 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { } if k.cfg.Tenant.MultiTenant() { - config["elasticsearch.host"] = "http://localhost:8080" + config["elasticsearch.hosts"] = "http://localhost:8080" config["elasticsearch.ssl.verificationMode"] = "none" + config["elasticsearch.username"] = MultiTenantKibanaUser } else { config["elasticsearch.ssl.certificateAuthorities"] = []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"} } @@ -292,8 +297,8 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { render.ElasticsearchServiceName, corev1.TLSPrivateKeyKey, corev1.TLSCertKey, - dns.GetServiceDNSNames(ServiceName, Namespace, k.cfg.ClusterDomain), - Namespace) + dns.GetServiceDNSNames(ServiceName, k.cfg.Namespace, k.cfg.ClusterDomain), + k.cfg.Namespace) initContainers = append(initContainers, csrInitContainer) volumeMounts = append(volumeMounts, corev1.VolumeMount{ @@ -377,23 
+382,36 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Value: k.cfg.ExternalElasticEndpoint, }, { - Name: "ES_GATEWAY_ELASTIC_CA_PATH", + Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", Value: k.cfg.TrustedBundle.MountPath(), }, + { + Name: "ES_GATEWAY_ELASTIC_CLIENT_KEY_PATH", + Value: "/certs/elasticsearch/client.key", + }, + { + Name: "ES_GATEWAY_ELASTIC_CLIENT_CERT_PATH", + Value: "/certs/elasticsearch/client.crt", + }, + { + Name: "ES_GATEWAY_ENABLE_ELASTIC_MUTUAL_TLS", + Value: "true", + }, { Name: "TENANT_ID", Value: k.cfg.Tenant.Spec.ID, }, - {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "tigera-mgmt"}, + // TODO: ALINA - These are not actually needed + {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: MultiTenantKibanaUser}, {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ SecretKeyRef: &corev1.SecretKeySelector{ LocalObjectReference: corev1.LocalObjectReference{ Name: render.ElasticsearchAdminUserSecret, }, - // TODO: ALINA - IS THIS THE correct user or do we need to create a new one ? - Key: "tigera-mgmt", + Key: MultiTenantKibanaUser, }, - }}, + }, + }, }, Command: []string{ "/usr/bin/es-gateway", "-run-as-challenger", @@ -459,6 +477,14 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { } } + if k.cfg.Tenant.MultiTenant() { + kibana.Spec.SecureSettings = []cmnv1.SecretSource{ + { + SecretName: MultiTenantCredentialsSecretName, + }, + } + } + if k.cfg.Installation.ControlPlaneReplicas != nil && *k.cfg.Installation.ControlPlaneReplicas > 1 { kibana.Spec.PodTemplate.Spec.Affinity = podaffinity.NewPodAntiAffinity(CRName, Namespace) } @@ -531,17 +557,35 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { Protocol: &networkpolicy.TCPProtocol, Destination: networkpolicy.KubeAPIServerServiceSelectorEntityRule, }, - // TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), - }, }...) 
+ if k.cfg.Tenant.MultiTenant() { + // Allow egress traffic to the external Elasticsearch. + egressRules = append(egressRules, + v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: v3.EntityRule{ + Ports: []numorstring.Port{{MinPort: 443, MaxPort: 443}}, + Domains: []string{k.cfg.ExternalElasticEndpoint}, + }, + }, + ) + } else { + // Allow egress traffic to es gateway. + egressRules = append(egressRules, + v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), + }, + ) + } + kibanaPortIngressDestination := v3.EntityRule{ Ports: networkpolicy.Ports(Port), } + return &v3.NetworkPolicy{ TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/render/logstorage/kibana/kibana_test.go b/pkg/render/logstorage/kibana/kibana_test.go index ab6dd06443..73cbc1cc43 100644 --- a/pkg/render/logstorage/kibana/kibana_test.go +++ b/pkg/render/logstorage/kibana/kibana_test.go @@ -17,6 +17,7 @@ package kibana_test import ( "context" + cmnv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" . "github.com/onsi/ginkgo" . 
"github.com/onsi/ginkgo/extensions/table" @@ -629,9 +630,21 @@ var _ = Describe("Kibana rendering tests", func() { Value: cfg.ExternalElasticEndpoint, }, corev1.EnvVar{ - Name: "ES_GATEWAY_ELASTIC_CA_PATH", + Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt", }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_CLIENT_KEY_PATH", + Value: "/certs/elasticsearch/client.key", + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ELASTIC_CLIENT_CERT_PATH", + Value: "/certs/elasticsearch/client.crt", + }, + corev1.EnvVar{ + Name: "ES_GATEWAY_ENABLE_ELASTIC_MUTUAL_TLS", + Value: "true", + }, corev1.EnvVar{ Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: "tigera-mgmt", @@ -685,8 +698,13 @@ var _ = Describe("Kibana rendering tests", func() { Expect(kibanaCR.Spec.Config).NotTo(BeNil()) Expect(kibanaCR.Spec.Config.Data).NotTo(BeEmpty()) - Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.host", "http://localhost:8080")) + Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.hosts", "http://localhost:8080")) Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.ssl.verificationMode", "none")) + Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.username", kibana.MultiTenantKibanaUser)) + + Expect(kibanaCR.Spec.SecureSettings).NotTo(BeNil()) + Expect(kibanaCR.Spec.SecureSettings).To(ContainElement( + cmnv1.SecretSource{SecretName: kibana.MultiTenantCredentialsSecretName})) }) }) }) From 77e09d428f1902f1e51c1fbaa9bc6716c10f7596 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Tue, 14 May 2024 17:54:16 -0700 Subject: [PATCH 12/20] Fixes for credentials and network policies --- .../elastic/external_elastic_controller.go | 40 +++-- .../logstorage/users/users_controller.go | 78 ++++++++-- pkg/controller/utils/elasticsearch.go | 14 ++ .../common/networkpolicy/networkpolicy.go | 4 + pkg/render/logstorage/kibana/kibana.go | 145 +++++++++--------- 
pkg/render/manager.go | 16 +- 6 files changed, 192 insertions(+), 105 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index f4cb031e53..0cef55f90d 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -212,7 +212,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. // When running in multi-tenant mode, we need to install Kibana in tenant Namespaces. However, the LogStorage // resource is still cluster-scoped (since ES is a cluster-wide resource), so we need to look elsewhere to determine // which tenant namespaces require a Kibana instance. We use the tenant API to determine the set of namespaces that should have Kibana. - tenant, _, err := utils.GetTenant(ctx, r.multiTenant, r.client, request.Namespace) + tenant, tenantID, err := utils.GetTenant(ctx, r.multiTenant, r.client, request.Namespace) if errors.IsNotFound(err) { reqLogger.Info("No Tenant in this Namespace, skip") return reconcile.Result{}, nil @@ -378,11 +378,19 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
} } - var elasticChallengerUser corev1.Secret - err = r.client.Get(ctx, client.ObjectKey{Name: render.ElasticsearchAdminUserSecret, Namespace: common.OperatorNamespace()}, &elasticChallengerUser) + clusterIDConfigMap := corev1.ConfigMap{} + clusterIDConfigMapKey := client.ObjectKey{Name: "cluster-info", Namespace: "tigera-operator"} + err = r.client.Get(ctx, clusterIDConfigMapKey, &clusterIDConfigMap) if err != nil { - reqLogger.Error(err, "Failed to read external user secret") - r.status.SetDegraded(operatorv1.ResourceReadError, "Waiting for external Elasticsearch user to be available", err, reqLogger) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Waiting for ConfigMap %s/%s to be available", clusterIDConfigMapKey.Namespace, clusterIDConfigMapKey.Name), + nil, reqLogger) + return reconcile.Result{}, err + } + clusterID, ok := clusterIDConfigMap.Data["cluster-id"] + if !ok { + err = fmt.Errorf("%s/%s ConfigMap does not contain expected 'cluster-id' key", + clusterIDConfigMap.Namespace, clusterIDConfigMap.Name) + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("%v", err), err, reqLogger) return reconcile.Result{}, err } @@ -413,8 +421,8 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. Tenant: tenant, Namespace: kibanaHelper.InstallNamespace(), ChallengerClientCertificate: challengerClientCertificate, - ExternalElasticEndpoint: elasticURL.String(), - ElasticChallengerUser: &elasticChallengerUser, + ExternalElasticURL: elasticURL, + KibanaUsername: utils.KibanaUser(clusterID, tenantID).Username, }), ) } @@ -422,16 +430,15 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) clusterConfig := relasticsearch.NewClusterConfig(render.DefaultElasticsearchClusterName, ls.Replicas(), logstoragecommon.DefaultElasticsearchShards, flowShards) - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) - externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets) - if err := hdler.CreateOrUpdateOrDelete(ctx, externalElasticsearch, r.status); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) - return reconcile.Result{}, err + if !r.multiTenant { + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets) + if err := hdler.CreateOrUpdateOrDelete(ctx, externalElasticsearch, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } } - // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. - tenantHandler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) - if r.multiTenant && kibanaEnabled { // ECK will be deployed per management cluster and will be configured // to watch all namespaces in order to create a Kibana deployment @@ -445,11 +452,14 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
UsePSP: r.usePSP, Tenant: tenant, }) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) if err := hdler.CreateOrUpdateOrDelete(ctx, eck, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err } + // In standard installs, the LogStorage owns the external elastic. For multi-tenant, it's owned by the Tenant instance. + tenantHandler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) for _, component := range kibanaComponents { if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) diff --git a/pkg/controller/logstorage/users/users_controller.go b/pkg/controller/logstorage/users/users_controller.go index a64b6ca967..d5107140e2 100644 --- a/pkg/controller/logstorage/users/users_controller.go +++ b/pkg/controller/logstorage/users/users_controller.go @@ -18,23 +18,10 @@ import ( "context" "fmt" - "github.com/tigera/operator/pkg/controller/logstorage/initializer" - esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" "github.com/elastic/cloud-on-k8s/v2/pkg/utils/stringsutil" "github.com/go-logr/logr" - operatorv1 "github.com/tigera/operator/api/v1" - "github.com/tigera/operator/pkg/ctrlruntime" - "github.com/tigera/operator/pkg/render/logstorage/dashboards" corev1 "k8s.io/api/core/v1" - - "github.com/tigera/operator/pkg/controller/options" - "github.com/tigera/operator/pkg/controller/status" - "github.com/tigera/operator/pkg/controller/utils" - "github.com/tigera/operator/pkg/crypto" - "github.com/tigera/operator/pkg/render" - relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" - "github.com/tigera/operator/pkg/render/common/secret" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -44,6 +31,19 @@ import ( 
logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + + operatorv1 "github.com/tigera/operator/api/v1" + "github.com/tigera/operator/pkg/controller/logstorage/initializer" + "github.com/tigera/operator/pkg/controller/options" + "github.com/tigera/operator/pkg/controller/status" + "github.com/tigera/operator/pkg/controller/utils" + "github.com/tigera/operator/pkg/crypto" + "github.com/tigera/operator/pkg/ctrlruntime" + "github.com/tigera/operator/pkg/render" + relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" + "github.com/tigera/operator/pkg/render/common/secret" + "github.com/tigera/operator/pkg/render/logstorage/dashboards" + "github.com/tigera/operator/pkg/render/logstorage/kibana" ) var log = logf.Log.WithName("controller_logstorage_users") @@ -276,10 +276,38 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques credentialSecrets = append(credentialSecrets, &dashboardUserSecret) } + var kibanaCredentialSecret *corev1.Secret + var kibanaUser *utils.User + if r.multiTenant { + // Query any existing username and password for this Kibana instance. If one already exists, we'll simply + // use that. Otherwise, generate a new one. + keyKibanaCred := types.NamespacedName{Name: kibana.MultiTenantCredentialsSecretName, Namespace: helper.TruthNamespace()} + kibanaUser = utils.KibanaUser(clusterID, tenantID) + kibanaUser.Password = crypto.GeneratePassword(16) + kibanaCredentialSecret = &corev1.Secret{} + if err = r.client.Get(ctx, key, kibanaCredentialSecret); err != nil && !errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Error getting Secret %s", keyKibanaCred), err, reqLogger) + return reconcile.Result{}, err + } else if errors.IsNotFound(err) { + // Create the secret to provision into the cluster.
+ kibanaCredentialSecret.Name = kibana.MultiTenantCredentialsSecretName + kibanaCredentialSecret.Namespace = helper.TruthNamespace() + kibanaCredentialSecret.StringData = map[string]string{ + "elastic.password": kibanaUser.Password, + } + + // Make sure we install the generated credentials into the truth namespace. + credentialSecrets = append(credentialSecrets, kibanaCredentialSecret) + } + } + if helper.TruthNamespace() != helper.InstallNamespace() { // Copy the credentials into the install namespace. credentialSecrets = append(credentialSecrets, secret.CopyToNamespace(helper.InstallNamespace(), &linseedUserSecret)[0]) credentialSecrets = append(credentialSecrets, secret.CopyToNamespace(helper.InstallNamespace(), &dashboardUserSecret)[0]) + if kibanaCredentialSecret != nil { + credentialSecrets = append(credentialSecrets, secret.CopyToNamespace(helper.InstallNamespace(), kibanaCredentialSecret)[0]) + } } credentialComponent := render.NewPassthrough(credentialSecrets...) @@ -320,6 +348,13 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques return reconcile.Result{}, err } + if kibanaUser != nil { + if err = r.createUser(ctx, elasticEndpoint, kibanaUser, reqLogger); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create Kibana user in ES", err, reqLogger) + return reconcile.Result{}, err + } + } + r.status.ReadyToMonitor() r.status.ClearDegraded() return reconcile.Result{}, nil @@ -351,6 +386,20 @@ func (r *UserController) createUserLogin(ctx context.Context, elasticEndpoint st return nil } +func (r *UserController) createUser(ctx context.Context, elasticEndpoint string, user *utils.User, reqLogger logr.Logger) error { + esClient, err := r.esClientFn(r.client, ctx, elasticEndpoint) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceCreateError, "Failed to connect to Elasticsearch - failed to create the Elasticsearch client", err, reqLogger) + return err + } + if err = esClient.CreateUser(ctx, 
user); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to create or update Elasticsearch user", err, reqLogger) + return err + } + + return nil +} + func (r *UsersCleanupController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { helper := utils.NewNamespaceHelper(true, render.ElasticsearchNamespace, request.Namespace) reqLogger := logf.Log.WithName("controller_logstorage_users_cleanup").WithValues("Request.Namespace", @@ -418,8 +467,9 @@ func (r *UsersCleanupController) cleanupStaleUsers(ctx context.Context, logger l lu := utils.LinseedUser(clusterID, t.Spec.ID) dashboardsUser := utils.DashboardUser(clusterID, t.Spec.ID) + kibanaUser := utils.KibanaUser(clusterID, t.Spec.ID) for _, user := range allESUsers { - if user.Username == lu.Username || user.Username == dashboardsUser.Username { + if user.Username == lu.Username || user.Username == dashboardsUser.Username || user.Username == kibanaUser.Username { err = esClient.DeleteUser(ctx, &user) if err != nil { logger.Error(err, "Failed to delete elastic user") diff --git a/pkg/controller/utils/elasticsearch.go b/pkg/controller/utils/elasticsearch.go index 37fa14ad36..9f2dfb6bd8 100644 --- a/pkg/controller/utils/elasticsearch.go +++ b/pkg/controller/utils/elasticsearch.go @@ -184,6 +184,7 @@ func indexPattern(prefix, cluster, suffix, tenant string) string { var ( ElasticsearchUserNameLinseed = "tigera-ee-linseed" ElasticsearchUserNameDashboardInstaller = "tigera-ee-dashboards-installer" + ElasticsearchUserNameKibana = "tigera-kibana" ) func LinseedUser(clusterID, tenant string) *User { @@ -228,6 +229,19 @@ func DashboardUser(clusterID, tenant string) *User { } } +func KibanaUser(clusterID, tenant string) *User { + username := formatName(ElasticsearchUserNameKibana, clusterID, tenant) + + return &User{ + Username: username, + Roles: []Role{ + { + Name: "kibana-system", + }, + }, + } +} + // User represents an Elasticsearch user, which may or may not have 
roles attached to it type User struct { Username string diff --git a/pkg/render/common/networkpolicy/networkpolicy.go b/pkg/render/common/networkpolicy/networkpolicy.go index a98fe05871..6fff426373 100644 --- a/pkg/render/common/networkpolicy/networkpolicy.go +++ b/pkg/render/common/networkpolicy/networkpolicy.go @@ -290,6 +290,10 @@ func (h *NetworkPolicyHelper) ComplianceReporterSourceEntityRule() v3.EntityRule return CreateSourceEntityRule(h.namespace("tigera-compliance"), "compliance-reporter") } +func (h *NetworkPolicyHelper) KibanaEntityRule() v3.EntityRule { + return CreateEntityRule(h.namespace("tigera-kibana"), "tigera-secure", 5601) +} + const PrometheusSelector = "k8s-app == 'tigera-prometheus'" var PrometheusEntityRule = v3.EntityRule{ diff --git a/pkg/render/logstorage/kibana/kibana.go b/pkg/render/logstorage/kibana/kibana.go index bddca8b928..2493d8e39d 100644 --- a/pkg/render/logstorage/kibana/kibana.go +++ b/pkg/render/logstorage/kibana/kibana.go @@ -64,8 +64,7 @@ const ( TimeFilter = "_g=(time:(from:now-24h,to:now))" FlowsDashboardName = "Calico Enterprise Flow Logs" - MultiTenantCredentialsSecretName = "kibana-elasticsearch-credentials" - MultiTenantKibanaUser = "tigera-mgmt" + MultiTenantCredentialsSecretName = "tigera-kibana-elasticsearch-credentials" ) var ( @@ -104,8 +103,8 @@ type Configuration struct { // Secret containing client certificate and key for connecting to the Elastic cluster. If configured, // mTLS is used between Challenger and the external Elastic cluster. ChallengerClientCertificate *corev1.Secret - ElasticChallengerUser *corev1.Secret - ExternalElasticEndpoint string + ExternalElasticURL *url.URL + KibanaUsername string // Whether the cluster supports pod security policies. UsePSP bool @@ -210,10 +209,6 @@ func (k *kibana) Objects() ([]client.Object, []client.Object) { // If using External ES, we need to copy the client certificates into the tenant namespace to be mounted. 
toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ChallengerClientCertificate)...)...) } - if k.cfg.ElasticChallengerUser != nil { - // If using External ES, we need to copy the elastic user into the tenant namespace - toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(k.cfg.Namespace, k.cfg.ElasticChallengerUser)...)...) - } } else { toDelete = append(toDelete, k.kibanaCR()) } @@ -277,7 +272,7 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { if k.cfg.Tenant.MultiTenant() { config["elasticsearch.hosts"] = "http://localhost:8080" config["elasticsearch.ssl.verificationMode"] = "none" - config["elasticsearch.username"] = MultiTenantKibanaUser + config["elasticsearch.username"] = k.cfg.KibanaUsername } else { config["elasticsearch.ssl.certificateAuthorities"] = []string{"/usr/share/kibana/config/elasticsearch-certs/tls.crt"} } @@ -379,7 +374,7 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { }, { Name: "ES_GATEWAY_ELASTIC_ENDPOINT", - Value: k.cfg.ExternalElasticEndpoint, + Value: k.cfg.ExternalElasticURL.String(), }, { Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", @@ -401,17 +396,6 @@ func (k *kibana) kibanaCR() *kbv1.Kibana { Name: "TENANT_ID", Value: k.cfg.Tenant.Spec.ID, }, - // TODO: ALINA - These are not actually needed - {Name: "ES_GATEWAY_ELASTIC_USERNAME", Value: MultiTenantKibanaUser}, - {Name: "ES_GATEWAY_ELASTIC_PASSWORD", ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: render.ElasticsearchAdminUserSecret, - }, - Key: MultiTenantKibanaUser, - }, - }, - }, }, Command: []string{ "/usr/bin/es-gateway", "-run-as-challenger", @@ -542,14 +526,7 @@ func (k *kibana) kibanaPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { // Allow access to Kibana func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { networkPolicyHelper := networkpolicy.Helper(k.cfg.Tenant.MultiTenant(), k.cfg.Namespace) - egressRules := 
[]v3.Rule{ - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: v3.EntityRule{}, - Destination: render.ElasticsearchEntityRule, - }, - } + var egressRules []v3.Rule egressRules = networkpolicy.AppendDNSEgressRules(egressRules, k.cfg.Provider == operatorv1.ProviderOpenShift) egressRules = append(egressRules, []v3.Rule{ { @@ -567,25 +544,84 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { Protocol: &networkpolicy.TCPProtocol, Destination: v3.EntityRule{ Ports: []numorstring.Port{{MinPort: 443, MaxPort: 443}}, - Domains: []string{k.cfg.ExternalElasticEndpoint}, + Domains: []string{k.cfg.ExternalElasticURL.Hostname()}, }, }, ) } else { - // Allow egress traffic to es gateway. + // Allow egress traffic to ES Gateway and Elastic egressRules = append(egressRules, v3.Rule{ Action: v3.Allow, Protocol: &networkpolicy.TCPProtocol, - Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), + Source: v3.EntityRule{}, + Destination: render.ElasticsearchEntityRule, }, - ) + v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: networkpolicy.DefaultHelper().ESGatewayEntityRule(), + }) } kibanaPortIngressDestination := v3.EntityRule{ Ports: networkpolicy.Ports(Port), } + ingressRules := []v3.Rule{ + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: v3.EntityRule{ + // This policy allows access to Kibana from anywhere. + Nets: []string{"0.0.0.0/0"}, + }, + Destination: kibanaPortIngressDestination, + }, + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: v3.EntityRule{ + // This policy allows access to Kibana from anywhere. 
+ Nets: []string{"::/0"}, + }, + Destination: kibanaPortIngressDestination, + }, + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: networkPolicyHelper.DashboardInstallerSourceEntityRule(), + Destination: kibanaPortIngressDestination, + }, + { + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: render.ECKOperatorSourceEntityRule, + Destination: kibanaPortIngressDestination, + }, + } + + if k.cfg.Tenant.MultiTenant() { + ingressRules = append(ingressRules, + v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: networkPolicyHelper.ManagerSourceEntityRule(), + Destination: kibanaPortIngressDestination, + }) + } else { + // Zero and single tenant with internal elastic will have all Kibana + // traffic proxied via ES Gateway + ingressRules = append(ingressRules, + v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Source: networkpolicy.DefaultHelper().ESGatewaySourceEntityRule(), + Destination: kibanaPortIngressDestination, + }, + ) + } + return &v3.NetworkPolicy{ TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ @@ -597,47 +633,8 @@ func (k *kibana) allowTigeraPolicy() *v3.NetworkPolicy { Tier: networkpolicy.TigeraComponentTierName, Selector: networkpolicy.KubernetesAppSelector(CRName), Types: []v3.PolicyType{v3.PolicyTypeIngress, v3.PolicyTypeEgress}, - Ingress: []v3.Rule{ - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: v3.EntityRule{ - // This policy allows access to Kibana from anywhere. - Nets: []string{"0.0.0.0/0"}, - }, - Destination: kibanaPortIngressDestination, - }, - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: v3.EntityRule{ - // This policy allows access to Kibana from anywhere. 
- Nets: []string{"::/0"}, - }, - Destination: kibanaPortIngressDestination, - }, - // TODO: ALINA - DO WE NEED TO REMOVE EGRESS GATEWAY FOR MULTI-TENANT - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: networkpolicy.DefaultHelper().ESGatewaySourceEntityRule(), - Destination: kibanaPortIngressDestination, - }, - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: networkPolicyHelper.DashboardInstallerSourceEntityRule(), - Destination: kibanaPortIngressDestination, - }, - // TODO: ALINA - DO WE NEED TO ADD MANAGER? - { - Action: v3.Allow, - Protocol: &networkpolicy.TCPProtocol, - Source: render.ECKOperatorSourceEntityRule, - Destination: kibanaPortIngressDestination, - }, - }, - Egress: egressRules, + Ingress: ingressRules, + Egress: egressRules, }, } } diff --git a/pkg/render/manager.go b/pkg/render/manager.go index 14f48d063c..e24e9965e6 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -611,10 +611,13 @@ func (c *managerComponent) managerEsProxyContainer() corev1.Container { keyPath, certPath = c.cfg.InternalTLSKeyPair.VolumeMountKeyFilePath(), c.cfg.InternalTLSKeyPair.VolumeMountCertificateFilePath() } + kibanaEndpoint := rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain) + if c.cfg.Tenant.MultiTenant() { + kibanaEndpoint = fmt.Sprintf("https://tigera-secure-kb-http.%s.svc.cluster.local:5601", c.cfg.Namespace) + } env := []corev1.EnvVar{ {Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)}, - // TODO: ALINA - For multi-tenancy this needs to be in the tenant namespace - {Name: "ELASTIC_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, + {Name: "ELASTIC_KIBANA_ENDPOINT", Value: kibanaEndpoint}, {Name: "FIPS_MODE_ENABLED", Value: operatorv1.IsFIPSModeEnabledString(c.cfg.Installation.FIPSMode)}, {Name: "LINSEED_CLIENT_CERT", Value: certPath}, {Name: "LINSEED_CLIENT_KEY", Value: keyPath}, @@ -961,6 +964,15 @@ func (c *managerComponent) 
managerAllowTigeraNetworkPolicy() *v3.NetworkPolicy { Destination: networkpolicy.PrometheusEntityRule, }) + if c.cfg.Tenant.MultiTenant() { + egressRules = append(egressRules, v3.Rule{ + Action: v3.Allow, + Protocol: &networkpolicy.TCPProtocol, + Destination: networkpolicyHelper.KibanaEntityRule(), + }) + + } + ingressRules := []v3.Rule{ { Action: v3.Allow, From 0dd522569f42369cfedced2f74db663582db5fc3 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Tue, 14 May 2024 23:02:57 -0700 Subject: [PATCH 13/20] Update tenant CR --- pkg/crds/operator/operator.tigera.io_tenants.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/crds/operator/operator.tigera.io_tenants.yaml b/pkg/crds/operator/operator.tigera.io_tenants.yaml index fdf9c0bcb5..5d8dfa9d19 100644 --- a/pkg/crds/operator/operator.tigera.io_tenants.yaml +++ b/pkg/crds/operator/operator.tigera.io_tenants.yaml @@ -196,8 +196,6 @@ spec: type: boolean url: type: string - required: - - mutualTLS type: object linseedDeployment: description: LinseedDeployment configures the linseed Deployment. 
From d0963d30439302575ef71102cfbe088dcd1ddb7f Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 15 May 2024 00:57:37 -0700 Subject: [PATCH 14/20] More tests --- .../dashboards/dashboards_controller.go | 2 +- .../dashboards/dashboards_controller_test.go | 92 ++++++++++++++++--- pkg/controller/logstorage/elastic/mock.go | 12 ++- .../logstorage/users/users_controller.go | 31 +++++-- .../logstorage/users/users_controller_test.go | 5 + pkg/controller/utils/elasticsearch.go | 10 +- pkg/render/logstorage/kibana/kibana_test.go | 53 ++++------- 7 files changed, 136 insertions(+), 69 deletions(-) diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go index 5c33cb7c72..043d84a90d 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go @@ -288,7 +288,7 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil // This is the configuration for multi-tenant and single tenant with external elastic // The Tenant resource must specify the Kibana endpoint in both cases. For multi-tenant // it should be the service inside the tenant namespace. For single tenant it should be the - // URL that points to external Kibana Determine the host and port from the URL. + // URL that points to external Kibana. Determine the host and port from the URL. 
url, err := url.Parse(tenant.Spec.Kibana.URL) if err != nil { reqLogger.Error(err, "Kibana URL is invalid") diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller_test.go b/pkg/controller/logstorage/dashboards/dashboards_controller_test.go index 40f1f41de3..40d49434c6 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller_test.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller_test.go @@ -18,13 +18,13 @@ import ( "context" "fmt" + "github.com/stretchr/testify/mock" "github.com/tigera/operator/pkg/render/logstorage/dashboards" + "k8s.io/apimachinery/pkg/types" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/stretchr/testify/mock" - esv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/elasticsearch/v1" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" @@ -111,6 +111,18 @@ var _ = Describe("LogStorage Dashboards controller", func() { ctx = context.Background() cli = ctrlrfake.DefaultFakeClientBuilder(scheme).Build() + mockStatus = &status.MockStatus{} + mockStatus.On("Run").Return() + mockStatus.On("AddDaemonsets", mock.Anything) + mockStatus.On("AddDeployments", mock.Anything) + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return() + mockStatus.On("AddCronJobs", mock.Anything) + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") + mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockStatus.On("ClearDegraded") + // Create a basic Installation. 
var replicas int32 = 2 install = &operatorv1.Installation{ @@ -150,18 +162,6 @@ var _ = Describe("LogStorage Dashboards controller", func() { Context("Zero tenant", func() { BeforeEach(func() { - mockStatus = &status.MockStatus{} - mockStatus.On("Run").Return() - mockStatus.On("AddDaemonsets", mock.Anything) - mockStatus.On("AddDeployments", mock.Anything) - mockStatus.On("AddStatefulSets", mock.Anything) - mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return() - mockStatus.On("AddCronJobs", mock.Anything) - mockStatus.On("OnCRFound").Return() - mockStatus.On("ReadyToMonitor") - mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) - mockStatus.On("ClearDegraded") - // Create a CA secret for the test, and create its KeyPair. cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, common.OperatorNamespace(), certificatemanager.AllowCACreation()) Expect(err).ShouldNot(HaveOccurred()) @@ -267,4 +267,68 @@ var _ = Describe("LogStorage Dashboards controller", func() { Expect(dashboardInstaller.Image).To(Equal(fmt.Sprintf("some.registry.org/%s@%s", components.ComponentElasticTseeInstaller.Image, "sha256:dashboardhash"))) }) }) + + Context("Multi-tenant", func() { + var tenant *operatorv1.Tenant + var tenantNS string + + BeforeEach(func() { + tenantNS = "tenant-ns-a" + Expect(cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tenantNS}})).ShouldNot(HaveOccurred()) + + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: tenantNS, + }, + Spec: operatorv1.TenantSpec{ + ID: "tenant-a", + Kibana: &operatorv1.TenantKibanaSpec{ + URL: fmt.Sprintf("https://kibana.%s.svc:5601", tenantNS), + }, + }, + } + Expect(cli.Create(ctx, tenant)).ShouldNot(HaveOccurred()) + + // Create a CA secret for the test, and create its KeyPair. 
+ opts := []certificatemanager.Option{ + certificatemanager.AllowCACreation(), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, tenantNS, opts...) + Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, cm.KeyPair().Secret(tenantNS))).ShouldNot(HaveOccurred()) + bundle := cm.CreateTrustedBundle() + Expect(cli.Create(ctx, bundle.ConfigMap(tenantNS))).ShouldNot(HaveOccurred()) + + // Create the ES user secret. Generally this is created by either es-kube-controllers or the user controller in this operator. + userSecret := &corev1.Secret{} + userSecret.Name = dashboards.ElasticCredentialsSecret + userSecret.Namespace = tenantNS + userSecret.Data = map[string][]byte{"username": []byte("test-username"), "password": []byte("test-password")} + Expect(cli.Create(ctx, userSecret)).ShouldNot(HaveOccurred()) + + // Create the reconciler for the tests. + r, err = NewDashboardsControllerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true, true) + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("should reconcile resources", func() { + // Run the reconciler. + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).ShouldNot(HaveOccurred()) + Expect(result).Should(Equal(successResult)) + + // Check that K8s Job was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. 
+ dashboardJob := batchv1.Job{ + TypeMeta: metav1.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: dashboards.Name, + Namespace: tenantNS, + }, + } + Expect(test.GetResource(cli, &dashboardJob)).To(BeNil()) + }) + }) }) diff --git a/pkg/controller/logstorage/elastic/mock.go b/pkg/controller/logstorage/elastic/mock.go index c3ab9898c1..bc335ca1ad 100644 --- a/pkg/controller/logstorage/elastic/mock.go +++ b/pkg/controller/logstorage/elastic/mock.go @@ -58,13 +58,15 @@ func (m *MockESClient) DeleteRoles(ctx context.Context, roles []utils.Role) erro return ret.Error(0) } -func (m *MockESClient) DeleteUser(ctx context.Context, u *utils.User) error { - ret := m.MethodCalled("DeleteRoles", ctx, u.Roles) - if ret.Error(0) != nil { - return ret.Error(0) +func (m *MockESClient) DeleteUser(ctx context.Context, u *utils.User, keepRoles bool) error { + if !keepRoles { + ret := m.MethodCalled("DeleteRoles", ctx, u.Roles) + if ret.Error(0) != nil { + return ret.Error(0) + } } - ret = m.Called(ctx, u) + ret := m.Called(ctx, u) return ret.Error(0) } diff --git a/pkg/controller/logstorage/users/users_controller.go b/pkg/controller/logstorage/users/users_controller.go index d5107140e2..cfe39e1940 100644 --- a/pkg/controller/logstorage/users/users_controller.go +++ b/pkg/controller/logstorage/users/users_controller.go @@ -468,23 +468,36 @@ func (r *UsersCleanupController) cleanupStaleUsers(ctx context.Context, logger l lu := utils.LinseedUser(clusterID, t.Spec.ID) dashboardsUser := utils.DashboardUser(clusterID, t.Spec.ID) kibanaUser := utils.KibanaUser(clusterID, t.Spec.ID) + deletedUsers := 0 for _, user := range allESUsers { - if user.Username == lu.Username || user.Username == dashboardsUser.Username || user.Username == kibanaUser.Username { - err = esClient.DeleteUser(ctx, &user) + switch user.Username { + case lu.Username, dashboardsUser.Username: + err = esClient.DeleteUser(ctx, &user, false) if err != nil { logger.Error(err, "Failed to 
delete elastic user") + } else { + deletedUsers++ } + case kibanaUser.Username: + err = esClient.DeleteUser(ctx, &user, true) + if err != nil { + logger.Error(err, "Failed to delete elastic user") + } else { + deletedUsers++ + } + } + } - // Remove the finalizer from the tenant to allow it to be deleted. - if stringsutil.StringInSlice(userCleanupFinalizer, t.GetFinalizers()) { - t.SetFinalizers(stringsutil.RemoveStringInSlice(userCleanupFinalizer, t.GetFinalizers())) - if err = r.client.Update(ctx, &t); err != nil { - logger.Error(err, "Failed to remove user cleanup finalizer from tenant") - } + if deletedUsers > 0 { + // Remove the finalizer from the tenant to allow it to be deleted. + if stringsutil.StringInSlice(userCleanupFinalizer, t.GetFinalizers()) { + t.SetFinalizers(stringsutil.RemoveStringInSlice(userCleanupFinalizer, t.GetFinalizers())) + if err = r.client.Update(ctx, &t); err != nil { + logger.Error(err, "Failed to remove user cleanup finalizer from tenant") } - break } } + } return nil } diff --git a/pkg/controller/logstorage/users/users_controller_test.go b/pkg/controller/logstorage/users/users_controller_test.go index e9207f93b1..cc00bae20a 100644 --- a/pkg/controller/logstorage/users/users_controller_test.go +++ b/pkg/controller/logstorage/users/users_controller_test.go @@ -63,16 +63,21 @@ var _ = Describe("LogStorage cleanup controller", func() { staleLinseedUser := utils.LinseedUser(clusterID1, tenantID1) staleDashboardsUser := utils.DashboardUser(clusterID1, tenantID1) + staleKibanaUser := utils.KibanaUser(clusterID1, tenantID1) esTestUsers := []utils.User{ *staleLinseedUser, *staleDashboardsUser, + *staleKibanaUser, *utils.LinseedUser(clusterID1, tenantID2), *utils.DashboardUser(clusterID1, tenantID2), + *utils.KibanaUser(clusterID1, tenantID2), *utils.LinseedUser(clusterID2, tenantID1), *utils.DashboardUser(clusterID2, tenantID1), + *utils.KibanaUser(clusterID2, tenantID1), *utils.LinseedUser(clusterID2, tenantID2), 
*utils.DashboardUser(clusterID2, tenantID2), + *utils.KibanaUser(clusterID2, tenantID2), } testESClient.On("GetUsers", ctx).Return(esTestUsers, nil) diff --git a/pkg/controller/utils/elasticsearch.go b/pkg/controller/utils/elasticsearch.go index 9f2dfb6bd8..deabdc2457 100644 --- a/pkg/controller/utils/elasticsearch.go +++ b/pkg/controller/utils/elasticsearch.go @@ -114,7 +114,7 @@ type ElasticsearchClientCreator func(client client.Client, ctx context.Context, type ElasticClient interface { SetILMPolicies(context.Context, *operatorv1.LogStorage) error CreateUser(context.Context, *User) error - DeleteUser(context.Context, *User) error + DeleteUser(ctx context.Context, user *User, keepRoles bool) error GetUsers(ctx context.Context) ([]User, error) } @@ -363,9 +363,11 @@ func (es *esClient) deleteRole(ctx context.Context, role Role) error { return nil } -func (es *esClient) DeleteUser(ctx context.Context, user *User) error { - if err := es.DeleteRoles(ctx, user.Roles); err != nil { - return err +func (es *esClient) DeleteUser(ctx context.Context, user *User, keepRoles bool) error { + if !keepRoles { + if err := es.DeleteRoles(ctx, user.Roles); err != nil { + return err + } } _, err := es.client.XPackSecurityDeleteUser(user.Username).Do(ctx) diff --git a/pkg/render/logstorage/kibana/kibana_test.go b/pkg/render/logstorage/kibana/kibana_test.go index 73cbc1cc43..d9e14c020c 100644 --- a/pkg/render/logstorage/kibana/kibana_test.go +++ b/pkg/render/logstorage/kibana/kibana_test.go @@ -16,6 +16,7 @@ package kibana_test import ( "context" + "net/url" cmnv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/common/v1" kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" @@ -38,7 +39,6 @@ import ( "github.com/tigera/operator/pkg/controller/certificatemanager" ctrlrfake "github.com/tigera/operator/pkg/ctrlruntime/client/fake" "github.com/tigera/operator/pkg/dns" - "github.com/tigera/operator/pkg/render" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" 
"github.com/tigera/operator/pkg/render/common/networkpolicy" "github.com/tigera/operator/pkg/render/common/podaffinity" @@ -508,14 +508,14 @@ var _ = Describe("Kibana rendering tests", func() { PullSecrets: []*corev1.Secret{ {ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret"}}, }, - Provider: operatorv1.ProviderNone, - ClusterDomain: dns.DefaultClusterDomain, - TrustedBundle: bundle, - UsePSP: true, - Enabled: true, - Namespace: tenant.Namespace, - Tenant: tenant, - ExternalElasticEndpoint: "https://external-elastic-endpoint:443", + Provider: operatorv1.ProviderNone, + ClusterDomain: dns.DefaultClusterDomain, + TrustedBundle: bundle, + UsePSP: true, + Enabled: true, + Namespace: tenant.Namespace, + Tenant: tenant, + ExternalElasticURL: toURL("https://external-elastic-endpoint:443"), ChallengerClientCertificate: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: logstorage.ExternalCertsSecret, @@ -526,15 +526,7 @@ var _ = Describe("Kibana rendering tests", func() { "client.key": []byte(``), }, }, - ElasticChallengerUser: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: render.ElasticsearchAdminUserSecret, - Namespace: common.OperatorNamespace(), - }, - Data: map[string][]byte{ - "tigera-mgmt": []byte(``), - }, - }, + KibanaUsername: "kibana-user", } }) @@ -549,7 +541,6 @@ var _ = Describe("Kibana rendering tests", func() { &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: kibana.PolicyName, Namespace: tenant.Namespace}}, &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "tigera-kibana", Namespace: tenant.Namespace}}, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: logstorage.ExternalCertsSecret, Namespace: tenant.Namespace}}, - &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: render.ElasticsearchAdminUserSecret, Namespace: tenant.Namespace}}, &kbv1.Kibana{ObjectMeta: metav1.ObjectMeta{Name: kibana.CRName, Namespace: tenant.Namespace}}, } rtest.ExpectResources(createResources, expectedResources) @@ -627,7 +618,7 @@ var _ = 
Describe("Kibana rendering tests", func() { }, corev1.EnvVar{ Name: "ES_GATEWAY_ELASTIC_ENDPOINT", - Value: cfg.ExternalElasticEndpoint, + Value: cfg.ExternalElasticURL.String(), }, corev1.EnvVar{ Name: "ES_GATEWAY_ELASTIC_CA_BUNDLE_PATH", @@ -645,21 +636,6 @@ var _ = Describe("Kibana rendering tests", func() { Name: "ES_GATEWAY_ENABLE_ELASTIC_MUTUAL_TLS", Value: "true", }, - corev1.EnvVar{ - Name: "ES_GATEWAY_ELASTIC_USERNAME", - Value: "tigera-mgmt", - }, - corev1.EnvVar{ - Name: "ES_GATEWAY_ELASTIC_PASSWORD", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: render.ElasticsearchAdminUserSecret, - }, - Key: "tigera-mgmt", - }, - }, - }, )) Expect(challengerContainer.VolumeMounts).To(ContainElements( @@ -700,7 +676,7 @@ var _ = Describe("Kibana rendering tests", func() { Expect(kibanaCR.Spec.Config.Data).NotTo(BeEmpty()) Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.hosts", "http://localhost:8080")) Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.ssl.verificationMode", "none")) - Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.username", kibana.MultiTenantKibanaUser)) + Expect(kibanaCR.Spec.Config.Data).To(HaveKeyWithValue("elasticsearch.username", "kibana-user")) Expect(kibanaCR.Spec.SecureSettings).NotTo(BeNil()) Expect(kibanaCR.Spec.SecureSettings).To(ContainElement( @@ -709,6 +685,11 @@ var _ = Describe("Kibana rendering tests", func() { }) }) +func toURL(val string) *url.URL { + url, _ := url.Parse(val) + return url +} + func getX509Certs(installation *operatorv1.InstallationSpec) (certificatemanagement.KeyPairInterface, certificatemanagement.TrustedBundle) { scheme := runtime.NewScheme() Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) From 43d8705890d0a5f5311df49d03259099f36b7794 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 15 May 2024 11:55:27 
-0700 Subject: [PATCH 15/20] More tests --- .../external_elastic_controller_test.go | 332 +++++++++++++----- pkg/render/manager_test.go | 3 + .../testutils/expected_policies/kibana.json | 40 +-- .../expected_policies/kibana_ocp.json | 40 +-- 4 files changed, 283 insertions(+), 132 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller_test.go b/pkg/controller/logstorage/elastic/external_elastic_controller_test.go index 20e421cbc8..476bc67d0e 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller_test.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller_test.go @@ -16,6 +16,13 @@ package elastic import ( "context" + "fmt" + kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" + "github.com/stretchr/testify/mock" + "github.com/tigera/operator/pkg/render/logstorage/eck" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + "github.com/tigera/operator/test" + "k8s.io/apimachinery/pkg/types" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" "github.com/tigera/operator/pkg/render/logstorage" @@ -73,37 +80,11 @@ var _ = Describe("External ES Controller", func() { ctx = context.Background() cli = ctrlrfake.DefaultFakeClientBuilder(scheme).Build() - var err error - certificateManager, err = certificatemanager.Create(cli, nil, "", common.OperatorNamespace(), certificatemanager.AllowCACreation()) - Expect(err).NotTo(HaveOccurred()) - Expect(cli.Create(ctx, certificateManager.KeyPair().Secret(common.OperatorNamespace()))) // Persist the root-ca in the operator namespace. - // Create secrets necessary for reconcile to complete. These are typically created by the secrets controller. 
- esKeyPair, err := certificateManager.GetOrCreateKeyPair(cli, render.TigeraElasticsearchInternalCertSecret, common.OperatorNamespace(), []string{render.TigeraElasticsearchInternalCertSecret}) - Expect(err).NotTo(HaveOccurred()) - Expect(cli.Create(ctx, esKeyPair.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) - dnsNames := dns.GetServiceDNSNames(kibana.ServiceName, kibana.Namespace, dns.DefaultClusterDomain) - kbKeyPair, err := certificateManager.GetOrCreateKeyPair(cli, kibana.TigeraKibanaCertSecret, common.OperatorNamespace(), dnsNames) - Expect(err).NotTo(HaveOccurred()) - Expect(cli.Create(ctx, kbKeyPair.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) - - // Create the trusted bundle configmap. This is normally created out of band by the secret controller. - bundle := certificateManager.CreateTrustedBundle(esKeyPair) - Expect(cli.Create(ctx, bundle.ConfigMap(render.ElasticsearchNamespace))).NotTo(HaveOccurred()) - - prometheusTLS, err := certificateManager.GetOrCreateKeyPair(cli, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace(), []string{monitor.PrometheusClientTLSSecretName}) - Expect(err).NotTo(HaveOccurred()) - - Expect(cli.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) - - Expect(cli.Create(ctx, &operatorv1.APIServer{ - ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, - Status: operatorv1.APIServerStatus{State: operatorv1.TigeraStatusReady}, - })).NotTo(HaveOccurred()) - - Expect(cli.Create(ctx, &v3.Tier{ - ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}, - })).NotTo(HaveOccurred()) + mockStatus = &status.MockStatus{} + mockStatus.On("Run").Return() + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") install = &operatorv1.Installation{ ObjectMeta: metav1.ObjectMeta{ @@ -120,81 +101,245 @@ var _ = Describe("External ES Controller", func() { } Expect(cli.Create(ctx, install)).ShouldNot(HaveOccurred()) - // Create the public certs used to verify the 
Elasticsearch and Kibana. - esPublicCert, err := secret.CreateTLSSecret( - nil, - "tigera-secure-es-http-certs-public", - common.OperatorNamespace(), - "tls.key", - "tls.crt", - tls.DefaultCertificateDuration, - nil, - ) - Expect(err).ShouldNot(HaveOccurred()) - Expect(cli.Create(ctx, esPublicCert)).ShouldNot(HaveOccurred()) - - kbPublicCert, err := secret.CreateTLSSecret( - nil, - "tigera-secure-kb-http-certs-public", - common.OperatorNamespace(), - "tls.key", - "tls.crt", - tls.DefaultCertificateDuration, - nil, - ) - Expect(err).ShouldNot(HaveOccurred()) - Expect(cli.Create(ctx, kbPublicCert)).ShouldNot(HaveOccurred()) - - // Create the ES admin username and password. - esAdminUserSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: render.ElasticsearchAdminUserSecret, - Namespace: common.OperatorNamespace(), - }, - Data: map[string][]byte{"tigera-mgmt": []byte("password")}, - } - Expect(cli.Create(ctx, esAdminUserSecret)).ShouldNot(HaveOccurred()) - - // Create the ExternalCertsSecret which contains the client certificate for connecting to the external ES cluster. 
- externalCertsSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: logstorage.ExternalCertsSecret, - Namespace: common.OperatorNamespace(), - }, - Data: map[string][]byte{ - "tls.crt": {}, - }, - } - Expect(cli.Create(ctx, externalCertsSecret)).ShouldNot(HaveOccurred()) - Expect(cli.Create( ctx, &operatorv1.ManagementCluster{ ObjectMeta: metav1.ObjectMeta{Name: utils.DefaultTSEEInstanceKey.Name}, })).NotTo(HaveOccurred()) - mockStatus = &status.MockStatus{} - mockStatus.On("Run").Return() - mockStatus.On("OnCRFound").Return() - mockStatus.On("ReadyToMonitor") - }) - - It("reconciles successfully", func() { CreateLogStorage(cli, &operatorv1.LogStorage{ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, Spec: operatorv1.LogStorageSpec{}, Status: operatorv1.LogStorageStatus{State: operatorv1.TigeraStatusReady}, }) - // Run the reconciler and expect it to reach the end successfully. - mockStatus.On("ClearDegraded") - r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain) - Expect(err).ShouldNot(HaveOccurred()) - result, err := r.Reconcile(ctx, reconcile.Request{}) - Expect(err).ToNot(HaveOccurred()) - Expect(result).Should(Equal(reconcile.Result{})) - mockStatus.AssertExpectations(GinkgoT()) + Expect(cli.Create(ctx, &operatorv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, + Status: operatorv1.APIServerStatus{State: operatorv1.TigeraStatusReady}, + })).NotTo(HaveOccurred()) + + Expect(cli.Create(ctx, &v3.Tier{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-tigera"}, + })).NotTo(HaveOccurred()) + + }) + + Context("Single tenant", func() { + BeforeEach(func() { + var err error + certificateManager, err = certificatemanager.Create(cli, nil, "", common.OperatorNamespace(), certificatemanager.AllowCACreation()) + Expect(err).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, certificateManager.KeyPair().Secret(common.OperatorNamespace()))) // Persist the root-ca in the operator 
namespace. + + // Create secrets necessary for reconcile to complete. These are typically created by the secrets controller. + esKeyPair, err := certificateManager.GetOrCreateKeyPair(cli, render.TigeraElasticsearchInternalCertSecret, common.OperatorNamespace(), []string{render.TigeraElasticsearchInternalCertSecret}) + Expect(err).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, esKeyPair.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) + dnsNames := dns.GetServiceDNSNames(kibana.ServiceName, kibana.Namespace, dns.DefaultClusterDomain) + kbKeyPair, err := certificateManager.GetOrCreateKeyPair(cli, kibana.TigeraKibanaCertSecret, common.OperatorNamespace(), dnsNames) + Expect(err).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, kbKeyPair.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) + + // Create the trusted bundle configmap. This is normally created out of band by the secret controller. + bundle := certificateManager.CreateTrustedBundle(esKeyPair) + Expect(cli.Create(ctx, bundle.ConfigMap(render.ElasticsearchNamespace))).NotTo(HaveOccurred()) + + prometheusTLS, err := certificateManager.GetOrCreateKeyPair(cli, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace(), []string{monitor.PrometheusClientTLSSecretName}) + Expect(err).NotTo(HaveOccurred()) + + Expect(cli.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) + + // Create the public certs used to verify the Elasticsearch and Kibana. 
+ esPublicCert, err := secret.CreateTLSSecret( + nil, + "tigera-secure-es-http-certs-public", + common.OperatorNamespace(), + "tls.key", + "tls.crt", + tls.DefaultCertificateDuration, + nil, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, esPublicCert)).ShouldNot(HaveOccurred()) + + kbPublicCert, err := secret.CreateTLSSecret( + nil, + "tigera-secure-kb-http-certs-public", + common.OperatorNamespace(), + "tls.key", + "tls.crt", + tls.DefaultCertificateDuration, + nil, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, kbPublicCert)).ShouldNot(HaveOccurred()) + + // Create the ES admin username and password. + esAdminUserSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: render.ElasticsearchAdminUserSecret, + Namespace: common.OperatorNamespace(), + }, + Data: map[string][]byte{"tigera-mgmt": []byte("password")}, + } + Expect(cli.Create(ctx, esAdminUserSecret)).ShouldNot(HaveOccurred()) + + // Create the ExternalCertsSecret which contains the client certificate for connecting to the external ES cluster. + externalCertsSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: logstorage.ExternalCertsSecret, + Namespace: common.OperatorNamespace(), + }, + Data: map[string][]byte{ + "tls.crt": {}, + }, + } + Expect(cli.Create(ctx, externalCertsSecret)).ShouldNot(HaveOccurred()) + }) + + It("reconciles successfully", func() { + // Run the reconciler and expect it to reach the end successfully. 
+ mockStatus.On("ClearDegraded") + r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, false) + Expect(err).ShouldNot(HaveOccurred()) + result, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(Equal(reconcile.Result{})) + mockStatus.AssertExpectations(GinkgoT()) + }) }) + + Context("Multi tenant", func() { + var ( + tenant *operatorv1.Tenant + tenantNS = "tenant-ns-a" + eckOperator = appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{Kind: "StatefulSet", APIVersion: "apps/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: eck.OperatorName, + Namespace: eck.OperatorNamespace, + }, + } + kibanaCR = kbv1.Kibana{ObjectMeta: metav1.ObjectMeta{Name: kibana.CRName, Namespace: tenantNS}} + ) + + BeforeEach(func() { + Expect(cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tenantNS}})).ShouldNot(HaveOccurred()) + + // Create a dummy secret mocking the client certificates needed for mTLS. 
+ esClientSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: logstorage.ExternalCertsSecret, Namespace: common.OperatorNamespace()}, + Data: map[string][]byte{"client.crt": []byte("cert"), "client.key": []byte("key")}, + } + Expect(cli.Create(ctx, esClientSecret)).ShouldNot(HaveOccurred()) + + clusterIDConfigMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-info", + Namespace: "tigera-operator", + }, + Data: map[string]string{ + "cluster-id": "cluster-id", + }, + } + err := cli.Create(ctx, &clusterIDConfigMap) + Expect(err).NotTo(HaveOccurred()) + + tenant = &operatorv1.Tenant{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: tenantNS, + }, + Spec: operatorv1.TenantSpec{ + ID: "tenant-a", + Elastic: &operatorv1.TenantElasticSpec{ + URL: "https://external.elastic:443", + MutualTLS: true, + }, + Kibana: &operatorv1.TenantKibanaSpec{ + URL: fmt.Sprintf("https://kibana.%s.svc:5601", tenantNS), + }, + }, + } + Expect(cli.Create(ctx, tenant)).ShouldNot(HaveOccurred()) + + // Create a CA secret for the test, and create its KeyPair. + opts := []certificatemanager.Option{ + certificatemanager.AllowCACreation(), + certificatemanager.WithTenant(tenant), + } + cm, err := certificatemanager.Create(cli, &install.Spec, dns.DefaultClusterDomain, tenantNS, opts...) 
+ Expect(err).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, cm.KeyPair().Secret(tenantNS))).ShouldNot(HaveOccurred()) + bundle := cm.CreateTrustedBundle() + Expect(cli.Create(ctx, bundle.ConfigMap(tenantNS))).ShouldNot(HaveOccurred()) + }) + + It("should reconcile resources", func() { + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("ClearDegraded") + + r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true) + Expect(err).ShouldNot(HaveOccurred()) + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(Equal(reconcile.Result{})) + mockStatus.AssertExpectations(GinkgoT()) + + // Check that ECK was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. + Expect(test.GetResource(cli, &eckOperator)).NotTo(HaveOccurred()) + + // Check that Kibana CR was created as expected. We don't need to check every resource in detail, since + // the render package has its own tests which cover this in more detail. 
+ Expect(test.GetResource(cli, &kibanaCR)).NotTo(HaveOccurred()) + }) + + It("should reconcile resources when kibana is disabled per tenant", func() { + mockStatus.On("ClearDegraded") + + // Disable Kibana + tenant.Spec.Kibana = nil + Expect(cli.Update(ctx, tenant)).ShouldNot(HaveOccurred()) + + r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true) + Expect(err).ShouldNot(HaveOccurred()) + result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).ToNot(HaveOccurred()) + Expect(result).Should(Equal(reconcile.Result{})) + mockStatus.AssertExpectations(GinkgoT()) + + Expect(test.GetResource(cli, &eckOperator)).To(HaveOccurred()) + Expect(test.GetResource(cli, &kibanaCR)).To(HaveOccurred()) + }) + + It("should wait for the tenant CA to be provisioned", func() { + mockStatus.On("ClearDegraded") + mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + + // Delete the CA secret for this test. + caSecret := &corev1.Secret{} + caSecret.Name = certificatemanagement.TenantCASecretName + caSecret.Namespace = tenantNS + Expect(cli.Delete(ctx, caSecret)).ShouldNot(HaveOccurred()) + + // Run the reconciler. + r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true) + Expect(err).ShouldNot(HaveOccurred()) + _, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: "default", Namespace: tenantNS}}) + Expect(err).Should(HaveOccurred()) + Expect(err.Error()).Should(ContainSubstring("CA secret")) + }) + + It("should not reconcile any resources if no Namespace was given", func() { + // Run the reconciler. 
+ r, err := NewExternalESReconcilerWithShims(cli, scheme, mockStatus, operatorv1.ProviderNone, dns.DefaultClusterDomain, true) + Expect(err).ShouldNot(HaveOccurred()) + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + }) + }) + }) func NewExternalESReconcilerWithShims( @@ -203,12 +348,14 @@ func NewExternalESReconcilerWithShims( status status.StatusManager, provider operatorv1.Provider, clusterDomain string, + multiTenant bool, ) (*ExternalESController, error) { opts := options.AddOptions{ DetectedProvider: provider, ClusterDomain: clusterDomain, ShutdownContext: context.TODO(), ElasticExternal: true, + MultiTenant: multiTenant, } r := &ExternalESController{ @@ -218,6 +365,7 @@ func NewExternalESReconcilerWithShims( usePSP: opts.UsePSP, clusterDomain: opts.ClusterDomain, provider: opts.DetectedProvider, + multiTenant: opts.MultiTenant, } r.status.Run(opts.ShutdownContext) return r, nil diff --git a/pkg/render/manager_test.go b/pkg/render/manager_test.go index eb7a78ba7c..1545a0f2db 100644 --- a/pkg/render/manager_test.go +++ b/pkg/render/manager_test.go @@ -1112,6 +1112,7 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_URL", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", tenantANamespace)})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "TENANT_ID", Value: "tenant-a"})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "TENANT_NAMESPACE", Value: tenantANamespace})) + Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "ELASTIC_KIBANA_ENDPOINT", Value: fmt.Sprintf("https://tigera-secure-kb-http.%s.svc.cluster.local:5601", tenantANamespace)})) }) It("should not install UISettings / UISettingsGroups", func() { @@ -1227,6 +1228,7 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_URL", Value: 
fmt.Sprintf("https://tigera-manager.%s.svc:9443", render.ManagerNamespace)})) Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_LINSEED_ENDPOINT", Value: "https://tigera-linseed.tigera-elasticsearch.svc.cluster.local"})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "TENANT_ID", Value: "tenant-a"})) + Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "ELASTIC_KIBANA_ENDPOINT", Value: "https://tigera-secure-es-gateway-http.tigera-elasticsearch.svc:5601"})) // Make sure we don't render multi-tenant environment variables for _, env := range envs { @@ -1262,6 +1264,7 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_CLAIM", Value: "tenant-a"})) Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_LINSEED_ENDPOINT", Value: "https://tigera-linseed.tigera-elasticsearch.svc.cluster.local"})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_URL", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", render.ManagerNamespace)})) + Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "ELASTIC_KIBANA_ENDPOINT", Value: "https://tigera-secure-es-gateway-http.tigera-elasticsearch.svc:5601"})) // Make sure we don't render multi-tenant environment variables for _, env := range envs { diff --git a/pkg/render/testutils/expected_policies/kibana.json b/pkg/render/testutils/expected_policies/kibana.json index f77504f308..29658c07c7 100644 --- a/pkg/render/testutils/expected_policies/kibana.json +++ b/pkg/render/testutils/expected_policies/kibana.json @@ -42,19 +42,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" - }, - "destination": { - "ports": [ - 5601 - ] - } - }, { "action": "Allow", "protocol": "TCP", @@ -80,22 +67,22 @@ "selector": "k8s-app == 'elastic-operator'", "namespaceSelector": 
"projectcalico.org/name == 'tigera-eck-operator'" } - } - ], - "egress": [ + }, { "action": "Allow", "protocol": "TCP", "source": { + "selector": "k8s-app == 'tigera-secure-es-gateway'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { - "selector": "elasticsearch.k8s.elastic.co/cluster-name == 'tigera-secure'", - "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", "ports": [ - 9200 + 5601 ] } - }, + } + ], + "egress": [ { "action": "Allow", "protocol": "UDP", @@ -117,6 +104,19 @@ } } }, + { + "action": "Allow", + "protocol": "TCP", + "source": { + }, + "destination": { + "selector": "elasticsearch.k8s.elastic.co/cluster-name == 'tigera-secure'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", + "ports": [ + 9200 + ] + } + }, { "action": "Allow", "destination": { diff --git a/pkg/render/testutils/expected_policies/kibana_ocp.json b/pkg/render/testutils/expected_policies/kibana_ocp.json index a825adae3c..3dfec1f321 100644 --- a/pkg/render/testutils/expected_policies/kibana_ocp.json +++ b/pkg/render/testutils/expected_policies/kibana_ocp.json @@ -42,19 +42,6 @@ ] } }, - { - "action": "Allow", - "protocol": "TCP", - "source": { - "selector": "k8s-app == 'tigera-secure-es-gateway'", - "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" - }, - "destination": { - "ports": [ - 5601 - ] - } - }, { "action": "Allow", "protocol": "TCP", @@ -80,22 +67,22 @@ "selector": "k8s-app == 'elastic-operator'", "namespaceSelector": "projectcalico.org/name == 'tigera-eck-operator'" } - } - ], - "egress": [ + }, { "action": "Allow", "protocol": "TCP", "source": { + "selector": "k8s-app == 'tigera-secure-es-gateway'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'" }, "destination": { - "selector": "elasticsearch.k8s.elastic.co/cluster-name == 'tigera-secure'", - "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", "ports": [ - 9200 
+ 5601 ] } - }, + } + ], + "egress": [ { "action": "Allow", "protocol": "UDP", @@ -128,6 +115,19 @@ } } }, + { + "action": "Allow", + "protocol": "TCP", + "source": { + }, + "destination": { + "selector": "elasticsearch.k8s.elastic.co/cluster-name == 'tigera-secure'", + "namespaceSelector": "projectcalico.org/name == 'tigera-elasticsearch'", + "ports": [ + 9200 + ] + } + }, { "action": "Allow", "destination": { From 2139a5a943a856352fd202e5ee821587def9119d Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 15 May 2024 11:57:43 -0700 Subject: [PATCH 16/20] Rearrange imports --- .../external_elastic_controller_test.go | 36 +++++++++---------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller_test.go b/pkg/controller/logstorage/elastic/external_elastic_controller_test.go index 476bc67d0e..7b5925eec2 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller_test.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller_test.go @@ -17,29 +17,11 @@ package elastic import ( "context" "fmt" - kbv1 "github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" - "github.com/stretchr/testify/mock" - "github.com/tigera/operator/pkg/render/logstorage/eck" - "github.com/tigera/operator/pkg/tls/certificatemanagement" - "github.com/tigera/operator/test" - "k8s.io/apimachinery/pkg/types" - - v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" - "github.com/tigera/operator/pkg/render/logstorage" - "github.com/tigera/operator/pkg/tls" - - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/tigera/operator/pkg/controller/certificatemanager" - "github.com/tigera/operator/pkg/controller/options" - "github.com/tigera/operator/pkg/controller/utils" - "github.com/tigera/operator/pkg/dns" - "github.com/tigera/operator/pkg/render" - "github.com/tigera/operator/pkg/render/common/secret" + kbv1 
"github.com/elastic/cloud-on-k8s/v2/pkg/apis/kibana/v1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - + "github.com/stretchr/testify/mock" admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -48,15 +30,29 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/apis" "github.com/tigera/operator/pkg/common" + "github.com/tigera/operator/pkg/controller/certificatemanager" + "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" + "github.com/tigera/operator/pkg/controller/utils" ctrlrfake "github.com/tigera/operator/pkg/ctrlruntime/client/fake" + "github.com/tigera/operator/pkg/dns" + "github.com/tigera/operator/pkg/render" + "github.com/tigera/operator/pkg/render/common/secret" + "github.com/tigera/operator/pkg/render/logstorage" + "github.com/tigera/operator/pkg/render/logstorage/eck" "github.com/tigera/operator/pkg/render/logstorage/kibana" "github.com/tigera/operator/pkg/render/monitor" + "github.com/tigera/operator/pkg/tls" + "github.com/tigera/operator/pkg/tls/certificatemanagement" + "github.com/tigera/operator/test" ) var _ = Describe("External ES Controller", func() { From 6c500ac3e4e5b4fd8c5388dffabe35d727dcd735 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 22 May 2024 12:00:20 -0700 Subject: [PATCH 17/20] [CODE REVIEW] Fix typo --- .../logstorage/elastic/external_elastic_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go
b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 0cef55f90d..6a9183da03 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -90,7 +90,7 @@ func AddExternalES(mgr manager.Manager, opts options.AddOptions) error { r.status.Run(opts.ShutdownContext) // Create a controller using the reconciler and register it with the manager to receive reconcile calls. - c, err := ctrlruntime.NewController("log-storage-external-es-controllerr", mgr, controller.Options{Reconciler: r}) + c, err := ctrlruntime.NewController("log-storage-external-es-controller", mgr, controller.Options{Reconciler: r}) if err != nil { return err } From a53308df02546f60c2291f63b69e2dab25efcade Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 22 May 2024 12:01:30 -0700 Subject: [PATCH 18/20] [CODE REVIEW] Watch for Kibana Secret --- pkg/controller/manager/manager_controller.go | 23 +++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index 2ce45d1f96..909dc3e014 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -48,6 +48,7 @@ import ( relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" "github.com/tigera/operator/pkg/render/common/networkpolicy" "github.com/tigera/operator/pkg/render/logstorage/eck" + "github.com/tigera/operator/pkg/render/logstorage/kibana" rmanager "github.com/tigera/operator/pkg/render/manager" "github.com/tigera/operator/pkg/render/monitor" "github.com/tigera/operator/pkg/tls/certificatemanagement" @@ -156,15 +157,21 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { if helper.TruthNamespace() == helper.InstallNamespace() { namespacesToWatch = []string{helper.InstallNamespace()} } + secretsToWatch 
:= []string{ + render.ManagerTLSSecretName, + render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, + render.ManagerInternalTLSSecretName, monitor.PrometheusServerTLSSecretName, certificatemanagement.CASecretName, + } + if opts.MultiTenant { + secretsToWatch = append(secretsToWatch, kibana.TigeraKibanaCertSecret) + } else { + // We need to watch for es-gateway certificate because es-proxy still creates a + // client to talk to kibana via es-gateway for zero-tenant and single-tenant + secretsToWatch = append(secretsToWatch, relasticsearch.PublicCertSecret) + } + for _, namespace := range namespacesToWatch { - for _, secretName := range []string{ - // We need to watch for es-gateway certificate because es-proxy still creates a - // client to talk to kibana via es-gateway - // TODO: ALINA - Do we need to add Kibana for multi-tenant ? - render.ManagerTLSSecretName, relasticsearch.PublicCertSecret, - render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, - render.ManagerInternalTLSSecretName, monitor.PrometheusServerTLSSecretName, certificatemanagement.CASecretName, - } { + for _, secretName := range []string{} { if err = utils.AddSecretsWatch(c, secretName, namespace); err != nil { return fmt.Errorf("manager-controller failed to watch the secret '%s' in '%s' namespace: %w", secretName, namespace, err) } From 3d2dff2ba9a4bc66ceb80bc5138c1f181aa0c9b0 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 22 May 2024 12:08:10 -0700 Subject: [PATCH 19/20] Set Kibana Endpoint in Voltron and clean up old dependencies --- pkg/controller/manager/manager_controller.go | 15 --------------- pkg/render/manager.go | 17 ++++++++++------- pkg/render/manager_test.go | 15 +-------------- 3 files changed, 11 insertions(+), 36 deletions(-) diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index
909dc3e014..5e76c67997 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -499,20 +499,6 @@ func (r *ReconcileManager) Reconcile(ctx context.Context, request reconcile.Requ return reconcile.Result{}, err } - var clusterConfig *relasticsearch.ClusterConfig - // We only require Elastic cluster configuration when Kibana is enabled. - if render.KibanaEnabled(tenant, installation) { - clusterConfig, err = utils.GetElasticsearchClusterConfig(context.Background(), r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotFound, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, logc) - return reconcile.Result{}, nil - } - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, logc) - return reconcile.Result{}, err - } - } - managementCluster, err := utils.GetManagementCluster(ctx, r.client) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, logc) @@ -665,7 +651,6 @@ func (r *ReconcileManager) Reconcile(ctx context.Context, request reconcile.Requ VoltronRouteConfig: routeConfig, KeyValidatorConfig: keyValidatorConfig, TrustedCertBundle: trustedBundle, - ClusterConfig: clusterConfig, TLSKeyPair: tlsSecret, VoltronLinseedKeyPair: linseedVoltronServerCert, PullSecrets: pullSecrets, diff --git a/pkg/render/manager.go b/pkg/render/manager.go index e24e9965e6..320ca2b156 100644 --- a/pkg/render/manager.go +++ b/pkg/render/manager.go @@ -130,7 +130,6 @@ type ManagerConfiguration struct { KeyValidatorConfig authentication.KeyValidatorConfig ESSecrets []*corev1.Secret - ClusterConfig *relasticsearch.ClusterConfig PullSecrets []*corev1.Secret Openshift bool Installation *operatorv1.InstallationSpec @@ -521,7 +520,7 @@ func (c *managerComponent) voltronContainer() corev1.Container { {Name: "VOLTRON_PORT", Value: 
defaultVoltronPort}, {Name: "VOLTRON_COMPLIANCE_ENDPOINT", Value: fmt.Sprintf("https://compliance.%s.svc.%s", c.cfg.ComplianceNamespace, c.cfg.ClusterDomain)}, {Name: "VOLTRON_LOGLEVEL", Value: "Info"}, - {Name: "VOLTRON_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, + {Name: "VOLTRON_KIBANA_ENDPOINT", Value: c.kibanaEndpoint()}, {Name: "VOLTRON_KIBANA_BASE_PATH", Value: fmt.Sprintf("/%s/", KibanaBasePath)}, {Name: "VOLTRON_KIBANA_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()}, {Name: "VOLTRON_PACKET_CAPTURE_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()}, @@ -611,13 +610,9 @@ func (c *managerComponent) managerEsProxyContainer() corev1.Container { keyPath, certPath = c.cfg.InternalTLSKeyPair.VolumeMountKeyFilePath(), c.cfg.InternalTLSKeyPair.VolumeMountCertificateFilePath() } - kibanaEndpoint := rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain) - if c.cfg.Tenant.MultiTenant() { - kibanaEndpoint = fmt.Sprintf("https://tigera-secure-kb-http.%s.svc.cluster.local:5601", c.cfg.Namespace) - } env := []corev1.EnvVar{ {Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)}, - {Name: "ELASTIC_KIBANA_ENDPOINT", Value: kibanaEndpoint}, + {Name: "ELASTIC_KIBANA_ENDPOINT", Value: c.kibanaEndpoint()}, {Name: "FIPS_MODE_ENABLED", Value: operatorv1.IsFIPSModeEnabledString(c.cfg.Installation.FIPSMode)}, {Name: "LINSEED_CLIENT_CERT", Value: certPath}, {Name: "LINSEED_CLIENT_KEY", Value: keyPath}, @@ -664,6 +659,14 @@ func (c *managerComponent) managerEsProxyContainer() corev1.Container { } } +func (c *managerComponent) kibanaEndpoint() string { + kibanaEndpoint := rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain) + if c.cfg.Tenant.MultiTenant() { + kibanaEndpoint = fmt.Sprintf("https://tigera-secure-kb-http.%s.svc.cluster.local:5601", c.cfg.Namespace) + } + return kibanaEndpoint +} + // managerTolerations returns the tolerations for the Tigera Secure manager deployment pods. 
func (c *managerComponent) managerTolerations() []corev1.Toleration { return append(c.cfg.Installation.ControlPlaneTolerations, rmeta.TolerateCriticalAddonsAndControlPlane...) diff --git a/pkg/render/manager_test.go b/pkg/render/manager_test.go index 1545a0f2db..00c0a5a73a 100644 --- a/pkg/render/manager_test.go +++ b/pkg/render/manager_test.go @@ -33,7 +33,6 @@ import ( "github.com/tigera/operator/pkg/dns" "github.com/tigera/operator/pkg/render" "github.com/tigera/operator/pkg/render/common/authentication" - relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/podaffinity" rtest "github.com/tigera/operator/pkg/render/common/test" @@ -433,7 +432,6 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { VoltronLinseedKeyPair: voltronLinseedCert, InternalTLSKeyPair: internalTraffic, Installation: installation, - ClusterConfig: &relasticsearch.ClusterConfig{}, Namespace: render.ManagerNamespace, TruthNamespace: common.OperatorNamespace(), } @@ -699,14 +697,8 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { // renderManager passes in as few parameters as possible to render.Manager without it // panicing. It accepts variations on the installspec for testing purposes. renderManager := func(i *operatorv1.InstallationSpec) *appsv1.Deployment { - var esConfigMap *relasticsearch.ClusterConfig - // We only require Elastic cluster configuration when Kibana is enabled. 
- if render.KibanaEnabled(nil, i) { - esConfigMap = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) - } cfg := &render.ManagerConfiguration{ TrustedCertBundle: bundle, - ClusterConfig: esConfigMap, TLSKeyPair: kp, VoltronLinseedKeyPair: voltronLinseedKP, Installation: i, @@ -1109,6 +1101,7 @@ var _ = Describe("Tigera Secure Manager rendering tests", func() { Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_REQUIRE_TENANT_CLAIM", Value: "true"})) Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_TENANT_CLAIM", Value: "tenant-a"})) Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_LINSEED_ENDPOINT", Value: fmt.Sprintf("https://tigera-linseed.%s.svc", tenantANamespace)})) + Expect(envs).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_KIBANA_ENDPOINT", Value: fmt.Sprintf("https://tigera-secure-kb-http.%s.svc.cluster.local:5601", tenantANamespace)})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "VOLTRON_URL", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", tenantANamespace)})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "TENANT_ID", Value: "tenant-a"})) Expect(esProxyEnv).To(ContainElement(corev1.EnvVar{Name: "TENANT_NAMESPACE", Value: tenantANamespace})) @@ -1333,15 +1326,9 @@ func renderObjects(roc renderConfig) []client.Object { roc.bindingNamespaces = []string{roc.ns} } - var esConfigMap *relasticsearch.ClusterConfig - // We only require Elastic cluster configuration when Kibana is enabled. 
- if render.KibanaEnabled(roc.tenant, roc.installation) { - esConfigMap = relasticsearch.NewClusterConfig("clusterTestName", 1, 1, 1) - } cfg := &render.ManagerConfiguration{ KeyValidatorConfig: dexCfg, TrustedCertBundle: bundle, - ClusterConfig: esConfigMap, TLSKeyPair: managerTLS, Installation: roc.installation, ManagementCluster: roc.managementCluster, From 3e4f8bcde27320cd4f5ca0ed1187fa5260ea6b19 Mon Sep 17 00:00:00 2001 From: Alina Militaru <41362174+asincu@users.noreply.github.com> Date: Wed, 22 May 2024 13:21:40 -0700 Subject: [PATCH 20/20] [CODE REVIEW] Watch for Kibana cert --- pkg/controller/manager/manager_controller.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index 5e76c67997..69ec1f2a86 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -157,11 +157,13 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { if helper.TruthNamespace() == helper.InstallNamespace() { namespacesToWatch = []string{helper.InstallNamespace()} } + secretsToWatch := []string{ render.ManagerTLSSecretName, render.VoltronTunnelSecretName, render.ComplianceServerCertSecret, render.PacketCaptureServerCert, render.ManagerInternalTLSSecretName, monitor.PrometheusServerTLSSecretName, certificatemanagement.CASecretName, } + if opts.MultiTenant { secretsToWatch = append(secretsToWatch, kibana.TigeraKibanaCertSecret) } else { @@ -171,7 +173,7 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } for _, namespace := range namespacesToWatch { - for _, secretName := range []string{} { + for _, secretName := range secretsToWatch { if err = utils.AddSecretsWatch(c, secretName, namespace); err != nil { return fmt.Errorf("manager-controller failed to watch the secret '%s' in '%s' namespace: %w", secretName, namespace, err) }