Skip to content

Commit

Permalink
Merge pull request #263 from sknot-rh/logging
Browse files Browse the repository at this point in the history
Set glog levels to control verbosity
  • Loading branch information
danielqsj authored Sep 16, 2021
2 parents 49ae4a1 + 50666c4 commit b44f44d
Showing 1 changed file with 15 additions and 9 deletions.
24 changes: 15 additions & 9 deletions kafka_exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,12 @@ const (
clientID = "kafka_exporter"
)

// Verbosity levels for glog.V(...) call sites in this file.
// Higher values are noisier; messages are emitted only when the
// exporter is run with -v set to at least the given level.
const (
INFO = 0 // always shown at default verbosity (startup, listen address)
DEBUG = 1 // operational detail (metadata refresh, consumer-group fetch)
TRACE = 2 // fine-grained flow (client init done, concurrent-collect waits)
)

var (
clusterBrokers *prometheus.Desc
topicPartitions *prometheus.Desc
Expand Down Expand Up @@ -225,7 +231,7 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor
}

if opts.useZooKeeperLag {
glog.Infoln("Using zookeeper lag, so connecting to zookeeper")
glog.V(DEBUG).Infoln("Using zookeeper lag, so connecting to zookeeper")
zookeeperClient, err = kazoo.NewKazoo(opts.uriZookeeper, nil)
if err != nil {
return nil, errors.Wrap(err, "error connecting to zookeeper")
Expand All @@ -245,7 +251,7 @@ func NewExporter(opts kafkaOpts, topicFilter string, groupFilter string) (*Expor
return nil, errors.Wrap(err, "Error Init Kafka Client")
}

glog.Infoln("Done Init Clients")
glog.V(TRACE).Infoln("Done Init Clients")
// Init our exporter.
return &Exporter{
client: client,
Expand Down Expand Up @@ -309,7 +315,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.sgWaitCh = make(chan struct{})
go e.collectChans(e.sgWaitCh)
} else {
glog.Info("concurrent calls detected, waiting for first to finish")
glog.V(TRACE).Info("concurrent calls detected, waiting for first to finish")
}
// Put in another variable to ensure not overwriting it in another Collect once we wait
waiter := e.sgWaitCh
Expand Down Expand Up @@ -357,7 +363,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) {
now := time.Now()

if now.After(e.nextMetadataRefresh) {
glog.Info("Refreshing client metadata")
glog.V(DEBUG).Info("Refreshing client metadata")

if err := e.client.RefreshMetadata(); err != nil {
glog.Errorf("Cannot refresh topics, using cached data: %v", err)
Expand Down Expand Up @@ -628,7 +634,7 @@ func (e *Exporter) collect(ch chan<- prometheus.Metric) {
}
}

glog.Info("Fetching consumer group metrics")
glog.V(DEBUG).Info("Fetching consumer group metrics")
if len(e.client.Brokers()) > 0 {
for _, broker := range e.client.Brokers() {
wg.Add(1)
Expand Down Expand Up @@ -730,8 +736,8 @@ func setup(
flag.Parse()
defer glog.Flush()

glog.Infoln("Starting kafka_exporter", version.Info())
glog.Infoln("Build context", version.BuildContext())
glog.V(INFO).Infoln("Starting kafka_exporter", version.Info())
glog.V(DEBUG).Infoln("Build context", version.BuildContext())

clusterBrokers = prometheus.NewDesc(
prometheus.BuildFQName(namespace, "", "brokers"),
Expand Down Expand Up @@ -847,7 +853,7 @@ func setup(
})

if opts.serverUseTLS {
glog.Infoln("Listening on HTTPS", listenAddress)
glog.V(INFO).Infoln("Listening on HTTPS", listenAddress)

_, err := CanReadCertAndKey(opts.serverTlsCertFile, opts.serverTlsKeyFile)
if err != nil {
Expand Down Expand Up @@ -890,7 +896,7 @@ func setup(
}
glog.Fatal(server.ListenAndServeTLS(opts.serverTlsCertFile, opts.serverTlsKeyFile))
} else {
glog.Infoln("Listening on HTTP", listenAddress)
glog.V(INFO).Infoln("Listening on HTTP", listenAddress)
glog.Fatal(http.ListenAndServe(listenAddress, nil))
}
}

0 comments on commit b44f44d

Please sign in to comment.