diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f10c746de..7e1c3b9ddf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ We use the following categories for changes: ### Added - Alerts from promscale monitoring mixin are groupped also by namespace label [#1714] +- Added a new family of metrics tracking database maintenance jobs durations and failures [#1745] ### Changed - Reduced the verbosity of the logs emitted by the vacuum engine [#1715] diff --git a/docs/mixin/dashboards/promscale.json b/docs/mixin/dashboards/promscale.json index 615b5a6608..ccb511c5da 100644 --- a/docs/mixin/dashboards/promscale.json +++ b/docs/mixin/dashboards/promscale.json @@ -2455,6 +2455,330 @@ "title": "Longest running maintenance query", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 54, + "interval": "2m", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + 
"exemplar": false, + "expr": "histogram_quantile(0.99, max(rate(promscale_sql_database_worker_maintenance_job_metrics_compression_last_duration_seconds_bucket{namespace=~\"$namespace\"}[$__rate_interval])) by (le, job))", + "interval": "", + "legendFormat": "metrics-compression", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, max(rate(promscale_sql_database_worker_maintenance_job_metrics_retention_last_duration_seconds_bucket{namespace=~\"$namespace\"}[$__rate_interval])) by (le, job))", + "hide": false, + "interval": "", + "legendFormat": "metrics-retention", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, max(rate(promscale_sql_database_worker_maintenance_job_traces_retention_last_duration_seconds_bucket{namespace=~\"$namespace\"}[$__rate_interval])) by (le, job))", + "hide": false, + "interval": "", + "legendFormat": "traces-retention", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, max(rate(promscale_sql_database_worker_maintenance_job_traces_compression_last_duration_seconds_bucket{namespace=~\"$namespace\"}[$__rate_interval])) by (le, job))", + "hide": false, + "interval": "", + "legendFormat": "traces-compression", + "range": true, + "refId": "D" + } + ], + "title": "Duration of recent jobs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "opm" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 41 + }, + "id": 57, + "interval": "2m", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_metrics_compression_total_runs_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "interval": "", + "legendFormat": "metrics-compression-total", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_metrics_retention_total_runs_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "metrics-retention-total", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_traces_retention_total_runs_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": 
"traces-retention-total", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_metrics_compression_failures_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "metrics-compression-failures", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_metrics_retention_failures_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "metrics-retention-failures", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_traces_retention_failures_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "traces-retention-failures", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, instance)(rate(promscale_sql_database_worker_maintenance_job_traces_compression_failures_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "traces-compression-failures", + "range": true, + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max by (job, 
instance)(rate(promscale_sql_database_worker_maintenance_job_traces_compression_total_runs_count{namespace=~\"$namespace\"}[$__rate_interval])) * 60", + "hide": false, + "interval": "", + "legendFormat": "traces-compression-total", + "range": true, + "refId": "H" + } + ], + "title": "Completion and failure rates", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -2514,7 +2838,7 @@ "h": 9, "w": 12, "x": 0, - "y": 49 + "y": 42 }, "id": 50, "interval": "2m", @@ -2691,7 +3015,7 @@ "h": 9, "w": 12, "x": 12, - "y": 49 + "y": 42 }, "id": 52, "interval": "2m", diff --git a/pkg/pgmodel/metrics/database/database.go b/pkg/pgmodel/metrics/database/database.go index 56f128156c..0bdb4ce805 100644 --- a/pkg/pgmodel/metrics/database/database.go +++ b/pkg/pgmodel/metrics/database/database.go @@ -200,6 +200,8 @@ func updateMetric(m prometheus.Collector, value int64) { n.Set(float64(value)) case prometheus.Counter: n.Add(float64(value)) + case prometheus.Histogram: + n.Observe(float64(value)) default: panic(fmt.Sprintf("metric %s is of type %T", m, m)) } diff --git a/pkg/pgmodel/metrics/database/metrics.go b/pkg/pgmodel/metrics/database/metrics.go index bf63cb5b79..e0c78d9c70 100644 --- a/pkg/pgmodel/metrics/database/metrics.go +++ b/pkg/pgmodel/metrics/database/metrics.go @@ -62,6 +62,13 @@ func counters(opts ...prometheus.CounterOpts) []prometheus.Collector { } return res } +func histograms(opts ...prometheus.HistogramOpts) []prometheus.Collector { + res := make([]prometheus.Collector, 0, len(opts)) + for _, opt := range opts { + res = append(res, prometheus.NewHistogram(opt)) + } + return res +} var metrics = []metricQueryWrap{ { @@ -397,6 +404,126 @@ var metrics = []metricQueryWrap{ timescaledb_information.jobs jobs on jobs.job_id = stats.job_id where jobs.proc_name = 'execute_maintenance_job' and stats.last_run_status = 'Failed'`, + }, { + metrics: append(histograms( + prometheus.HistogramOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + 
Buckets: prometheus.ExponentialBucketsRange(1, 86400.0, 15), // up to a day + Name: "worker_maintenance_job_metrics_compression_last_duration_seconds", + Help: "The duration of the most recently completed metrics compression job.", + }, + prometheus.HistogramOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Buckets: prometheus.ExponentialBucketsRange(1, 86400.0, 15), // up to a day + Name: "worker_maintenance_job_metrics_retention_last_duration_seconds", + Help: "The duration of the most recently completed metrics retention job.", + }, + prometheus.HistogramOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Buckets: prometheus.ExponentialBucketsRange(1, 86400.0, 15), // up to a day + Name: "worker_maintenance_job_traces_retention_last_duration_seconds", + Help: "The duration of the most recently completed traces retention job.", + }, + prometheus.HistogramOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Buckets: prometheus.ExponentialBucketsRange(1, 86400.0, 15), // up to a day + Name: "worker_maintenance_job_traces_compression_last_duration_seconds", + Help: "The duration of the most recently completed traces compression job.", + }, + ), gauges( + // They need to be gauges, because the source is already a sum + // and our DB metric collection system uses Add on counters. 
+ prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_metrics_compression_failures_count", + Help: "The number of failed metrics compression jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_metrics_compression_total_runs_count", + Help: "The total number of completed metrics compression jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_metrics_retention_failures_count", + Help: "The number of failed metrics retention jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_metrics_retention_total_runs_count", + Help: "The total number of completed metrics retention jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_traces_retention_failures_count", + Help: "The number of failed traces retention jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_traces_retention_total_runs_count", + Help: "The total number of completed traces retention jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_traces_compression_failures_count", + Help: "The number of failed traces compression jobs.", + }, + prometheus.GaugeOpts{ + Namespace: util.PromNamespace, + Subsystem: "sql_database", + Name: "worker_maintenance_job_traces_compression_total_runs_count", + Help: "The total number of completed traces compression jobs.", + }, + )...), + query: `WITH maintenance_jobs_stats AS ( + SELECT + coalesce(config ->> 'signal', 'traces') AS signal_type, + coalesce(config ->> 'type', 'compression') AS job_type, + MAX(js.last_run_duration) AS last_duration, + 
SUM(js.total_failures) AS failures_count, + SUM(js.total_runs) AS total_runs_count + FROM timescaledb_information.job_stats js + JOIN timescaledb_information.jobs j ON j.job_id = js.job_id + WHERE proc_schema = '_prom_catalog' OR proc_schema = '_ps_trace' + GROUP BY 1, 2 + ) + SELECT + coalesce(extract(EPOCH FROM MAX(last_duration) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'compression' )), 0)::BIGINT AS metrics_compression_last_duration, + coalesce(extract(EPOCH FROM MAX(last_duration) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'retention' )), 0)::BIGINT AS metrics_retention_last_duration, + coalesce(extract(EPOCH FROM MAX(last_duration) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'retention' )), 0)::BIGINT AS traces_retention_last_duration, + coalesce(extract(EPOCH FROM MAX(last_duration) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'compression' )), 0)::BIGINT AS traces_compression_last_duration, + coalesce(MAX(failures_count) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'compression' ), 0)::BIGINT AS metrics_compression_failures_count, + coalesce(MAX(total_runs_count) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'compression' ), 0)::BIGINT AS metrics_compression_total_runs_count, + coalesce(MAX(failures_count) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'retention' ), 0)::BIGINT AS metrics_retention_failures_count, + coalesce(MAX(total_runs_count) + FILTER ( WHERE signal_type = 'metrics' AND job_type = 'retention' ), 0)::BIGINT AS metrics_retention_total_runs_count, + coalesce(MAX(failures_count) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'retention' ), 0)::BIGINT AS traces_retention_failures_count, + coalesce(MAX(total_runs_count) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'retention' ), 0)::BIGINT AS traces_retention_total_runs_count, + coalesce(MAX(failures_count) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'compression' ), 0)::BIGINT AS 
traces_compression_failures_count, + coalesce(MAX(total_runs_count) + FILTER ( WHERE signal_type = 'traces' AND job_type = 'compression' ), 0)::BIGINT AS traces_compression_total_runs_count + FROM maintenance_jobs_stats;`, }, { metrics: gauges( prometheus.GaugeOpts{ @@ -453,6 +580,8 @@ func getMetric(c prometheus.Collector) prometheus.Metric { return n case prometheus.Counter: return n + case prometheus.Histogram: + return n default: panic(fmt.Sprintf("invalid type: %T", n)) } diff --git a/pkg/tests/end_to_end_tests/create_test.go b/pkg/tests/end_to_end_tests/create_test.go index 1d3d9a9edd..d1acf0e5ae 100644 --- a/pkg/tests/end_to_end_tests/create_test.go +++ b/pkg/tests/end_to_end_tests/create_test.go @@ -1463,93 +1463,6 @@ func TestExecuteCompressionMetricsLocked(t *testing.T) { }) } -func TestConfigMaintenanceJobs(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - withDB(t, *testDatabase, func(dbOwner *pgxpool.Pool, t testing.TB) { - db := testhelpers.PgxPoolWithRole(t, *testDatabase, "prom_admin") - defer db.Close() - cnt := 0 - err := db.QueryRow(context.Background(), - "SELECT count(*) FROM timescaledb_information.jobs WHERE proc_schema = '_prom_catalog' AND proc_name = 'execute_maintenance_job'"). - Scan(&cnt) - if err != nil { - t.Fatal(err) - } - - if cnt != 2 { - t.Fatal("Incorrect number of jobs at startup") - } - - changeJobs := func(numJobs int, scheduleInterval time.Duration, config *string, configErr bool) { - _, err = db.Exec(context.Background(), "SELECT config_maintenance_jobs($1, $2, $3)", numJobs, scheduleInterval, config) - if err != nil { - if !configErr { - t.Fatal(err) - } - return - } - if configErr { - t.Fatal("Expect config error") - } - err = db.QueryRow(context.Background(), - "SELECT count(*) FROM timescaledb_information.jobs WHERE proc_schema = '_prom_catalog' AND proc_name = 'execute_maintenance_job' AND schedule_interval = $1", scheduleInterval). 
- Scan(&cnt) - if err != nil { - t.Fatal(err) - } - if cnt != numJobs { - t.Fatalf("Unexpected number of jobs. Got %v, expected %v", cnt, numJobs) - } - err = db.QueryRow(context.Background(), - "SELECT count(*) FROM timescaledb_information.jobs WHERE proc_schema = '_prom_catalog' AND proc_name = 'execute_maintenance_job' AND schedule_interval != $1", scheduleInterval). - Scan(&cnt) - if err != nil { - t.Fatal(err) - } - if cnt != 0 { - t.Fatalf("found %v jobs with wrong schedule interval", cnt) - } - if config == nil { - err = db.QueryRow(context.Background(), - "SELECT count(*) FROM timescaledb_information.jobs WHERE proc_schema = '_prom_catalog' AND proc_name = 'execute_maintenance_job' AND config IS NOT NULL"). - Scan(&cnt) - if err != nil { - t.Fatal(err) - } - if cnt != 0 { - t.Fatalf("found %v jobs with wrong NULL config", cnt) - } - } else { - err = db.QueryRow(context.Background(), - "SELECT count(*) FROM timescaledb_information.jobs WHERE proc_schema = '_prom_catalog' AND proc_name = 'execute_maintenance_job' AND config != $1::jsonb", config). 
- Scan(&cnt) - if err != nil { - t.Fatal(err) - } - if cnt != 0 { - t.Fatalf("found %v jobs with wrong config", cnt) - } - } - } - - changeJobs(4, time.Minute*30, nil, false) - changeJobs(4, time.Minute*45, nil, false) - changeJobs(5, time.Minute*45, nil, false) - changeJobs(2, time.Minute*45, nil, false) - changeJobs(1, time.Minute*30, nil, false) - changeJobs(0, time.Minute*30, nil, false) - config := `{"log_verbose": true}` - changeJobs(2, time.Minute*45, &config, false) - changeJobs(3, time.Minute*45, &config, false) - config = `{"log_verbose": false}` - changeJobs(1, time.Minute*45, &config, false) - config = `{"log_verbose": "rand"}` - changeJobs(1, time.Minute*45, &config, true) - }) -} - func TestExecuteMaintJob(t *testing.T) { if testing.Short() { t.Skip("skipping integration test") diff --git a/pkg/tests/end_to_end_tests/database_metrics_test.go b/pkg/tests/end_to_end_tests/database_metrics_test.go index d86e1a2f6f..2919ec7693 100644 --- a/pkg/tests/end_to_end_tests/database_metrics_test.go +++ b/pkg/tests/end_to_end_tests/database_metrics_test.go @@ -30,6 +30,8 @@ func TestDatabaseMetrics(t *testing.T) { require.Equal(t, float64(0), compressionStatus) numMaintenanceJobs := getMetricValue(t, "worker_maintenance_job") require.Equal(t, float64(0), numMaintenanceJobs) + metricsJobDuration := getMetricValue(t, "worker_maintenance_job_metrics_compression_last_duration_seconds") + require.Equal(t, float64(0), metricsJobDuration) chunksCount := getMetricValue(t, "chunks_count") require.Equal(t, float64(0), chunksCount) chunksCompressedCount := getMetricValue(t, "chunks_compressed_count") @@ -50,7 +52,9 @@ func TestDatabaseMetrics(t *testing.T) { compressionStatus = getMetricValue(t, "compression_status") require.Equal(t, float64(1), compressionStatus) numMaintenanceJobs = getMetricValue(t, "worker_maintenance_job") - require.Equal(t, float64(2), numMaintenanceJobs) + require.GreaterOrEqual(t, numMaintenanceJobs, float64(1)) + metricsJobDuration = 
getMetricValue(t, "worker_maintenance_job_metrics_compression_last_duration_seconds") + require.Equal(t, float64(0), metricsJobDuration) chunksCount = getMetricValue(t, "chunks_count") require.Equal(t, float64(0), chunksCount) chunksCompressedCount = getMetricValue(t, "chunks_compressed_count") @@ -112,8 +116,6 @@ func TestDatabaseMetricsAfterCompression(t *testing.T) { // Get metrics before compressing the firstMetric metric chunk. compressionStatus := getMetricValue(t, "compression_status") require.Equal(t, float64(1), compressionStatus) - numMaintenanceJobs := getMetricValue(t, "worker_maintenance_job") - require.Equal(t, float64(2), numMaintenanceJobs) chunksCount := getMetricValue(t, "chunks_count") require.Equal(t, float64(2), chunksCount) chunksCompressedCount := getMetricValue(t, "chunks_compressed_count") diff --git a/pkg/util/metrics.go b/pkg/util/metrics.go index 1a811a4e1a..9dbd1e10a3 100644 --- a/pkg/util/metrics.go +++ b/pkg/util/metrics.go @@ -44,8 +44,14 @@ func ExtractMetricValue(counterOrGauge prometheus.Metric) (float64, error) { return internal.Gauge.GetValue(), nil case internal.Counter != nil: return internal.Counter.GetValue(), nil + case internal.Histogram != nil: + if sampleCnt := internal.Histogram.GetSampleCount(); sampleCnt != 0 { + return internal.Histogram.GetSampleSum() / float64(sampleCnt), nil + } else { + return 0.0, nil + } default: - return 0, fmt.Errorf("both Gauge and Counter are nil") + return 0, fmt.Errorf("all three Gauge, Counter and Histogram are nil") } } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index b2a08b9026..a1ae94908f 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -5,6 +5,7 @@ package util import ( + "errors" "flag" "fmt" "os" @@ -138,9 +139,8 @@ func TestExtractMetricValue(t *testing.T) { require.NoError(t, err) require.Equal(t, float64(164), value) - wrongMetric := prometheus.NewHistogram(prometheus.HistogramOpts{Namespace: "test", Name: "wrong", Buckets: 
prometheus.DefBuckets}) + wrongMetric := prometheus.NewInvalidMetric(prometheus.NewDesc("invalid", "invalid", nil, nil), errors.New("an invalid metric")) - wrongMetric.Observe(164) _, err = ExtractMetricValue(wrongMetric) require.Error(t, err) }