From abb292bc7e994c806ef9974b0dc4a0417918538d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 15:59:54 -0500 Subject: [PATCH 001/289] Add the metrics tracer factory class --- src/metrics-tracer-factory.ts | 106 +++++++++++++++++++++++++++++ system-test/client-side-metrics.ts | 0 2 files changed, 106 insertions(+) create mode 100644 src/metrics-tracer-factory.ts create mode 100644 system-test/client-side-metrics.ts diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts new file mode 100644 index 000000000..eff1f8ef4 --- /dev/null +++ b/src/metrics-tracer-factory.ts @@ -0,0 +1,106 @@ +// import * as SDKMetrics from '@opentelemetry/sdk-metrics'; +const { MeterProvider, Histogram, Counter, PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics'); +// import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; +import * as Resources from '@opentelemetry/resources'; +import { MetricExporter } from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; + +interface OperationInfo { + retries: number; +} + +const buckets = [0.001, 0.01, 0.1, 1, 10, 100] +const count = 0; + +interface Metrics { + operationLatencies: typeof Histogram; + attemptLatencies: typeof Histogram; + retryCount: typeof Counter; + applicationBlockingLatencies: typeof Histogram; + firstResponseLatencies: typeof Histogram; + serverLatencies: typeof Histogram; + connectivityErrorCount: typeof Histogram; + clientBlockingLatencies: typeof Histogram; +} + +class MetricsTracer { // TODO: Consider rename. + private metrics: Metrics; + + constructor(metrics: Metrics) { + this.metrics = metrics; + } + + onAttemptComplete(info: OperationInfo) { + console.log('onAttemptComplete'); + } + + onOperationComplete(info: OperationInfo) { + console.log('onOperationComplete'); + } +} + +export class MetricsTracerFactory { + private metrics: Metrics; + + constructor() { + // Create MeterProvider + const meterProvider = new MeterProvider({ + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + "service.name": "example-metric-service", + "service.namespace": "samples", + "service.instance.id": "12345", + "cloud.resource_manager.project_id": "cloud-native-db-dpes-shared" + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId: 'cloud-native-db-dpes-shared' // TODO: Replace later + }), + }) + ] + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.metrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: 'The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation\'s round trip from the client to Bigtable and back to the client and includes all retries.', + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: "The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.", + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram('application_blocking_latencies', { + description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + }), + firstResponseLatencies: meter.createHistogram('first_response_latencies', { + description: 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + }), + serverLatencies: meter.createHistogram('server_latencies', { + description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram('connectivity_error_count', { + description: 'The number of requests that failed to reach Google\'s network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.', + }), + clientBlockingLatencies: meter.createHistogram('client_blocking_latencies', { + description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + }), + }; + } + + getMetricsTracer() { + return new MetricsTracer(this.metrics); + } +} diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..e69de29bb From 051b488ec6ffda36ca96c7b4117366b838f691d6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:08:32 -0500 Subject: [PATCH 002/289] Add system tests for the client side metrics --- system-test/client-side-metrics.ts | 95 ++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index e69de29bb..6e41740f4 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,95 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
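// This system test drives Table#getRows end to end so that the client-side
// metrics instrumentation added in this patch series has real request traffic
// to measure.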
+ + + +import {Bigtable} from '../src'; +import {Mutation} from '../src/mutation'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable(); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [,operation] = await instance.create({ // Fix: Destructure correctly + clusters: { // Fix: Use computed property name + [clusterId]: { + location, + nodes: 3, + }, + }, + } as any); // any cast resolves type mismatch for options. + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create(); + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + const retrievedRows = await table.getRows(); + assert.strictEqual(retrievedRows[0].length, 2); + const row1 = retrievedRows[0].find(row => row.key === 'row1'); + assert(row1); + const row1Data = row1.data; + assert.deepStrictEqual(row1Data, rows[0].data); + const row2 = retrievedRows[0].find(row => row.key === 'row2'); + assert(row2); + const row2Data = row2.data; + assert.deepStrictEqual(row2Data, rows[1].data); + }); +}); From 1c49f8651376604fce987ea26bbf739a944899a7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:09:37 -0500 Subject: [PATCH 003/289] Add open telemetry packages --- package.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/package.json b/package.json index c0b1a07e9..7b1c30d4a 100644 --- a/package.json +++ b/package.json @@ -47,9 +47,13 @@ "precompile": "gts clean" }, "dependencies": { + "@google-cloud/opentelemetry-cloud-monitoring-exporter": "^0.20.0", + "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "^4.0.0", + "@opentelemetry/resources": "^1.30.0", + "@opentelemetry/sdk-metrics": "^1.30.0", "arrify": "^2.0.0", "concat-stream": "^2.0.0", "dot-prop": "^6.0.0", From 7a5be3bfbbbb345423c85b30f5297994c7a95a29 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 10 Jan 2025 16:10:27 -0500 Subject: [PATCH 004/289] Add a metrics tracer factory --- src/index.ts | 3 +++ src/tabular-api-surface.ts | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/src/index.ts b/src/index.ts index dc4143c99..07147a0c1 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,6 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +import {MetricsTracerFactory} from './metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ 
-395,8 +396,10 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { + this.metricsTracerFactory = new MetricsTracerFactory(); // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index a7f86e0a2..9cb164f70 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(); const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; let activeRequestStream: AbortableDuplex | null; @@ -506,6 +507,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* + function onCallComplete() { + this.metricsTracer.onOperationComplete({ + retries: numConsecutiveErrors, + }); + } + */ rowStream .on('error', (error: ServiceError) => { @@ -548,6 +556,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); + //onCallComplete(); } }) .on('data', _ => { @@ -557,6 +566,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }) .on('end', () => { activeRequestStream = null; + //onCallComplete(); }); rowStreamPipe(rowStream, userStream); }; From 5390411f10c59d588bb955efc851dc8476016a61 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 11:05:17 -0500 Subject: [PATCH 005/289] metadata experimentation --- src/index.ts | 4 ++- src/tabular-api-surface.ts | 7 +++-- system-test/client-side-metrics.ts | 44 ++++++++++++++---------------- 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/src/index.ts b/src/index.ts index 07147a0c1..595bc267c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -870,7 +870,9 @@ export class Bigtable { gaxStream = requestFn!(); gaxStream .on('error', stream.destroy.bind(stream)) - .on('metadata', stream.emit.bind(stream, 'metadata')) + .on('metadata', (arg1, arg2) => { + stream.emit.bind(stream, 'metadata')(arg1, arg2); + }) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 9cb164f70..cea03d859 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -559,12 +559,15 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); //onCallComplete(); } }) - .on('data', _ => { + .on('data', (something: any) => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('end', () => { + .on('metadata', (something: any) => { + console.log(something); + }) + .on('end', (something: any) => { activeRequestStream = null; //onCallComplete(); }); diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 6e41740f4..9ac299773 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
- - import {Bigtable} from '../src'; -import {Mutation} from '../src/mutation'; import * as assert from 'assert'; import {describe, it, before, after} from 'mocha'; describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable(); + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; + const columnFamilyId = 'cf1'; const clusterId = 'test-cluster'; const location = 'us-central1-c'; @@ -31,21 +31,27 @@ describe.only('Bigtable/Table#getRows', () => { try { const [instanceInfo] = await instance.exists(); if (!instanceInfo) { - const [,operation] = await instance.create({ // Fix: Destructure correctly - clusters: { // Fix: Use computed property name - [clusterId]: { - location, - nodes: 3, - }, + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, }, - } as any); // any cast resolves type mismatch for options. + }); await operation.promise(); } const table = instance.table(tableId); const [tableExists] = await table.exists(); if (!tableExists) { - await table.create(); + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } } } catch (error) { console.error('Error during setup:', error); @@ -58,7 +64,6 @@ describe.only('Bigtable/Table#getRows', () => { await instance.delete({}); }); - it('should read rows after inserting data', async () => { const instance = bigtable.instance(instanceId); const table = instance.table(tableId); @@ -81,15 +86,8 @@ describe.only('Bigtable/Table#getRows', () => { }, ]; await table.insert(rows); - const retrievedRows = await table.getRows(); - assert.strictEqual(retrievedRows[0].length, 2); - const row1 = retrievedRows[0].find(row => row.key === 'row1'); - assert(row1); - const row1Data = row1.data; - assert.deepStrictEqual(row1Data, rows[0].data); - const row2 = retrievedRows[0].find(row => row.key === 'row2'); - assert(row2); - const row2Data = row2.data; - assert.deepStrictEqual(row2Data, rows[1].data); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } }); }); From a7f2fd433cef5af6774814e54680b867f4f4bfdf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 13:37:32 -0500 Subject: [PATCH 006/289] Pass metadata, status along --- src/index.ts | 5 ++--- src/tabular-api-surface.ts | 17 +++++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/index.ts b/src/index.ts index 595bc267c..4fc86fbee 100644 --- a/src/index.ts +++ b/src/index.ts @@ -870,9 +870,8 @@ export class Bigtable { gaxStream = requestFn!(); gaxStream .on('error', stream.destroy.bind(stream)) - .on('metadata', (arg1, arg2) => { - stream.emit.bind(stream, 'metadata')(arg1, arg2); - }) + .on('metadata', stream.emit.bind(stream, 'metadata')) + .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index cea03d859..da9fe3528 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -514,7 +514,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }); } */ - + requestStream + .on( + 'metadata', + (metadata: {internalRepr: Map; options: 
{}}) => { + console.log(metadata); + } + ) + .on( + 'status', + (status: {internalRepr: Map; options: {}}) => { + console.log(status); + } + ); rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -564,9 +576,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('metadata', (something: any) => { - console.log(something); - }) .on('end', (something: any) => { activeRequestStream = null; //onCallComplete(); From 9e3e5f5679c3d3fac0b698f2449354916d408bb0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 14:21:48 -0500 Subject: [PATCH 007/289] Get mapped entries --- src/tabular-api-surface.ts | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index da9fe3528..635d1cdbd 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -518,13 +518,23 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - console.log(metadata); + const mappedEntries = Array.from( + metadata.internalRepr.entries(), + ([key, value]) => [key, value.toString()] + ); + console.log(mappedEntries); } ) .on( 'status', - (status: {internalRepr: Map; options: {}}) => { - console.log(status); + (status: { + metadata: {internalRepr: Map; options: {}}; + }) => { + const mappedEntries = Array.from( + status.metadata.internalRepr.entries(), + ([key, value]) => [key, value.toString()] + ); + console.log(mappedEntries); } ); rowStream From 9f93172983223d617468adf6b5c0c7d6dd51ae41 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 13 Jan 2025 17:25:19 -0500 Subject: [PATCH 008/289] Start collecting a few metrics in the metrics trac --- src/metrics-tracer-factory.ts | 194 ++++++++++++++++++++++++++++------ src/tabular-api-surface.ts | 41 ++++--- 2 files changed, 179 insertions(+), 56 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index eff1f8ef4..9a04025b4 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -1,16 +1,36 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -const { MeterProvider, Histogram, Counter, PeriodicExportingMetricReader } = require('@opentelemetry/sdk-metrics'); +import {Table} from './table'; + +const { + MeterProvider, + Histogram, + Counter, + PeriodicExportingMetricReader, +} = require('@opentelemetry/sdk-metrics'); // import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; import * as Resources from '@opentelemetry/resources'; -import { MetricExporter } from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { retries: number; + finalOperationStatus: string; } -const buckets = [0.001, 0.01, 0.1, 1, 10, 100] +const buckets = [0.001, 0.01, 0.1, 1, 10, 100]; const count = 0; +interface Dimensions { + projectId: string; + instanceId: string; + table: string; + cluster?: string | null; + zone?: string | null; + appProfileId?: string; + methodName: string; + finalOperationStatus: string; + clientName: string; +} interface Metrics { operationLatencies: typeof Histogram; @@ 
-23,11 +43,43 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -class MetricsTracer { // TODO: Consider rename. +class MetricsTracer { + // TODO: Consider rename. + private startTime: Date; private metrics: Metrics; + private zone: string | null | undefined; + private cluster: string | null | undefined; + private tabularApiSurface: TabularApiSurface; + private methodName: string; - constructor(metrics: Metrics) { + constructor( + metrics: Metrics, + tabularApiSurface: TabularApiSurface, + methodName: string + ) { this.metrics = metrics; + this.zone = null; + this.cluster = null; + this.startTime = new Date(); + this.tabularApiSurface = tabularApiSurface; + this.methodName = methodName; + } + + private getDimensions( + projectId: string, + finalOperationStatus: string + ): Dimensions { + return { + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + finalOperationStatus: finalOperationStatus, + clientName: 'nodejs-bigtable', + }; } onAttemptComplete(info: OperationInfo) { @@ -35,8 +87,61 @@ class MetricsTracer { // TODO: Consider rename. } onOperationComplete(info: OperationInfo) { + const endTime = new Date(); + const totalTime = endTime.getTime() - this.startTime.getTime(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId) { + const dimensions = this.getDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.retryCount.add(info.retries, dimensions); + } + } + ); console.log('onOperationComplete'); } + + onMetadataReceived(metadata: { + internalRepr: Map; + options: {}; + }) { + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const durationValues = mappedEntries.get('server-timing')?.split('dur='); + if (durationValues && durationValues[1]) { + const serverTime = parseInt(durationValues[1]); + } + console.log(mappedEntries); + } + + onStatusReceived(status: { + metadata: {internalRepr: Map; options: {}}; + }) { + const mappedEntries = new Map( + Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const instanceInformation = mappedEntries + .get('x-goog-ext-425905942-bin') + ?.replace(new RegExp('\\n', 'g'), '') + .split('\r'); + if (instanceInformation && instanceInformation[0]) { + this.zone = instanceInformation[0]; + } + if (instanceInformation && instanceInformation[1]) { + this.cluster = instanceInformation[0]; + } + console.log(mappedEntries); + } } export class MetricsTracerFactory { @@ -51,56 +156,77 @@ export class MetricsTracerFactory { // resource if running on GCP. Otherwise, metrics will be sent with monitored resource // `generic_task`. 
resource: new Resources.Resource({ - "service.name": "example-metric-service", - "service.namespace": "samples", - "service.instance.id": "12345", - "cloud.resource_manager.project_id": "cloud-native-db-dpes-shared" + 'service.name': 'example-metric-service', + 'service.namespace': 'samples', + 'service.instance.id': '12345', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ // Register the exporter + readers: [ + // Register the exporter new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. exportIntervalMillis: 10_000, exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared' // TODO: Replace later + projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later }), - }) - ] + }), + ], }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.metrics = { operationLatencies: meter.createHistogram('operation_latencies', { - description: 'The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation\'s round trip from the client to Bigtable and back to the client and includes all retries.', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", }), attemptLatencies: meter.createHistogram('attempt_latencies', { - description: "The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.", + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', }), retryCount: meter.createCounter('retry_count', { - description: 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram('application_blocking_latencies', { - description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - }), - firstResponseLatencies: meter.createHistogram('first_response_latencies', { - description: 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. 
The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), serverLatencies: meter.createHistogram('server_latencies', { - description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram('connectivity_error_count', { - description: 'The number of requests that failed to reach Google\'s network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.', - }), - clientBlockingLatencies: meter.createHistogram('client_blocking_latencies', { - description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), }; } - getMetricsTracer() { - return new MetricsTracer(this.metrics); + getMetricsTracer(tabularApiSurface: TabularApiSurface, methodName: string) { + return new MetricsTracer(this.metrics, tabularApiSurface, methodName); } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 635d1cdbd..57fd73135 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,7 +210,18 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer(); + // Initialize objects for collecting client side metrics. + const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'readRows' + ); + function onCallComplete(finalOperationStatus: string) { + metricsTracer.onOperationComplete({ + retries: numRequestsMade - 1, + finalOperationStatus, + }); + } + const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! 
: 10; let activeRequestStream: AbortableDuplex | null; @@ -507,22 +518,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; - /* - function onCallComplete() { - this.metricsTracer.onOperationComplete({ - retries: numConsecutiveErrors, - }); - } - */ requestStream .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - const mappedEntries = Array.from( - metadata.internalRepr.entries(), - ([key, value]) => [key, value.toString()] - ); - console.log(mappedEntries); + metricsTracer.onMetadataReceived(metadata); } ) .on( @@ -530,11 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); (status: { metadata: {internalRepr: Map; options: {}}; }) => { - const mappedEntries = Array.from( - status.metadata.internalRepr.entries(), - ([key, value]) => [key, value.toString()] - ); - console.log(mappedEntries); + metricsTracer.onStatusReceived(status); } ); rowStream @@ -578,17 +574,18 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); - //onCallComplete(); + onCallComplete('ERROR'); } }) - .on('data', (something: any) => { + .on('data', () => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; }) - .on('end', (something: any) => { + .on('end', () => { + numRequestsMade++; activeRequestStream = null; - //onCallComplete(); + onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; From b32aea01f24c668d043545123eb8870c0e465fcb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 14 Jan 2025 09:50:03 -0500 Subject: [PATCH 009/289] on attempt start --- src/metrics-tracer-factory.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 9a04025b4..68f249683 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -86,6 +86,10 @@ class MetricsTracer { console.log('onAttemptComplete'); } + onAttemptStart() { + console.log('onAttemptStart'); + } + onOperationComplete(info: OperationInfo) { const endTime = new Date(); const totalTime = endTime.getTime() - this.startTime.getTime(); From 750c1e7e2b3aa2e5ef61b394493fd8d561c42bc1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 10:57:33 -0500 Subject: [PATCH 010/289] Adding more metrics --- src/metrics-tracer-factory.ts | 114 ++++++++++++++++++++++++++++------ src/tabular-api-surface.ts | 11 ++++ 2 files changed, 107 insertions(+), 18 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 68f249683..b8c1e5e96 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -14,12 +14,11 @@ import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { - retries: number; + retries?: number; finalOperationStatus: string; + connectivityErrorCount?: number; } -const buckets = [0.001, 0.01, 0.1, 1, 10, 100]; -const count = 0; interface Dimensions { projectId: string; instanceId: string; @@ -45,12 +44,15 @@ interface Metrics { class MetricsTracer { // TODO: Consider rename. 
- private startTime: Date; + private operationStartTime: Date | null; + private attemptStartTime: Date | null; private metrics: Metrics; private zone: string | null | undefined; private cluster: string | null | undefined; private tabularApiSurface: TabularApiSurface; private methodName: string; + private receivedFirstResponse: boolean; + private serverTimeRead: boolean; constructor( metrics: Metrics, @@ -60,15 +62,15 @@ class MetricsTracer { this.metrics = metrics; this.zone = null; this.cluster = null; - this.startTime = new Date(); this.tabularApiSurface = tabularApiSurface; this.methodName = methodName; + this.operationStartTime = null; + this.attemptStartTime = null; + this.receivedFirstResponse = false; + this.serverTimeRead = false; } - private getDimensions( - projectId: string, - finalOperationStatus: string - ): Dimensions { + private getBasicDimensions(projectId: string) { return { projectId, instanceId: this.tabularApiSurface.instance.id, @@ -77,35 +79,98 @@ class MetricsTracer { zone: this.zone, appProfileId: this.tabularApiSurface.bigtable.appProfileId, methodName: this.methodName, - finalOperationStatus: finalOperationStatus, clientName: 'nodejs-bigtable', }; } + private getFinalOperationDimensions( + projectId: string, + finalOperationStatus: string + ): Dimensions { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + }, + this.getBasicDimensions(projectId) + ); + } + + private getAttemptDimensions(projectId: string, attemptStatus: string) { + return Object.assign( + { + attemptStatus: attemptStatus, + }, + this.getBasicDimensions(projectId) + ); + } + + onOperationStart() { + this.operationStartTime = new Date(); + } + onAttemptComplete(info: OperationInfo) { - console.log('onAttemptComplete'); + const endTime = new Date(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.attemptStartTime) { + const dimensions = this.getAttemptDimensions( + projectId, + info.finalOperationStatus + ); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metrics.operationLatencies.record(totalTime, dimensions); + } + } + ); } onAttemptStart() { - console.log('onAttemptStart'); + this.attemptStartTime = new Date(); + } + + onFirstResponse() { + const endTime = new Date(); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.operationStartTime) { + const dimensions = this.getFinalOperationDimensions( + projectId, + 'PENDING' + ); + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.metrics.operationLatencies.record(totalTime, dimensions); + } + } + } + ); } onOperationComplete(info: OperationInfo) { const endTime = new Date(); - const totalTime = endTime.getTime() - this.startTime.getTime(); + this.onAttemptComplete(info); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { - if (projectId) { - const dimensions = this.getDimensions( + if (projectId && this.operationStartTime) { + const totalTime = + endTime.getTime() - this.operationStartTime.getTime(); + const dimensions = this.getFinalOperationDimensions( projectId, info.finalOperationStatus ); this.metrics.operationLatencies.record(totalTime, dimensions); this.metrics.retryCount.add(info.retries, dimensions); + if (info.connectivityErrorCount) { + this.metrics.connectivityErrorCount.record( + 
info.connectivityErrorCount, + dimensions + ); + } } } ); - console.log('onOperationComplete'); } onMetadataReceived(metadata: { @@ -120,9 +185,22 @@ class MetricsTracer { ); const durationValues = mappedEntries.get('server-timing')?.split('dur='); if (durationValues && durationValues[1]) { - const serverTime = parseInt(durationValues[1]); + if (!this.serverTimeRead) { + this.serverTimeRead = true; + const serverTime = parseInt(durationValues[1]); + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId) { + const dimensions = this.getAttemptDimensions( + projectId, + 'PENDING' // TODO: Adjust this + ); + this.metrics.operationLatencies.record(serverTime, dimensions); + } + } + ); + } } - console.log(mappedEntries); } onStatusReceived(status: { diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 57fd73135..92935f143 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -219,6 +219,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onOperationComplete({ retries: numRequestsMade - 1, finalOperationStatus, + connectivityErrorCount, }); } @@ -230,6 +231,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; + let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -345,7 +347,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; + metricsTracer.onOperationStart(); const makeNewRequest = () => { + metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -537,6 +541,11 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + if (new Set([10, 14, 15]).has(error.code)) { + // The following grpc errors will be considered connectivity errors: + // ABORTED, UNAVAILABLE, DATA_LOSS + connectivityErrorCount++; + } if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. 
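The codes 10, 14, and 15 in the new connectivity check are the grpc status values ABORTED, UNAVAILABLE, and DATA_LOSS. A minimal sketch of an equivalent check, assuming the same grpc status enum this file already uses for grpc.status.CANCELLED:

    // Connectivity errors, per the comment above: ABORTED, UNAVAILABLE, DATA_LOSS.
    const CONNECTIVITY_ERROR_CODES = new Set([
      grpc.status.ABORTED, // 10
      grpc.status.UNAVAILABLE, // 14
      grpc.status.DATA_LOSS, // 15
    ]);
    if (CONNECTIVITY_ERROR_CODES.has(error.code)) {
      connectivityErrorCount++;
    }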
@@ -558,6 +567,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); + metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -581,6 +591,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; + metricsTracer.onFirstResponse(); }) .on('end', () => { numRequestsMade++; From b88512674b9748a9251fb21044697591b1ef1c3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 15:19:43 -0500 Subject: [PATCH 011/289] Add support for application blocking latencies --- src/metrics-tracer-factory.ts | 24 ++++++++++++++++++++++++ src/tabular-api-surface.ts | 4 ++++ 2 files changed, 28 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index b8c1e5e96..1d6ff0c2e 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -53,6 +53,7 @@ class MetricsTracer { private methodName: string; private receivedFirstResponse: boolean; private serverTimeRead: boolean; + private lastReadTime: Date | null; constructor( metrics: Metrics, @@ -67,6 +68,7 @@ class MetricsTracer { this.operationStartTime = null; this.attemptStartTime = null; this.receivedFirstResponse = false; + this.lastReadTime = null; this.serverTimeRead = false; } @@ -108,6 +110,28 @@ class MetricsTracer { this.operationStartTime = new Date(); } + onRead() { + const currentTime = new Date(); + if (this.lastReadTime) { + this.tabularApiSurface.bigtable.getProjectId_( + (err: Error | null, projectId?: string) => { + if (projectId && this.lastReadTime) { + const dimensions = this.getAttemptDimensions(projectId, 'PENDING'); + const difference = + currentTime.getTime() - this.lastReadTime.getTime(); + this.metrics.applicationBlockingLatencies.record( + difference, + dimensions + ); + this.lastReadTime = currentTime; + } + } + ); + } else { + this.lastReadTime = currentTime; + } + } + onAttemptComplete(info: OperationInfo) { const endTime = new Date(); this.tabularApiSurface.bigtable.getProjectId_( diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 92935f143..5df73b1ac 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -315,6 +315,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + read(size) { + metricsTracer.onRead(); + return this.read(size); + }, }); // The caller should be able to call userStream.end() to stop receiving From 5fb300bdae800f45907dc021f6bf146bb6f8d22c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 15 Jan 2025 16:37:09 -0500 Subject: [PATCH 012/289] Add a TODO for date wrapper --- src/metrics-tracer-factory.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 1d6ff0c2e..f19260015 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -1,6 +1,8 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; import {Table} from './table'; +// TODO: Mock out Date - ie. 
DateWrapper + const { MeterProvider, Histogram, From feb36e7db4886b57f5ef4c0b9f0c4ed31e603d0b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:23:11 -0500 Subject: [PATCH 013/289] Add first unit test for the metrics tracer --- src/metrics-tracer-factory.ts | 158 +++++++++++++++++------- src/tabular-api-surface.ts | 2 +- test/metrics-tracer.ts | 218 ++++++++++++++++++++++++++++++++++ 3 files changed, 334 insertions(+), 44 deletions(-) create mode 100644 test/metrics-tracer.ts diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index f19260015..1354777da 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -44,23 +44,77 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +interface DateLike { + getTime(): number; +} + +interface DateProvider { + getDate(): DateLike; +} + +class DefaultDateProvider { + getDate() { + return new Date(); + } +} + +interface ICounter { + add(retries: number, dimensions: {}): void; +} + +interface IHistogram { + record(value: number, dimensions: {}): void; +} + +interface IMeter { + createCounter(instrument: string, attributes: {}): ICounter; + createHistogram(instrument: string, attributes: {}): IHistogram; +} + +interface IMeterProvider { + getMeter(name: string): IMeter; +} + +export interface ObservabilityOptions { + meterProvider: IMeterProvider; +} + +interface IBigtable { + appProfileId?: string; + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void; +} + +interface IInstance { + id: string; +} + +interface ITabularApiSurface { + instance: IInstance; + id: string; + bigtable: IBigtable; +} + class MetricsTracer { // TODO: Consider rename. - private operationStartTime: Date | null; - private attemptStartTime: Date | null; + private operationStartTime: DateLike | null; + private attemptStartTime: DateLike | null; private metrics: Metrics; private zone: string | null | undefined; private cluster: string | null | undefined; - private tabularApiSurface: TabularApiSurface; + private tabularApiSurface: ITabularApiSurface; private methodName: string; private receivedFirstResponse: boolean; private serverTimeRead: boolean; - private lastReadTime: Date | null; + private lastReadTime: DateLike | null; + private dateProvider: DateProvider; constructor( metrics: Metrics, - tabularApiSurface: TabularApiSurface, - methodName: string + tabularApiSurface: ITabularApiSurface, + methodName: string, + dateProvider?: DateProvider ) { this.metrics = metrics; this.zone = null; @@ -72,6 +126,11 @@ class MetricsTracer { this.receivedFirstResponse = false; this.lastReadTime = null; this.serverTimeRead = false; + if (dateProvider) { + this.dateProvider = dateProvider; + } else { + this.dateProvider = new DefaultDateProvider(); + } } private getBasicDimensions(projectId: string) { @@ -109,11 +168,11 @@ class MetricsTracer { } onOperationStart() { - this.operationStartTime = new Date(); + this.operationStartTime = this.dateProvider.getDate(); } onRead() { - const currentTime = new Date(); + const currentTime = this.dateProvider.getDate(); if (this.lastReadTime) { this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { @@ -135,7 +194,7 @@ class MetricsTracer { } onAttemptComplete(info: OperationInfo) { - const endTime = new Date(); + const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.attemptStartTime) { @@ -151,11 +210,11 @@ 
class MetricsTracer { } onAttemptStart() { - this.attemptStartTime = new Date(); + this.attemptStartTime = this.dateProvider.getDate(); } - onFirstResponse() { - const endTime = new Date(); + onResponse() { + const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.operationStartTime) { @@ -167,7 +226,7 @@ class MetricsTracer { endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; - this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.firstResponseLatencies.record(totalTime, dimensions); } } } @@ -175,7 +234,7 @@ class MetricsTracer { } onOperationComplete(info: OperationInfo) { - const endTime = new Date(); + const endTime = this.dateProvider.getDate(); this.onAttemptComplete(info); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { @@ -221,7 +280,7 @@ class MetricsTracer { projectId, 'PENDING' // TODO: Adjust this ); - this.metrics.operationLatencies.record(serverTime, dimensions); + this.metrics.serverLatencies.record(serverTime, dimensions); } } ); @@ -248,39 +307,43 @@ class MetricsTracer { if (instanceInformation && instanceInformation[1]) { this.cluster = instanceInformation[0]; } - console.log(mappedEntries); } } export class MetricsTracerFactory { private metrics: Metrics; - constructor() { + constructor(observabilityOptions?: ObservabilityOptions) { // Create MeterProvider - const meterProvider = new MeterProvider({ - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'example-metric-service', - 'service.namespace': 'samples', - 'service.instance.id': '12345', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later - }), - }), - ], - }); + const meterProvider = + observabilityOptions && observabilityOptions.meterProvider + ? observabilityOptions.meterProvider + : new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. 
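// Note that this default is only used when no meter provider is supplied: the
// ObservabilityOptions interface exported above lets a caller inject their own,
// e.g. (a sketch mirroring the unit test added later in this series)
//   new MetricsTracerFactory({meterProvider: new TestMeterProvider(logger)})
// which routes every histogram and counter to that provider instead.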
+ resource: new Resources.Resource({ + 'service.name': 'example-metric-service', + 'service.namespace': 'samples', + 'service.instance.id': '12345', + 'cloud.resource_manager.project_id': + 'cloud-native-db-dpes-shared', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later + }), + }), + ], + }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.metrics = { operationLatencies: meter.createHistogram('operation_latencies', { @@ -334,7 +397,16 @@ export class MetricsTracerFactory { }; } - getMetricsTracer(tabularApiSurface: TabularApiSurface, methodName: string) { - return new MetricsTracer(this.metrics, tabularApiSurface, methodName); + getMetricsTracer( + tabularApiSurface: ITabularApiSurface, + methodName: string, + dateProvider?: DateProvider + ) { + return new MetricsTracer( + this.metrics, + tabularApiSurface, + methodName, + dateProvider + ); } } diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 5df73b1ac..7484f84b9 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -595,7 +595,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsTracer.onFirstResponse(); + metricsTracer.onResponse(); }) .on('end', () => { numRequestsMade++; diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts new file mode 100644 index 000000000..433f119f9 --- /dev/null +++ b/test/metrics-tracer.ts @@ -0,0 +1,218 @@ +import {describe} from 'mocha'; +import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; + +// TODO: Shared folder + +class Logger { + private messages: string[] = []; + + log(message: string) { + this.messages.push(message); + } + + getMessages() { + return this.messages; + } +} + +class TestDateLike { + private fakeDate; + constructor(fakeDate: number) { + this.fakeDate = fakeDate; + } + getTime() { + return this.fakeDate; + } +} + +class TestDateProvider { + private dateCounter = 0; + private logger: Logger; + + constructor(logger: Logger) { + this.logger = logger; + } + getDate() { + // The test assumes exactly 1ms passes between each getDate call. 
+ this.dateCounter++; + this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + return new TestDateLike(this.dateCounter); + } +} + +class TestMeterProvider { + private logger: Logger; + constructor(logger: Logger) { + this.logger = logger; + } + getMeter(name: string) { + return new TestMeter(this.logger, name); + } +} + +class TestMeter { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + createHistogram(instrument: string) { + return new TestHistogram(this.logger, `${this.name}:${instrument}`); + } + createCounter(instrument: string) { + return new TestCounter(this.logger, `${this.name}:${instrument}`); + } +} + +class TestCounter { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + add(value: number) { + this.logger.log( + `Value added to counter ${this.name} = ${value.toString()} ` + ); + } +} + +class TestHistogram { + private logger: Logger; + private name: string; + constructor(logger: Logger, name: string) { + this.logger = logger; + this.name = name; + } + record(value: number) { + this.logger.log( + `Value added to histogram ${this.name} = ${value.toString()} ` + ); + } +} + +class FakeBigtable { + appProfileId?: string; + metricsTracerFactory: MetricsTracerFactory; + constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { + this.metricsTracerFactory = new MetricsTracerFactory({ + meterProvider: observabilityOptions.meterProvider, + }); + } + + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void { + callback(null, 'my-project'); + } +} +// TODO: Put fixtures into a shared folder that are going to be used +// by system tests. + +class FakeInstance { + id = 'fakeInstanceId'; +} + +class FakeTable { + private logger: Logger; + id = 'fakeTableId'; + instance = new FakeInstance(); + bigtable: FakeBigtable; + + constructor(logger: Logger) { + this.logger = logger; + this.bigtable = new FakeBigtable({ + meterProvider: new TestMeterProvider(this.logger), + }); + } +} +// TODO: Check that there is a server latency for each attempt + +describe.only('Bigtable/MetricsTracer', () => { + it('should record the right metrics with a typical method call', () => { + const logger = new Logger(); + class FakeTable { + id = 'fakeTableId'; + instance = new FakeInstance(); + bigtable = new FakeBigtable({ + meterProvider: new TestMeterProvider(logger), + }); + + fakeMethod(): void { + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`dur=${duration}`)], + ]), + options: {}, + }; + } + const status = { + metadata: { + internalRepr: new Map([ + ['x-goog-ext-425905942-bin', Buffer.from('doLater')], + ]), + options: {}, + }, + }; + const metricsTracer = + this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'fakeMethod', + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. + // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsTracer.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsTracer.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('4. 
Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1001')); + logger.log('5. Client receives first row.'); + metricsTracer.onResponse(); + logger.log('6. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1002')); + logger.log('7. Client receives second row.'); + metricsTracer.onResponse(); + logger.log('8. A transient error occurs.'); + metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); + logger.log('9. After a timeout, the second attempt is made.'); + metricsTracer.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1003')); + logger.log('12. Client receives third row.'); + metricsTracer.onResponse(); + logger.log('13. Client receives metadata.'); + metricsTracer.onMetadataReceived(createMetadata('1004')); + logger.log('14. Client receives fourth row.'); + metricsTracer.onResponse(); + logger.log('15. User reads row 1'); + metricsTracer.onRead(); + logger.log('16. User reads row 2'); + metricsTracer.onRead(); + logger.log('17. User reads row 3'); + metricsTracer.onRead(); + logger.log('18. User reads row 4'); + metricsTracer.onRead(); + logger.log('19. Stream ends, operation completes'); + metricsTracer.onOperationComplete({ + retries: 1, + finalOperationStatus: 'SUCCESS', + connectivityErrorCount: 1, + }); + } + } + const table = new FakeTable(); + table.fakeMethod(); + // Ensure events occurred in the right order here: + console.log('test'); + }); +}); From 8465b3a79e78fa9cffd5c6da074f7cd6d70306a2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:32:05 -0500 Subject: [PATCH 014/289] Move the code for the TestMeterProvider to separate file --- common/test-meter-provider.ts | 56 +++++++++++++++++++++++++++++ test/metrics-tracer.ts | 67 +---------------------------------- 2 files changed, 57 insertions(+), 66 deletions(-) create mode 100644 common/test-meter-provider.ts diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts new file mode 100644 index 000000000..40bcee4a2 --- /dev/null +++ b/common/test-meter-provider.ts @@ -0,0 +1,56 @@ +export class TestMeterProvider { + private logger: ILogger; + constructor(logger: ILogger) { + this.logger = logger; + } + getMeter(name: string) { + return new TestMeter(this.logger, name); + } +} + +interface ILogger { + log(message: string): void; +} + +class TestMeter { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + createHistogram(instrument: string) { + return new TestHistogram(this.logger, `${this.name}:${instrument}`); + } + createCounter(instrument: string) { + return new TestCounter(this.logger, `${this.name}:${instrument}`); + } +} + +class TestCounter { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + add(value: number) { + this.logger.log( + `Value added to counter ${this.name} = ${value.toString()} ` + ); + } +} + +class TestHistogram { + private logger: ILogger; + private name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } + record(value: number) { + this.logger.log( + `Value added to histogram ${this.name} = ${value.toString()} ` + ); + } +} diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts index 
433f119f9..f23d65201 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer.ts @@ -1,5 +1,6 @@ import {describe} from 'mocha'; import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; +import {TestMeterProvider} from '../common/test-meter-provider'; // TODO: Shared folder @@ -40,59 +41,6 @@ class TestDateProvider { } } -class TestMeterProvider { - private logger: Logger; - constructor(logger: Logger) { - this.logger = logger; - } - getMeter(name: string) { - return new TestMeter(this.logger, name); - } -} - -class TestMeter { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - createHistogram(instrument: string) { - return new TestHistogram(this.logger, `${this.name}:${instrument}`); - } - createCounter(instrument: string) { - return new TestCounter(this.logger, `${this.name}:${instrument}`); - } -} - -class TestCounter { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - add(value: number) { - this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} ` - ); - } -} - -class TestHistogram { - private logger: Logger; - private name: string; - constructor(logger: Logger, name: string) { - this.logger = logger; - this.name = name; - } - record(value: number) { - this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} ` - ); - } -} - class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; @@ -115,19 +63,6 @@ class FakeInstance { id = 'fakeInstanceId'; } -class FakeTable { - private logger: Logger; - id = 'fakeTableId'; - instance = new FakeInstance(); - bigtable: FakeBigtable; - - constructor(logger: Logger) { - this.logger = logger; - this.bigtable = new FakeBigtable({ - meterProvider: new TestMeterProvider(this.logger), - }); - } -} // TODO: Check that there is a server latency for each attempt describe.only('Bigtable/MetricsTracer', () => { From 7a97aab6d7306a103ef585f6843fe80896e182e3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:38:05 -0500 Subject: [PATCH 015/289] Move the Date provider to a second file --- common/test-date-provider.ts | 29 +++++++++++++++++++++++++++++ test/metrics-tracer.ts | 26 +------------------------- 2 files changed, 30 insertions(+), 25 deletions(-) create mode 100644 common/test-date-provider.ts diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts new file mode 100644 index 000000000..b078c8dac --- /dev/null +++ b/common/test-date-provider.ts @@ -0,0 +1,29 @@ +interface ILogger { + log(message: string): void; +} + +class TestDateLike { + private fakeDate; + constructor(fakeDate: number) { + this.fakeDate = fakeDate; + } + getTime() { + return this.fakeDate; + } +} + +// TODO: ILogger in separate file +export class TestDateProvider { + private dateCounter = 0; + private logger: ILogger; + + constructor(logger: ILogger) { + this.logger = logger; + } + getDate() { + // The test assumes exactly 1ms passes between each getDate call. 
+ this.dateCounter++; + this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + return new TestDateLike(this.dateCounter); + } +} diff --git a/test/metrics-tracer.ts b/test/metrics-tracer.ts index f23d65201..fc6e4fb63 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer.ts @@ -1,6 +1,7 @@ import {describe} from 'mocha'; import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; import {TestMeterProvider} from '../common/test-meter-provider'; +import {TestDateProvider} from '../common/test-date-provider'; // TODO: Shared folder @@ -16,31 +17,6 @@ class Logger { } } -class TestDateLike { - private fakeDate; - constructor(fakeDate: number) { - this.fakeDate = fakeDate; - } - getTime() { - return this.fakeDate; - } -} - -class TestDateProvider { - private dateCounter = 0; - private logger: Logger; - - constructor(logger: Logger) { - this.logger = logger; - } - getDate() { - // The test assumes exactly 1ms passes between each getDate call. - this.dateCounter++; - this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); - return new TestDateLike(this.dateCounter); - } -} - class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; From 51d3dd3b5bb3171e4dd0244d8ec604dcc28bf33e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 16 Jan 2025 17:40:31 -0500 Subject: [PATCH 016/289] Fix attempt latencies bug --- src/metrics-tracer-factory.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index 1354777da..fdf602aad 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -203,7 +203,7 @@ class MetricsTracer { info.finalOperationStatus ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.operationLatencies.record(totalTime, dimensions); + this.metrics.attemptLatencies.record(totalTime, dimensions); } } ); From ee8c272cdbe259afa6b816c2a48b9bf59574f9fb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:34:34 -0500 Subject: [PATCH 017/289] Add assertion check against text file --- test/{ => metrics-tracer}/metrics-tracer.ts | 13 +++++-- test/metrics-tracer/typical-method-call.txt | 43 +++++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) rename test/{ => metrics-tracer}/metrics-tracer.ts (90%) create mode 100644 test/metrics-tracer/typical-method-call.txt diff --git a/test/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts similarity index 90% rename from test/metrics-tracer.ts rename to test/metrics-tracer/metrics-tracer.ts index fc6e4fb63..14b7c8420 100644 --- a/test/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,7 +1,9 @@ import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../src/metrics-tracer-factory'; -import {TestMeterProvider} from '../common/test-meter-provider'; -import {TestDateProvider} from '../common/test-date-provider'; +import {MetricsTracerFactory} from '../../src/metrics-tracer-factory'; +import {TestMeterProvider} from '../../common/test-meter-provider'; +import {TestDateProvider} from '../../common/test-date-provider'; +import * as assert from 'assert'; +import * as fs from 'fs'; // TODO: Shared folder @@ -123,7 +125,12 @@ describe.only('Bigtable/MetricsTracer', () => { } const table = new FakeTable(); table.fakeMethod(); + const expectedOutput = fs.readFileSync( + './test/metrics-tracer/typical-method-call.txt', + 'utf8' + ); // Ensure events occurred in the right order here: + 
assert.strictEqual(logger.getMessages().join('\n'), expectedOutput); console.log('test'); }); }); diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt new file mode 100644 index 000000000..106002085 --- /dev/null +++ b/test/metrics-tracer/typical-method-call.txt @@ -0,0 +1,43 @@ +1. The operation starts +getDate call returns 1 ms +2. The attempt starts. +getDate call returns 2 ms +3. Client receives status information. +4. Client receives metadata. +Value added to histogram bigtable.googleapis.com:server_latencies = 1001 +5. Client receives first row. +getDate call returns 3 ms +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2 +6. Client receives metadata. +7. Client receives second row. +getDate call returns 4 ms +8. A transient error occurs. +getDate call returns 5 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3 +9. After a timeout, the second attempt is made. +getDate call returns 6 ms +10. Client receives status information. +11. Client receives metadata. +12. Client receives third row. +getDate call returns 7 ms +13. Client receives metadata. +14. Client receives fourth row. +getDate call returns 8 ms +15. User reads row 1 +getDate call returns 9 ms +16. User reads row 2 +getDate call returns 10 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +17. User reads row 3 +getDate call returns 11 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +18. User reads row 4 +getDate call returns 12 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +19. Stream ends, operation completes +getDate call returns 13 ms +getDate call returns 14 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8 +Value added to histogram bigtable.googleapis.com:operation_latencies = 12 +Value added to counter bigtable.googleapis.com:retry_count = 1 +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 From b7413e881c0d8e6373d3119951e3c67f291ad1de Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:42:52 -0500 Subject: [PATCH 018/289] More realistic seconds increment --- common/test-date-provider.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index b078c8dac..0b6974cfc 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -21,8 +21,8 @@ export class TestDateProvider { this.logger = logger; } getDate() { - // The test assumes exactly 1ms passes between each getDate call. - this.dateCounter++; + // The test assumes exactly 1s passes between each getDate call. 
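As a rough illustration of how the test fixtures fit together, here is a minimal sketch (not part of any patch) that assumes the import paths used by the test file and a stand-in for its message-collecting Logger. With the 1000 ms step, the golden values follow directly: first_response_latencies = 3000 - 1000 = 2000 ms, and the first attempt_latencies = 5000 - 2000 = 3000 ms, which is what the regenerated typical-method-call.txt records a couple of patches later.

import {TestMeterProvider} from '../../common/test-meter-provider';
import {TestDateProvider} from '../../common/test-date-provider';

// Stand-in for the Logger class defined in the test file; it collects
// messages so they can be asserted against the golden file.
class CollectingLogger {
  private messages: string[] = [];
  log(message: string) {
    this.messages.push(message);
  }
  getMessages() {
    return this.messages;
  }
}

const logger = new CollectingLogger();
const meterProvider = new TestMeterProvider(logger);
const dateProvider = new TestDateProvider(logger);

// Every metric update becomes a deterministic log line, e.g. recording the
// dur=101 server-timing value used by the test:
meterProvider
  .getMeter('bigtable.googleapis.com')
  .createHistogram('server_latencies')
  .record(101);
// logs a line like:
//   Value added to histogram bigtable.googleapis.com:server_latencies = 101

// Every timestamp advances by exactly 1000 ms, so latency math is stable:
const t1 = dateProvider.getDate().getTime(); // 1000
const t2 = dateProvider.getDate().getTime(); // 2000
console.log(t2 - t1); // 1000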
+ this.dateCounter = this.dateCounter + 1000; this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); return new TestDateLike(this.dateCounter); } From 7c8877b436ec02694f2ac837980738d28787a212 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 10:43:02 -0500 Subject: [PATCH 019/289] Remove imports --- src/metrics-tracer-factory.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index fdf602aad..c6d3e4dd3 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -9,11 +9,9 @@ const { Counter, PeriodicExportingMetricReader, } = require('@opentelemetry/sdk-metrics'); -// import { MeterProvider, PeriodicExportingMetricReader, Histogram} from '@opentelemetry/sdk-metrics'; import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {TabularApiSurface} from './tabular-api-surface'; interface OperationInfo { retries?: number; From 503a2a9e013add0e2d25f9118fcabe2f813b3851 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:03:34 -0500 Subject: [PATCH 020/289] Adjust event timings to be more realistic --- common/test-meter-provider.ts | 4 +- test/metrics-tracer/metrics-tracer.ts | 10 ++--- test/metrics-tracer/typical-method-call.txt | 44 ++++++++++----------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 40bcee4a2..8eca67f35 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -36,7 +36,7 @@ class TestCounter { } add(value: number) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} ` + `Value added to counter ${this.name} = ${value.toString()}` ); } } @@ -50,7 +50,7 @@ class TestHistogram { } record(value: number) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} ` + `Value added to histogram ${this.name} = ${value.toString()}` ); } } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 14b7c8420..12c84e9c6 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -86,11 +86,11 @@ describe.only('Bigtable/MetricsTracer', () => { logger.log('3. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1001')); + metricsTracer.onMetadataReceived(createMetadata('101')); logger.log('5. Client receives first row.'); metricsTracer.onResponse(); logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1002')); + metricsTracer.onMetadataReceived(createMetadata('102')); logger.log('7. Client receives second row.'); metricsTracer.onResponse(); logger.log('8. A transient error occurs.'); @@ -100,11 +100,11 @@ describe.only('Bigtable/MetricsTracer', () => { logger.log('10. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1003')); + metricsTracer.onMetadataReceived(createMetadata('103')); logger.log('12. Client receives third row.'); metricsTracer.onResponse(); logger.log('13. 
Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('1004')); + metricsTracer.onMetadataReceived(createMetadata('104')); logger.log('14. Client receives fourth row.'); metricsTracer.onResponse(); logger.log('15. User reads row 1'); @@ -130,7 +130,7 @@ describe.only('Bigtable/MetricsTracer', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual(logger.getMessages().join('\n'), expectedOutput); + assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); console.log('test'); }); }); diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 106002085..96cec94ac 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -1,43 +1,43 @@ 1. The operation starts -getDate call returns 1 ms +getDate call returns 1000 ms 2. The attempt starts. -getDate call returns 2 ms +getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 1001 +Value added to histogram bigtable.googleapis.com:server_latencies = 101 5. Client receives first row. -getDate call returns 3 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2 +getDate call returns 3000 ms +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 6. Client receives metadata. 7. Client receives second row. -getDate call returns 4 ms +getDate call returns 4000 ms 8. A transient error occurs. -getDate call returns 5 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3 +getDate call returns 5000 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 9. After a timeout, the second attempt is made. -getDate call returns 6 ms +getDate call returns 6000 ms 10. Client receives status information. 11. Client receives metadata. 12. Client receives third row. -getDate call returns 7 ms +getDate call returns 7000 ms 13. Client receives metadata. 14. Client receives fourth row. -getDate call returns 8 ms +getDate call returns 8000 ms 15. User reads row 1 -getDate call returns 9 ms +getDate call returns 9000 ms 16. User reads row 2 -getDate call returns 10 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 10000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 17. User reads row 3 -getDate call returns 11 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 11000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 18. User reads row 4 -getDate call returns 12 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1 +getDate call returns 12000 ms +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 19. 
Stream ends, operation completes -getDate call returns 13 ms -getDate call returns 14 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8 -Value added to histogram bigtable.googleapis.com:operation_latencies = 12 +getDate call returns 13000 ms +getDate call returns 14000 ms +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 Value added to counter bigtable.googleapis.com:retry_count = 1 Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 From 938cb2ce1364f1e6447ce45d3076bd24d0ab12c3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:23:09 -0500 Subject: [PATCH 021/289] Remove only --- test/metrics-tracer/metrics-tracer.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 12c84e9c6..16e0e6bd9 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -5,8 +5,6 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -// TODO: Shared folder - class Logger { private messages: string[] = []; @@ -43,7 +41,7 @@ class FakeInstance { // TODO: Check that there is a server latency for each attempt -describe.only('Bigtable/MetricsTracer', () => { +describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); class FakeTable { From 854e1d1e054de67baf3246891d0d84afb373aa3e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:28:24 -0500 Subject: [PATCH 022/289] Add comments to the table class --- src/tabular-api-surface.ts | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 7484f84b9..87f18edeb 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + /* // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( this, @@ -222,6 +223,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); connectivityErrorCount, }); } + */ const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! 
: 10; @@ -315,10 +317,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + /* read(size) { metricsTracer.onRead(); return this.read(size); }, + */ }); // The caller should be able to call userStream.end() to stop receiving @@ -351,9 +355,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; - metricsTracer.onOperationStart(); + // metricsTracer.onOperationStart(); const makeNewRequest = () => { - metricsTracer.onAttemptStart(); + // metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -526,6 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* requestStream .on( 'metadata', @@ -541,6 +546,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onStatusReceived(status); } ); + */ rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); @@ -571,7 +577,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); - metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum + // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -588,19 +594,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); - onCallComplete('ERROR'); + // onCallComplete('ERROR'); } }) .on('data', () => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - metricsTracer.onResponse(); + // metricsTracer.onResponse(); }) .on('end', () => { numRequestsMade++; activeRequestStream = null; - onCallComplete('SUCCESS'); + // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; From db7d1b1f8eaa1b8240f719051dd53c930ea8760d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:35:35 -0500 Subject: [PATCH 023/289] More comments in table --- src/tabular-api-surface.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index 87f18edeb..d62b13175 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -233,7 +233,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; - let connectivityErrorCount = 0; + // let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -530,6 +530,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return false; }; + /* requestStream .on( @@ -551,11 +552,13 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: // ABORTED, UNAVAILABLE, DATA_LOSS connectivityErrorCount++; } + */ if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user 
calls `.abort()`. @@ -597,14 +600,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // onCallComplete('ERROR'); } }) - .on('data', () => { + .on('data', _ => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; // metricsTracer.onResponse(); }) .on('end', () => { - numRequestsMade++; + // numRequestsMade++; activeRequestStream = null; // onCallComplete('SUCCESS'); }); From ee670378fdb095c8bd91655cc1a6f647a9438bb6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:38:22 -0500 Subject: [PATCH 024/289] Remove TODO --- src/metrics-tracer-factory.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/metrics-tracer-factory.ts b/src/metrics-tracer-factory.ts index c6d3e4dd3..1390775b3 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/metrics-tracer-factory.ts @@ -95,7 +95,6 @@ interface ITabularApiSurface { } class MetricsTracer { - // TODO: Consider rename. private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private metrics: Metrics; From ea2fbe28eeda9e4d8068e0868f1b9585ec4e466d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 11:47:08 -0500 Subject: [PATCH 025/289] Move observability options into a separate file --- .../metrics-tracer-factory.ts | 24 ++----------------- .../observability-options.ts | 20 ++++++++++++++++ src/index.ts | 2 +- test/metrics-tracer/metrics-tracer.ts | 2 +- 4 files changed, 24 insertions(+), 24 deletions(-) rename src/{ => client-side-metrics}/metrics-tracer-factory.ts (96%) create mode 100644 src/client-side-metrics/observability-options.ts diff --git a/src/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts similarity index 96% rename from src/metrics-tracer-factory.ts rename to src/client-side-metrics/metrics-tracer-factory.ts index 1390775b3..2bf433f14 100644 --- a/src/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,5 +1,5 @@ // import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -import {Table} from './table'; +import {Table} from '../table'; // TODO: Mock out Date - ie. 
DateWrapper @@ -12,6 +12,7 @@ const { import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {ObservabilityOptions} from './observability-options'; interface OperationInfo { retries?: number; @@ -56,27 +57,6 @@ class DefaultDateProvider { } } -interface ICounter { - add(retries: number, dimensions: {}): void; -} - -interface IHistogram { - record(value: number, dimensions: {}): void; -} - -interface IMeter { - createCounter(instrument: string, attributes: {}): ICounter; - createHistogram(instrument: string, attributes: {}): IHistogram; -} - -interface IMeterProvider { - getMeter(name: string): IMeter; -} - -export interface ObservabilityOptions { - meterProvider: IMeterProvider; -} - interface IBigtable { appProfileId?: string; getProjectId_( diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts new file mode 100644 index 000000000..ae0698b64 --- /dev/null +++ b/src/client-side-metrics/observability-options.ts @@ -0,0 +1,20 @@ +interface ICounter { + add(retries: number, dimensions: {}): void; +} + +interface IHistogram { + record(value: number, dimensions: {}): void; +} + +interface IMeter { + createCounter(instrument: string, attributes: {}): ICounter; + createHistogram(instrument: string, attributes: {}): IHistogram; +} + +interface IMeterProvider { + getMeter(name: string): IMeter; +} + +export interface ObservabilityOptions { + meterProvider: IMeterProvider; +} diff --git a/src/index.ts b/src/index.ts index 4fc86fbee..fc6020494 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,7 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -import {MetricsTracerFactory} from './metrics-tracer-factory'; +import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 16e0e6bd9..f666964cf 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,5 +1,5 @@ import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../../src/metrics-tracer-factory'; +import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; From c22eb5ba33ce818a695c91043fe3936df112eb51 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:00:32 -0500 Subject: [PATCH 026/289] inline definitions for the tabular api surface --- .../metrics-tracer-factory.ts | 29 +++++++------------ 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 2bf433f14..c1ddfcc8e 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,8 +1,3 @@ -// import * as SDKMetrics from '@opentelemetry/sdk-metrics'; -import {Table} from '../table'; - -// TODO: Mock out Date - ie. 
DateWrapper - const { MeterProvider, Histogram, @@ -57,21 +52,17 @@ class DefaultDateProvider { } } -interface IBigtable { - appProfileId?: string; - getProjectId_( - callback: (err: Error | null, projectId?: string) => void - ): void; -} - -interface IInstance { - id: string; -} - -interface ITabularApiSurface { - instance: IInstance; +export interface ITabularApiSurface { + instance: { + id: string; + }; id: string; - bigtable: IBigtable; + bigtable: { + appProfileId?: string; + getProjectId_( + callback: (err: Error | null, projectId?: string) => void + ): void; + }; } class MetricsTracer { From a658a39e0aa41eb83c44ea40202ef99138cb7371 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:02:50 -0500 Subject: [PATCH 027/289] Comment source code out for now --- src/index.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/index.ts b/src/index.ts index fc6020494..136cf4a8d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -396,10 +396,10 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - metricsTracerFactory: MetricsTracerFactory; + // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { - this.metricsTracerFactory = new MetricsTracerFactory(); + // this.metricsTracerFactory = new MetricsTracerFactory(); // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; @@ -871,7 +871,7 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) - .on('status', stream.emit.bind(stream, 'status')) + // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); From 960b402cb52fa0753d213043950ec8db5711f5e9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:13:29 -0500 Subject: [PATCH 028/289] Add abstractions for classes that have a logger --- common/logger.ts | 19 ++++++++++++++++++ common/test-date-provider.ts | 11 ++--------- common/test-meter-provider.ts | 36 ++++++----------------------------- 3 files changed, 27 insertions(+), 39 deletions(-) create mode 100644 common/logger.ts diff --git a/common/logger.ts b/common/logger.ts new file mode 100644 index 000000000..24cfc2d1a --- /dev/null +++ b/common/logger.ts @@ -0,0 +1,19 @@ +interface ILogger { + log(message: string): void; +} + +export abstract class WithLogger { + protected logger: ILogger; + constructor(logger: ILogger) { + this.logger = logger; + } +} + +export abstract class WithLoggerAndName { + protected logger: ILogger; + protected name: string; + constructor(logger: ILogger, name: string) { + this.logger = logger; + this.name = name; + } +} diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 0b6974cfc..6cc74e83c 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,6 +1,4 @@ -interface ILogger { - log(message: string): void; -} +import {WithLogger} from './logger'; class TestDateLike { private fakeDate; @@ -13,13 +11,8 @@ class TestDateLike { } // TODO: ILogger in separate file -export class TestDateProvider { +export class TestDateProvider extends WithLogger { private dateCounter = 0; - private logger: ILogger; - - constructor(logger: ILogger) { - this.logger = logger; - } getDate() { // The test assumes exactly 1s passes between each getDate call. 
this.dateCounter = this.dateCounter + 1000; diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 8eca67f35..8764e7a30 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,24 +1,12 @@ -export class TestMeterProvider { - private logger: ILogger; - constructor(logger: ILogger) { - this.logger = logger; - } +import {WithLogger, WithLoggerAndName} from './logger'; + +export class TestMeterProvider extends WithLogger { getMeter(name: string) { return new TestMeter(this.logger, name); } } -interface ILogger { - log(message: string): void; -} - -class TestMeter { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestMeter extends WithLoggerAndName { createHistogram(instrument: string) { return new TestHistogram(this.logger, `${this.name}:${instrument}`); } @@ -27,13 +15,7 @@ class TestMeter { } } -class TestCounter { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestCounter extends WithLoggerAndName { add(value: number) { this.logger.log( `Value added to counter ${this.name} = ${value.toString()}` @@ -41,13 +23,7 @@ class TestCounter { } } -class TestHistogram { - private logger: ILogger; - private name: string; - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } +class TestHistogram extends WithLoggerAndName { record(value: number) { this.logger.log( `Value added to histogram ${this.name} = ${value.toString()}` From 23a7c14c0eaa4f938a536ba510a44d543583c13c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:20:34 -0500 Subject: [PATCH 029/289] Generate documentation for meter provider --- common/test-meter-provider.ts | 39 +++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 8764e7a30..609f458ae 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,21 +1,52 @@ import {WithLogger, WithLoggerAndName} from './logger'; +/** + * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. + * It doesn't send metrics to a backend, but instead logs metric updates for verification. + */ export class TestMeterProvider extends WithLogger { + /** + * Returns a TestMeter, that logs metric updates for verification. + * @param {string} name The name of the meter. + * @returns {TestMeter} + */ getMeter(name: string) { return new TestMeter(this.logger, name); } } +/** + * A test implementation of a Meter. Used for testing purposes. It doesn't send metrics to a backend, + * but instead logs metric updates for verification. + */ class TestMeter extends WithLoggerAndName { + /** + * Creates a test histogram. The TestHistogram logs when values are recorded. + * @param {string} instrument The name of the instrument. + * @returns {TestHistogram} + */ createHistogram(instrument: string) { return new TestHistogram(this.logger, `${this.name}:${instrument}`); } + /** + * Creates a test counter. The TestCounter logs when values are added. + * @param {string} instrument The name of the instrument. + * @returns {TestCounter} + */ createCounter(instrument: string) { return new TestCounter(this.logger, `${this.name}:${instrument}`); } } +/** + * A test implementation of a Counter. Used for testing purposes. 
It doesn't send metrics to a backend, + * but instead logs value additions for verification. + */ class TestCounter extends WithLoggerAndName { + /** + * Simulates adding a value to the counter. Logs the value and the counter name. + * @param {number} value The value to be added to the counter. + */ add(value: number) { this.logger.log( `Value added to counter ${this.name} = ${value.toString()}` @@ -23,7 +54,15 @@ class TestCounter extends WithLoggerAndName { } } +/** + * A test implementation of a Histogram. Used for testing purposes. It doesn't send metrics to a backend, + * but instead logs recorded values for verification. + */ class TestHistogram extends WithLoggerAndName { + /** + * Simulates recording a value in the histogram. Logs the value and the histogram name. + * @param {number} value The value to be recorded in the histogram. + */ record(value: number) { this.logger.log( `Value added to histogram ${this.name} = ${value.toString()}` From 0ac6d15aa8436027e042f2e6e5f1594cf6636e3f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:26:09 -0500 Subject: [PATCH 030/289] Generate documentation for the date provider --- common/test-date-provider.ts | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 6cc74e83c..931fbc8eb 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,18 +1,38 @@ import {WithLogger} from './logger'; +/** + * A test implementation of a Date-like object. Used for testing purposes. It provides a + * getTime method that returns a pre-determined fake date value, allowing for + * deterministic testing of time-dependent functionality. + */ class TestDateLike { private fakeDate; + /** + * @param {number} fakeDate The fake date value to be returned by getTime(), in milliseconds. + */ constructor(fakeDate: number) { this.fakeDate = fakeDate; } + /** + * Returns the fake date value that this object was created with. + * @returns {number} The fake date, in milliseconds. + */ getTime() { return this.fakeDate; } } -// TODO: ILogger in separate file +/** + * A test implementation of a DateProvider. Used for testing purposes. Provides + * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. + * Logs each date value returned for verification purposes. + */ export class TestDateProvider extends WithLogger { private dateCounter = 0; + /** + * Returns a new fake date 1000ms later than the last. Logs the date for test verification. + * @returns {TestDateLike} A fake date object. + */ getDate() { // The test assumes exactly 1s passes between each getDate call. this.dateCounter = this.dateCounter + 1000; From bad23b276d67acf4e30ae1cc0c4106d902cf57bf Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:29:46 -0500 Subject: [PATCH 031/289] Generate logger documentation --- common/logger.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/common/logger.ts b/common/logger.ts index 24cfc2d1a..691eaefdc 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -1,17 +1,36 @@ +/** + * A simple logger interface for logging messages. Implementations of this interface + * can provide various logging mechanisms (e.g., console logging, file logging, etc.). + */ interface ILogger { log(message: string): void; } +/** + * An abstract base class that provides a logger instance. Subclasses can use this logger + * for logging messages. 
+ */ export abstract class WithLogger { protected logger: ILogger; + /** + * @param logger The logger instance to be used by this object. + */ constructor(logger: ILogger) { this.logger = logger; } } +/** + * An abstract base class that provides a logger instance and a name. Subclasses + * can use the logger for logging messages, incorporating the name for context. + */ export abstract class WithLoggerAndName { protected logger: ILogger; protected name: string; + /** + * @param logger The logger instance to be used by this object. + * @param name The name associated with this object. + */ constructor(logger: ILogger, name: string) { this.logger = logger; this.name = name; From 49bd7cad2593cbc99b5c3d271251932320c0c192 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 13:36:03 -0500 Subject: [PATCH 032/289] Observability options documentation --- .../observability-options.ts | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index ae0698b64..91552ef81 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -1,20 +1,65 @@ +/** + * The Counter interface for recording increments of a metric. + */ interface ICounter { + /** + * Adds a value to the counter. + * @param retries The value to be added to the counter. + * @param dimensions The dimensions associated with this value. + */ add(retries: number, dimensions: {}): void; } +/** + * The Histogram interface for recording distributions of values of a metric. + */ interface IHistogram { + /** + * Records a value in the histogram. + * @param value The value to be recorded in the histogram. + * @param dimensions The dimensions associated with this value. + */ record(value: number, dimensions: {}): void; } +/** + * The Meter interface. Meters are responsible for creating and managing instruments (Counters, Histograms, etc.). + */ interface IMeter { + /** + * Creates a Counter instrument, which counts increments of a given metric. + * @param instrument The name of the counter instrument. + * @param attributes The attributes associated with this counter. + * @returns {ICounter} A Counter instance. + */ createCounter(instrument: string, attributes: {}): ICounter; + /** + * Creates a Histogram instrument, which records distributions of values for a given metric. + * @param instrument The name of the histogram instrument. + * @param attributes The attributes associated with this histogram. + * @returns {IHistogram} A Histogram instance. + */ createHistogram(instrument: string, attributes: {}): IHistogram; } +/** + * The MeterProvider interface. A MeterProvider creates and manages Meters. + */ interface IMeterProvider { + /** + * Returns a Meter, which can be used to create instruments for recording measurements. + * @param name The name of the Meter. + * @returns {IMeter} A Meter instance. + */ getMeter(name: string): IMeter; } +/** + * Options for configuring client-side metrics observability. Allows users to provide their own MeterProvider. + */ export interface ObservabilityOptions { + /** + * The MeterProvider to use for recording metrics. If not provided, a default MeterProvider will be used. 
+ */ meterProvider: IMeterProvider; } From 129e8fde3d0d41466e5ada160b4d355335c55370 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:14:17 -0500 Subject: [PATCH 033/289] Add more documentation for various MTF methods --- .../metrics-tracer-factory.ts | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index c1ddfcc8e..840166ae1 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -9,12 +9,28 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; +/** + * Information about a Bigtable operation. + */ interface OperationInfo { + /** + * The number of retries attempted for the operation. + */ retries?: number; + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ finalOperationStatus: string; + /** + * Number of times a connectivity error occurred during the operation. + */ connectivityErrorCount?: number; } +/** + * Dimensions (labels) associated with a Bigtable metric. These + * dimensions provide context for the metric values. + */ interface Dimensions { projectId: string; instanceId: string; @@ -27,6 +43,10 @@ interface Dimensions { clientName: string; } +/** + * A collection of OpenTelemetry metric instruments used to record + * Bigtable client-side metrics. + */ interface Metrics { operationLatencies: typeof Histogram; attemptLatencies: typeof Histogram; @@ -38,20 +58,46 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * An interface representing a Date-like object. Provides a `getTime` method + * for retrieving the time value in milliseconds. Used for abstracting time + * in tests. + */ interface DateLike { + /** + * Returns the time value in milliseconds. + * @returns The time value in milliseconds. + */ getTime(): number; } +/** + * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. + */ interface DateProvider { + /** + * Returns a DateLike object. + * @returns A DateLike object representing the current time or a fake time value. + */ getDate(): DateLike; } +/** + * The default DateProvider implementation. Returns the current date and time. + */ class DefaultDateProvider { + /** + * Returns a new Date object representing the current time. + * @returns {Date} The current date and time. + */ getDate() { return new Date(); } } +/** + * An interface representing a tabular API surface, such as a Bigtable table. + */ export interface ITabularApiSurface { instance: { id: string; @@ -65,6 +111,9 @@ export interface ITabularApiSurface { }; } +/** + * A class for tracing and recording client-side metrics related to Bigtable operations. + */ class MetricsTracer { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; @@ -78,6 +127,12 @@ class MetricsTracer { private lastReadTime: DateLike | null; private dateProvider: DateProvider; + /** + * @param metrics The metrics instruments to record data with. + * @param tabularApiSurface Information about the Bigtable table being accessed. + * @param methodName The name of the method being traced. + * @param dateProvider A provider for date/time information (for testing). 
+ */ constructor( metrics: Metrics, tabularApiSurface: ITabularApiSurface, @@ -135,10 +190,16 @@ class MetricsTracer { ); } + /** + * Called when the operation starts. Records the start time. + */ onOperationStart() { this.operationStartTime = this.dateProvider.getDate(); } + /** + * Called after the client reads a row. Records application blocking latencies. + */ onRead() { const currentTime = this.dateProvider.getDate(); if (this.lastReadTime) { @@ -161,6 +222,10 @@ class MetricsTracer { } } + /** + * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param info Information about the completed attempt. + */ onAttemptComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( @@ -177,10 +242,16 @@ class MetricsTracer { ); } + /** + * Called when a new attempt starts. Records the start time of the attempt. + */ onAttemptStart() { this.attemptStartTime = this.dateProvider.getDate(); } + /** + * Called when the first response is received. Records first response latencies. + */ onResponse() { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( @@ -201,6 +272,11 @@ class MetricsTracer { ); } + /** + * Called when an operation completes (successfully or unsuccessfully). + * Records operation latencies, retry counts, and connectivity error counts. + * @param info Information about the completed operation. + */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); this.onAttemptComplete(info); @@ -226,6 +302,10 @@ class MetricsTracer { ); } + /** + * Called when metadata is received. Extracts server timing information if available. + * @param metadata The received metadata. + */ onMetadataReceived(metadata: { internalRepr: Map; options: {}; @@ -256,6 +336,10 @@ class MetricsTracer { } } + /** + * Called when status information is received. Extracts zone and cluster information. + * @param status The received status information. + */ onStatusReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { @@ -278,9 +362,16 @@ class MetricsTracer { } } +/** + * A factory class for creating MetricsTracer instances. Initializes + * OpenTelemetry metrics instruments. + */ export class MetricsTracerFactory { private metrics: Metrics; + /** + * @param observabilityOptions Options for configuring client-side metrics observability. + */ constructor(observabilityOptions?: ObservabilityOptions) { // Create MeterProvider const meterProvider = @@ -365,6 +456,13 @@ export class MetricsTracerFactory { }; } + /** + * Creates a new MetricsTracer instance. + * @param tabularApiSurface The Bigtable table being accessed. + * @param methodName The name of the method being traced. + * @param dateProvider An optional DateProvider for testing purposes. + * @returns A new MetricsTracer instance. 
+ */ getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, From 052c7bbd349e74d1d8e4d44d52cc22f0a32d62df Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:30:36 -0500 Subject: [PATCH 034/289] Comment out Metrics --- src/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/index.ts b/src/index.ts index 136cf4a8d..f277a4cbf 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,7 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; +// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); From ac27a9563ae4e49f1bf186519365599e013268c1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:37:44 -0500 Subject: [PATCH 035/289] Add a bunch of TODOs in front of the comments --- src/index.ts | 5 +++++ src/tabular-api-surface.ts | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/index.ts b/src/index.ts index f277a4cbf..e77a67822 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,6 +35,7 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; +// TODO: Uncomment the next line after client-side metrics are well tested. // import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires @@ -396,10 +397,13 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { + // TODO: Uncomment the next line after client-side metrics are well tested. // this.metricsTracerFactory = new MetricsTracerFactory(); + // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; @@ -871,6 +875,7 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) + // TODO: Uncomment the next line after client-side metrics are well tested. // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index d62b13175..fda3c1eae 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,6 +210,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { + // TODO: Uncomment the next line after client-side metrics are well tested. /* // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( @@ -233,6 +234,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; + // TODO: Uncomment the next line after client-side metrics are well tested. 
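For orientation, here is a condensed sketch, not taken from the patch, of the order in which createReadStream is intended to drive a tracer once the commented-out calls above are re-enabled. The TracerLike interface mirrors only the MetricsTracer methods exercised here; the metadata, status, and read hooks are omitted, and the plain string statuses reflect the existing TODO about replacing them with an enum.

// Mirrors only part of the MetricsTracer surface from
// src/client-side-metrics/metrics-tracer-factory.ts.
interface TracerLike {
  onOperationStart(): void;
  onAttemptStart(): void;
  onResponse(): void;
  onAttemptComplete(info: {finalOperationStatus: string}): void;
  onOperationComplete(info: {
    retries?: number;
    finalOperationStatus: string;
    connectivityErrorCount?: number;
  }): void;
}

// One read operation that hits a single transient error and retries once.
function traceOperationWithOneRetry(tracer: TracerLike) {
  tracer.onOperationStart(); // once per createReadStream call
  tracer.onAttemptStart(); // attempt #1
  tracer.onResponse(); // a row arrives ('data' event)
  tracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // retryable error
  tracer.onAttemptStart(); // attempt #2, after the backoff timer fires
  tracer.onResponse();
  tracer.onOperationComplete({
    retries: 1,
    finalOperationStatus: 'SUCCESS',
    connectivityErrorCount: 0,
  });
}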
// let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; @@ -317,6 +319,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, + // TODO: Uncomment the next line after client-side metrics are well tested. /* read(size) { metricsTracer.onRead(); @@ -355,9 +358,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return originalEnd(chunk, encoding, cb); }; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onOperationStart(); const makeNewRequest = () => { + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onAttemptStart(); + // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -531,6 +537,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; + // TODO: Uncomment the next line after client-side metrics are well tested. /* requestStream .on( @@ -552,6 +559,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; + // TODO: Uncomment the next line after client-side metrics are well tested. /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: @@ -580,6 +588,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { @@ -597,6 +606,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); + // TODO: Uncomment the next line after client-side metrics are well tested. // onCallComplete('ERROR'); } }) @@ -604,11 +614,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; + // TODO: Uncomment the next line after client-side metrics are well tested. // metricsTracer.onResponse(); }) .on('end', () => { + // TODO: Uncomment the next line after client-side metrics are well tested. // numRequestsMade++; activeRequestStream = null; + // TODO: Uncomment the next line after client-side metrics are well tested. // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); From 18c942e9a0a696468bc94780de26c3eb04b86a46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:28 -0500 Subject: [PATCH 036/289] Delete client-side-metrics file --- system-test/client-side-metrics.ts | 93 ------------------------------ 1 file changed, 93 deletions(-) delete mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts deleted file mode 100644 index 9ac299773..000000000 --- a/system-test/client-side-metrics.ts +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Bigtable} from '../src'; -import * as assert from 'assert'; -import {describe, it, before, after} from 'mocha'; - -describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable({ - projectId: 'cloud-native-db-dpes-shared', - }); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; - - before(async () => { - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if (!families.some(family => family.id === columnFamilyId)) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - const rows = [ - { - key: 'row1', - data: { - cf1: { - q1: 'value1', - }, - }, - }, - { - key: 'row2', - data: { - cf1: { - q2: 'value2', - }, - }, - }, - ]; - await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); - } - }); -}); From 7a3aabc3b236f7c509a10401c9fd5e1c18093d91 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:46 -0500 Subject: [PATCH 037/289] Revert "Delete client-side-metrics file" This reverts commit 18c942e9a0a696468bc94780de26c3eb04b86a46. --- system-test/client-side-metrics.ts | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..9ac299773 --- /dev/null +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,93 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import {Bigtable} from '../src'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } + }); +}); From 5906c29987fee2c55be44a9e9e0a931930051db8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:53:51 -0500 Subject: [PATCH 038/289] Revert "Revert "Delete client-side-metrics file"" This reverts commit 7a3aabc3b236f7c509a10401c9fd5e1c18093d91. --- system-test/client-side-metrics.ts | 93 ------------------------------ 1 file changed, 93 deletions(-) delete mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts deleted file mode 100644 index 9ac299773..000000000 --- a/system-test/client-side-metrics.ts +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import {Bigtable} from '../src'; -import * as assert from 'assert'; -import {describe, it, before, after} from 'mocha'; - -describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable({ - projectId: 'cloud-native-db-dpes-shared', - }); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; - - before(async () => { - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if (!families.some(family => family.id === columnFamilyId)) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - const rows = [ - { - key: 'row1', - data: { - cf1: { - q1: 'value1', - }, - }, - }, - { - key: 'row2', - data: { - cf1: { - q2: 'value2', - }, - }, - }, - ]; - await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); - } - }); -}); From be731af7d428825e5afe6cf99f50bf4bc3bdacc2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 14:58:32 -0500 Subject: [PATCH 039/289] Add headers --- common/logger.ts | 14 ++++++++++++++ common/test-date-provider.ts | 14 ++++++++++++++ common/test-meter-provider.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-tracer-factory.ts | 14 ++++++++++++++ src/client-side-metrics/observability-options.ts | 14 ++++++++++++++ test/metrics-tracer/metrics-tracer.ts | 14 ++++++++++++++ 6 files changed, 84 insertions(+) diff --git a/common/logger.ts b/common/logger.ts index 691eaefdc..82baa0f9c 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * A simple logger interface for logging messages. Implementations of this interface * can provide various logging mechanisms (e.g., console logging, file logging, etc.). 
diff --git a/common/test-date-provider.ts b/common/test-date-provider.ts index 931fbc8eb..71ef66aee 100644 --- a/common/test-date-provider.ts +++ b/common/test-date-provider.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger} from './logger'; /** diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 609f458ae..7a2494a92 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger, WithLoggerAndName} from './logger'; /** diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 840166ae1..f0b76fb97 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + const { MeterProvider, Histogram, diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 91552ef81..7d54fe623 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * The Counter interface for recording increments of a metric. 
*/ diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index f666964cf..2cb8d9af6 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; import {TestMeterProvider} from '../../common/test-meter-provider'; From c26640f5e2f914afe2f3006d8056ce7cf11b6bbb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 15:01:01 -0500 Subject: [PATCH 040/289] Remove TODOs --- test/metrics-tracer/metrics-tracer.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 2cb8d9af6..91587b28c 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -46,15 +46,11 @@ class FakeBigtable { callback(null, 'my-project'); } } -// TODO: Put fixtures into a shared folder that are going to be used -// by system tests. class FakeInstance { id = 'fakeInstanceId'; } -// TODO: Check that there is a server latency for each attempt - describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); From 3011e5069da2b534edd3c530e0f66e8199daa0f0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 15:28:14 -0500 Subject: [PATCH 041/289] Add AttemptInfo to distinguish from OperationInfo --- src/client-side-metrics/metrics-tracer-factory.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index f0b76fb97..790ecfc68 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -39,6 +39,12 @@ interface OperationInfo { * Number of times a connectivity error occurred during the operation. */ connectivityErrorCount?: number; + isStreaming: string; +} + +interface AttemptInfo { + finalOperationStatus: string; + isStreaming: string; } /** @@ -240,7 +246,7 @@ class MetricsTracer { * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param info Information about the completed attempt. 
*/ - onAttemptComplete(info: OperationInfo) { + onAttemptComplete(info: AttemptInfo) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { From 945f237524db1037cca9bc09841ded725bc73f35 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:03:49 -0500 Subject: [PATCH 042/289] Adjust dimensions to match documentation --- common/client-side-metrics-dimensions.ts | 22 ++++ common/test-meter-provider.ts | 11 +- .../metrics-tracer-factory.ts | 114 ++++++++++++------ test/metrics-tracer/metrics-tracer.ts | 38 ++++-- 4 files changed, 135 insertions(+), 50 deletions(-) create mode 100644 common/client-side-metrics-dimensions.ts diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts new file mode 100644 index 000000000..8c85ee388 --- /dev/null +++ b/common/client-side-metrics-dimensions.ts @@ -0,0 +1,22 @@ +/** + * Dimensions (labels) associated with a Bigtable metric. These + * dimensions provide context for the metric values. + */ +export interface Dimensions { + projectId: string; + instanceId: string; + table: string; + cluster?: string | null; + zone?: string | null; + appProfileId?: string; + methodName: string; + attemptStatus?: string; + finalOperationStatus?: string; + streamingOperation?: string; + clientName: string; +} + +export function dimensionsToString(d: Dimensions) { + const p = (dimension?: string | null) => (dimension ? dimension : ''); + return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; +} diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index 7a2494a92..dc9cb741e 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -13,6 +13,7 @@ // limitations under the License. import {WithLogger, WithLoggerAndName} from './logger'; +import {Dimensions, dimensionsToString} from './client-side-metrics-dimensions'; /** * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. @@ -60,10 +61,11 @@ class TestCounter extends WithLoggerAndName { /** * Simulates adding a value to the counter. Logs the value and the counter name. * @param {number} value The value to be added to the counter. + * @param {Dimensions} dimensions The dimensions associated with the value. */ - add(value: number) { + add(value: number, dimensions: Dimensions) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()}` + `Value added to counter ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` ); } } @@ -76,10 +78,11 @@ class TestHistogram extends WithLoggerAndName { /** * Simulates recording a value in the histogram. Logs the value and the histogram name. * @param {number} value The value to be recorded in the histogram. + * @param {Dimensions} dimensions The dimensions associated with the value. 
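+   * @example
+   * // Logs the histogram name, the recorded value and the semicolon-joined dimensions
+   * // produced by dimensionsToString, for example:
+   * // Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;PENDING;;YES;nodejs-bigtable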
*/ - record(value: number) { + record(value: number, dimensions: Dimensions) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()}` + `Value added to histogram ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` ); } } diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 790ecfc68..3bf843829 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {Dimensions} from '../../common/client-side-metrics-dimensions'; + const { MeterProvider, Histogram, @@ -39,28 +41,12 @@ interface OperationInfo { * Number of times a connectivity error occurred during the operation. */ connectivityErrorCount?: number; - isStreaming: string; + streamingOperation: string; } interface AttemptInfo { finalOperationStatus: string; - isStreaming: string; -} - -/** - * Dimensions (labels) associated with a Bigtable metric. These - * dimensions provide context for the metric values. - */ -interface Dimensions { - projectId: string; - instanceId: string; - table: string; - cluster?: string | null; - zone?: string | null; - appProfileId?: string; - methodName: string; - finalOperationStatus: string; - clientName: string; + streamingOperation: string; } /** @@ -189,7 +175,21 @@ class MetricsTracer { }; } - private getFinalOperationDimensions( + private getOperationLatencyDimensions( + projectId: string, + finalOperationStatus: string, + streamOperation?: string + ): Dimensions { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + streamingOperation: streamOperation, + }, + this.getBasicDimensions(projectId) + ); + } + + private getFinalOpDimensions( projectId: string, finalOperationStatus: string ): Dimensions { @@ -201,7 +201,21 @@ class MetricsTracer { ); } - private getAttemptDimensions(projectId: string, attemptStatus: string) { + private getAttemptDimensions( + projectId: string, + attemptStatus: string, + streamingOperation: string + ) { + return Object.assign( + { + attemptStatus: attemptStatus, + streamingOperation: streamingOperation, + }, + this.getBasicDimensions(projectId) + ); + } + + private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { return Object.assign( { attemptStatus: attemptStatus, @@ -226,7 +240,7 @@ class MetricsTracer { this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.lastReadTime) { - const dimensions = this.getAttemptDimensions(projectId, 'PENDING'); + const dimensions = this.getBasicDimensions(projectId); const difference = currentTime.getTime() - this.lastReadTime.getTime(); this.metrics.applicationBlockingLatencies.record( @@ -253,7 +267,8 @@ class MetricsTracer { if (projectId && this.attemptStartTime) { const dimensions = this.getAttemptDimensions( projectId, - info.finalOperationStatus + info.finalOperationStatus, + info.streamingOperation ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metrics.attemptLatencies.record(totalTime, dimensions); @@ -272,14 +287,14 @@ class MetricsTracer { /** * Called when the first response is received. Records first response latencies. 
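+   * @param finalOperationStatus The status of the operation when the first response is received; recorded as the finalOperationStatus dimension on the first_response_latencies metric.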
*/ - onResponse() { + onResponse(finalOperationStatus: string) { const endTime = this.dateProvider.getDate(); this.tabularApiSurface.bigtable.getProjectId_( (err: Error | null, projectId?: string) => { if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOperationDimensions( + const dimensions = this.getFinalOpDimensions( projectId, - 'PENDING' + finalOperationStatus ); const totalTime = endTime.getTime() - this.operationStartTime.getTime(); @@ -305,16 +320,36 @@ class MetricsTracer { if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - const dimensions = this.getFinalOperationDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.operationLatencies.record(totalTime, dimensions); - this.metrics.retryCount.add(info.retries, dimensions); + { + // This block records operation latency metrics. + const operationLatencyDimensions = + this.getOperationLatencyDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.operationLatencies.record( + totalTime, + operationLatencyDimensions + ); + } + if (info.retries) { + // This block records the retry count metrics + const retryCountDimensions = this.getFinalOpDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.retryCount.add(info.retries, retryCountDimensions); + } if (info.connectivityErrorCount) { + // This block records the connectivity error count metrics + const connectivityCountDimensions = this.getAttemptStatusDimensions( + projectId, + info.finalOperationStatus + ); this.metrics.connectivityErrorCount.record( info.connectivityErrorCount, - dimensions + connectivityCountDimensions ); } } @@ -324,12 +359,16 @@ class MetricsTracer { /** * Called when metadata is received. Extracts server timing information if available. + * @param info Information about the completed attempt. * @param metadata The received metadata. */ - onMetadataReceived(metadata: { - internalRepr: Map; - options: {}; - }) { + onMetadataReceived( + info: AttemptInfo, + metadata: { + internalRepr: Map; + options: {}; + } + ) { const mappedEntries = new Map( Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, @@ -346,7 +385,8 @@ class MetricsTracer { if (projectId) { const dimensions = this.getAttemptDimensions( projectId, - 'PENDING' // TODO: Adjust this + info.finalOperationStatus, + info.streamingOperation ); this.metrics.serverLatencies.record(serverTime, dimensions); } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 91587b28c..78163c771 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -62,6 +62,10 @@ describe('Bigtable/MetricsTracer', () => { }); fakeMethod(): void { + const standardAttemptInfo = { + finalOperationStatus: 'PENDING', + streamingOperation: 'YES', + }; function createMetadata(duration: string) { return { internalRepr: new Map([ @@ -94,27 +98,42 @@ describe('Bigtable/MetricsTracer', () => { logger.log('3. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('101')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('101') + ); logger.log('5. Client receives first row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('6. 
Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('102')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('102') + ); logger.log('7. Client receives second row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); + metricsTracer.onAttemptComplete({ + finalOperationStatus: 'ERROR', + streamingOperation: 'YES', + }); logger.log('9. After a timeout, the second attempt is made.'); metricsTracer.onAttemptStart(); logger.log('10. Client receives status information.'); metricsTracer.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('103')); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('103') + ); logger.log('12. Client receives third row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived(createMetadata('104')); + metricsTracer.onMetadataReceived( + {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, + createMetadata('104') + ); logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); logger.log('15. User reads row 1'); metricsTracer.onRead(); logger.log('16. User reads row 2'); @@ -128,6 +147,7 @@ describe('Bigtable/MetricsTracer', () => { retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, + streamingOperation: 'YES', }); } } From b04c3c469df258e028418775911da02251585e42 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:09:31 -0500 Subject: [PATCH 043/289] Update tests with dimension metrics --- test/metrics-tracer/metrics-tracer.ts | 2 +- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 78163c771..9ed72e155 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -51,7 +51,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsTracer', () => { +describe.only('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', () => { const logger = new Logger(); class FakeTable { diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 96cec94ac..8a177824e 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. 
getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable 19. Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 -Value added to counter bigtable.googleapis.com:retry_count = 1 -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;;nodejs-bigtable From 2417e80844ba59d2b04f8053cd75a61f1f707c35 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 17 Jan 2025 17:12:28 -0500 Subject: [PATCH 044/289] Revert "Revert "Revert "Delete client-side-metrics file""" This reverts commit 5906c29987fee2c55be44a9e9e0a931930051db8. --- system-test/client-side-metrics.ts | 93 ++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts new file mode 100644 index 000000000..9ac299773 --- /dev/null +++ b/system-test/client-side-metrics.ts @@ -0,0 +1,93 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import {Bigtable} from '../src'; +import * as assert from 'assert'; +import {describe, it, before, after} from 'mocha'; + +describe.only('Bigtable/Table#getRows', () => { + const bigtable = new Bigtable({ + projectId: 'cloud-native-db-dpes-shared', + }); + const instanceId = 'emulator-test-instance'; + const tableId = 'my-table'; + const columnFamilyId = 'cf1'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; + + before(async () => { + const instance = bigtable.instance(instanceId); + try { + const [instanceInfo] = await instance.exists(); + if (!instanceInfo) { + const [, operation] = await instance.create({ + clusters: { + id: 'fake-cluster3', + location: 'us-west1-c', + nodes: 1, + }, + }); + await operation.promise(); + } + + const table = instance.table(tableId); + const [tableExists] = await table.exists(); + if (!tableExists) { + await table.create({families: [columnFamilyId]}); // Create column family + } else { + // Check if column family exists and create it if not. + const [families] = await table.getFamilies(); + + if (!families.some(family => family.id === columnFamilyId)) { + await table.createFamily(columnFamilyId); + } + } + } catch (error) { + console.error('Error during setup:', error); + // Consider re-throwing error, to actually stop tests. + } + }); + + after(async () => { + const instance = bigtable.instance(instanceId); + await instance.delete({}); + }); + + it('should read rows after inserting data', async () => { + const instance = bigtable.instance(instanceId); + const table = instance.table(tableId); + const rows = [ + { + key: 'row1', + data: { + cf1: { + q1: 'value1', + }, + }, + }, + { + key: 'row2', + data: { + cf1: { + q2: 'value2', + }, + }, + }, + ]; + await table.insert(rows); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); + } + }); +}); From df59d88bf7a15edc07c9517633fc97b6e4cf4223 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:00:18 -0500 Subject: [PATCH 045/289] Do some measurements --- myFile.txt | 146 +++++++++++++++++++++++++++++ src/index.ts | 13 ++- src/tabular-api-surface.ts | 38 ++++---- system-test/client-side-metrics.ts | 34 ++++++- 4 files changed, 207 insertions(+), 24 deletions(-) create mode 100644 myFile.txt diff --git a/myFile.txt b/myFile.txt new file mode 100644 index 000000000..8e932f96f --- /dev/null +++ b/myFile.txt @@ -0,0 +1,146 @@ +Done attempt 0 +Value added to histogram bigtable.googleapis.com:server_latencies = 124 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 146 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 
150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 1 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 2 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 3 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 4 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 5 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 6 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;;PENDING;;nodejs-bigtable +Done attempt 7 +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 8 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 9 +Value added 
to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 91 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 10 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 11 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 12 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 72 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 13 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 14 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 15 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 16 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 17 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 18 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 19 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 20 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 21 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 22 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 23 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 24 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 25 +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 26 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 27 +Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable +Done attempt 28 +Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable +Done attempt 29 diff --git a/src/index.ts b/src/index.ts index e77a67822..86d40ef3c 100644 --- a/src/index.ts +++ b/src/index.ts @@ -36,7 +36,8 @@ import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = 
require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; // TODO: Uncomment the next line after client-side metrics are well tested. -// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; +import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; +import {ObservabilityOptions} from './client-side-metrics/observability-options'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -102,6 +103,8 @@ export interface BigtableOptions extends gax.GoogleAuthOptions { * Internal only. */ BigtableTableAdminClient?: gax.ClientOptions; + + observabilityOptions?: ObservabilityOptions; } /** @@ -398,11 +401,13 @@ export class Bigtable { static Instance: Instance; static Cluster: Cluster; // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracerFactory: MetricsTracerFactory; + metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { // TODO: Uncomment the next line after client-side metrics are well tested. - // this.metricsTracerFactory = new MetricsTracerFactory(); + this.metricsTracerFactory = new MetricsTracerFactory( + options.observabilityOptions + ); // Determine what scopes are needed. // It is the union of the scopes on all three clients. @@ -876,7 +881,7 @@ export class Bigtable { .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) // TODO: Uncomment the next line after client-side metrics are well tested. - // .on('status', stream.emit.bind(stream, 'status')) + .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index fda3c1eae..c701194a2 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +let attemptCounter = 0; + import {promisifyAll} from '@google-cloud/promisify'; import arrify = require('arrify'); import {Instance} from './instance'; @@ -210,8 +212,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - // TODO: Uncomment the next line after client-side metrics are well tested. - /* + attemptCounter++; // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( this, @@ -222,9 +223,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); retries: numRequestsMade - 1, finalOperationStatus, connectivityErrorCount, + streamingOperation: 'YES', }); } - */ const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; @@ -235,7 +236,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const hasLimit = rowsLimit !== 0; // TODO: Uncomment the next line after client-side metrics are well tested. - // let connectivityErrorCount = 0; + let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -359,10 +360,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }; // TODO: Uncomment the next line after client-side metrics are well tested. 
- // metricsTracer.onOperationStart(); + metricsTracer.onOperationStart(); const makeNewRequest = () => { // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onAttemptStart(); + metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry @@ -538,12 +539,17 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }; // TODO: Uncomment the next line after client-side metrics are well tested. - /* requestStream .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - metricsTracer.onMetadataReceived(metadata); + metricsTracer.onMetadataReceived( + { + finalOperationStatus: 'PENDING', + streamingOperation: 'YES', + }, + metadata + ); } ) .on( @@ -554,19 +560,16 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onStatusReceived(status); } ); - */ rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; // TODO: Uncomment the next line after client-side metrics are well tested. - /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: // ABORTED, UNAVAILABLE, DATA_LOSS connectivityErrorCount++; } - */ if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. @@ -589,7 +592,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); backOffSettings ); // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum + metricsTracer.onAttemptComplete({ + finalOperationStatus: 'ERROR', + streamingOperation: 'YES', + }); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -607,7 +613,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } userStream.emit('error', error); // TODO: Uncomment the next line after client-side metrics are well tested. - // onCallComplete('ERROR'); + onCallComplete('ERROR'); } }) .on('data', _ => { @@ -615,14 +621,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onResponse(); + metricsTracer.onResponse('PENDING'); }) .on('end', () => { // TODO: Uncomment the next line after client-side metrics are well tested. - // numRequestsMade++; + numRequestsMade++; activeRequestStream = null; // TODO: Uncomment the next line after client-side metrics are well tested. 
- // onCallComplete('SUCCESS'); + onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 9ac299773..70ae9d844 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -15,16 +15,34 @@ import {Bigtable} from '../src'; import * as assert from 'assert'; import {describe, it, before, after} from 'mocha'; +import {TestMeterProvider} from '../common/test-meter-provider'; +import * as fs from 'node:fs'; + +class Logger { + private messages = ''; + + log(message: string) { + console.log(message); + this.messages = this.messages + message + '\n'; + } + + getMessages() { + return this.messages; + } +} describe.only('Bigtable/Table#getRows', () => { + const logger = new Logger(); + const meterProvider = new TestMeterProvider(logger); const bigtable = new Bigtable({ projectId: 'cloud-native-db-dpes-shared', + observabilityOptions: { + meterProvider, + }, }); const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; before(async () => { const instance = bigtable.instance(instanceId); @@ -86,8 +104,16 @@ describe.only('Bigtable/Table#getRows', () => { }, ]; await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); + for (let i = 0; i < 30; i++) { + console.log(`Doing attempt ${i}`); + const rows = await table.getRows(); + console.log(`Done attempt ${i}`); + logger.log(`Done attempt ${i}`); } + const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; + const filename = 'myFile.txt'; + + // Write the string to the file + fs.writeFileSync(filename, myString); }); }); From 8ad51f55a4009c573c64f2ee88d0551e20e08820 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:00:55 -0500 Subject: [PATCH 046/289] Revert "Do some measurements" This reverts commit df59d88bf7a15edc07c9517633fc97b6e4cf4223. 
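For reference, the measurement harness removed by this revert wired a test-only
meter provider into the client through the observabilityOptions hook added in the
reverted commits. A minimal sketch of that wiring (TypeScript, mirroring the deleted
system test; the project id is a placeholder and the import paths assume the
system-test/ location) looked roughly like this, and is only valid while the
reverted changes are in place:

    import {Bigtable} from '../src';
    import {TestMeterProvider} from '../common/test-meter-provider';

    // Test-only logger: accumulates every recorded metric update as text so the
    // test can assert on it or write it to a file afterwards.
    class Logger {
      private messages = '';
      log(message: string) {
        this.messages = this.messages + message + '\n';
      }
      getMessages() {
        return this.messages;
      }
    }

    // TestMeterProvider reports metric updates through the logger instead of
    // exporting them to Cloud Monitoring.
    const logger = new Logger();
    const bigtable = new Bigtable({
      projectId: 'my-project', // placeholder project id
      observabilityOptions: {meterProvider: new TestMeterProvider(logger)},
    });
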
--- myFile.txt | 146 ----------------------------- src/index.ts | 13 +-- src/tabular-api-surface.ts | 38 ++++---- system-test/client-side-metrics.ts | 34 +------ 4 files changed, 24 insertions(+), 207 deletions(-) delete mode 100644 myFile.txt diff --git a/myFile.txt b/myFile.txt deleted file mode 100644 index 8e932f96f..000000000 --- a/myFile.txt +++ /dev/null @@ -1,146 +0,0 @@ -Done attempt 0 -Value added to histogram bigtable.googleapis.com:server_latencies = 124 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 146 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 150 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 1 -Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 85 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 2 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 3 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 4 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 5 -Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 6 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;;PENDING;;nodejs-bigtable -Done attempt 7 -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram 
bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 8 -Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 9 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 91 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 92 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 10 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 11 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 80 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 80 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 12 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 72 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 13 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 14 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 15 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 16 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 17 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 18 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 19 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 83 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 84 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 20 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 21 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 79 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 22 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 84 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 86 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 23 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 24 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 25 -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 76 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 26 -Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 73 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 74 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 27 -Value added to histogram bigtable.googleapis.com:server_latencies = 58 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;PENDING;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:first_response_latencies = 78 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 81 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 81 with dimensions 
cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:server_latencies = 57 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;;;;readRows;PENDING;;YES;nodejs-bigtable -Done attempt 28 -Value added to histogram bigtable.googleapis.com:first_response_latencies = 75 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;PENDING;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:attempt_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 77 with dimensions cloud-native-db-dpes-shared;emulator-test-instance;my-table;us-west1-c;us-west1-c;;readRows;;SUCCESS;YES;nodejs-bigtable -Done attempt 29 diff --git a/src/index.ts b/src/index.ts index 86d40ef3c..e77a67822 100644 --- a/src/index.ts +++ b/src/index.ts @@ -36,8 +36,7 @@ import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; // TODO: Uncomment the next line after client-side metrics are well tested. -import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; -import {ObservabilityOptions} from './client-side-metrics/observability-options'; +// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -103,8 +102,6 @@ export interface BigtableOptions extends gax.GoogleAuthOptions { * Internal only. */ BigtableTableAdminClient?: gax.ClientOptions; - - observabilityOptions?: ObservabilityOptions; } /** @@ -401,13 +398,11 @@ export class Bigtable { static Instance: Instance; static Cluster: Cluster; // TODO: Uncomment the next line after client-side metrics are well tested. - metricsTracerFactory: MetricsTracerFactory; + // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { // TODO: Uncomment the next line after client-side metrics are well tested. - this.metricsTracerFactory = new MetricsTracerFactory( - options.observabilityOptions - ); + // this.metricsTracerFactory = new MetricsTracerFactory(); // Determine what scopes are needed. // It is the union of the scopes on all three clients. @@ -881,7 +876,7 @@ export class Bigtable { .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) // TODO: Uncomment the next line after client-side metrics are well tested. - .on('status', stream.emit.bind(stream, 'status')) + // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index c701194a2..fda3c1eae 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-let attemptCounter = 0; - import {promisifyAll} from '@google-cloud/promisify'; import arrify = require('arrify'); import {Instance} from './instance'; @@ -212,7 +210,8 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - attemptCounter++; + // TODO: Uncomment the next line after client-side metrics are well tested. + /* // Initialize objects for collecting client side metrics. const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( this, @@ -223,9 +222,9 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); retries: numRequestsMade - 1, finalOperationStatus, connectivityErrorCount, - streamingOperation: 'YES', }); } + */ const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; @@ -236,7 +235,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const hasLimit = rowsLimit !== 0; // TODO: Uncomment the next line after client-side metrics are well tested. - let connectivityErrorCount = 0; + // let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -360,10 +359,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }; // TODO: Uncomment the next line after client-side metrics are well tested. - metricsTracer.onOperationStart(); + // metricsTracer.onOperationStart(); const makeNewRequest = () => { // TODO: Uncomment the next line after client-side metrics are well tested. - metricsTracer.onAttemptStart(); + // metricsTracer.onAttemptStart(); // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry @@ -539,17 +538,12 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); }; // TODO: Uncomment the next line after client-side metrics are well tested. + /* requestStream .on( 'metadata', (metadata: {internalRepr: Map; options: {}}) => { - metricsTracer.onMetadataReceived( - { - finalOperationStatus: 'PENDING', - streamingOperation: 'YES', - }, - metadata - ); + metricsTracer.onMetadataReceived(metadata); } ) .on( @@ -560,16 +554,19 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); metricsTracer.onStatusReceived(status); } ); + */ rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; // TODO: Uncomment the next line after client-side metrics are well tested. + /* if (new Set([10, 14, 15]).has(error.code)) { // The following grpc errors will be considered connectivity errors: // ABORTED, UNAVAILABLE, DATA_LOSS connectivityErrorCount++; } + */ if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. @@ -592,10 +589,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); backOffSettings ); // TODO: Uncomment the next line after client-side metrics are well tested. 
- metricsTracer.onAttemptComplete({ - finalOperationStatus: 'ERROR', - streamingOperation: 'YES', - }); // TODO: Replace ERROR with enum + // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -613,7 +607,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } userStream.emit('error', error); // TODO: Uncomment the next line after client-side metrics are well tested. - onCallComplete('ERROR'); + // onCallComplete('ERROR'); } }) .on('data', _ => { @@ -621,14 +615,14 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; // TODO: Uncomment the next line after client-side metrics are well tested. - metricsTracer.onResponse('PENDING'); + // metricsTracer.onResponse(); }) .on('end', () => { // TODO: Uncomment the next line after client-side metrics are well tested. - numRequestsMade++; + // numRequestsMade++; activeRequestStream = null; // TODO: Uncomment the next line after client-side metrics are well tested. - onCallComplete('SUCCESS'); + // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts index 70ae9d844..9ac299773 100644 --- a/system-test/client-side-metrics.ts +++ b/system-test/client-side-metrics.ts @@ -15,34 +15,16 @@ import {Bigtable} from '../src'; import * as assert from 'assert'; import {describe, it, before, after} from 'mocha'; -import {TestMeterProvider} from '../common/test-meter-provider'; -import * as fs from 'node:fs'; - -class Logger { - private messages = ''; - - log(message: string) { - console.log(message); - this.messages = this.messages + message + '\n'; - } - - getMessages() { - return this.messages; - } -} describe.only('Bigtable/Table#getRows', () => { - const logger = new Logger(); - const meterProvider = new TestMeterProvider(logger); const bigtable = new Bigtable({ projectId: 'cloud-native-db-dpes-shared', - observabilityOptions: { - meterProvider, - }, }); const instanceId = 'emulator-test-instance'; const tableId = 'my-table'; const columnFamilyId = 'cf1'; + const clusterId = 'test-cluster'; + const location = 'us-central1-c'; before(async () => { const instance = bigtable.instance(instanceId); @@ -104,16 +86,8 @@ describe.only('Bigtable/Table#getRows', () => { }, ]; await table.insert(rows); - for (let i = 0; i < 30; i++) { - console.log(`Doing attempt ${i}`); - const rows = await table.getRows(); - console.log(`Done attempt ${i}`); - logger.log(`Done attempt ${i}`); + for (let i = 0; i < 100; i++) { + console.log(await table.getRows()); } - const myString = logger.getMessages(); // 'This is the string I want to write to the file.'; - const filename = 'myFile.txt'; - - // Write the string to the file - fs.writeFileSync(filename, myString); }); }); From 6868f5a9d4d5b1b53f826a68419bea62aef4d1a9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:01:35 -0500 Subject: [PATCH 047/289] Revert "Revert "Revert "Revert "Delete client-side-metrics file"""" This reverts commit 2417e80844ba59d2b04f8053cd75a61f1f707c35. 
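The net effect of this revert chain is to delete system-test/client-side-metrics.ts
again; the client-side metrics code remains exercised by the unit tests in
test/metrics-tracer/metrics-tracer.ts, which later patches in this series document.
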
--- system-test/client-side-metrics.ts | 93 ------------------------------ 1 file changed, 93 deletions(-) delete mode 100644 system-test/client-side-metrics.ts diff --git a/system-test/client-side-metrics.ts b/system-test/client-side-metrics.ts deleted file mode 100644 index 9ac299773..000000000 --- a/system-test/client-side-metrics.ts +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Bigtable} from '../src'; -import * as assert from 'assert'; -import {describe, it, before, after} from 'mocha'; - -describe.only('Bigtable/Table#getRows', () => { - const bigtable = new Bigtable({ - projectId: 'cloud-native-db-dpes-shared', - }); - const instanceId = 'emulator-test-instance'; - const tableId = 'my-table'; - const columnFamilyId = 'cf1'; - const clusterId = 'test-cluster'; - const location = 'us-central1-c'; - - before(async () => { - const instance = bigtable.instance(instanceId); - try { - const [instanceInfo] = await instance.exists(); - if (!instanceInfo) { - const [, operation] = await instance.create({ - clusters: { - id: 'fake-cluster3', - location: 'us-west1-c', - nodes: 1, - }, - }); - await operation.promise(); - } - - const table = instance.table(tableId); - const [tableExists] = await table.exists(); - if (!tableExists) { - await table.create({families: [columnFamilyId]}); // Create column family - } else { - // Check if column family exists and create it if not. - const [families] = await table.getFamilies(); - - if (!families.some(family => family.id === columnFamilyId)) { - await table.createFamily(columnFamilyId); - } - } - } catch (error) { - console.error('Error during setup:', error); - // Consider re-throwing error, to actually stop tests. - } - }); - - after(async () => { - const instance = bigtable.instance(instanceId); - await instance.delete({}); - }); - - it('should read rows after inserting data', async () => { - const instance = bigtable.instance(instanceId); - const table = instance.table(tableId); - const rows = [ - { - key: 'row1', - data: { - cf1: { - q1: 'value1', - }, - }, - }, - { - key: 'row2', - data: { - cf1: { - q2: 'value2', - }, - }, - }, - ]; - await table.insert(rows); - for (let i = 0; i < 100; i++) { - console.log(await table.getRows()); - } - }); -}); From 7cc36a226b0652f76ee16dba15c333e5b6a7b61a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:08:21 -0500 Subject: [PATCH 048/289] Add header --- common/client-side-metrics-dimensions.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts index 8c85ee388..2797b4499 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-dimensions.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + /** * Dimensions (labels) associated with a Bigtable metric. These * dimensions provide context for the metric values. From 62a4b8be16fdd16705c2b1954c72c9da614ad89f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:14:52 -0500 Subject: [PATCH 049/289] Remove the TODOs --- src/index.ts | 9 ----- src/tabular-api-surface.ts | 67 -------------------------------------- 2 files changed, 76 deletions(-) diff --git a/src/index.ts b/src/index.ts index e77a67822..dc4143c99 100644 --- a/src/index.ts +++ b/src/index.ts @@ -35,8 +35,6 @@ import * as v2 from './v2'; import {PassThrough, Duplex} from 'stream'; import grpcGcpModule = require('grpc-gcp'); import {ClusterUtils} from './utils/cluster'; -// TODO: Uncomment the next line after client-side metrics are well tested. -// import {MetricsTracerFactory} from './client-side-metrics/metrics-tracer-factory'; // eslint-disable-next-line @typescript-eslint/no-var-requires const streamEvents = require('stream-events'); @@ -397,13 +395,8 @@ export class Bigtable { static AppProfile: AppProfile; static Instance: Instance; static Cluster: Cluster; - // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracerFactory: MetricsTracerFactory; constructor(options: BigtableOptions = {}) { - // TODO: Uncomment the next line after client-side metrics are well tested. - // this.metricsTracerFactory = new MetricsTracerFactory(); - // Determine what scopes are needed. // It is the union of the scopes on all three clients. const scopes: string[] = []; @@ -875,8 +868,6 @@ export class Bigtable { gaxStream .on('error', stream.destroy.bind(stream)) .on('metadata', stream.emit.bind(stream, 'metadata')) - // TODO: Uncomment the next line after client-side metrics are well tested. - // .on('status', stream.emit.bind(stream, 'status')) .on('request', stream.emit.bind(stream, 'request')) .pipe(stream); }); diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index fda3c1eae..b15a08766 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -210,22 +210,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); * region_tag:bigtable_api_table_readstream */ createReadStream(opts?: GetRowsOptions) { - // TODO: Uncomment the next line after client-side metrics are well tested. - /* - // Initialize objects for collecting client side metrics. - const metricsTracer = this.bigtable.metricsTracerFactory.getMetricsTracer( - this, - 'readRows' - ); - function onCallComplete(finalOperationStatus: string) { - metricsTracer.onOperationComplete({ - retries: numRequestsMade - 1, - finalOperationStatus, - connectivityErrorCount, - }); - } - */ - const options = opts || {}; const maxRetries = is.number(this.maxRetries) ? this.maxRetries! : 10; let activeRequestStream: AbortableDuplex | null; @@ -234,8 +218,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); const rowsLimit = options.limit || 0; const hasLimit = rowsLimit !== 0; - // TODO: Uncomment the next line after client-side metrics are well tested. 
- // let connectivityErrorCount = 0; let numConsecutiveErrors = 0; let numRequestsMade = 0; let retryTimer: NodeJS.Timeout | null; @@ -319,13 +301,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); rowsRead++; callback(null, row); }, - // TODO: Uncomment the next line after client-side metrics are well tested. - /* - read(size) { - metricsTracer.onRead(); - return this.read(size); - }, - */ }); // The caller should be able to call userStream.end() to stop receiving @@ -357,13 +332,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; - - // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onOperationStart(); const makeNewRequest = () => { - // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onAttemptStart(); - // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry retryTimer = null; @@ -537,36 +506,10 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); return false; }; - // TODO: Uncomment the next line after client-side metrics are well tested. - /* - requestStream - .on( - 'metadata', - (metadata: {internalRepr: Map; options: {}}) => { - metricsTracer.onMetadataReceived(metadata); - } - ) - .on( - 'status', - (status: { - metadata: {internalRepr: Map; options: {}}; - }) => { - metricsTracer.onStatusReceived(status); - } - ); - */ rowStream .on('error', (error: ServiceError) => { rowStreamUnpipe(rowStream, userStream); activeRequestStream = null; - // TODO: Uncomment the next line after client-side metrics are well tested. - /* - if (new Set([10, 14, 15]).has(error.code)) { - // The following grpc errors will be considered connectivity errors: - // ABORTED, UNAVAILABLE, DATA_LOSS - connectivityErrorCount++; - } - */ if (IGNORED_STATUS_CODES.has(error.code)) { // We ignore the `cancelled` "error", since we are the ones who cause // it when the user calls `.abort()`. @@ -588,8 +531,6 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); numConsecutiveErrors, backOffSettings ); - // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onAttemptComplete({finalOperationStatus: 'ERROR'}); // TODO: Replace ERROR with enum retryTimer = setTimeout(makeNewRequest, nextRetryDelay); } else { if ( @@ -606,23 +547,15 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); error.code = grpc.status.CANCELLED; } userStream.emit('error', error); - // TODO: Uncomment the next line after client-side metrics are well tested. - // onCallComplete('ERROR'); } }) .on('data', _ => { // Reset error count after a successful read so the backoff // time won't keep increasing when as stream had multiple errors numConsecutiveErrors = 0; - // TODO: Uncomment the next line after client-side metrics are well tested. - // metricsTracer.onResponse(); }) .on('end', () => { - // TODO: Uncomment the next line after client-side metrics are well tested. - // numRequestsMade++; activeRequestStream = null; - // TODO: Uncomment the next line after client-side metrics are well tested. 
- // onCallComplete('SUCCESS'); }); rowStreamPipe(rowStream, userStream); }; From 7c4f414c373a6b2f8a5219f6efcd6f42e81afbf9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:16:04 -0500 Subject: [PATCH 050/289] Add line back --- src/tabular-api-surface.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tabular-api-surface.ts b/src/tabular-api-surface.ts index b15a08766..a7f86e0a2 100644 --- a/src/tabular-api-surface.ts +++ b/src/tabular-api-surface.ts @@ -332,6 +332,7 @@ Please use the format 'prezzy' or '${instance.name}/tables/prezzy'.`); } return originalEnd(chunk, encoding, cb); }; + const makeNewRequest = () => { // Avoid cancelling an expired timer if user // cancelled the stream in the middle of a retry From 83f53ae88613e5a5bcef0907463b80f7e537a648 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:20:50 -0500 Subject: [PATCH 051/289] Add comment --- src/client-side-metrics/metrics-tracer-factory.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 3bf843829..69bd03b0b 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -433,7 +433,8 @@ export class MetricsTracerFactory { * @param observabilityOptions Options for configuring client-side metrics observability. */ constructor(observabilityOptions?: ObservabilityOptions) { - // Create MeterProvider + // Use MeterProvider provided by user + // If MeterProvider was not provided then use the default meter provider. const meterProvider = observabilityOptions && observabilityOptions.meterProvider ? observabilityOptions.meterProvider From 610eec01afbbe34ba9fec94ce06be7fb342489ac Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:34:49 -0500 Subject: [PATCH 052/289] Add version --- src/client-side-metrics/metrics-tracer-factory.ts | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 69bd03b0b..b87eeb04e 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -171,7 +171,7 @@ class MetricsTracer { zone: this.zone, appProfileId: this.tabularApiSurface.bigtable.appProfileId, methodName: this.methodName, - clientName: 'nodejs-bigtable', + clientName: 'nodejs-bigtable', // TODO: Add version }; } @@ -446,11 +446,7 @@ export class MetricsTracerFactory { // resource if running on GCP. Otherwise, metrics will be sent with monitored resource // `generic_task`. 
resource: new Resources.Resource({ - 'service.name': 'example-metric-service', - 'service.namespace': 'samples', - 'service.instance.id': '12345', - 'cloud.resource_manager.project_id': - 'cloud-native-db-dpes-shared', + 'service.name': 'bigtable-metrics', }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter From a2b5951b58c5652185a2204623921751c9c5507f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 10:42:21 -0500 Subject: [PATCH 053/289] Add version to client side metrics --- src/client-side-metrics/metrics-tracer-factory.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index b87eeb04e..4eab84feb 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,6 +24,7 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; +const { version } = require('../../package.json'); /** * Information about a Bigtable operation. @@ -171,7 +172,7 @@ class MetricsTracer { zone: this.zone, appProfileId: this.tabularApiSurface.bigtable.appProfileId, methodName: this.methodName, - clientName: 'nodejs-bigtable', // TODO: Add version + clientName: `nodejs-bigtable/${version}`, }; } From 5f67cad8693f82c0b42c0611e6ca5e847cd6adf6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:15:24 -0500 Subject: [PATCH 054/289] linter --- src/client-side-metrics/metrics-tracer-factory.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 4eab84feb..002339535 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,7 +24,7 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; -const { version } = require('../../package.json'); +const {version} = require('../../package.json'); /** * Information about a Bigtable operation. From 8f20c78c3c2ccb954745d40690ec60ba94065308 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:20:34 -0500 Subject: [PATCH 055/289] Generate documentation for AttemptInfo interface --- src/client-side-metrics/metrics-tracer-factory.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 002339535..41985f8c5 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -45,8 +45,17 @@ interface OperationInfo { streamingOperation: string; } +/** + * Information about a single attempt of a Bigtable operation. + */ interface AttemptInfo { + /** + * The final status of the attempt (e.g., 'OK', 'ERROR'). 
+ */ finalOperationStatus: string; + /** + * Whether the operation is a streaming operation or not + */ streamingOperation: string; } From 9b1ba9d29ec4d4431c1a569aebe48cb0f9f6ff20 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:24:25 -0500 Subject: [PATCH 056/289] Logger documentation --- test/metrics-tracer/metrics-tracer.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 9ed72e155..206f86385 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -19,13 +19,24 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; +/** + * A basic logger class that stores log messages in an array. Useful for testing. + */ class Logger { private messages: string[] = []; + /** + * Logs a message by adding it to the internal message array. + * @param message The message to be logged. + */ log(message: string) { this.messages.push(message); } + /** + * Retrieves all logged messages. + * @returns An array of logged messages. + */ getMessages() { return this.messages; } From 88e96c34172aadf36a871945b812f05e6555ba6a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:27:29 -0500 Subject: [PATCH 057/289] Generate more documentation --- test/metrics-tracer/metrics-tracer.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 206f86385..336b6a485 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -42,15 +42,28 @@ class Logger { } } +/** + * A fake implementation of the Bigtable client for testing purposes. Provides a + * metricsTracerFactory and a stubbed getProjectId_ method. + */ class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; + /** + * @param observabilityOptions Options for configuring client-side metrics + * observability, including a TestMeterProvider. + */ constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { this.metricsTracerFactory = new MetricsTracerFactory({ meterProvider: observabilityOptions.meterProvider, }); } + /** + * A stubbed method that simulates retrieving the project ID. Always returns + * 'my-project'. + * @param callback A callback function that receives the project ID (or an error). + */ getProjectId_( callback: (err: Error | null, projectId?: string) => void ): void { @@ -58,7 +71,13 @@ class FakeBigtable { } } +/** + * A fake implementation of a Bigtable instance for testing purposes. Provides only an ID. + */ class FakeInstance { + /** + * The ID of the fake instance. + */ id = 'fakeInstanceId'; } From ed39628cfdbe0091dbf6a25344584628b816e9f6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:49:35 -0500 Subject: [PATCH 058/289] Generate documentation --- src/client-side-metrics/metrics-tracer-factory.ts | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 41985f8c5..ae3c12cf6 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -172,6 +172,12 @@ class MetricsTracer { } } + /** + * Assembles the basic dimensions for metrics. These dimensions provide + * context about the Bigtable environment and the operation being performed. 
+ * @param {string} projectId The Google Cloud project ID. + * @returns {object} An object containing the basic dimensions. + */ private getBasicDimensions(projectId: string) { return { projectId, @@ -185,6 +191,15 @@ class MetricsTracer { }; } + /** + * Assembles the dimensions for operation latency metrics. These dimensions + * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @param {string} streamOperation Whether the operation was a streaming operation or not. + * @returns An object containing the dimensions for operation latency metrics. + */ private getOperationLatencyDimensions( projectId: string, finalOperationStatus: string, From 76b1249f3126d9bf4c86c31d78405523d694222d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 11:59:21 -0500 Subject: [PATCH 059/289] Make sure test reports correct duration, zone cluster --- test/metrics-tracer/metrics-tracer.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 336b6a485..43c0fbf48 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -99,7 +99,7 @@ describe.only('Bigtable/MetricsTracer', () => { function createMetadata(duration: string) { return { internalRepr: new Map([ - ['server-timing', Buffer.from(`dur=${duration}`)], + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], ]), options: {}, }; @@ -107,7 +107,10 @@ describe.only('Bigtable/MetricsTracer', () => { const status = { metadata: { internalRepr: new Map([ - ['x-goog-ext-425905942-bin', Buffer.from('doLater')], + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], ]), options: {}, }, From 8d60cb1a7839ea6ff485e1a339a38247612d97e7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 12:59:59 -0500 Subject: [PATCH 060/289] Generate documentation for the dimensions to strin --- common/client-side-metrics-dimensions.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts index 2797b4499..967ff9113 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-dimensions.ts @@ -30,6 +30,15 @@ export interface Dimensions { clientName: string; } +/** + * Converts a Dimensions object to a string representation. + * This string representation is suitable for use as labels or tags. + * The order of dimensions in the output string is fixed: + * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName + * If a dimension is null or undefined, the empty string is used. + * @param {Dimensions} d The Dimensions object to convert. + * @returns A string representation of the dimensions. + */ export function dimensionsToString(d: Dimensions) { const p = (dimension?: string | null) => (dimension ? 
dimension : ''); return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; From 19fef92054551dc08dba85c104e9145a07fd6d56 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:12:28 -0500 Subject: [PATCH 061/289] Add version to the dimensions --- src/client-side-metrics/metrics-tracer-factory.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index ae3c12cf6..69eb96452 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,7 +24,7 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; -const {version} = require('../../package.json'); +import * as fs from 'fs'; /** * Information about a Bigtable operation. @@ -127,6 +127,9 @@ export interface ITabularApiSurface { }; } +const packageJSON = fs.readFileSync('package.json'); +const version = JSON.parse(packageJSON.toString()).version; + /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ From 1ecfb1c1eb87a67f1c6c9cdb6112037d38434e7e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:13:01 -0500 Subject: [PATCH 062/289] Fix the client name. The version is going to chan --- common/client-side-metrics-dimensions.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-dimensions.ts index 967ff9113..0e06d365b 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-dimensions.ts @@ -41,5 +41,5 @@ export interface Dimensions { */ export function dimensionsToString(d: Dimensions) { const p = (dimension?: string | null) => (dimension ? dimension : ''); - return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};${p(d.clientName)}`; + return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};nodejs-bigtable`; } From d8a3960e93ef0d0f838305d91be12f485c8b2705 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:13:24 -0500 Subject: [PATCH 063/289] Update the expected output file. --- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index 8a177824e..bdefbba01 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. 
-Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. 
Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;;doLater;;fakeMethod;SUCCESS;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From 1d6b64586b0da6d95380218868c20ed48ac03197 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:35:41 -0500 Subject: [PATCH 064/289] Fox bug, get cluster --- src/client-side-metrics/metrics-tracer-factory.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 69eb96452..21c4f7c02 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -445,7 +445,7 @@ class MetricsTracer { this.zone = instanceInformation[0]; } if (instanceInformation && instanceInformation[1]) { - this.cluster = instanceInformation[0]; + this.cluster = instanceInformation[1]; } } } From acb1d3a6026ac695a71e4ddfdb2ce9f6a1ada841 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:37:25 -0500 Subject: [PATCH 065/289] Add fake cluster to tests --- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index bdefbba01..a7208bea5 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. 
getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. 
Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;us-west1-c ;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From c30b0579b7a974bef3a5bb8d1a56f75fcd5f83e4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:39:03 -0500 Subject: [PATCH 066/289] Remove console log --- test/metrics-tracer/metrics-tracer.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 43c0fbf48..8367d97ee 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -192,6 +192,5 @@ describe.only('Bigtable/MetricsTracer', () => { ); // Ensure events occurred in the right order here: assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); - console.log('test'); }); }); From 9ef079b6873316e9c9b19a5744f0bf480c2efe21 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 13:51:34 -0500 Subject: [PATCH 067/289] Generate more documentation --- .../metrics-tracer-factory.ts | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 21c4f7c02..dde89ef68 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -217,6 +217,13 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for final operation metrics. These dimensions provide + * context about the Bigtable environment and the operation being performed. + * @param projectId The Google Cloud project ID. + * @param finalOperationStatus The final status of the operation. + * @returns An object containing the dimensions for final operation metrics. + */ private getFinalOpDimensions( projectId: string, finalOperationStatus: string @@ -229,6 +236,15 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for attempt metrics. 
These dimensions provide context + * about the Bigtable environment, the operation being performed, and the status of the attempt. + * Includes whether the operation was a streaming operation or not. + * @param projectId The Google Cloud project ID. + * @param attemptStatus The status of the attempt. + * @param streamingOperation Whether the operation was a streaming operation or not. + * @returns An object containing the dimensions for attempt metrics. + */ private getAttemptDimensions( projectId: string, attemptStatus: string, @@ -243,6 +259,13 @@ class MetricsTracer { ); } + /** + * Assembles the dimensions for attempt status metrics. These dimensions provide context + * about the Bigtable environment and the operation being performed. + * @param projectId The Google Cloud project ID. + * @param attemptStatus The status of the attempt. + * @returns An object containing the dimensions for attempt status metrics. + */ private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { return Object.assign( { From d5a0368497c61e8a56422f5ee6b7d94e3f1a6889 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 14:57:01 -0500 Subject: [PATCH 068/289] Require a call to fetch the project when using MT --- .../metrics-tracer-factory.ts | 164 ++++++++------- test/metrics-tracer/metrics-tracer.ts | 192 +++++++++--------- 2 files changed, 190 insertions(+), 166 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index dde89ef68..6e2fd7ade 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -478,90 +478,103 @@ class MetricsTracer { * OpenTelemetry metrics instruments. */ export class MetricsTracerFactory { - private metrics: Metrics; + private metrics?: Metrics; + private observabilityOptions?: ObservabilityOptions; /** * @param observabilityOptions Options for configuring client-side metrics observability. */ constructor(observabilityOptions?: ObservabilityOptions) { - // Use MeterProvider provided by user - // If MeterProvider was not provided then use the default meter provider. - const meterProvider = - observabilityOptions && observabilityOptions.meterProvider - ? observabilityOptions.meterProvider - : new MeterProvider({ - // This is the default meter provider - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId: 'cloud-native-db-dpes-shared', // TODO: Replace later + this.observabilityOptions = observabilityOptions; + } + + private initialize( + projectId?: string, + observabilityOptions?: ObservabilityOptions + ) { + if (this.metrics) { + return this.metrics; + } else { + // Use MeterProvider provided by user + // If MeterProvider was not provided then use the default meter provider. 
+ const meterProvider = + observabilityOptions && observabilityOptions.meterProvider + ? observabilityOptions.meterProvider + : new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + 'service.name': 'bigtable-metrics', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 10_000, + exporter: new MetricExporter({ + projectId, + }), }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.metrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.metrics = { + operationLatencies: meter.createHistogram('operation_latencies', { description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { description: - 'Latencies from when a client sends a request and receives the first row of the response.', + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { + }), + retryCount: meter.createCounter('retry_count', { description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + return this.metrics; + } } /** @@ -569,15 +582,18 @@ export class MetricsTracerFactory { * @param tabularApiSurface The Bigtable table being accessed. * @param methodName The name of the method being traced. * @param dateProvider An optional DateProvider for testing purposes. + * @param {string} projectId The project id * @returns A new MetricsTracer instance. 
*/ getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, + projectId?: string, dateProvider?: DateProvider ) { + const metrics = this.initialize(projectId, this.observabilityOptions); return new MetricsTracer( - this.metrics, + metrics, tabularApiSurface, methodName, dateProvider diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 8367d97ee..7ef1fd7ba 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -82,7 +82,7 @@ class FakeInstance { } describe.only('Bigtable/MetricsTracer', () => { - it('should record the right metrics with a typical method call', () => { + it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); class FakeTable { id = 'fakeTableId'; @@ -91,101 +91,109 @@ describe.only('Bigtable/MetricsTracer', () => { meterProvider: new TestMeterProvider(logger), }); - fakeMethod(): void { - const standardAttemptInfo = { - finalOperationStatus: 'PENDING', - streamingOperation: 'YES', - }; - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsTracer = - this.bigtable.metricsTracerFactory.getMetricsTracer( - this, - 'fakeMethod', - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); - metricsTracer.onOperationStart(); - logger.log('2. The attempt starts.'); - metricsTracer.onAttemptStart(); - logger.log('3. Client receives status information.'); - metricsTracer.onStatusReceived(status); - logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('101') - ); - logger.log('5. Client receives first row.'); - metricsTracer.onResponse('PENDING'); - logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('102') - ); - logger.log('7. Client receives second row.'); - metricsTracer.onResponse('PENDING'); - logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({ - finalOperationStatus: 'ERROR', - streamingOperation: 'YES', - }); - logger.log('9. After a timeout, the second attempt is made.'); - metricsTracer.onAttemptStart(); - logger.log('10. Client receives status information.'); - metricsTracer.onStatusReceived(status); - logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived( - standardAttemptInfo, - createMetadata('103') - ); - logger.log('12. Client receives third row.'); - metricsTracer.onResponse('PENDING'); - logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived( - {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, - createMetadata('104') - ); - logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse('PENDING'); - logger.log('15. User reads row 1'); - metricsTracer.onRead(); - logger.log('16. User reads row 2'); - metricsTracer.onRead(); - logger.log('17. User reads row 3'); - metricsTracer.onRead(); - logger.log('18. User reads row 4'); - metricsTracer.onRead(); - logger.log('19. 
Stream ends, operation completes'); - metricsTracer.onOperationComplete({ - retries: 1, - finalOperationStatus: 'SUCCESS', - connectivityErrorCount: 1, - streamingOperation: 'YES', + async fakeMethod(): Promise { + return new Promise((resolve, reject) => { + this.bigtable.getProjectId_((err, projectId) => { + const standardAttemptInfo = { + finalOperationStatus: 'PENDING', + streamingOperation: 'YES', + }; + + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], + ]), + options: {}, + }; + } + + const status = { + metadata: { + internalRepr: new Map([ + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], + ]), + options: {}, + }, + }; + const metricsTracer = + this.bigtable.metricsTracerFactory.getMetricsTracer( + this, + 'fakeMethod', + projectId, + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. + // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsTracer.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsTracer.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('4. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('101') + ); + logger.log('5. Client receives first row.'); + metricsTracer.onResponse('PENDING'); + logger.log('6. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('102') + ); + logger.log('7. Client receives second row.'); + metricsTracer.onResponse('PENDING'); + logger.log('8. A transient error occurs.'); + metricsTracer.onAttemptComplete({ + finalOperationStatus: 'ERROR', + streamingOperation: 'YES', + }); + logger.log('9. After a timeout, the second attempt is made.'); + metricsTracer.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsTracer.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsTracer.onMetadataReceived( + standardAttemptInfo, + createMetadata('103') + ); + logger.log('12. Client receives third row.'); + metricsTracer.onResponse('PENDING'); + logger.log('13. Client receives metadata.'); + metricsTracer.onMetadataReceived( + {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, + createMetadata('104') + ); + logger.log('14. Client receives fourth row.'); + metricsTracer.onResponse('PENDING'); + logger.log('15. User reads row 1'); + metricsTracer.onRead(); + logger.log('16. User reads row 2'); + metricsTracer.onRead(); + logger.log('17. User reads row 3'); + metricsTracer.onRead(); + logger.log('18. User reads row 4'); + metricsTracer.onRead(); + logger.log('19. 
Stream ends, operation completes'); + metricsTracer.onOperationComplete({ + retries: 1, + finalOperationStatus: 'SUCCESS', + connectivityErrorCount: 1, + streamingOperation: 'YES', + }); + resolve(); + }); }); } } const table = new FakeTable(); - table.fakeMethod(); + await table.fakeMethod(); const expectedOutput = fs.readFileSync( './test/metrics-tracer/typical-method-call.txt', 'utf8' From ae532d8e8a84b7bfbbd83d2b32399c454bc617f3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:14:09 -0500 Subject: [PATCH 069/289] use same date provider for all metrics tracers --- .../metrics-tracer-factory.ts | 12 ++++++++---- test/metrics-tracer/metrics-tracer.ts | 19 ++++++++++++------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 6e2fd7ade..41daa239e 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -480,12 +480,17 @@ class MetricsTracer { export class MetricsTracerFactory { private metrics?: Metrics; private observabilityOptions?: ObservabilityOptions; + private dateProvider: DateProvider; /** * @param observabilityOptions Options for configuring client-side metrics observability. */ - constructor(observabilityOptions?: ObservabilityOptions) { + constructor( + dateProvider: DateProvider, + observabilityOptions?: ObservabilityOptions + ) { this.observabilityOptions = observabilityOptions; + this.dateProvider = dateProvider; } private initialize( @@ -588,15 +593,14 @@ export class MetricsTracerFactory { getMetricsTracer( tabularApiSurface: ITabularApiSurface, methodName: string, - projectId?: string, - dateProvider?: DateProvider + projectId?: string ) { const metrics = this.initialize(projectId, this.observabilityOptions); return new MetricsTracer( metrics, tabularApiSurface, methodName, - dateProvider + this.dateProvider ); } } diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 7ef1fd7ba..04d5cba52 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -53,8 +53,11 @@ class FakeBigtable { * @param observabilityOptions Options for configuring client-side metrics * observability, including a TestMeterProvider. */ - constructor(observabilityOptions: {meterProvider: TestMeterProvider}) { - this.metricsTracerFactory = new MetricsTracerFactory({ + constructor( + observabilityOptions: {meterProvider: TestMeterProvider}, + dateProvider: TestDateProvider + ) { + this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { meterProvider: observabilityOptions.meterProvider, }); } @@ -87,9 +90,12 @@ describe.only('Bigtable/MetricsTracer', () => { class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); - bigtable = new FakeBigtable({ - meterProvider: new TestMeterProvider(logger), - }); + bigtable = new FakeBigtable( + { + meterProvider: new TestMeterProvider(logger), + }, + new TestDateProvider(logger) + ); async fakeMethod(): Promise { return new Promise((resolve, reject) => { @@ -123,8 +129,7 @@ describe.only('Bigtable/MetricsTracer', () => { this.bigtable.metricsTracerFactory.getMetricsTracer( this, 'fakeMethod', - projectId, - new TestDateProvider(logger) + projectId ); // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. 
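A minimal sketch, assuming the FakeTable/FakeBigtable shapes used in the test above, of the call pattern the two patches above establish: the caller resolves the project ID once with getProjectId_ and passes it to getMetricsTracer, while the factory now owns the single DateProvider shared by every tracer it creates. The method name, error handling, and event sequence below are illustrative only, not a prescribed API.

// Sketch only: `table` stands in for any object satisfying ITabularApiSurface
// whose `bigtable` exposes getProjectId_ and a MetricsTracerFactory.
table.bigtable.getProjectId_((err: Error | null, projectId?: string) => {
  if (err) {
    return; // without a project ID the tracer cannot record metrics
  }
  const tracer = table.bigtable.metricsTracerFactory.getMetricsTracer(
    table,        // ITabularApiSurface: supplies instance id, table id, app profile
    'fakeMethod', // method name recorded as a metric attribute
    projectId     // resolved once here, reused for every metric this tracer records
  );
  tracer.onOperationStart();
  tracer.onAttemptStart();
  // ...stream events drive onStatusReceived / onMetadataReceived / onResponse...
  tracer.onOperationComplete({
    retries: 0,
    finalOperationStatus: 'SUCCESS',
    connectivityErrorCount: 0,
    streamingOperation: 'YES',
  });
});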
From b2bced91ef55abd694c20bd40a3f8638a2ef3af1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:25:39 -0500 Subject: [PATCH 070/289] =?UTF-8?q?In=20the=20metrics=20traceer,=20don?= =?UTF-8?q?=E2=80=99t=20fetch=20the=20project?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit for each call --- .../metrics-tracer-factory.ts | 174 ++++++++---------- 1 file changed, 78 insertions(+), 96 deletions(-) diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 41daa239e..13fc7993d 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -121,9 +121,6 @@ export interface ITabularApiSurface { id: string; bigtable: { appProfileId?: string; - getProjectId_( - callback: (err: Error | null, projectId?: string) => void - ): void; }; } @@ -141,6 +138,7 @@ class MetricsTracer { private cluster: string | null | undefined; private tabularApiSurface: ITabularApiSurface; private methodName: string; + private projectId?: string; private receivedFirstResponse: boolean; private serverTimeRead: boolean; private lastReadTime: DateLike | null; @@ -156,6 +154,7 @@ class MetricsTracer { metrics: Metrics, tabularApiSurface: ITabularApiSurface, methodName: string, + projectId?: string, dateProvider?: DateProvider ) { this.metrics = metrics; @@ -168,6 +167,7 @@ class MetricsTracer { this.receivedFirstResponse = false; this.lastReadTime = null; this.serverTimeRead = false; + this.projectId = projectId; if (dateProvider) { this.dateProvider = dateProvider; } else { @@ -287,21 +287,17 @@ class MetricsTracer { */ onRead() { const currentTime = this.dateProvider.getDate(); + const projectId = this.projectId; if (this.lastReadTime) { - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.lastReadTime) { - const dimensions = this.getBasicDimensions(projectId); - const difference = - currentTime.getTime() - this.lastReadTime.getTime(); - this.metrics.applicationBlockingLatencies.record( - difference, - dimensions - ); - this.lastReadTime = currentTime; - } - } - ); + if (projectId && this.lastReadTime) { + const dimensions = this.getBasicDimensions(projectId); + const difference = currentTime.getTime() - this.lastReadTime.getTime(); + this.metrics.applicationBlockingLatencies.record( + difference, + dimensions + ); + this.lastReadTime = currentTime; + } } else { this.lastReadTime = currentTime; } @@ -313,19 +309,16 @@ class MetricsTracer { */ onAttemptComplete(info: AttemptInfo) { const endTime = this.dateProvider.getDate(); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.attemptStartTime) { - const dimensions = this.getAttemptDimensions( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.attemptLatencies.record(totalTime, dimensions); - } - } - ); + const projectId = this.projectId; + if (projectId && this.attemptStartTime) { + const dimensions = this.getAttemptDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metrics.attemptLatencies.record(totalTime, dimensions); + } } /** @@ -340,22 +333,18 @@ class MetricsTracer { */ onResponse(finalOperationStatus: string) { const endTime = 
this.dateProvider.getDate(); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOpDimensions( - projectId, - finalOperationStatus - ); - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, dimensions); - } - } + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const dimensions = this.getFinalOpDimensions( + projectId, + finalOperationStatus + ); + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.metrics.firstResponseLatencies.record(totalTime, dimensions); } - ); + } } /** @@ -365,47 +354,42 @@ class MetricsTracer { */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; this.onAttemptComplete(info); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId && this.operationStartTime) { - const totalTime = - endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyDimensions = - this.getOperationLatencyDimensions( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.operationLatencies.record( - totalTime, - operationLatencyDimensions - ); - } - if (info.retries) { - // This block records the retry count metrics - const retryCountDimensions = this.getFinalOpDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.retryCount.add(info.retries, retryCountDimensions); - } - if (info.connectivityErrorCount) { - // This block records the connectivity error count metrics - const connectivityCountDimensions = this.getAttemptStatusDimensions( - projectId, - info.finalOperationStatus - ); - this.metrics.connectivityErrorCount.record( - info.connectivityErrorCount, - connectivityCountDimensions - ); - } - } + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
+ const operationLatencyDimensions = this.getOperationLatencyDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.operationLatencies.record( + totalTime, + operationLatencyDimensions + ); } - ); + if (info.retries) { + // This block records the retry count metrics + const retryCountDimensions = this.getFinalOpDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.retryCount.add(info.retries, retryCountDimensions); + } + if (info.connectivityErrorCount) { + // This block records the connectivity error count metrics + const connectivityCountDimensions = this.getAttemptStatusDimensions( + projectId, + info.finalOperationStatus + ); + this.metrics.connectivityErrorCount.record( + info.connectivityErrorCount, + connectivityCountDimensions + ); + } + } } /** @@ -431,18 +415,15 @@ class MetricsTracer { if (!this.serverTimeRead) { this.serverTimeRead = true; const serverTime = parseInt(durationValues[1]); - this.tabularApiSurface.bigtable.getProjectId_( - (err: Error | null, projectId?: string) => { - if (projectId) { - const dimensions = this.getAttemptDimensions( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.serverLatencies.record(serverTime, dimensions); - } - } - ); + const projectId = this.projectId; + if (projectId) { + const dimensions = this.getAttemptDimensions( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + this.metrics.serverLatencies.record(serverTime, dimensions); + } } } } @@ -600,6 +581,7 @@ export class MetricsTracerFactory { metrics, tabularApiSurface, methodName, + projectId, this.dateProvider ); } From e1dd61c97810fa1473d1fd8f7a9a39a27f87cdca Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:47:32 -0500 Subject: [PATCH 071/289] Remove only --- test/metrics-tracer/metrics-tracer.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 04d5cba52..61fc2dbbf 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -84,7 +84,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsTracer', () => { +describe('Bigtable/MetricsTracer', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); class FakeTable { From 9ec98df4a541f47a62fed7afededc7dd7fa8425c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:53:01 -0500 Subject: [PATCH 072/289] Add open telemetry api --- package.json | 1 + 1 file changed, 1 insertion(+) diff --git a/package.json b/package.json index 7b1c30d4a..2dc24800e 100644 --- a/package.json +++ b/package.json @@ -52,6 +52,7 @@ "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", "@google-cloud/promisify": "^4.0.0", + "@opentelemetry/api": "^1.9.0", "@opentelemetry/resources": "^1.30.0", "@opentelemetry/sdk-metrics": "^1.30.0", "arrify": "^2.0.0", From 5a1a3aad7bb8cf41a8a357a00c486bf84900505b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:56:36 -0500 Subject: [PATCH 073/289] Add TestExecuteQuery_EmptyResponse to failures --- testproxy/known_failures.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 979a31f98..8a7be2dcb 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -12,4 +12,5 @@ 
TestReadRows_Retry_WithRoutingCookie_MultipleErrorResponses\| TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| -TestSampleRowKeys_Generic_CloseClient +TestSampleRowKeys_Generic_CloseClient\| +TestExecuteQuery_EmptyResponse From 1bd2d2b9232956ddd2f20eb1a97336daa75aba0a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 15:59:14 -0500 Subject: [PATCH 074/289] TestExecuteQuery_SingleSimpleRow known failures --- testproxy/known_failures.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 8a7be2dcb..664980c39 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,4 +13,5 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| -TestExecuteQuery_EmptyResponse +TestExecuteQuery_EmptyResponse|\ +TestExecuteQuery_SingleSimpleRow From c2be338038f074ec115885ec177a08fa8e3e787b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:09:47 -0500 Subject: [PATCH 075/289] Fix syntax in known failures --- testproxy/known_failures.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 664980c39..fc6e244af 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,5 +13,5 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| -TestExecuteQuery_EmptyResponse|\ +TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From cd0d774f882fdd592a1d66426a354e5cfb6688e0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:14:21 -0500 Subject: [PATCH 076/289] Add two tests to the known failures --- testproxy/known_failures.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index fc6e244af..5b74408c3 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -13,5 +13,7 @@ TestReadRows_Retry_WithRetryInfo\| TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| +TestSampleRowKeys_Generic_Headers\| +TestSampleRowKeys_NoRetry_NoEmptyKey\| TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From e7caf36743beaa29ed57451b9e1e5e0de6b0eeb0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 16:19:46 -0500 Subject: [PATCH 077/289] TestSampleRowKeys_Retry_WithRetryInfo to known fai --- testproxy/known_failures.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/testproxy/known_failures.txt b/testproxy/known_failures.txt index 5b74408c3..81fade6bd 100644 --- a/testproxy/known_failures.txt +++ b/testproxy/known_failures.txt @@ -14,6 +14,7 @@ TestReadRows_Retry_WithRetryInfo_MultipleErrorResponse\| TestSampleRowKeys_Retry_WithRoutingCookie\| TestSampleRowKeys_Generic_CloseClient\| TestSampleRowKeys_Generic_Headers\| +TestSampleRowKeys_Retry_WithRetryInfo\| TestSampleRowKeys_NoRetry_NoEmptyKey\| TestExecuteQuery_EmptyResponse\| TestExecuteQuery_SingleSimpleRow From 7fd86d2aebbe12209dfa405dde1d02ace3b4af34 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:09:29 -0500 Subject: [PATCH 078/289] Change word dimensions 
to attributes --- ...s.ts => client-side-metrics-attributes.ts} | 10 +-- common/test-meter-provider.ts | 14 ++-- .../metrics-tracer-factory.ts | 72 +++++++++---------- 3 files changed, 48 insertions(+), 48 deletions(-) rename common/{client-side-metrics-dimensions.ts => client-side-metrics-attributes.ts} (78%) diff --git a/common/client-side-metrics-dimensions.ts b/common/client-side-metrics-attributes.ts similarity index 78% rename from common/client-side-metrics-dimensions.ts rename to common/client-side-metrics-attributes.ts index 0e06d365b..d5e900f97 100644 --- a/common/client-side-metrics-dimensions.ts +++ b/common/client-side-metrics-attributes.ts @@ -16,7 +16,7 @@ * Dimensions (labels) associated with a Bigtable metric. These * dimensions provide context for the metric values. */ -export interface Dimensions { +export interface Attributes { projectId: string; instanceId: string; table: string; @@ -36,10 +36,10 @@ export interface Dimensions { * The order of dimensions in the output string is fixed: * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName * If a dimension is null or undefined, the empty string is used. - * @param {Dimensions} d The Dimensions object to convert. + * @param {Attributes} a The Dimensions object to convert. * @returns A string representation of the dimensions. */ -export function dimensionsToString(d: Dimensions) { - const p = (dimension?: string | null) => (dimension ? dimension : ''); - return `${p(d.projectId)};${p(d.instanceId)};${p(d.table)};${p(d.cluster)};${p(d.zone)};${p(d.appProfileId)};${p(d.methodName)};${p(d.attemptStatus)};${p(d.finalOperationStatus)};${p(d.streamingOperation)};nodejs-bigtable`; +export function attributesToString(a: Attributes) { + const p = (attribute?: string | null) => (attribute ? attribute : ''); + return `${p(a.projectId)};${p(a.instanceId)};${p(a.table)};${p(a.cluster)};${p(a.zone)};${p(a.appProfileId)};${p(a.methodName)};${p(a.attemptStatus)};${p(a.finalOperationStatus)};${p(a.streamingOperation)};nodejs-bigtable`; } diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts index dc9cb741e..1590fe322 100644 --- a/common/test-meter-provider.ts +++ b/common/test-meter-provider.ts @@ -13,7 +13,7 @@ // limitations under the License. import {WithLogger, WithLoggerAndName} from './logger'; -import {Dimensions, dimensionsToString} from './client-side-metrics-dimensions'; +import {Attributes, attributesToString} from './client-side-metrics-attributes'; /** * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. @@ -61,11 +61,11 @@ class TestCounter extends WithLoggerAndName { /** * Simulates adding a value to the counter. Logs the value and the counter name. * @param {number} value The value to be added to the counter. - * @param {Dimensions} dimensions The dimensions associated with the value. + * @param {Attributes} attributes The attributes associated with the value. */ - add(value: number, dimensions: Dimensions) { + add(value: number, attributes: Attributes) { this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` + `Value added to counter ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` ); } } @@ -78,11 +78,11 @@ class TestHistogram extends WithLoggerAndName { /** * Simulates recording a value in the histogram. Logs the value and the histogram name. 
* @param {number} value The value to be recorded in the histogram. - * @param {Dimensions} dimensions The dimensions associated with the value. + * @param {Attributes} attributes The attributes associated with the value. */ - record(value: number, dimensions: Dimensions) { + record(value: number, attributes: Attributes) { this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} with dimensions ${dimensionsToString(dimensions)}` + `Value added to histogram ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` ); } } diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index 13fc7993d..a355a17a2 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {Dimensions} from '../../common/client-side-metrics-dimensions'; +import {Attributes} from '../../common/client-side-metrics-attributes'; const { MeterProvider, @@ -176,12 +176,12 @@ class MetricsTracer { } /** - * Assembles the basic dimensions for metrics. These dimensions provide + * Assembles the basic attributes for metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param {string} projectId The Google Cloud project ID. - * @returns {object} An object containing the basic dimensions. + * @returns {object} An object containing the basic attributes. */ - private getBasicDimensions(projectId: string) { + private getBasicAttributes(projectId: string) { return { projectId, instanceId: this.tabularApiSurface.instance.id, @@ -195,57 +195,57 @@ class MetricsTracer { } /** - * Assembles the dimensions for operation latency metrics. These dimensions + * Assembles the attributes for operation latency metrics. These attributes * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. * @param {string} finalOperationStatus The final status of the operation. * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns An object containing the dimensions for operation latency metrics. + * @returns An object containing the attributes for operation latency metrics. */ - private getOperationLatencyDimensions( + private getOperationLatencyAttributes( projectId: string, finalOperationStatus: string, streamOperation?: string - ): Dimensions { + ): Attributes { return Object.assign( { finalOperationStatus: finalOperationStatus, streamingOperation: streamOperation, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for final operation metrics. These dimensions provide + * Assembles the attributes for final operation metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param projectId The Google Cloud project ID. * @param finalOperationStatus The final status of the operation. - * @returns An object containing the dimensions for final operation metrics. + * @returns An object containing the attributes for final operation metrics. 
*/ - private getFinalOpDimensions( + private getFinalOpAttributes( projectId: string, finalOperationStatus: string - ): Dimensions { + ): Attributes { return Object.assign( { finalOperationStatus: finalOperationStatus, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for attempt metrics. These dimensions provide context + * Assembles the attributes for attempt metrics. These attributes provide context * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. * @param projectId The Google Cloud project ID. * @param attemptStatus The status of the attempt. * @param streamingOperation Whether the operation was a streaming operation or not. - * @returns An object containing the dimensions for attempt metrics. + * @returns An object containing the attributes for attempt metrics. */ - private getAttemptDimensions( + private getAttemptAttributes( projectId: string, attemptStatus: string, streamingOperation: string @@ -255,23 +255,23 @@ class MetricsTracer { attemptStatus: attemptStatus, streamingOperation: streamingOperation, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } /** - * Assembles the dimensions for attempt status metrics. These dimensions provide context + * Assembles the attributes for attempt status metrics. These attributes provide context * about the Bigtable environment and the operation being performed. * @param projectId The Google Cloud project ID. * @param attemptStatus The status of the attempt. - * @returns An object containing the dimensions for attempt status metrics. + * @returns An object containing the attributes for attempt status metrics. 
*/ - private getAttemptStatusDimensions(projectId: string, attemptStatus: string) { + private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { return Object.assign( { attemptStatus: attemptStatus, }, - this.getBasicDimensions(projectId) + this.getBasicAttributes(projectId) ); } @@ -290,11 +290,11 @@ class MetricsTracer { const projectId = this.projectId; if (this.lastReadTime) { if (projectId && this.lastReadTime) { - const dimensions = this.getBasicDimensions(projectId); + const attributes = this.getBasicAttributes(projectId); const difference = currentTime.getTime() - this.lastReadTime.getTime(); this.metrics.applicationBlockingLatencies.record( difference, - dimensions + attributes ); this.lastReadTime = currentTime; } @@ -311,13 +311,13 @@ class MetricsTracer { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.attemptStartTime) { - const dimensions = this.getAttemptDimensions( + const attributes = this.getAttemptAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.attemptLatencies.record(totalTime, dimensions); + this.metrics.attemptLatencies.record(totalTime, attributes); } } @@ -335,14 +335,14 @@ class MetricsTracer { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.operationStartTime) { - const dimensions = this.getFinalOpDimensions( + const attributes = this.getFinalOpAttributes( projectId, finalOperationStatus ); const totalTime = endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, dimensions); + this.metrics.firstResponseLatencies.record(totalTime, attributes); } } } @@ -360,33 +360,33 @@ class MetricsTracer { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. 
- const operationLatencyDimensions = this.getOperationLatencyDimensions( + const operationLatencyAttributes = this.getOperationLatencyAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); this.metrics.operationLatencies.record( totalTime, - operationLatencyDimensions + operationLatencyAttributes ); } if (info.retries) { // This block records the retry count metrics - const retryCountDimensions = this.getFinalOpDimensions( + const retryCountAttributes = this.getFinalOpAttributes( projectId, info.finalOperationStatus ); - this.metrics.retryCount.add(info.retries, retryCountDimensions); + this.metrics.retryCount.add(info.retries, retryCountAttributes); } if (info.connectivityErrorCount) { // This block records the connectivity error count metrics - const connectivityCountDimensions = this.getAttemptStatusDimensions( + const connectivityCountAttributes = this.getAttemptStatusAttributes( projectId, info.finalOperationStatus ); this.metrics.connectivityErrorCount.record( info.connectivityErrorCount, - connectivityCountDimensions + connectivityCountAttributes ); } } @@ -417,12 +417,12 @@ class MetricsTracer { const serverTime = parseInt(durationValues[1]); const projectId = this.projectId; if (projectId) { - const dimensions = this.getAttemptDimensions( + const attributes = this.getAttemptAttributes( projectId, info.finalOperationStatus, info.streamingOperation ); - this.metrics.serverLatencies.record(serverTime, dimensions); + this.metrics.serverLatencies.record(serverTime, attributes); } } } From db05ff3771b641466816cf4cc36f0c3eef8fd856 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:19:27 -0500 Subject: [PATCH 079/289] Change more docs to use Attributes instead of dim --- common/client-side-metrics-attributes.ts | 14 +++++----- .../observability-options.ts | 26 ++++++++++--------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d5e900f97..d1f31ab63 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -13,8 +13,8 @@ // limitations under the License. /** - * Dimensions (labels) associated with a Bigtable metric. These - * dimensions provide context for the metric values. + * Attributes (labels) associated with a Bigtable metric. These + * attributes provide context for the metric values. */ export interface Attributes { projectId: string; @@ -31,13 +31,13 @@ export interface Attributes { } /** - * Converts a Dimensions object to a string representation. + * Converts an Attributes object to a string representation. * This string representation is suitable for use as labels or tags. - * The order of dimensions in the output string is fixed: + * The order of attributes in the output string is fixed: * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName - * If a dimension is null or undefined, the empty string is used. - * @param {Attributes} a The Dimensions object to convert. - * @returns A string representation of the dimensions. + * If an attribute is null or undefined, the empty string is used. + * @param {Attributes} a The Attributes object to convert. + * @returns A string representation of the attribute. */ export function attributesToString(a: Attributes) { const p = (attribute?: string | null) => (attribute ? 
attribute : ''); diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 7d54fe623..919899827 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {Attributes} from '../../common/client-side-metrics-attributes'; + /** * The Counter interface for recording increments of a metric. */ @@ -19,9 +21,9 @@ interface ICounter { /** * Adds a value to the counter. * @param retries The value to be added to the counter. - * @param dimensions The dimensions associated with this value. + * @param attributes The attributes associated with this value. */ - add(retries: number, dimensions: {}): void; + add(retries: number, attributes: Attributes): void; } /** @@ -30,10 +32,10 @@ interface ICounter { interface IHistogram { /** * Records a value in the histogram. - * @param value The value to be recorded in the histogram. - * @param dimensions The dimensions associated with this value. + * @param {number} value The value to be recorded in the histogram. + * @param attributes The attributes associated with this value. */ - record(value: number, dimensions: {}): void; + record(value: number, attributes: Attributes): void; } /** @@ -42,18 +44,18 @@ interface IHistogram { interface IMeter { /** * Creates a Counter instrument, which counts increments of a given metric. - * @param instrument The name of the counter instrument. - * @param attributes The attributes associated with this counter. + * @param {string} instrument The name of the counter instrument. + * @param {Attributes} attributes The attributes associated with this counter. * @returns {ICounter} A Counter instance. */ - createCounter(instrument: string, attributes: {}): ICounter; + createCounter(instrument: string, attributes: Attributes): ICounter; /** * Creates a Histogram instrument, which records distributions of values for a given metric. - * @param instrument The name of the histogram instrument. - * @param attributes The attributes associated with this histogram. + * @param {string} instrument The name of the histogram instrument. + * @param {Attributes} attributes The attributes associated with this histogram. * @returns {IHistogram} A Histogram instance. */ - createHistogram(instrument: string, attributes: {}): IHistogram; + createHistogram(instrument: string, attributes: Attributes): IHistogram; } /** @@ -62,7 +64,7 @@ interface IMeter { interface IMeterProvider { /** * Returns a Meter, which can be used to create instruments for recording measurements. - * @param name The name of the Meter. + * @param {string} name The name of the Meter. * @returns {IMeter} A Meter instance. */ getMeter(name: string): IMeter; From 9cc4b15930a00ffcc0eee499d02f35698671b5e3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:20:24 -0500 Subject: [PATCH 080/289] attributes --- src/client-side-metrics/observability-options.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts index 919899827..031c169da 100644 --- a/src/client-side-metrics/observability-options.ts +++ b/src/client-side-metrics/observability-options.ts @@ -20,8 +20,8 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; interface ICounter { /** * Adds a value to the counter. 
- * @param retries The value to be added to the counter. - * @param attributes The attributes associated with this value. + * @param {number} retries The value to be added to the counter. + * @param {Attributes} attributes The attributes associated with this value. */ add(retries: number, attributes: Attributes): void; } @@ -33,7 +33,7 @@ interface IHistogram { /** * Records a value in the histogram. * @param {number} value The value to be recorded in the histogram. - * @param attributes The attributes associated with this value. + * @param {Attributes} attributes The attributes associated with this value. */ record(value: number, attributes: Attributes): void; } From 014232925a7e86aad4904b49b8519fe4388fa731 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 20 Jan 2025 17:22:59 -0500 Subject: [PATCH 081/289] Test should use attributes as string --- test/metrics-tracer/typical-method-call.txt | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt index a7208bea5..4fa4fb6f4 100644 --- a/test/metrics-tracer/typical-method-call.txt +++ b/test/metrics-tracer/typical-method-call.txt @@ -4,16 +4,16 @@ getDate call returns 1000 ms getDate call returns 2000 ms 3. Client receives status information. 4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:server_latencies = 101 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable 5. Client receives first row. getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable 6. Client receives metadata. 7. Client receives second row. getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -27,17 +27,17 @@ getDate call returns 8000 ms getDate call returns 9000 ms 16. User reads row 2 getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 17. 
User reads row 3 getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 18. User reads row 4 getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable 19. Stream ends, operation completes getDate call returns 13000 ms getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with dimensions my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable +Value added to counter bigtable.googleapis.com:retry_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable +Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From 15d6e4a1b781ca7af4984d4204f3792d9f6e88c4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 21 Jan 2025 11:25:01 -0500 Subject: [PATCH 082/289] For Windows replace carriage return --- test/metrics-tracer/metrics-tracer.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index 61fc2dbbf..e6f05ef8e 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -204,6 +204,9 @@ describe('Bigtable/MetricsTracer', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual(logger.getMessages().join('\n') + '\n', expectedOutput); + assert.strictEqual( + logger.getMessages().join('\n') + '\n', + expectedOutput.replace(/\r/g, '') + ); }); }); From 865529ea4d732404c8fb9788225ee22c09bba662 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 21 Jan 2025 11:44:15 -0500 Subject: [PATCH 083/289] Update documentation with types --- 
common/logger.ts | 6 +-- .../metrics-tracer-factory.ts | 48 ++++++++++++------- test/metrics-tracer/metrics-tracer.ts | 11 +++-- 3 files changed, 39 insertions(+), 26 deletions(-) diff --git a/common/logger.ts b/common/logger.ts index 82baa0f9c..7b09b8737 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -27,7 +27,7 @@ interface ILogger { export abstract class WithLogger { protected logger: ILogger; /** - * @param logger The logger instance to be used by this object. + * @param {ILogger} logger The logger instance to be used by this object. */ constructor(logger: ILogger) { this.logger = logger; @@ -42,8 +42,8 @@ export abstract class WithLoggerAndName { protected logger: ILogger; protected name: string; /** - * @param logger The logger instance to be used by this object. - * @param name The name associated with this object. + * @param {ILogger} logger The logger instance to be used by this object. + * @param {string} name The name associated with this object. */ constructor(logger: ILogger, name: string) { this.logger = logger; diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index a355a17a2..a4155c241 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -179,7 +179,7 @@ class MetricsTracer { * Assembles the basic attributes for metrics. These attributes provide * context about the Bigtable environment and the operation being performed. * @param {string} projectId The Google Cloud project ID. - * @returns {object} An object containing the basic attributes. + * @returns {Attributes} An object containing the basic attributes. */ private getBasicAttributes(projectId: string) { return { @@ -201,7 +201,7 @@ class MetricsTracer { * @param {string} projectId The Google Cloud project ID. * @param {string} finalOperationStatus The final status of the operation. * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns An object containing the attributes for operation latency metrics. + * @returns {Attributes} An object containing the attributes for operation latency metrics. */ private getOperationLatencyAttributes( projectId: string, @@ -220,9 +220,9 @@ class MetricsTracer { /** * Assembles the attributes for final operation metrics. These attributes provide * context about the Bigtable environment and the operation being performed. - * @param projectId The Google Cloud project ID. - * @param finalOperationStatus The final status of the operation. - * @returns An object containing the attributes for final operation metrics. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @returns {Attributes} An object containing the attributes for final operation metrics. */ private getFinalOpAttributes( projectId: string, @@ -240,10 +240,10 @@ class MetricsTracer { * Assembles the attributes for attempt metrics. These attributes provide context * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. - * @param projectId The Google Cloud project ID. - * @param attemptStatus The status of the attempt. - * @param streamingOperation Whether the operation was a streaming operation or not. - * @returns An object containing the attributes for attempt metrics. + * @param {string} projectId The Google Cloud project ID. 
+ * @param {string} attemptStatus The status of the attempt. + * @param {string} streamingOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for attempt metrics. */ private getAttemptAttributes( projectId: string, @@ -262,9 +262,9 @@ class MetricsTracer { /** * Assembles the attributes for attempt status metrics. These attributes provide context * about the Bigtable environment and the operation being performed. - * @param projectId The Google Cloud project ID. - * @param attemptStatus The status of the attempt. - * @returns An object containing the attributes for attempt status metrics. + * @param {string} projectId The Google Cloud project ID. + * @param {string} attemptStatus The status of the attempt. + * @returns {Attributes} An object containing the attributes for attempt status metrics. */ private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { return Object.assign( @@ -305,7 +305,7 @@ class MetricsTracer { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param info Information about the completed attempt. + * @param {AttemptInfo} info Information about the completed attempt. */ onAttemptComplete(info: AttemptInfo) { const endTime = this.dateProvider.getDate(); @@ -330,6 +330,7 @@ class MetricsTracer { /** * Called when the first response is received. Records first response latencies. + * @param {string} finalOperationStatus The final status of the operation. */ onResponse(finalOperationStatus: string) { const endTime = this.dateProvider.getDate(); @@ -350,7 +351,7 @@ class MetricsTracer { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. - * @param info Information about the completed operation. + * @param {OperationInfo} info Information about the completed operation. */ onOperationComplete(info: OperationInfo) { const endTime = this.dateProvider.getDate(); @@ -394,8 +395,8 @@ class MetricsTracer { /** * Called when metadata is received. Extracts server timing information if available. - * @param info Information about the completed attempt. - * @param metadata The received metadata. + * @param {AttemptInfo} info Information about the completed attempt. + * @param {object} metadata The received metadata. */ onMetadataReceived( info: AttemptInfo, @@ -430,7 +431,7 @@ class MetricsTracer { /** * Called when status information is received. Extracts zone and cluster information. - * @param status The received status information. + * @param {object} status The received status information. */ onStatusReceived(status: { metadata: {internalRepr: Map; options: {}}; @@ -464,7 +465,8 @@ export class MetricsTracerFactory { private dateProvider: DateProvider; /** - * @param observabilityOptions Options for configuring client-side metrics observability. + * @param {DateProvider} dateProvider An object that provides dates for latency measurement. + * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics observability. */ constructor( dateProvider: DateProvider, @@ -474,6 +476,16 @@ export class MetricsTracerFactory { this.dateProvider = dateProvider; } + /** + * Initializes the OpenTelemetry metrics instruments if they haven't been already. + * If metrics already exist, this method returns early. 
Otherwise, it creates and registers + * metric instruments (histograms and counters) for various Bigtable client metrics. + * It handles the creation of a MeterProvider, either using a user-provided one or creating a default one, and + * configures a PeriodicExportingMetricReader for exporting metrics. + * @param {string} [projectId] The Google Cloud project ID. Used for metric export. + * @param {ObservabilityOptions} [observabilityOptions] Options for configuring client-side metrics observability, including a custom MeterProvider. + * @returns {Metrics} An object containing the initialized OpenTelemetry metric instruments. + */ private initialize( projectId?: string, observabilityOptions?: ObservabilityOptions diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-tracer/metrics-tracer.ts index e6f05ef8e..180ad1bf7 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-tracer/metrics-tracer.ts @@ -18,6 +18,7 @@ import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; +import {ObservabilityOptions} from '../../src/client-side-metrics/observability-options'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -27,7 +28,7 @@ class Logger { /** * Logs a message by adding it to the internal message array. - * @param message The message to be logged. + * @param {string} message The message to be logged. */ log(message: string) { this.messages.push(message); @@ -35,7 +36,7 @@ class Logger { /** * Retrieves all logged messages. - * @returns An array of logged messages. + * @returns {string[]} An array of logged messages. */ getMessages() { return this.messages; @@ -50,11 +51,11 @@ class FakeBigtable { appProfileId?: string; metricsTracerFactory: MetricsTracerFactory; /** - * @param observabilityOptions Options for configuring client-side metrics + * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics * observability, including a TestMeterProvider. */ constructor( - observabilityOptions: {meterProvider: TestMeterProvider}, + observabilityOptions: ObservabilityOptions, dateProvider: TestDateProvider ) { this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { @@ -65,7 +66,7 @@ class FakeBigtable { /** * A stubbed method that simulates retrieving the project ID. Always returns * 'my-project'. - * @param callback A callback function that receives the project ID (or an error). + * @param {function} callback A callback function that receives the project ID (or an error). 
*/ getProjectId_( callback: (err: Error | null, projectId?: string) => void From 28fbfd8d5433d31106b0900adf75e440937a64c1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 23 Jan 2025 17:09:12 -0500 Subject: [PATCH 084/289] Add metrics collector --- .../gcp-metrics-handler.ts | 0 src/client-side-metrics/metrics-collector.ts | 383 ++++++++++++++++++ src/client-side-metrics/metrics-handler.ts | 30 ++ .../metrics-tracer-factory.ts | 54 --- 4 files changed, 413 insertions(+), 54 deletions(-) create mode 100644 src/client-side-metrics/gcp-metrics-handler.ts create mode 100644 src/client-side-metrics/metrics-collector.ts create mode 100644 src/client-side-metrics/metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..e69de29bb diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts new file mode 100644 index 000000000..47448ca85 --- /dev/null +++ b/src/client-side-metrics/metrics-collector.ts @@ -0,0 +1,383 @@ +import {Attributes} from '../../common/client-side-metrics-attributes'; +import * as fs from 'fs'; +import {IMetricsHandler} from './metrics-handler'; + +/** + * An interface representing a Date-like object. Provides a `getTime` method + * for retrieving the time value in milliseconds. Used for abstracting time + * in tests. + */ +interface DateLike { + /** + * Returns the time value in milliseconds. + * @returns The time value in milliseconds. + */ + getTime(): number; +} + +/** + * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. + */ +interface DateProvider { + /** + * Returns a DateLike object. + * @returns A DateLike object representing the current time or a fake time value. + */ + getDate(): DateLike; +} + +/** + * The default DateProvider implementation. Returns the current date and time. + */ +class DefaultDateProvider { + /** + * Returns a new Date object representing the current time. + * @returns {Date} The current date and time. + */ + getDate() { + return new Date(); + } +} + +/** + * An interface representing a tabular API surface, such as a Bigtable table. + */ +export interface ITabularApiSurface { + instance: { + id: string; + }; + id: string; + bigtable: { + appProfileId?: string; + }; +} + +/** + * Information about a Bigtable operation. + */ +interface OperationInfo { + /** + * The number of retries attempted for the operation. + */ + retries?: number; + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + /** + * Number of times a connectivity error occurred during the operation. + */ + connectivityErrorCount?: number; + streamingOperation: string; +} + +/** + * Information about a single attempt of a Bigtable operation. + */ +interface AttemptInfo { + /** + * The final status of the attempt (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + /** + * Whether the operation is a streaming operation or not + */ + streamingOperation: string; +} + +const packageJSON = fs.readFileSync('package.json'); +const version = JSON.parse(packageJSON.toString()).version; + +// TODO: Check if metrics tracer method exists. + +/** + * A class for tracing and recording client-side metrics related to Bigtable operations. 
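+ * A collector is driven through its hooks: onOperationStart() when the
+ * operation begins, onAttemptStart() before each RPC attempt,
+ * onMetadataReceived() and onStatusReceived() as transport metadata arrives,
+ * onResponse() for responses (only the first one sets the first-response
+ * latency), onAttemptComplete() after each attempt, and onOperationComplete()
+ * once the operation finishes. Completed attempts and operations are
+ * forwarded to every registered IMetricsHandler.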
+ */ +export class MetricsCollector { + private operationStartTime: DateLike | null; + private attemptStartTime: DateLike | null; + private zone: string | null | undefined; + private cluster: string | null | undefined; + private tabularApiSurface: ITabularApiSurface; + private methodName: string; + private projectId?: string; + private receivedFirstResponse: boolean; + private metricsHandlers: IMetricsHandler[]; + private firstResponseLatency?: number; + private serverTimeRead: boolean; + private serverTime?: number; + private lastReadTime: DateLike | null; + private dateProvider: DateProvider; + + /** + * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. + * @param {string} methodName The name of the method being traced. + * @param {string} projectId The id of the project. + * @param {DateProvider} dateProvider A provider for date/time information (for testing). + */ + constructor( + tabularApiSurface: ITabularApiSurface, + metricsHandlers: IMetricsHandler[], + methodName: string, + projectId?: string, + dateProvider?: DateProvider + ) { + this.zone = null; + this.cluster = null; + this.tabularApiSurface = tabularApiSurface; + this.methodName = methodName; + this.operationStartTime = null; + this.attemptStartTime = null; + this.receivedFirstResponse = false; + this.metricsHandlers = metricsHandlers; + this.lastReadTime = null; + this.serverTimeRead = false; + this.projectId = projectId; + if (dateProvider) { + this.dateProvider = dateProvider; + } else { + this.dateProvider = new DefaultDateProvider(); + } + } + + /** + * Assembles the basic attributes for metrics. These attributes provide + * context about the Bigtable environment and the operation being performed. + * @param {string} projectId The Google Cloud project ID. + * @returns {Attributes} An object containing the basic attributes. + */ + private getBasicAttributes(projectId: string) { + return { + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; + } + + /** + * Assembles the attributes for operation latency metrics. These attributes + * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} finalOperationStatus The final status of the operation. + * @param {string} streamOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for operation latency metrics. + */ + private getOperationLatencyAttributes( + projectId: string, + finalOperationStatus: string, + streamOperation?: string + ): Attributes { + return Object.assign( + { + finalOperationStatus: finalOperationStatus, + streamingOperation: streamOperation, + }, + this.getBasicAttributes(projectId) + ); + } + + /** + * Assembles the attributes for attempt metrics. These attributes provide context + * about the Bigtable environment, the operation being performed, and the status of the attempt. + * Includes whether the operation was a streaming operation or not. + * @param {string} projectId The Google Cloud project ID. + * @param {string} attemptStatus The status of the attempt. 
+ * @param {string} streamingOperation Whether the operation was a streaming operation or not. + * @returns {Attributes} An object containing the attributes for attempt metrics. + */ + private getAttemptAttributes( + projectId: string, + attemptStatus: string, + streamingOperation: string + ) { + return Object.assign( + { + attemptStatus: attemptStatus, + streamingOperation: streamingOperation, + }, + this.getBasicAttributes(projectId) + ); + } + + /** + * Called when the operation starts. Records the start time. + */ + onOperationStart() { + this.operationStartTime = this.dateProvider.getDate(); + } + + /** + * Called after the client reads a row. Records application blocking latencies. + */ + onRead() { + const currentTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (this.lastReadTime) { + if (projectId && this.lastReadTime) { + const attributes = this.getBasicAttributes(projectId); + const difference = currentTime.getTime() - this.lastReadTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onRead) { + metricsHandler.onRead({latency: difference}, attributes); + } + }); + this.lastReadTime = currentTime; + } + } else { + this.lastReadTime = currentTime; + } + } + + /** + * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param {AttemptInfo} info Information about the completed attempt. + */ + onAttemptComplete(info: AttemptInfo) { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.attemptStartTime) { + const attributes = this.getAttemptAttributes( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime, + }, + attributes + ); + } + }); + } + } + + /** + * Called when a new attempt starts. Records the start time of the attempt. + */ + onAttemptStart() { + this.attemptStartTime = this.dateProvider.getDate(); + this.serverTime = undefined; + this.serverTimeRead = false; + this.firstResponseLatency = undefined; + this.receivedFirstResponse = false; + } + + /** + * Called when the first response is received. Records first response latencies. + * @param {string} finalOperationStatus The final status of the operation. + */ + onResponse() { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + this.firstResponseLatency = totalTime; + } + } + } + + /** + * Called when an operation completes (successfully or unsuccessfully). + * Records operation latencies, retry counts, and connectivity error counts. + * @param {OperationInfo} info Information about the completed operation. + */ + onOperationComplete(info: OperationInfo) { + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + this.onAttemptComplete(info); + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
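+        // The operation latency is the wall-clock time from onOperationStart()
+        // to now; firstResponseLatency, retryCount and connectivityErrorCount
+        // are bundled into the same payload and forwarded to every registered
+        // IMetricsHandler that implements onOperationComplete.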
+ const operationLatencyAttributes = this.getOperationLatencyAttributes( + projectId, + info.finalOperationStatus, + info.streamingOperation + ); + const metrics = { + operationLatency: totalTime, + firstResponseLatency: this.firstResponseLatency, + retryCount: info.retries, + connectivityErrorCount: info.connectivityErrorCount, + }; + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, + operationLatencyAttributes + ); + } + }); + } + } + } + + /** + * Called when metadata is received. Extracts server timing information if available. + * @param {AttemptInfo} info Information about the completed attempt. + * @param {object} metadata The received metadata. + */ + onMetadataReceived( + info: AttemptInfo, + metadata: { + internalRepr: Map; + options: {}; + } + ) { + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const durationValues = mappedEntries.get('server-timing')?.split('dur='); + if (durationValues && durationValues[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + const serverTime = parseInt(durationValues[1]); + const projectId = this.projectId; + if (projectId) { + this.serverTime = serverTime; + } + } + } + } + + /** + * Called when status information is received. Extracts zone and cluster information. + * @param {object} status The received status information. + */ + onStatusReceived(status: { + metadata: {internalRepr: Map; options: {}}; + }) { + const mappedEntries = new Map( + Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const instanceInformation = mappedEntries + .get('x-goog-ext-425905942-bin') + ?.replace(new RegExp('\\n', 'g'), '') + .split('\r'); + if (instanceInformation && instanceInformation[0]) { + this.zone = instanceInformation[0]; + } + if (instanceInformation && instanceInformation[1]) { + this.cluster = instanceInformation[1]; + } + } +} diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts new file mode 100644 index 000000000..7247e3697 --- /dev/null +++ b/src/client-side-metrics/metrics-handler.ts @@ -0,0 +1,30 @@ +import {Attributes} from '../../common/client-side-metrics-attributes'; + +interface onOperationCompleteMetrics { + operationLatency: number; + retryCount?: number; +} + +interface onAttemptCompleteMetrics { + attemptLatency: number; + serverLatency?: number; + firstResponseLatency?: number; + connectivityErrorCount?: number; +} + +interface onReadMetrics { + latency: number; +} + +// TODO: Trim attributes so only necessary attributes are required. 
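+//
+// A handler may implement any subset of these optional hooks. A minimal
+// hypothetical handler (illustrative sketch only, not part of the library)
+// could look like:
+//
+//   class ConsoleMetricsHandler implements IMetricsHandler {
+//     onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) {
+//       console.log(`${attributes.methodName}: ${metrics.attemptLatency} ms`);
+//     }
+//   }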
+export interface IMetricsHandler { + onOperationComplete?( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ): void; + onRead?(metrics: onReadMetrics, attributes: Attributes): void; + onAttemptComplete?( + metrics: onAttemptCompleteMetrics, + attributes: Attributes + ): void; +} diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts index a4155c241..2d54df6be 100644 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ b/src/client-side-metrics/metrics-tracer-factory.ts @@ -24,7 +24,6 @@ import * as Resources from '@opentelemetry/resources'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {ObservabilityOptions} from './observability-options'; -import * as fs from 'fs'; /** * Information about a Bigtable operation. @@ -74,59 +73,6 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -/** - * An interface representing a Date-like object. Provides a `getTime` method - * for retrieving the time value in milliseconds. Used for abstracting time - * in tests. - */ -interface DateLike { - /** - * Returns the time value in milliseconds. - * @returns The time value in milliseconds. - */ - getTime(): number; -} - -/** - * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. - */ -interface DateProvider { - /** - * Returns a DateLike object. - * @returns A DateLike object representing the current time or a fake time value. - */ - getDate(): DateLike; -} - -/** - * The default DateProvider implementation. Returns the current date and time. - */ -class DefaultDateProvider { - /** - * Returns a new Date object representing the current time. - * @returns {Date} The current date and time. - */ - getDate() { - return new Date(); - } -} - -/** - * An interface representing a tabular API surface, such as a Bigtable table. - */ -export interface ITabularApiSurface { - instance: { - id: string; - }; - id: string; - bigtable: { - appProfileId?: string; - }; -} - -const packageJSON = fs.readFileSync('package.json'); -const version = JSON.parse(packageJSON.toString()).version; - /** * A class for tracing and recording client-side metrics related to Bigtable operations. 
*/ From 5995789849d9201caba13785f3abd9e2abeaac09 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 10:49:53 -0500 Subject: [PATCH 085/289] Metrics handler, GCPMetricsHandler and tests add --- common/test-metrics-handler.ts | 39 ++ .../gcp-metrics-handler.ts | 144 +++++ src/client-side-metrics/metrics-collector.ts | 24 - src/client-side-metrics/metrics-handler.ts | 9 +- .../metrics-tracer-factory.ts | 546 ------------------ .../metrics-collector.ts} | 82 +-- .../metrics-collector/typical-method-call.txt | 35 ++ test/metrics-tracer/typical-method-call.txt | 43 -- 8 files changed, 249 insertions(+), 673 deletions(-) create mode 100644 common/test-metrics-handler.ts delete mode 100644 src/client-side-metrics/metrics-tracer-factory.ts rename test/{metrics-tracer/metrics-tracer.ts => metrics-collector/metrics-collector.ts} (70%) create mode 100644 test/metrics-collector/typical-method-call.txt delete mode 100644 test/metrics-tracer/typical-method-call.txt diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts new file mode 100644 index 000000000..737168edb --- /dev/null +++ b/common/test-metrics-handler.ts @@ -0,0 +1,39 @@ +import {WithLogger} from './logger'; +import { + onAttemptCompleteMetrics, + onOperationCompleteMetrics, +} from '../src/client-side-metrics/metrics-handler'; +import {Attributes} from './client-side-metrics-attributes'; + +/** + * A test implementation of the IMetricsHandler interface. Used for testing purposes. + * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. + */ +export class TestMetricsHandler extends WithLogger { + /** + * Logs the metrics and attributes received for an operation completion. + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ + onOperationComplete( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ) { + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onOperationComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + /** + * Logs the metrics and attributes received for an attempt completion. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. 
+ */ + onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onAttemptComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } +} diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e69de29bb..aea683fe1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,144 @@ +import { + IMetricsHandler, + onAttemptCompleteMetrics, + onOperationCompleteMetrics, +} from './metrics-handler'; +import * as Resources from '@opentelemetry/resources'; +import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {Attributes} from '../../common/client-side-metrics-attributes'; +const { + MeterProvider, + Histogram, + Counter, + PeriodicExportingMetricReader, +} = require('@opentelemetry/sdk-metrics'); + +/** + * A collection of OpenTelemetry metric instruments used to record + * Bigtable client-side metrics. + */ +interface Metrics { + operationLatencies: typeof Histogram; + attemptLatencies: typeof Histogram; + retryCount: typeof Counter; + applicationBlockingLatencies: typeof Histogram; + firstResponseLatencies: typeof Histogram; + serverLatencies: typeof Histogram; + connectivityErrorCount: typeof Histogram; + clientBlockingLatencies: typeof Histogram; +} + +export class GCPMetricsHandler implements IMetricsHandler { + private initialized = false; + private otelMetrics?: Metrics; + + private initialize(projectId?: string) { + if (!this.initialized) { + this.initialized = true; + // Use MeterProvider provided by user + // If MeterProvider was not provided then use the default meter provider. + const meterProvider = new MeterProvider({ + // This is the default meter provider + // Create a resource. Fill the `service.*` attributes in with real values for your service. + // GcpDetectorSync will add in resource information about the current environment if you are + // running on GCP. These resource attributes will be translated to a specific GCP monitored + // resource if running on GCP. Otherwise, metrics will be sent with monitored resource + // `generic_task`. + resource: new Resources.Resource({ + 'service.name': 'bigtable-metrics', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. + exportIntervalMillis: 100_000, + exporter: new MetricExporter({ + projectId, + }), + }), + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.otelMetrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + } + } + + onOperationComplete( + metrics: onOperationCompleteMetrics, + attributes: Attributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + } + onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + this.initialize(); + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, + attributes + ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); + } +} diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 47448ca85..e7b4d7da9 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -106,7 +106,6 @@ export class MetricsCollector { private firstResponseLatency?: number; private serverTimeRead: boolean; private serverTime?: number; - private lastReadTime: DateLike | null; private dateProvider: DateProvider; /** @@ -130,7 +129,6 @@ export class MetricsCollector { this.attemptStartTime = null; this.receivedFirstResponse = false; this.metricsHandlers = metricsHandlers; - this.lastReadTime = null; this.serverTimeRead = false; this.projectId = projectId; if (dateProvider) { @@ -212,28 +210,6 @@ export class MetricsCollector { this.operationStartTime = this.dateProvider.getDate(); } - /** - * Called after the client reads a row. Records application blocking latencies. 
- */ - onRead() { - const currentTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (this.lastReadTime) { - if (projectId && this.lastReadTime) { - const attributes = this.getBasicAttributes(projectId); - const difference = currentTime.getTime() - this.lastReadTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRead) { - metricsHandler.onRead({latency: difference}, attributes); - } - }); - this.lastReadTime = currentTime; - } - } else { - this.lastReadTime = currentTime; - } - } - /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param {AttemptInfo} info Information about the completed attempt. diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 7247e3697..a3ae4840d 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,28 +1,23 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; -interface onOperationCompleteMetrics { +export interface onOperationCompleteMetrics { operationLatency: number; retryCount?: number; } -interface onAttemptCompleteMetrics { +export interface onAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; firstResponseLatency?: number; connectivityErrorCount?: number; } -interface onReadMetrics { - latency: number; -} - // TODO: Trim attributes so only necessary attributes are required. export interface IMetricsHandler { onOperationComplete?( metrics: onOperationCompleteMetrics, attributes: Attributes ): void; - onRead?(metrics: onReadMetrics, attributes: Attributes): void; onAttemptComplete?( metrics: onAttemptCompleteMetrics, attributes: Attributes diff --git a/src/client-side-metrics/metrics-tracer-factory.ts b/src/client-side-metrics/metrics-tracer-factory.ts deleted file mode 100644 index 2d54df6be..000000000 --- a/src/client-side-metrics/metrics-tracer-factory.ts +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Attributes} from '../../common/client-side-metrics-attributes'; - -const { - MeterProvider, - Histogram, - Counter, - PeriodicExportingMetricReader, -} = require('@opentelemetry/sdk-metrics'); -import * as Resources from '@opentelemetry/resources'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {ObservabilityOptions} from './observability-options'; - -/** - * Information about a Bigtable operation. - */ -interface OperationInfo { - /** - * The number of retries attempted for the operation. - */ - retries?: number; - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Number of times a connectivity error occurred during the operation. 
- */ - connectivityErrorCount?: number; - streamingOperation: string; -} - -/** - * Information about a single attempt of a Bigtable operation. - */ -interface AttemptInfo { - /** - * The final status of the attempt (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Whether the operation is a streaming operation or not - */ - streamingOperation: string; -} - -/** - * A collection of OpenTelemetry metric instruments used to record - * Bigtable client-side metrics. - */ -interface Metrics { - operationLatencies: typeof Histogram; - attemptLatencies: typeof Histogram; - retryCount: typeof Counter; - applicationBlockingLatencies: typeof Histogram; - firstResponseLatencies: typeof Histogram; - serverLatencies: typeof Histogram; - connectivityErrorCount: typeof Histogram; - clientBlockingLatencies: typeof Histogram; -} - -/** - * A class for tracing and recording client-side metrics related to Bigtable operations. - */ -class MetricsTracer { - private operationStartTime: DateLike | null; - private attemptStartTime: DateLike | null; - private metrics: Metrics; - private zone: string | null | undefined; - private cluster: string | null | undefined; - private tabularApiSurface: ITabularApiSurface; - private methodName: string; - private projectId?: string; - private receivedFirstResponse: boolean; - private serverTimeRead: boolean; - private lastReadTime: DateLike | null; - private dateProvider: DateProvider; - - /** - * @param metrics The metrics instruments to record data with. - * @param tabularApiSurface Information about the Bigtable table being accessed. - * @param methodName The name of the method being traced. - * @param dateProvider A provider for date/time information (for testing). - */ - constructor( - metrics: Metrics, - tabularApiSurface: ITabularApiSurface, - methodName: string, - projectId?: string, - dateProvider?: DateProvider - ) { - this.metrics = metrics; - this.zone = null; - this.cluster = null; - this.tabularApiSurface = tabularApiSurface; - this.methodName = methodName; - this.operationStartTime = null; - this.attemptStartTime = null; - this.receivedFirstResponse = false; - this.lastReadTime = null; - this.serverTimeRead = false; - this.projectId = projectId; - if (dateProvider) { - this.dateProvider = dateProvider; - } else { - this.dateProvider = new DefaultDateProvider(); - } - } - - /** - * Assembles the basic attributes for metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @returns {Attributes} An object containing the basic attributes. - */ - private getBasicAttributes(projectId: string) { - return { - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - } - - /** - * Assembles the attributes for operation latency metrics. These attributes - * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. - * Includes whether the operation was a streaming operation or not. - * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @param {string} streamOperation Whether the operation was a streaming operation or not. 
- * @returns {Attributes} An object containing the attributes for operation latency metrics. - */ - private getOperationLatencyAttributes( - projectId: string, - finalOperationStatus: string, - streamOperation?: string - ): Attributes { - return Object.assign( - { - finalOperationStatus: finalOperationStatus, - streamingOperation: streamOperation, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for final operation metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @returns {Attributes} An object containing the attributes for final operation metrics. - */ - private getFinalOpAttributes( - projectId: string, - finalOperationStatus: string - ): Attributes { - return Object.assign( - { - finalOperationStatus: finalOperationStatus, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, and the status of the attempt. - * Includes whether the operation was a streaming operation or not. - * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @param {string} streamingOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for attempt metrics. - */ - private getAttemptAttributes( - projectId: string, - attemptStatus: string, - streamingOperation: string - ) { - return Object.assign( - { - attemptStatus: attemptStatus, - streamingOperation: streamingOperation, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt status metrics. These attributes provide context - * about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @returns {Attributes} An object containing the attributes for attempt status metrics. - */ - private getAttemptStatusAttributes(projectId: string, attemptStatus: string) { - return Object.assign( - { - attemptStatus: attemptStatus, - }, - this.getBasicAttributes(projectId) - ); - } - - /** - * Called when the operation starts. Records the start time. - */ - onOperationStart() { - this.operationStartTime = this.dateProvider.getDate(); - } - - /** - * Called after the client reads a row. Records application blocking latencies. - */ - onRead() { - const currentTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (this.lastReadTime) { - if (projectId && this.lastReadTime) { - const attributes = this.getBasicAttributes(projectId); - const difference = currentTime.getTime() - this.lastReadTime.getTime(); - this.metrics.applicationBlockingLatencies.record( - difference, - attributes - ); - this.lastReadTime = currentTime; - } - } else { - this.lastReadTime = currentTime; - } - } - - /** - * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptInfo} info Information about the completed attempt. 
- */ - onAttemptComplete(info: AttemptInfo) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metrics.attemptLatencies.record(totalTime, attributes); - } - } - - /** - * Called when a new attempt starts. Records the start time of the attempt. - */ - onAttemptStart() { - this.attemptStartTime = this.dateProvider.getDate(); - } - - /** - * Called when the first response is received. Records first response latencies. - * @param {string} finalOperationStatus The final status of the operation. - */ - onResponse(finalOperationStatus: string) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const attributes = this.getFinalOpAttributes( - projectId, - finalOperationStatus - ); - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.metrics.firstResponseLatencies.record(totalTime, attributes); - } - } - } - - /** - * Called when an operation completes (successfully or unsuccessfully). - * Records operation latencies, retry counts, and connectivity error counts. - * @param {OperationInfo} info Information about the completed operation. - */ - onOperationComplete(info: OperationInfo) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - this.onAttemptComplete(info); - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationLatencyAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.operationLatencies.record( - totalTime, - operationLatencyAttributes - ); - } - if (info.retries) { - // This block records the retry count metrics - const retryCountAttributes = this.getFinalOpAttributes( - projectId, - info.finalOperationStatus - ); - this.metrics.retryCount.add(info.retries, retryCountAttributes); - } - if (info.connectivityErrorCount) { - // This block records the connectivity error count metrics - const connectivityCountAttributes = this.getAttemptStatusAttributes( - projectId, - info.finalOperationStatus - ); - this.metrics.connectivityErrorCount.record( - info.connectivityErrorCount, - connectivityCountAttributes - ); - } - } - } - - /** - * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptInfo} info Information about the completed attempt. - * @param {object} metadata The received metadata. 
- */ - onMetadataReceived( - info: AttemptInfo, - metadata: { - internalRepr: Map; - options: {}; - } - ) { - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const durationValues = mappedEntries.get('server-timing')?.split('dur='); - if (durationValues && durationValues[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - const serverTime = parseInt(durationValues[1]); - const projectId = this.projectId; - if (projectId) { - const attributes = this.getAttemptAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); - this.metrics.serverLatencies.record(serverTime, attributes); - } - } - } - } - - /** - * Called when status information is received. Extracts zone and cluster information. - * @param {object} status The received status information. - */ - onStatusReceived(status: { - metadata: {internalRepr: Map; options: {}}; - }) { - const mappedEntries = new Map( - Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const instanceInformation = mappedEntries - .get('x-goog-ext-425905942-bin') - ?.replace(new RegExp('\\n', 'g'), '') - .split('\r'); - if (instanceInformation && instanceInformation[0]) { - this.zone = instanceInformation[0]; - } - if (instanceInformation && instanceInformation[1]) { - this.cluster = instanceInformation[1]; - } - } -} - -/** - * A factory class for creating MetricsTracer instances. Initializes - * OpenTelemetry metrics instruments. - */ -export class MetricsTracerFactory { - private metrics?: Metrics; - private observabilityOptions?: ObservabilityOptions; - private dateProvider: DateProvider; - - /** - * @param {DateProvider} dateProvider An object that provides dates for latency measurement. - * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics observability. - */ - constructor( - dateProvider: DateProvider, - observabilityOptions?: ObservabilityOptions - ) { - this.observabilityOptions = observabilityOptions; - this.dateProvider = dateProvider; - } - - /** - * Initializes the OpenTelemetry metrics instruments if they haven't been already. - * If metrics already exist, this method returns early. Otherwise, it creates and registers - * metric instruments (histograms and counters) for various Bigtable client metrics. - * It handles the creation of a MeterProvider, either using a user-provided one or creating a default one, and - * configures a PeriodicExportingMetricReader for exporting metrics. - * @param {string} [projectId] The Google Cloud project ID. Used for metric export. - * @param {ObservabilityOptions} [observabilityOptions] Options for configuring client-side metrics observability, including a custom MeterProvider. - * @returns {Metrics} An object containing the initialized OpenTelemetry metric instruments. - */ - private initialize( - projectId?: string, - observabilityOptions?: ObservabilityOptions - ) { - if (this.metrics) { - return this.metrics; - } else { - // Use MeterProvider provided by user - // If MeterProvider was not provided then use the default meter provider. - const meterProvider = - observabilityOptions && observabilityOptions.meterProvider - ? observabilityOptions.meterProvider - : new MeterProvider({ - // This is the default meter provider - // Create a resource. Fill the `service.*` attributes in with real values for your service. 
- // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 10_000, - exporter: new MetricExporter({ - projectId, - }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.metrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { - description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { - description: - 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; - return this.metrics; - } - } - - /** - * Creates a new MetricsTracer instance. - * @param tabularApiSurface The Bigtable table being accessed. - * @param methodName The name of the method being traced. - * @param dateProvider An optional DateProvider for testing purposes. - * @param {string} projectId The project id - * @returns A new MetricsTracer instance. 
- */ - getMetricsTracer( - tabularApiSurface: ITabularApiSurface, - methodName: string, - projectId?: string - ) { - const metrics = this.initialize(projectId, this.observabilityOptions); - return new MetricsTracer( - metrics, - tabularApiSurface, - methodName, - projectId, - this.dateProvider - ); - } -} diff --git a/test/metrics-tracer/metrics-tracer.ts b/test/metrics-collector/metrics-collector.ts similarity index 70% rename from test/metrics-tracer/metrics-tracer.ts rename to test/metrics-collector/metrics-collector.ts index 180ad1bf7..34e7fa86a 100644 --- a/test/metrics-tracer/metrics-tracer.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -13,12 +13,11 @@ // limitations under the License. import {describe} from 'mocha'; -import {MetricsTracerFactory} from '../../src/client-side-metrics/metrics-tracer-factory'; -import {TestMeterProvider} from '../../common/test-meter-provider'; import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -import {ObservabilityOptions} from '../../src/client-side-metrics/observability-options'; +import {TestMetricsHandler} from '../../common/test-metrics-handler'; +import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -49,19 +48,6 @@ class Logger { */ class FakeBigtable { appProfileId?: string; - metricsTracerFactory: MetricsTracerFactory; - /** - * @param {ObservabilityOptions} observabilityOptions Options for configuring client-side metrics - * observability, including a TestMeterProvider. - */ - constructor( - observabilityOptions: ObservabilityOptions, - dateProvider: TestDateProvider - ) { - this.metricsTracerFactory = new MetricsTracerFactory(dateProvider, { - meterProvider: observabilityOptions.meterProvider, - }); - } /** * A stubbed method that simulates retrieving the project ID. Always returns @@ -85,21 +71,17 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsTracer', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); + const metricsHandlers = [new TestMetricsHandler(logger)]; class FakeTable { id = 'fakeTableId'; instance = new FakeInstance(); - bigtable = new FakeBigtable( - { - meterProvider: new TestMeterProvider(logger), - }, - new TestDateProvider(logger) - ); + bigtable = new FakeBigtable(); async fakeMethod(): Promise { - return new Promise((resolve, reject) => { + return new Promise(resolve => { this.bigtable.getProjectId_((err, projectId) => { const standardAttemptInfo = { finalOperationStatus: 'PENDING', @@ -126,68 +108,62 @@ describe('Bigtable/MetricsTracer', () => { options: {}, }, }; - const metricsTracer = - this.bigtable.metricsTracerFactory.getMetricsTracer( - this, - 'fakeMethod', - projectId - ); + const metricsCollector = new MetricsCollector( + this, + metricsHandlers, + 'fakeMethod', + projectId, + new TestDateProvider(logger) + ); // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: logger.log('1. The operation starts'); - metricsTracer.onOperationStart(); + metricsCollector.onOperationStart(); logger.log('2. The attempt starts.'); - metricsTracer.onAttemptStart(); + metricsCollector.onAttemptStart(); logger.log('3. 
Client receives status information.'); - metricsTracer.onStatusReceived(status); + metricsCollector.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('101') ); logger.log('5. Client receives first row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('6. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('102') ); logger.log('7. Client receives second row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); - metricsTracer.onAttemptComplete({ + metricsCollector.onAttemptComplete({ finalOperationStatus: 'ERROR', streamingOperation: 'YES', }); logger.log('9. After a timeout, the second attempt is made.'); - metricsTracer.onAttemptStart(); + metricsCollector.onAttemptStart(); logger.log('10. Client receives status information.'); - metricsTracer.onStatusReceived(status); + metricsCollector.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( standardAttemptInfo, createMetadata('103') ); logger.log('12. Client receives third row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('13. Client receives metadata.'); - metricsTracer.onMetadataReceived( + metricsCollector.onMetadataReceived( {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, createMetadata('104') ); logger.log('14. Client receives fourth row.'); - metricsTracer.onResponse('PENDING'); + metricsCollector.onResponse(); logger.log('15. User reads row 1'); - metricsTracer.onRead(); - logger.log('16. User reads row 2'); - metricsTracer.onRead(); - logger.log('17. User reads row 3'); - metricsTracer.onRead(); - logger.log('18. User reads row 4'); - metricsTracer.onRead(); logger.log('19. Stream ends, operation completes'); - metricsTracer.onOperationComplete({ + metricsCollector.onOperationComplete({ retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, @@ -201,7 +177,7 @@ describe('Bigtable/MetricsTracer', () => { const table = new FakeTable(); await table.fakeMethod(); const expectedOutput = fs.readFileSync( - './test/metrics-tracer/typical-method-call.txt', + './test/metrics-collector/typical-method-call.txt', 'utf8' ); // Ensure events occurred in the right order here: diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt new file mode 100644 index 000000000..9f10b0416 --- /dev/null +++ b/test/metrics-collector/typical-method-call.txt @@ -0,0 +1,35 @@ +1. The operation starts +getDate call returns 1000 ms +2. The attempt starts. +getDate call returns 2000 ms +3. Client receives status information. +4. Client receives metadata. +5. Client receives first row. +getDate call returns 3000 ms +6. Client receives metadata. +7. Client receives second row. +getDate call returns 4000 ms +8. A transient error occurs. +getDate call returns 5000 ms +Recording parameters for onAttemptComplete: +metrics: {"attemptLatency":3000,"serverLatency":101} +attributes: {"attemptStatus":"ERROR","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +9. 
After a timeout, the second attempt is made. +getDate call returns 6000 ms +10. Client receives status information. +11. Client receives metadata. +12. Client receives third row. +getDate call returns 7000 ms +13. Client receives metadata. +14. Client receives fourth row. +getDate call returns 8000 ms +15. User reads row 1 +19. Stream ends, operation completes +getDate call returns 9000 ms +getDate call returns 10000 ms +Recording parameters for onAttemptComplete: +metrics: {"attemptLatency":4000,"serverLatency":103} +attributes: {"attemptStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +Recording parameters for onOperationComplete: +metrics: {"operationLatency":8000,"firstResponseLatency":6000,"retryCount":1,"connectivityErrorCount":1} +attributes: {"finalOperationStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} diff --git a/test/metrics-tracer/typical-method-call.txt b/test/metrics-tracer/typical-method-call.txt deleted file mode 100644 index 4fa4fb6f4..000000000 --- a/test/metrics-tracer/typical-method-call.txt +++ /dev/null @@ -1,43 +0,0 @@ -1. The operation starts -getDate call returns 1000 ms -2. The attempt starts. -getDate call returns 2000 ms -3. Client receives status information. -4. Client receives metadata. -Value added to histogram bigtable.googleapis.com:server_latencies = 101 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;PENDING;;YES;nodejs-bigtable -5. Client receives first row. -getDate call returns 3000 ms -Value added to histogram bigtable.googleapis.com:first_response_latencies = 2000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;PENDING;;nodejs-bigtable -6. Client receives metadata. -7. Client receives second row. -getDate call returns 4000 ms -8. A transient error occurs. -getDate call returns 5000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 3000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;ERROR;;YES;nodejs-bigtable -9. After a timeout, the second attempt is made. -getDate call returns 6000 ms -10. Client receives status information. -11. Client receives metadata. -12. Client receives third row. -getDate call returns 7000 ms -13. Client receives metadata. -14. Client receives fourth row. -getDate call returns 8000 ms -15. User reads row 1 -getDate call returns 9000 ms -16. User reads row 2 -getDate call returns 10000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -17. User reads row 3 -getDate call returns 11000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -18. User reads row 4 -getDate call returns 12000 ms -Value added to histogram bigtable.googleapis.com:application_blocking_latencies = 1000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;;;nodejs-bigtable -19. 
Stream ends, operation completes -getDate call returns 13000 ms -getDate call returns 14000 ms -Value added to histogram bigtable.googleapis.com:attempt_latencies = 8000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;YES;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:operation_latencies = 12000 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;YES;nodejs-bigtable -Value added to counter bigtable.googleapis.com:retry_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;;SUCCESS;;nodejs-bigtable -Value added to histogram bigtable.googleapis.com:connectivity_error_count = 1 with attributes my-project;fakeInstanceId;fakeTableId;fake-cluster3;us-west1-c ;;fakeMethod;SUCCESS;;;nodejs-bigtable From a62b124bc0a04d347f2cbf18b65150ea75100eab Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 10:51:45 -0500 Subject: [PATCH 086/289] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 34e7fa86a..734b3334f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -71,7 +71,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; From 0996d3cd1d3a8047ec511d229f6cade2e66b5ab7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:15:31 -0500 Subject: [PATCH 087/289] Add metrics handlers parameter to Doc --- src/client-side-metrics/metrics-collector.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e7b4d7da9..7b71e53ca 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -110,6 +110,7 @@ export class MetricsCollector { /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. + * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. * @param {string} methodName The name of the method being traced. * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). 
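For illustration, a minimal sketch of how a caller could plug a custom handler into the collector as the interfaces stand at this point in the series (IMetricsHandler, onOperationCompleteMetrics, onAttemptCompleteMetrics and Attributes as defined in the hunks above). The ConsoleMetricsHandler name, the file location implied by the relative imports, and the console-based logging are hypothetical, not part of these patches:

import {
  IMetricsHandler,
  onOperationCompleteMetrics,
  onAttemptCompleteMetrics,
} from './metrics-handler';
import {Attributes} from '../../common/client-side-metrics-attributes';

// A hypothetical handler that logs every recorded data point instead of
// exporting it. Both methods are optional on IMetricsHandler, so a handler
// may implement either or both.
class ConsoleMetricsHandler implements IMetricsHandler {
  onOperationComplete(
    metrics: onOperationCompleteMetrics,
    attributes: Attributes
  ) {
    // operationLatency is required; retryCount may be undefined.
    console.log(
      `operation ${attributes.methodName} finished in ${metrics.operationLatency} ms`,
      metrics.retryCount
    );
  }

  onAttemptComplete(
    metrics: onAttemptCompleteMetrics,
    attributes: Attributes
  ) {
    // serverLatency, firstResponseLatency and connectivityErrorCount are optional.
    console.log(
      `attempt for ${attributes.methodName} took ${metrics.attemptLatency} ms`,
      metrics.serverLatency
    );
  }
}

The handler list would then be passed to the collector the same way the test above does, e.g. new MetricsCollector(table, [new ConsoleMetricsHandler()], 'readRows', projectId, dateProvider), so that GCPMetricsHandler and user-supplied handlers can be mixed in one array.
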
From ef8e3fe28103bcfe53aff72a7de836d479749595 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:51:39 -0500 Subject: [PATCH 088/289] =?UTF-8?q?Don=E2=80=99t=20require=20retries=20to?= =?UTF-8?q?=20be=20passed=20into=20metrics?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit collector --- src/client-side-metrics/metrics-collector.ts | 19 +++++++----------- test/metrics-collector/metrics-collector.ts | 21 ++++---------------- 2 files changed, 11 insertions(+), 29 deletions(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 7b71e53ca..f537f48ee 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -56,10 +56,6 @@ export interface ITabularApiSurface { * Information about a Bigtable operation. */ interface OperationInfo { - /** - * The number of retries attempted for the operation. - */ - retries?: number; /** * The final status of the operation (e.g., 'OK', 'ERROR'). */ @@ -101,6 +97,7 @@ export class MetricsCollector { private tabularApiSurface: ITabularApiSurface; private methodName: string; private projectId?: string; + private attemptCount = 0; private receivedFirstResponse: boolean; private metricsHandlers: IMetricsHandler[]; private firstResponseLatency?: number; @@ -216,6 +213,7 @@ export class MetricsCollector { * @param {AttemptInfo} info Information about the completed attempt. */ onAttemptComplete(info: AttemptInfo) { + this.attemptCount++; const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.attemptStartTime) { @@ -287,7 +285,7 @@ export class MetricsCollector { const metrics = { operationLatency: totalTime, firstResponseLatency: this.firstResponseLatency, - retryCount: info.retries, + retryCount: this.attemptCount - 1, connectivityErrorCount: info.connectivityErrorCount, }; this.metricsHandlers.forEach(metricsHandler => { @@ -307,13 +305,10 @@ export class MetricsCollector { * @param {AttemptInfo} info Information about the completed attempt. * @param {object} metadata The received metadata. */ - onMetadataReceived( - info: AttemptInfo, - metadata: { - internalRepr: Map; - options: {}; - } - ) { + onMetadataReceived(metadata: { + internalRepr: Map; + options: {}; + }) { const mappedEntries = new Map( Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 734b3334f..02b3f91ed 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -125,17 +125,11 @@ describe('Bigtable/MetricsCollector', () => { logger.log('3. Client receives status information.'); metricsCollector.onStatusReceived(status); logger.log('4. Client receives metadata.'); - metricsCollector.onMetadataReceived( - standardAttemptInfo, - createMetadata('101') - ); + metricsCollector.onMetadataReceived(createMetadata('101')); logger.log('5. Client receives first row.'); metricsCollector.onResponse(); logger.log('6. Client receives metadata.'); - metricsCollector.onMetadataReceived( - standardAttemptInfo, - createMetadata('102') - ); + metricsCollector.onMetadataReceived(createMetadata('102')); logger.log('7. Client receives second row.'); metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); @@ -148,23 +142,16 @@ describe('Bigtable/MetricsCollector', () => { logger.log('10. 
Client receives status information.'); metricsCollector.onStatusReceived(status); logger.log('11. Client receives metadata.'); - metricsCollector.onMetadataReceived( - standardAttemptInfo, - createMetadata('103') - ); + metricsCollector.onMetadataReceived(createMetadata('103')); logger.log('12. Client receives third row.'); metricsCollector.onResponse(); logger.log('13. Client receives metadata.'); - metricsCollector.onMetadataReceived( - {finalOperationStatus: 'PENDING', streamingOperation: 'YES'}, - createMetadata('104') - ); + metricsCollector.onMetadataReceived(createMetadata('104')); logger.log('14. Client receives fourth row.'); metricsCollector.onResponse(); logger.log('15. User reads row 1'); logger.log('19. Stream ends, operation completes'); metricsCollector.onOperationComplete({ - retries: 1, finalOperationStatus: 'SUCCESS', connectivityErrorCount: 1, streamingOperation: 'YES', From c68a76f2c8fdae5e90e876dc21c8cf164580d238 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:58:00 -0500 Subject: [PATCH 089/289] Remove testMeterProvider --- common/test-meter-provider.ts | 88 ----------------------------------- 1 file changed, 88 deletions(-) delete mode 100644 common/test-meter-provider.ts diff --git a/common/test-meter-provider.ts b/common/test-meter-provider.ts deleted file mode 100644 index 1590fe322..000000000 --- a/common/test-meter-provider.ts +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {WithLogger, WithLoggerAndName} from './logger'; -import {Attributes, attributesToString} from './client-side-metrics-attributes'; - -/** - * A test implementation of a MeterProvider. This MeterProvider is used for testing purposes. - * It doesn't send metrics to a backend, but instead logs metric updates for verification. - */ -export class TestMeterProvider extends WithLogger { - /** - * Returns a TestMeter, that logs metric updates for verification. - * @param {string} name The name of the meter. - * @returns {TestMeter} - */ - getMeter(name: string) { - return new TestMeter(this.logger, name); - } -} - -/** - * A test implementation of a Meter. Used for testing purposes. It doesn't send metrics to a backend, - * but instead logs metric updates for verification. - */ -class TestMeter extends WithLoggerAndName { - /** - * Creates a test histogram. The TestHistogram logs when values are recorded. - * @param {string} instrument The name of the instrument. - * @returns {TestHistogram} - */ - createHistogram(instrument: string) { - return new TestHistogram(this.logger, `${this.name}:${instrument}`); - } - /** - * Creates a test counter. The TestCounter logs when values are added. - * @param {string} instrument The name of the instrument. - * @returns {TestCounter} - */ - createCounter(instrument: string) { - return new TestCounter(this.logger, `${this.name}:${instrument}`); - } -} - -/** - * A test implementation of a Counter. Used for testing purposes. 
It doesn't send metrics to a backend, - * but instead logs value additions for verification. - */ -class TestCounter extends WithLoggerAndName { - /** - * Simulates adding a value to the counter. Logs the value and the counter name. - * @param {number} value The value to be added to the counter. - * @param {Attributes} attributes The attributes associated with the value. - */ - add(value: number, attributes: Attributes) { - this.logger.log( - `Value added to counter ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` - ); - } -} - -/** - * A test implementation of a Histogram. Used for testing purposes. It doesn't send metrics to a backend, - * but instead logs recorded values for verification. - */ -class TestHistogram extends WithLoggerAndName { - /** - * Simulates recording a value in the histogram. Logs the value and the histogram name. - * @param {number} value The value to be recorded in the histogram. - * @param {Attributes} attributes The attributes associated with the value. - */ - record(value: number, attributes: Attributes) { - this.logger.log( - `Value added to histogram ${this.name} = ${value.toString()} with attributes ${attributesToString(attributes)}` - ); - } -} From 47a24b1e6a3463ce83d378c73c885fc05997d856 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 11:58:47 -0500 Subject: [PATCH 090/289] Remove the attributesToString function --- common/client-side-metrics-attributes.ts | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d1f31ab63..1b7adecb2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -29,17 +29,3 @@ export interface Attributes { streamingOperation?: string; clientName: string; } - -/** - * Converts an Attributes object to a string representation. - * This string representation is suitable for use as labels or tags. - * The order of attributes in the output string is fixed: - * projectId;instanceId;table;cluster;zone;appProfileId;methodName;attemptStatus;finalOperationStatus;streamingOperation;clientName - * If an attribute is null or undefined, the empty string is used. - * @param {Attributes} a The Attributes object to convert. - * @returns A string representation of the attribute. - */ -export function attributesToString(a: Attributes) { - const p = (attribute?: string | null) => (attribute ? attribute : ''); - return `${p(a.projectId)};${p(a.instanceId)};${p(a.table)};${p(a.cluster)};${p(a.zone)};${p(a.appProfileId)};${p(a.methodName)};${p(a.attemptStatus)};${p(a.finalOperationStatus)};${p(a.streamingOperation)};nodejs-bigtable`; -} From b2600f213ce9f089820717a8972670423d0babc7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:05:23 -0500 Subject: [PATCH 091/289] Eliminate unused class --- common/logger.ts | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/common/logger.ts b/common/logger.ts index 7b09b8737..284005350 100644 --- a/common/logger.ts +++ b/common/logger.ts @@ -33,20 +33,3 @@ export abstract class WithLogger { this.logger = logger; } } - -/** - * An abstract base class that provides a logger instance and a name. Subclasses - * can use the logger for logging messages, incorporating the name for context. - */ -export abstract class WithLoggerAndName { - protected logger: ILogger; - protected name: string; - /** - * @param {ILogger} logger The logger instance to be used by this object. 
- * @param {string} name The name associated with this object. - */ - constructor(logger: ILogger, name: string) { - this.logger = logger; - this.name = name; - } -} From d4d3f6c053fbecf82d32148d770289ae61da39c6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:11:24 -0500 Subject: [PATCH 092/289] Generate documentation for the IMetricsHandler --- src/client-side-metrics/metrics-handler.ts | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index a3ae4840d..d402fc39e 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,10 +1,16 @@ import {Attributes} from '../../common/client-side-metrics-attributes'; +/** + * Metrics related to the completion of a Bigtable operation. + */ export interface onOperationCompleteMetrics { operationLatency: number; retryCount?: number; } +/** + * Metrics related to the completion of a single attempt of a Bigtable operation. + */ export interface onAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; @@ -13,11 +19,25 @@ export interface onAttemptCompleteMetrics { } // TODO: Trim attributes so only necessary attributes are required. +/** + * An interface for handling client-side metrics related to Bigtable operations. + * Implementations of this interface can define how metrics are recorded and processed. + */ export interface IMetricsHandler { + /** + * Called when an operation completes (successfully or unsuccessfully). + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ onOperationComplete?( metrics: onOperationCompleteMetrics, attributes: Attributes ): void; + /** + * Called when an attempt (e.g., an RPC attempt) completes. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ onAttemptComplete?( metrics: onAttemptCompleteMetrics, attributes: Attributes From b8dff1c85088fdc4cba6d8038fa4e831da058954 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 13:16:06 -0500 Subject: [PATCH 093/289] Generate documentation for GCPMetricsHandler --- .../gcp-metrics-handler.ts | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index aea683fe1..86628ac39 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -29,10 +29,21 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. + * This handler records metrics such as operation latency, attempt latency, retry count, and more, + * associating them with relevant attributes for detailed analysis in Cloud Monitoring. + */ export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; + /** + * Initializes the OpenTelemetry metrics instruments if they haven't been already. + * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. + * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. 
+ * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. + */ private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; @@ -114,6 +125,12 @@ export class GCPMetricsHandler implements IMetricsHandler { } } + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. + */ onOperationComplete( metrics: onOperationCompleteMetrics, attributes: Attributes @@ -125,6 +142,14 @@ export class GCPMetricsHandler implements IMetricsHandler { ); this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); } + + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. + * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { this.initialize(); this.otelMetrics?.attemptLatencies.record( From d50384f56d16a3ec00aced943e646bcf08c632cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 14:19:03 -0500 Subject: [PATCH 094/289] Restrict attributes interfaces and solve compile --- common/client-side-metrics-attributes.ts | 53 ++++++++++-- common/test-metrics-handler.ts | 19 +++-- .../gcp-metrics-handler.ts | 23 ++--- src/client-side-metrics/metrics-collector.ts | 84 +++++-------------- src/client-side-metrics/metrics-handler.ts | 25 +++--- .../observability-options.ts | 81 ------------------ test/metrics-collector/metrics-collector.ts | 12 +-- 7 files changed, 110 insertions(+), 187 deletions(-) delete mode 100644 src/client-side-metrics/observability-options.ts diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 1b7adecb2..153bed3a1 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -12,11 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -/** - * Attributes (labels) associated with a Bigtable metric. These - * attributes provide context for the metric values. - */ -export interface Attributes { +interface StandardAttributes { projectId: string; instanceId: string; table: string; @@ -24,8 +20,49 @@ export interface Attributes { zone?: string | null; appProfileId?: string; methodName: string; - attemptStatus?: string; - finalOperationStatus?: string; - streamingOperation?: string; clientName: string; } + +/** + * Information about a Bigtable operation. + */ +export interface OperationOnlyAttributes { + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; + streamingOperation: string; +} + +/** + * Information about a single attempt of a Bigtable operation. + */ +export interface AttemptOnlyAttributes { + /** + * The final status of the operation (e.g., 'OK', 'ERROR'). + */ + finalOperationStatus: string; // TODO: enum + /** + * Whether the operation is a streaming operation or not. 
+ */ + streamingOperation: string; // TODO: enum + /** + * The attempt status of the operation. + */ + attemptStatus: string; // TODO: enum +} + +export interface OnOperationCompleteAttributes + extends StandardAttributes, + OperationOnlyAttributes { + finalOperationStatus: string; + streamingOperation: string; +} + +export interface OnAttemptCompleteAttributes + extends StandardAttributes, + AttemptOnlyAttributes { + attemptStatus: string; + finalOperationStatus: string; + streamingOperation: string; +} diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 737168edb..7fd5be4d5 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -1,9 +1,9 @@ import {WithLogger} from './logger'; import { - onAttemptCompleteMetrics, - onOperationCompleteMetrics, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, } from '../src/client-side-metrics/metrics-handler'; -import {Attributes} from './client-side-metrics-attributes'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from './client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. @@ -12,12 +12,12 @@ import {Attributes} from './client-side-metrics-attributes'; export class TestMetricsHandler extends WithLogger { /** * Logs the metrics and attributes received for an operation completion. - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. * @param {Attributes} attributes Attributes associated with the completed operation. */ onOperationComplete( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; this.logger.log('Recording parameters for onOperationComplete:'); @@ -27,10 +27,13 @@ export class TestMetricsHandler extends WithLogger { /** * Logs the metrics and attributes received for an attempt completion. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. * @param {Attributes} attributes Attributes associated with the completed attempt. 
*/ - onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { attributes.clientName = 'nodejs-bigtable'; this.logger.log('Recording parameters for onAttemptComplete:'); this.logger.log(`metrics: ${JSON.stringify(metrics)}`); diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 86628ac39..e62b711b1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -1,12 +1,12 @@ import { IMetricsHandler, - onAttemptCompleteMetrics, - onOperationCompleteMetrics, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {Attributes} from '../../common/client-side-metrics-attributes'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../common/client-side-metrics-attributes'; const { MeterProvider, Histogram, @@ -128,12 +128,12 @@ export class GCPMetricsHandler implements IMetricsHandler { /** * Records metrics for a completed Bigtable operation. * This method records the operation latency and retry count, associating them with provided attributes. - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. */ onOperationComplete( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { this.initialize(); this.otelMetrics?.operationLatencies.record( @@ -147,10 +147,13 @@ export class GCPMetricsHandler implements IMetricsHandler { * Records metrics for a completed attempt of a Bigtable operation. * This method records attempt latency, connectivity error count, server latency, and first response latency, * along with the provided attributes. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
*/ - onAttemptComplete(metrics: onAttemptCompleteMetrics, attributes: Attributes) { + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { this.initialize(); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index f537f48ee..608fdbebc 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -1,6 +1,10 @@ -import {Attributes} from '../../common/client-side-metrics-attributes'; import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; +import { + AttemptOnlyAttributes, + OnOperationCompleteAttributes, + OperationOnlyAttributes, +} from '../../common/client-side-metrics-attributes'; /** * An interface representing a Date-like object. Provides a `getTime` method @@ -52,35 +56,6 @@ export interface ITabularApiSurface { }; } -/** - * Information about a Bigtable operation. - */ -interface OperationInfo { - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Number of times a connectivity error occurred during the operation. - */ - connectivityErrorCount?: number; - streamingOperation: string; -} - -/** - * Information about a single attempt of a Bigtable operation. - */ -interface AttemptInfo { - /** - * The final status of the attempt (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: string; - /** - * Whether the operation is a streaming operation or not - */ - streamingOperation: string; -} - const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; @@ -160,20 +135,15 @@ export class MetricsCollector { * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. - * @param {string} finalOperationStatus The final status of the operation. - * @param {string} streamOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for operation latency metrics. + * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. + * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. */ private getOperationLatencyAttributes( projectId: string, - finalOperationStatus: string, - streamOperation?: string - ): Attributes { + operationOnlyAttributes: OperationOnlyAttributes + ): OnOperationCompleteAttributes { return Object.assign( - { - finalOperationStatus: finalOperationStatus, - streamingOperation: streamOperation, - }, + operationOnlyAttributes, this.getBasicAttributes(projectId) ); } @@ -183,20 +153,15 @@ export class MetricsCollector { * about the Bigtable environment, the operation being performed, and the status of the attempt. * Includes whether the operation was a streaming operation or not. * @param {string} projectId The Google Cloud project ID. - * @param {string} attemptStatus The status of the attempt. - * @param {string} streamingOperation Whether the operation was a streaming operation or not. - * @returns {Attributes} An object containing the attributes for attempt metrics. + * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. 
+ * @returns {OnAttemptCompleteAttributes} An object containing the attributes for attempt metrics. */ private getAttemptAttributes( projectId: string, - attemptStatus: string, - streamingOperation: string + attemptOnlyAttributes: AttemptOnlyAttributes ) { return Object.assign( - { - attemptStatus: attemptStatus, - streamingOperation: streamingOperation, - }, + attemptOnlyAttributes, this.getBasicAttributes(projectId) ); } @@ -210,18 +175,14 @@ export class MetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptInfo} info Information about the completed attempt. + * @param {AttemptOnlyAttributes} info Information about the completed attempt. */ - onAttemptComplete(info: AttemptInfo) { + onAttemptComplete(info: AttemptOnlyAttributes) { this.attemptCount++; const endTime = this.dateProvider.getDate(); const projectId = this.projectId; if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes( - projectId, - info.finalOperationStatus, - info.streamingOperation - ); + const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { @@ -267,26 +228,23 @@ export class MetricsCollector { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. - * @param {OperationInfo} info Information about the completed operation. + * @param {OperationOnlyAttributes} info Information about the completed operation. */ - onOperationComplete(info: OperationInfo) { + onOperationComplete(info: OperationOnlyAttributes) { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - this.onAttemptComplete(info); if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. const operationLatencyAttributes = this.getOperationLatencyAttributes( projectId, - info.finalOperationStatus, - info.streamingOperation + info ); const metrics = { operationLatency: totalTime, firstResponseLatency: this.firstResponseLatency, retryCount: this.attemptCount - 1, - connectivityErrorCount: info.connectivityErrorCount, }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { @@ -302,7 +260,7 @@ export class MetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptInfo} info Information about the completed attempt. + * @param {AttemptOnlyAttributes} info Information about the completed attempt. * @param {object} metadata The received metadata. */ onMetadataReceived(metadata: { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index d402fc39e..9a1ba558d 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,9 +1,12 @@ -import {Attributes} from '../../common/client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../common/client-side-metrics-attributes'; /** * Metrics related to the completion of a Bigtable operation. 
*/ -export interface onOperationCompleteMetrics { +export interface OnOperationCompleteMetrics { operationLatency: number; retryCount?: number; } @@ -11,7 +14,7 @@ export interface onOperationCompleteMetrics { /** * Metrics related to the completion of a single attempt of a Bigtable operation. */ -export interface onAttemptCompleteMetrics { +export interface OnAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; firstResponseLatency?: number; @@ -26,20 +29,20 @@ export interface onAttemptCompleteMetrics { export interface IMetricsHandler { /** * Called when an operation completes (successfully or unsuccessfully). - * @param {onOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. */ onOperationComplete?( - metrics: onOperationCompleteMetrics, - attributes: Attributes + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ): void; /** * Called when an attempt (e.g., an RPC attempt) completes. - * @param {onAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. */ onAttemptComplete?( - metrics: onAttemptCompleteMetrics, - attributes: Attributes + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ): void; } diff --git a/src/client-side-metrics/observability-options.ts b/src/client-side-metrics/observability-options.ts deleted file mode 100644 index 031c169da..000000000 --- a/src/client-side-metrics/observability-options.ts +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {Attributes} from '../../common/client-side-metrics-attributes'; - -/** - * The Counter interface for recording increments of a metric. - */ -interface ICounter { - /** - * Adds a value to the counter. - * @param {number} retries The value to be added to the counter. - * @param {Attributes} attributes The attributes associated with this value. - */ - add(retries: number, attributes: Attributes): void; -} - -/** - * The Histogram interface for recording distributions of values of a metric. - */ -interface IHistogram { - /** - * Records a value in the histogram. - * @param {number} value The value to be recorded in the histogram. - * @param {Attributes} attributes The attributes associated with this value. - */ - record(value: number, attributes: Attributes): void; -} - -/** - * The Meter interface. 
Meters are responsible for creating and managing instruments (Counters, Histograms, etc.). - */ -interface IMeter { - /** - * Creates a Counter instrument, which counts increments of a given metric. - * @param {string} instrument The name of the counter instrument. - * @param {Attributes} attributes The attributes associated with this counter. - * @returns {ICounter} A Counter instance. - */ - createCounter(instrument: string, attributes: Attributes): ICounter; - /** - * Creates a Histogram instrument, which records distributions of values for a given metric. - * @param {string} instrument The name of the histogram instrument. - * @param {Attributes} attributes The attributes associated with this histogram. - * @returns {IHistogram} A Histogram instance. - */ - createHistogram(instrument: string, attributes: Attributes): IHistogram; -} - -/** - * The MeterProvider interface. A MeterProvider creates and manages Meters. - */ -interface IMeterProvider { - /** - * Returns a Meter, which can be used to create instruments for recording measurements. - * @param {string} name The name of the Meter. - * @returns {IMeter} A Meter instance. - */ - getMeter(name: string): IMeter; -} - -/** - * Options for configuring client-side metrics observability. Allows users to provide their own MeterProvider. - */ -export interface ObservabilityOptions { - /** - * The MeterProvider to use for recording metrics. If not provided, a default MeterProvider will be used. - */ - meterProvider: IMeterProvider; -} diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 02b3f91ed..98410b2fd 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -83,11 +83,6 @@ describe('Bigtable/MetricsCollector', () => { async fakeMethod(): Promise { return new Promise(resolve => { this.bigtable.getProjectId_((err, projectId) => { - const standardAttemptInfo = { - finalOperationStatus: 'PENDING', - streamingOperation: 'YES', - }; - function createMetadata(duration: string) { return { internalRepr: new Map([ @@ -136,6 +131,7 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onAttemptComplete({ finalOperationStatus: 'ERROR', streamingOperation: 'YES', + attemptStatus: 'ERROR', }); logger.log('9. After a timeout, the second attempt is made.'); metricsCollector.onAttemptStart(); @@ -151,9 +147,13 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('15. User reads row 1'); logger.log('19. 
Stream ends, operation completes'); + metricsCollector.onAttemptComplete({ + finalOperationStatus: 'SUCCESS', + attemptStatus: 'SUCCESS', + streamingOperation: 'YES', + }); metricsCollector.onOperationComplete({ finalOperationStatus: 'SUCCESS', - connectivityErrorCount: 1, streamingOperation: 'YES', }); resolve(); From b5fc1f248e8c3aa1fb82cf3c193b056db2791213 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 24 Jan 2025 19:23:58 +0000 Subject: [PATCH 095/289] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- common/test-metrics-handler.ts | 5 ++++- src/client-side-metrics/gcp-metrics-handler.ts | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 7fd5be4d5..479bdcbda 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -3,7 +3,10 @@ import { OnAttemptCompleteMetrics, OnOperationCompleteMetrics, } from '../src/client-side-metrics/metrics-handler'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from './client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from './client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e62b711b1..0041db253 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -6,7 +6,10 @@ import { import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../common/client-side-metrics-attributes'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../common/client-side-metrics-attributes'; const { MeterProvider, Histogram, From 1e5dc82b832e2679f97013ff7bea4bb1c1164191 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 14:24:54 -0500 Subject: [PATCH 096/289] use undefined instead of null --- common/client-side-metrics-attributes.ts | 4 ++-- src/client-side-metrics/metrics-collector.ts | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 153bed3a1..0927425e8 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -16,8 +16,8 @@ interface StandardAttributes { projectId: string; instanceId: string; table: string; - cluster?: string | null; - zone?: string | null; + cluster?: string; + zone?: string; appProfileId?: string; methodName: string; clientName: string; diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 608fdbebc..6d12d9b0d 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -67,8 +67,8 @@ const version = JSON.parse(packageJSON.toString()).version; export class MetricsCollector { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; - 
private zone: string | null | undefined; - private cluster: string | null | undefined; + private zone: string | undefined; + private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; private methodName: string; private projectId?: string; @@ -94,8 +94,8 @@ export class MetricsCollector { projectId?: string, dateProvider?: DateProvider ) { - this.zone = null; - this.cluster = null; + this.zone = undefined; + this.cluster = undefined; this.tabularApiSurface = tabularApiSurface; this.methodName = methodName; this.operationStartTime = null; From c2ffbc649120befaa896aa6f7779c9aafaa9d87f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:46:27 -0500 Subject: [PATCH 097/289] Introduce enums for allowable values --- common/client-side-metrics-attributes.ts | 38 ++++++++++++------- src/client-side-metrics/metrics-collector.ts | 3 +- test/metrics-collector/metrics-collector.ts | 25 +++++++----- .../metrics-collector/typical-method-call.txt | 12 +++--- 4 files changed, 49 insertions(+), 29 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 0927425e8..d891d5841 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -30,8 +30,8 @@ export interface OperationOnlyAttributes { /** * The final status of the operation (e.g., 'OK', 'ERROR'). */ - finalOperationStatus: string; - streamingOperation: string; + finalOperationStatus: FinalOperationStatus; + streamingOperation: StreamingOperation; } /** @@ -41,28 +41,40 @@ export interface AttemptOnlyAttributes { /** * The final status of the operation (e.g., 'OK', 'ERROR'). */ - finalOperationStatus: string; // TODO: enum + finalOperationStatus: FinalOperationStatus; /** * Whether the operation is a streaming operation or not. */ - streamingOperation: string; // TODO: enum + streamingOperation: StreamingOperation; /** * The attempt status of the operation. */ - attemptStatus: string; // TODO: enum + attemptStatus: AttemptStatus; +} + +export enum FinalOperationStatus { + OK = 'OK', + ERROR = 'ERROR', +} + +export enum AttemptStatus { + OK = 'OK', + ERROR = 'ERROR', +} + +export enum StreamingOperation { + YES = 'YES', + NO = 'NO', } export interface OnOperationCompleteAttributes extends StandardAttributes, - OperationOnlyAttributes { - finalOperationStatus: string; - streamingOperation: string; -} + OperationOnlyAttributes {} export interface OnAttemptCompleteAttributes extends StandardAttributes, - AttemptOnlyAttributes { - attemptStatus: string; - finalOperationStatus: string; - streamingOperation: string; + AttemptOnlyAttributes {} + +export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { + connectivityErrorCount: number; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 6d12d9b0d..ae1bedfcc 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -2,6 +2,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; @@ -177,7 +178,7 @@ export class MetricsCollector { * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. * @param {AttemptOnlyAttributes} info Information about the completed attempt. 
*/ - onAttemptComplete(info: AttemptOnlyAttributes) { + onAttemptComplete(info: OnAttemptCompleteInfo) { this.attemptCount++; const endTime = this.dateProvider.getDate(); const projectId = this.projectId; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 98410b2fd..df1ca7321 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,6 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; +import { + AttemptStatus, + FinalOperationStatus, + StreamingOperation, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -71,7 +76,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsCollector', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; @@ -129,9 +134,10 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: 'ERROR', - streamingOperation: 'YES', - attemptStatus: 'ERROR', + finalOperationStatus: FinalOperationStatus.ERROR, + streamingOperation: StreamingOperation.YES, + attemptStatus: AttemptStatus.ERROR, + connectivityErrorCount: 1, }); logger.log('9. After a timeout, the second attempt is made.'); metricsCollector.onAttemptStart(); @@ -148,13 +154,14 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('19. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: 'SUCCESS', - attemptStatus: 'SUCCESS', - streamingOperation: 'YES', + finalOperationStatus: FinalOperationStatus.ERROR, + attemptStatus: AttemptStatus.OK, + streamingOperation: StreamingOperation.YES, + connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ - finalOperationStatus: 'SUCCESS', - streamingOperation: 'YES', + finalOperationStatus: FinalOperationStatus.OK, + streamingOperation: StreamingOperation.YES, }); resolve(); }); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 9f10b0416..00d645e0b 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101} -attributes: {"attemptStatus":"ERROR","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -26,10 +26,10 @@ getDate call returns 8000 ms 15. User reads row 1 19. Stream ends, operation completes getDate call returns 9000 ms -getDate call returns 10000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":4000,"serverLatency":103} -attributes: {"attemptStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +metrics: {"attemptLatency":3000,"serverLatency":103} +attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":8000,"firstResponseLatency":6000,"retryCount":1,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":"SUCCESS","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +metrics: {"operationLatency":9000,"firstResponseLatency":6000,"retryCount":1} +attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} From 9320149600f3f2f57c137d34dff23e3d484067c0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:50:41 -0500 Subject: [PATCH 098/289] Add more headers --- common/test-metrics-handler.ts | 14 ++++++++++++++ src/client-side-metrics/gcp-metrics-handler.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-collector.ts | 14 ++++++++++++++ src/client-side-metrics/metrics-handler.ts | 14 ++++++++++++++ 4 files changed, 56 insertions(+) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 479bdcbda..9ffabd7a0 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {WithLogger} from './logger'; import { OnAttemptCompleteMetrics, diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 0041db253..d80734b63 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import { IMetricsHandler, OnAttemptCompleteMetrics, diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index ae1bedfcc..a3d1623e5 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 9a1ba558d..cde1ee803 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, From 40233611aa3e6db1e092e20297a09ff28772e4c7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 15:54:45 -0500 Subject: [PATCH 099/289] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index df1ca7321..bdc58ed04 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -76,7 +76,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = new Logger(); const metricsHandlers = [new TestMetricsHandler(logger)]; From ef9173376c88deae0f547d1a81217820143cecdd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:20:48 -0500 Subject: [PATCH 100/289] Use null to pass values around. 
Not undefined --- common/client-side-metrics-attributes.ts | 2 +- src/client-side-metrics/metrics-collector.ts | 12 ++++++++---- src/client-side-metrics/metrics-handler.ts | 14 ++++++++++---- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d891d5841..73aee51d5 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -76,5 +76,5 @@ export interface OnAttemptCompleteAttributes AttemptOnlyAttributes {} export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number; + connectivityErrorCount: number | null; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index a3d1623e5..e46efb580 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -90,9 +90,9 @@ export class MetricsCollector { private attemptCount = 0; private receivedFirstResponse: boolean; private metricsHandlers: IMetricsHandler[]; - private firstResponseLatency?: number; + private firstResponseLatency: number | null; private serverTimeRead: boolean; - private serverTime?: number; + private serverTime: number | null; private dateProvider: DateProvider; /** @@ -117,7 +117,9 @@ export class MetricsCollector { this.attemptStartTime = null; this.receivedFirstResponse = false; this.metricsHandlers = metricsHandlers; + this.firstResponseLatency = null; this.serverTimeRead = false; + this.serverTime = null; this.projectId = projectId; if (dateProvider) { this.dateProvider = dateProvider; @@ -205,6 +207,8 @@ export class MetricsCollector { { attemptLatency: totalTime, serverLatency: this.serverTime, + connectivityErrorCount: info.connectivityErrorCount, + firstResponseLatency: this.firstResponseLatency, }, attributes ); @@ -218,9 +222,9 @@ export class MetricsCollector { */ onAttemptStart() { this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = undefined; + this.serverTime = null; this.serverTimeRead = false; - this.firstResponseLatency = undefined; + this.firstResponseLatency = null; this.receivedFirstResponse = false; } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index cde1ee803..24f81a6af 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -17,12 +17,18 @@ import { OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; +/** + * The interfaces below use null instead of undefined to gain the advantage + * of being able to use the compiler to tell us when a property isn't being + * provided for easier debugging. + */ + /** * Metrics related to the completion of a Bigtable operation. */ export interface OnOperationCompleteMetrics { operationLatency: number; - retryCount?: number; + retryCount: number | null; } /** @@ -30,9 +36,9 @@ export interface OnOperationCompleteMetrics { */ export interface OnAttemptCompleteMetrics { attemptLatency: number; - serverLatency?: number; - firstResponseLatency?: number; - connectivityErrorCount?: number; + serverLatency: number | null; + firstResponseLatency: number | null; + connectivityErrorCount: number | null; } // TODO: Trim attributes so only necessary attributes are required. 
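The switch in this patch to required number | null fields is easier to see with a small standalone TypeScript sketch (illustrative only; the interface names below are invented for the example and are not part of the patch series). A required-but-nullable property forces every call site to state the value explicitly, which is the compile-time check that the comment added to metrics-handler.ts describes, whereas an optional property lets a caller omit the metric without any error.

// Illustrative sketch: required-but-nullable vs. optional properties.
interface AttemptMetricsNullable {
  attemptLatency: number;
  serverLatency: number | null; // must be supplied, even when the value is unknown
}
interface AttemptMetricsOptional {
  attemptLatency: number;
  serverLatency?: number; // can be forgotten without a compiler error
}
const explicitMetrics: AttemptMetricsNullable = {attemptLatency: 3000, serverLatency: null};
// const missingMetrics: AttemptMetricsNullable = {attemptLatency: 3000}; // compile error: serverLatency is missing
const silentlyDropped: AttemptMetricsOptional = {attemptLatency: 3000}; // compiles; the metric is silently omitted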
From 52b570ccca25b336e615610c3ab119a46970af11 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:26:13 -0500 Subject: [PATCH 101/289] Modify test step --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index bdc58ed04..bfda40b00 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -152,7 +152,7 @@ describe('Bigtable/MetricsCollector', () => { logger.log('14. Client receives fourth row.'); metricsCollector.onResponse(); logger.log('15. User reads row 1'); - logger.log('19. Stream ends, operation completes'); + logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, attemptStatus: AttemptStatus.OK, From 6a6774f90b82009bef03aea8395927612e86228d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:30:58 -0500 Subject: [PATCH 102/289] Add metrics --- test/metrics-collector/typical-method-call.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 00d645e0b..4a94904f7 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -24,10 +24,10 @@ getDate call returns 7000 ms 14. Client receives fourth row. getDate call returns 8000 ms 15. User reads row 1 -19. Stream ends, operation completes +16. 
Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":6000} attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: From 10b6d3071e1f861b327a76d11a49d26e9885ea6d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:32:36 -0500 Subject: [PATCH 103/289] =?UTF-8?q?Don=E2=80=99t=20provide=20first=20respo?= =?UTF-8?q?nse=20latency?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/client-side-metrics/metrics-collector.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e46efb580..e2bf95152 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -262,7 +262,6 @@ export class MetricsCollector { ); const metrics = { operationLatency: totalTime, - firstResponseLatency: this.firstResponseLatency, retryCount: this.attemptCount - 1, }; this.metricsHandlers.forEach(metricsHandler => { From 33c17c6f96d22d23cbea194662ad6367b2cb0277 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:34:48 -0500 Subject: [PATCH 104/289] Remove firstResponseLatency from operation metrics --- test/metrics-collector/typical-method-call.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 4a94904f7..55210e357 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -31,5 +31,5 @@ metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1," attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"firstResponseLatency":6000,"retryCount":1} +metrics: {"operationLatency":9000,"retryCount":1} attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} From fbf2314a43362b817b7e0112375fe50b432cda76 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 16:52:33 -0500 Subject: [PATCH 105/289] Expose interface allowing undefined not null --- common/client-side-metrics-attributes.ts | 2 +- src/client-side-metrics/metrics-collector.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 73aee51d5..d891d5841 100644 --- a/common/client-side-metrics-attributes.ts +++ 
b/common/client-side-metrics-attributes.ts @@ -76,5 +76,5 @@ export interface OnAttemptCompleteAttributes AttemptOnlyAttributes {} export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number | null; + connectivityErrorCount: number; } diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index e2bf95152..946fc723c 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -206,9 +206,9 @@ export class MetricsCollector { metricsHandler.onAttemptComplete( { attemptLatency: totalTime, - serverLatency: this.serverTime, + serverLatency: this.serverTime ?? undefined, connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency, + firstResponseLatency: this.firstResponseLatency ?? undefined, }, attributes ); diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 24f81a6af..97b6c5ff5 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -28,7 +28,7 @@ import { */ export interface OnOperationCompleteMetrics { operationLatency: number; - retryCount: number | null; + retryCount?: number; } /** @@ -36,9 +36,9 @@ export interface OnOperationCompleteMetrics { */ export interface OnAttemptCompleteMetrics { attemptLatency: number; - serverLatency: number | null; - firstResponseLatency: number | null; - connectivityErrorCount: number | null; + serverLatency?: number; + firstResponseLatency?: number; + connectivityErrorCount: number; } // TODO: Trim attributes so only necessary attributes are required. From 39fe8610d6a1940bac8476c8052c826948fc6730 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 24 Jan 2025 17:15:28 -0500 Subject: [PATCH 106/289] Better explanations for design decision inline --- src/client-side-metrics/metrics-collector.ts | 2 -- src/client-side-metrics/metrics-handler.ts | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 946fc723c..9868a673f 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -74,8 +74,6 @@ export interface ITabularApiSurface { const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; -// TODO: Check if metrics tracer method exists. - /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 97b6c5ff5..735758be4 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -18,9 +18,9 @@ import { } from '../../common/client-side-metrics-attributes'; /** - * The interfaces below use null instead of undefined to gain the advantage - * of being able to use the compiler to tell us when a property isn't being - * provided for easier debugging. + * The interfaces below use undefined instead of null to indicate a metric is + * not available yet. The benefit of this is that new metrics can be added + * without requiring users to change the methods in their metrics handler. 
*/ /** From 8f131001a97f3c8701533ab4052defb10e6c27f2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 11:03:27 -0500 Subject: [PATCH 107/289] Use attempt start time not operation start time for firstResponseLatency --- src/client-side-metrics/metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 9868a673f..24228cb33 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -233,8 +233,8 @@ export class MetricsCollector { onResponse() { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + if (projectId && this.attemptStartTime) { + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; this.firstResponseLatency = totalTime; From 48e0e95a7cbd4020959c68356e01b8960f1b67af Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 11:06:50 -0500 Subject: [PATCH 108/289] Adjust tests for first response latency --- test/metrics-collector/typical-method-call.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 55210e357..c883357da 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":2000} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -27,7 +27,7 @@ getDate call returns 8000 ms 16. 
Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":6000} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: From 66c4ab1efc5981332926c11218ba8ee8a4138101 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 27 Jan 2025 15:10:29 -0500 Subject: [PATCH 109/289] Remove TODO --- src/client-side-metrics/metrics-handler.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 735758be4..acc2b88af 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -41,7 +41,6 @@ export interface OnAttemptCompleteMetrics { connectivityErrorCount: number; } -// TODO: Trim attributes so only necessary attributes are required. /** * An interface for handling client-side metrics related to Bigtable operations. * Implementations of this interface can define how metrics are recorded and processed. From e7c5b5f4926002e43fd6b5b151ce77491239b5b9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 09:54:44 -0500 Subject: [PATCH 110/289] Use the MethodName enum instead of string --- common/client-side-metrics-attributes.ts | 9 +++++++++ src/client-side-metrics/metrics-collector.ts | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d891d5841..3c492da43 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -78,3 +78,12 @@ export interface OnAttemptCompleteAttributes export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { connectivityErrorCount: number; } + +export enum MethodName { + READ_ROWS = 'readRows', + MUTATE_ROW = 'mutateRow', + CHECK_AND_MUTATE_ROW = 'checkAndMutateRow', + READ_MODIFY_WRITE_ROW = 'readModifyWriteRow', + SAMPLE_ROW_KEYS = 'sampleRowKeys', + MUTATE_ROWS = 'mutateRows', +} diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/metrics-collector.ts index 24228cb33..5695b58d6 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/metrics-collector.ts @@ -16,6 +16,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + MethodName, OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, @@ -83,7 +84,7 @@ export class MetricsCollector { private zone: string | undefined; private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; - private methodName: string; + private methodName: MethodName; private projectId?: string; private attemptCount = 0; private receivedFirstResponse: boolean; @@ -96,14 +97,14 @@ export class MetricsCollector { /** * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. 
* @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. - * @param {string} methodName The name of the method being traced. + * @param {MethodName} methodName The name of the method being traced. * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). */ constructor( tabularApiSurface: ITabularApiSurface, metricsHandlers: IMetricsHandler[], - methodName: string, + methodName: MethodName, projectId?: string, dateProvider?: DateProvider ) { From 98be3516ffa3aecbacdc9c81d531d062f59127d0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 10:09:57 -0500 Subject: [PATCH 111/289] =?UTF-8?q?Don=E2=80=99t=20use=20enum=20for=20stre?= =?UTF-8?q?aming=20operation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- common/client-side-metrics-attributes.ts | 9 ++------- test/metrics-collector/metrics-collector.ts | 7 +++---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 3c492da43..8cbfdcfec 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -31,7 +31,7 @@ export interface OperationOnlyAttributes { * The final status of the operation (e.g., 'OK', 'ERROR'). */ finalOperationStatus: FinalOperationStatus; - streamingOperation: StreamingOperation; + streamingOperation: boolean; } /** @@ -45,7 +45,7 @@ export interface AttemptOnlyAttributes { /** * Whether the operation is a streaming operation or not. */ - streamingOperation: StreamingOperation; + streamingOperation: boolean; /** * The attempt status of the operation. */ @@ -62,11 +62,6 @@ export enum AttemptStatus { ERROR = 'ERROR', } -export enum StreamingOperation { - YES = 'YES', - NO = 'NO', -} - export interface OnOperationCompleteAttributes extends StandardAttributes, OperationOnlyAttributes {} diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index bfda40b00..89c9774c5 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -21,7 +21,6 @@ import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector' import { AttemptStatus, FinalOperationStatus, - StreamingOperation, } from '../../common/client-side-metrics-attributes'; /** @@ -135,7 +134,7 @@ describe('Bigtable/MetricsCollector', () => { logger.log('8. 
A transient error occurs.'); metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, attemptStatus: AttemptStatus.ERROR, connectivityErrorCount: 1, }); @@ -156,12 +155,12 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onAttemptComplete({ finalOperationStatus: FinalOperationStatus.ERROR, attemptStatus: AttemptStatus.OK, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ finalOperationStatus: FinalOperationStatus.OK, - streamingOperation: StreamingOperation.YES, + streamingOperation: true, }); resolve(); }); From efdfcead853a7331e66eefd9884de8d9cb08ddc0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 10:40:59 -0500 Subject: [PATCH 112/289] Remove copy/pasted comment --- src/client-side-metrics/gcp-metrics-handler.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d80734b63..7e2945a48 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -67,12 +67,6 @@ export class GCPMetricsHandler implements IMetricsHandler { // Use MeterProvider provided by user // If MeterProvider was not provided then use the default meter provider. const meterProvider = new MeterProvider({ - // This is the default meter provider - // Create a resource. Fill the `service.*` attributes in with real values for your service. - // GcpDetectorSync will add in resource information about the current environment if you are - // running on GCP. These resource attributes will be translated to a specific GCP monitored - // resource if running on GCP. Otherwise, metrics will be sent with monitored resource - // `generic_task`. resource: new Resources.Resource({ 'service.name': 'bigtable-metrics', }).merge(new ResourceUtil.GcpDetectorSync().detect()), From 4a6a47669e8106b675fb9e736b9703bc7048dd21 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 11:44:42 -0500 Subject: [PATCH 113/289] Rename to OperationMetricsCollector --- ...cs-collector.ts => operation-metrics-collector.ts} | 2 +- test/metrics-collector/metrics-collector.ts | 11 ++++------- 2 files changed, 5 insertions(+), 8 deletions(-) rename src/client-side-metrics/{metrics-collector.ts => operation-metrics-collector.ts} (99%) diff --git a/src/client-side-metrics/metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts similarity index 99% rename from src/client-side-metrics/metrics-collector.ts rename to src/client-side-metrics/operation-metrics-collector.ts index 5695b58d6..0e38c4fc4 100644 --- a/src/client-side-metrics/metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -78,7 +78,7 @@ const version = JSON.parse(packageJSON.toString()).version; /** * A class for tracing and recording client-side metrics related to Bigtable operations. 
*/ -export class MetricsCollector { +export class OperationMetricsCollector { private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private zone: string | undefined; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 89c9774c5..2894aa90f 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -17,11 +17,8 @@ import {TestDateProvider} from '../../common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; -import {MetricsCollector} from '../../src/client-side-metrics/metrics-collector'; -import { - AttemptStatus, - FinalOperationStatus, -} from '../../common/client-side-metrics-attributes'; +import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; +import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -107,10 +104,10 @@ describe('Bigtable/MetricsCollector', () => { options: {}, }, }; - const metricsCollector = new MetricsCollector( + const metricsCollector = new OperationMetricsCollector( this, metricsHandlers, - 'fakeMethod', + MethodName.READ_ROWS, projectId, new TestDateProvider(logger) ); From edfcf8aef02b5299d11b3d72f7e643861b31a3d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 11:49:10 -0500 Subject: [PATCH 114/289] Rename the method to getOperationAttributes --- .../operation-metrics-collector.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0e38c4fc4..54a7a21ce 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -147,14 +147,16 @@ export class OperationMetricsCollector { } /** - * Assembles the attributes for operation latency metrics. These attributes - * provide context about the Bigtable environment, the operation being performed, and the final status of the operation. - * Includes whether the operation was a streaming operation or not. + * Assembles the attributes for an entire operation. These attributes + * provide context about the Bigtable environment, the operation being + * performed, and the final status of the operation. Includes whether the + * operation was a streaming operation or not. + * * @param {string} projectId The Google Cloud project ID. * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. */ - private getOperationLatencyAttributes( + private getOperationAttributes( projectId: string, operationOnlyAttributes: OperationOnlyAttributes ): OnOperationCompleteAttributes { @@ -255,7 +257,7 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. 
- const operationLatencyAttributes = this.getOperationLatencyAttributes( + const operationLatencyAttributes = this.getOperationAttributes( projectId, info ); From bc4998f37b1d272c225cb1bec032c77b55842562 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 28 Jan 2025 16:49:34 +0000 Subject: [PATCH 115/289] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 2894aa90f..9009f477b 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; +import { + AttemptStatus, + FinalOperationStatus, + MethodName, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. From 10b72ec98471f0bf9cb5a422eea6889a02ac3528 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 28 Jan 2025 16:53:57 +0000 Subject: [PATCH 116/289] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 2894aa90f..9009f477b 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,11 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {AttemptStatus, FinalOperationStatus, MethodName,} from '../../common/client-side-metrics-attributes'; +import { + AttemptStatus, + FinalOperationStatus, + MethodName, +} from '../../common/client-side-metrics-attributes'; /** * A basic logger class that stores log messages in an array. Useful for testing. 
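The patch that follows registers OpenTelemetry views so that latency instruments keep explicit histogram buckets while count instruments are summed. For reference, here is a minimal standalone sketch of that View/aggregation pairing, assuming the @opentelemetry/sdk-metrics API already used elsewhere in this series; the instrument names are taken from the patch, but the bucket boundaries below are abbreviated placeholders rather than the real Bigtable boundaries.

// Minimal sketch (not the patch itself): pairing instruments with aggregations via views.
import {
  Aggregation,
  ExplicitBucketHistogramAggregation,
  MeterProvider,
  View,
} from '@opentelemetry/sdk-metrics';

// Abbreviated bucket boundaries, for illustration only.
const latencyHistogram = new ExplicitBucketHistogramAggregation([0, 1, 10, 100, 1000, 10000]);

const views = [
  // Latency-style instruments record a distribution with explicit buckets.
  new View({
    instrumentName: 'operation_latencies',
    name: 'operation_latencies',
    aggregation: latencyHistogram,
  }),
  new View({
    instrumentName: 'attempt_latencies',
    name: 'attempt_latencies',
    aggregation: latencyHistogram,
  }),
  // Count-style instruments are summed.
  new View({
    instrumentName: 'retry_count',
    name: 'retry_count',
    aggregation: Aggregation.Sum(),
  }),
];

// Keeping each view's name equal to its instrument name leaves the exported
// metric names unchanged and only swaps the aggregation.
const meterProvider = new MeterProvider({views});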
From 47fd9d03af168d038ea2afbcc9ab05090a8ea812 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 14:58:42 -0500 Subject: [PATCH 117/289] Add aggregate views to the GCP metrics handler --- .../gcp-metrics-handler.ts | 29 +++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 7e2945a48..d0163b3d9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -24,7 +24,10 @@ import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; +import {View} from '@opentelemetry/sdk-metrics'; const { + Aggregation, + ExplicitBucketHistogramAggregation, MeterProvider, Histogram, Counter, @@ -64,9 +67,31 @@ export class GCPMetricsHandler implements IMetricsHandler { private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; - // Use MeterProvider provided by user - // If MeterProvider was not provided then use the default meter provider. + const sumAggregation = Aggregation.Sum(); + const histogramAggregation = new ExplicitBucketHistogramAggregation([ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, + 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + ]); + const viewList = [ + 'operation_latencies', + 'first_response_latencies', + 'attempt_latencies', + 'retry_count', + 'server_latencies', + 'connectivity_error_count', + 'application_latencies', + 'throttling_latencies', + ].map( + name => + new View({ + instrumentName: name, + name, + aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, + }) + ); const meterProvider = new MeterProvider({ + views: viewList, resource: new Resources.Resource({ 'service.name': 'bigtable-metrics', }).merge(new ResourceUtil.GcpDetectorSync().detect()), From 9073f07182e362174ad0163e253b14042c143be7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 15:44:15 -0500 Subject: [PATCH 118/289] Adjust test based on enum changes --- test/metrics-collector/typical-method-call.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index c883357da..b51e98331 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","streamingOperation":"YES","attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","streamingOperation":true,"attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -28,8 +28,8 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":"YES","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1} -attributes: {"finalOperationStatus":"OK","streamingOperation":"YES","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"fakeMethod","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":"OK","streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 32d3983c12b6feae22eb93121ffb2b159805ee19 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 28 Jan 2025 16:37:31 -0500 Subject: [PATCH 119/289] Update the documentation to be more descriptive --- .../operation-metrics-collector.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 54a7a21ce..98ada061d 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -154,7 +154,8 @@ export class OperationMetricsCollector { * * @param {string} projectId The Google Cloud project ID. * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. - * @returns {OnOperationCompleteAttributes} An object containing the attributes for operation latency metrics. + * @returns {OnOperationCompleteAttributes} An object containing the attributes + * for operation latency metrics. */ private getOperationAttributes( projectId: string, @@ -168,11 +169,13 @@ export class OperationMetricsCollector { /** * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, and the status of the attempt. - * Includes whether the operation was a streaming operation or not. + * about the Bigtable environment, the operation being performed, the status + * of the attempt and whether the operation was a streaming operation or not. + * * @param {string} projectId The Google Cloud project ID. * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. - * @returns {OnAttemptCompleteAttributes} An object containing the attributes for attempt metrics. + * @returns {OnAttemptCompleteAttributes} The attributes all metrics recorded + * in the onAttemptComplete handler. 
*/ private getAttemptAttributes( projectId: string, From 9716c4a097b81a432be7bab64b5503eaab2c7f3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 10:52:12 -0500 Subject: [PATCH 120/289] Add the state machine to the metrics collector --- .../operation-metrics-collector.ts | 144 ++++++++++++------ 1 file changed, 97 insertions(+), 47 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 98ada061d..9e63e3d3a 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -75,10 +75,29 @@ export interface ITabularApiSurface { const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; +// MetricsCollectorState is a list of states that the metrics collector can be in. +// Tracking the OperationMetricsCollector state is done so that the +// OperationMetricsCollector methods are not called in the wrong order. If the +// methods are called in the wrong order they will not execute and they will +// throw warnings. +// +// The following state transitions are allowed: +// OPERATION_NOT_STARTED -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS +// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_COMPLETE +enum MetricsCollectorState { + OPERATION_NOT_STARTED, + OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, + OPERATION_STARTED_ATTEMPT_IN_PROGRESS, + OPERATION_COMPLETE, +} + /** * A class for tracing and recording client-side metrics related to Bigtable operations. */ export class OperationMetricsCollector { + private state: MetricsCollectorState; private operationStartTime: DateLike | null; private attemptStartTime: DateLike | null; private zone: string | undefined; @@ -108,6 +127,7 @@ export class OperationMetricsCollector { projectId?: string, dateProvider?: DateProvider ) { + this.state = MetricsCollectorState.OPERATION_NOT_STARTED; this.zone = undefined; this.cluster = undefined; this.tabularApiSurface = tabularApiSurface; @@ -191,7 +211,13 @@ export class OperationMetricsCollector { * Called when the operation starts. Records the start time. */ onOperationStart() { - this.operationStartTime = this.dateProvider.getDate(); + if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { + this.operationStartTime = this.dateProvider.getDate(); + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + } else { + console.warn('Invalid state transition'); + } } /** @@ -199,25 +225,33 @@ export class OperationMetricsCollector { * @param {AttemptOnlyAttributes} info Information about the completed attempt. */ onAttemptComplete(info: OnAttemptCompleteInfo) { - this.attemptCount++; - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes(projectId, info); - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? undefined, - connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency ?? 
undefined, - }, - attributes - ); - } - }); + if ( + this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; + this.attemptCount++; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.attemptStartTime) { + const attributes = this.getAttemptAttributes(projectId, info); + const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: info.connectivityErrorCount, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }, + attributes + ); + } + }); + } + } else { + console.warn('Invalid state transition attempted'); } } @@ -225,11 +259,19 @@ export class OperationMetricsCollector { * Called when a new attempt starts. Records the start time of the attempt. */ onAttemptStart() { - this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = null; - this.serverTimeRead = false; - this.firstResponseLatency = null; - this.receivedFirstResponse = false; + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS; + this.attemptStartTime = this.dateProvider.getDate(); + this.serverTime = null; + this.serverTimeRead = false; + this.firstResponseLatency = null; + this.receivedFirstResponse = false; + } else { + console.warn('Invalid state transition attempted'); + } } /** @@ -254,29 +296,37 @@ export class OperationMetricsCollector { * @param {OperationOnlyAttributes} info Information about the completed operation. */ onOperationComplete(info: OperationOnlyAttributes) { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationAttributes( - projectId, - info - ); - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - }; - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, - operationLatencyAttributes - ); - } - }); + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS + ) { + this.state = MetricsCollectorState.OPERATION_COMPLETE; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); + { + // This block records operation latency metrics. 
+ const operationLatencyAttributes = this.getOperationAttributes( + projectId, + info + ); + const metrics = { + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + }; + this.metricsHandlers.forEach(metricsHandler => { + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, + operationLatencyAttributes + ); + } + }); + } } + } else { + console.warn('Invalid state transition attempted'); } } From d2b93ee8260efa1b1dc2b9d9d26be10f44ab566a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 11:53:19 -0500 Subject: [PATCH 121/289] Use grpc code to report attempt/operation status --- common/client-side-metrics-attributes.ts | 12 ++++-------- test/metrics-collector/metrics-collector.ts | 11 ++++++----- test/metrics-collector/typical-method-call.txt | 6 +++--- 3 files changed, 13 insertions(+), 16 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 8cbfdcfec..7da3d31b8 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +import {grpc} from 'google-gax'; + interface StandardAttributes { projectId: string; instanceId: string; @@ -52,15 +54,9 @@ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; } -export enum FinalOperationStatus { - OK = 'OK', - ERROR = 'ERROR', -} +export type FinalOperationStatus = grpc.status; -export enum AttemptStatus { - OK = 'OK', - ERROR = 'ERROR', -} +export type AttemptStatus = grpc.status; export interface OnOperationCompleteAttributes extends StandardAttributes, diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 9009f477b..24ffa2645 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -23,6 +23,7 @@ import { FinalOperationStatus, MethodName, } from '../../common/client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * A basic logger class that stores log messages in an array. Useful for testing. @@ -134,9 +135,9 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: FinalOperationStatus.ERROR, + finalOperationStatus: grpc.status.DEADLINE_EXCEEDED, streamingOperation: true, - attemptStatus: AttemptStatus.ERROR, + attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); logger.log('9. After a timeout, the second attempt is made.'); @@ -154,13 +155,13 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('16. 
Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: FinalOperationStatus.ERROR, - attemptStatus: AttemptStatus.OK, + finalOperationStatus: grpc.status.OK, + attemptStatus: grpc.status.OK, streamingOperation: true, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ - finalOperationStatus: FinalOperationStatus.OK, + finalOperationStatus: grpc.status.OK, streamingOperation: true, }); resolve(); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index b51e98331..abf8de579 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","streamingOperation":true,"attemptStatus":"ERROR","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. @@ -28,8 +28,8 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} -attributes: {"finalOperationStatus":"ERROR","attemptStatus":"OK","streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1} -attributes: {"finalOperationStatus":"OK","streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 99f95778ddf2dc5a5875c640a5bd6a448a1f9955 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 29 Jan 2025 13:13:55 -0500 Subject: [PATCH 122/289] Remove parameters from JS Documentation --- src/client-side-metrics/operation-metrics-collector.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9e63e3d3a..413d3b087 100644 --- 
a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -276,7 +276,6 @@ export class OperationMetricsCollector { /** * Called when the first response is received. Records first response latencies. - * @param {string} finalOperationStatus The final status of the operation. */ onResponse() { const endTime = this.dateProvider.getDate(); @@ -332,7 +331,6 @@ export class OperationMetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. - * @param {AttemptOnlyAttributes} info Information about the completed attempt. * @param {object} metadata The received metadata. */ onMetadataReceived(metadata: { From c82e72dc1493b9df8e357491ed61202224bb9618 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 11:47:45 -0500 Subject: [PATCH 123/289] Update interfaces and some metrics - Introduce attributes interface for each metric. - First latency should be recorded per operation not per attempt - Update tests for firstResponseLatency change --- common/client-side-metrics-attributes.ts | 73 ++++++++++++++----- .../gcp-metrics-handler.ts | 8 +- src/client-side-metrics/metrics-handler.ts | 2 +- .../operation-metrics-collector.ts | 8 +- .../metrics-collector/typical-method-call.txt | 6 +- 5 files changed, 65 insertions(+), 32 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 7da3d31b8..86a70ef68 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -25,6 +25,42 @@ interface StandardAttributes { clientName: string; } +interface OperationLatencyAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; + StreamingOperation: boolean; +} + +interface AttemptLatencyAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + +interface RetryCountAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; +} + +type ApplicationBlockingLatenciesAttributes = StandardAttributes; + +interface FirstResponseLatencyAttributes extends StandardAttributes { + finalOperationStatus: FinalOperationStatus; +} + +interface ServerLatenciesAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + +interface ConnectivityErrorCountAttributes extends StandardAttributes { + attemptStatus: AttemptStatus; +} + +type ClientBlockingLatenciesAttributes = StandardAttributes; + +export interface AttemptOnlyAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + /** * Information about a Bigtable operation. */ @@ -36,10 +72,23 @@ export interface OperationOnlyAttributes { streamingOperation: boolean; } -/** - * Information about a single attempt of a Bigtable operation. - */ -export interface AttemptOnlyAttributes { +export type FinalOperationStatus = grpc.status; + +export type AttemptStatus = grpc.status; + +export type OnOperationCompleteAttributes = + | OperationLatencyAttributes + | FirstResponseLatencyAttributes + | RetryCountAttributes; + +export type OnAttemptCompleteAttributes = + | AttemptLatencyAttributes + | ConnectivityErrorCountAttributes + | ServerLatenciesAttributes + | ClientBlockingLatenciesAttributes; + +export interface OnAttemptCompleteInfo { + connectivityErrorCount: number; /** * The final status of the operation (e.g., 'OK', 'ERROR'). 
*/ @@ -54,22 +103,6 @@ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; } -export type FinalOperationStatus = grpc.status; - -export type AttemptStatus = grpc.status; - -export interface OnOperationCompleteAttributes - extends StandardAttributes, - OperationOnlyAttributes {} - -export interface OnAttemptCompleteAttributes - extends StandardAttributes, - AttemptOnlyAttributes {} - -export interface OnAttemptCompleteInfo extends AttemptOnlyAttributes { - connectivityErrorCount: number; -} - export enum MethodName { READ_ROWS = 'readRows', MUTATE_ROW = 'mutateRow', diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d0163b3d9..e1a462bb9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -177,6 +177,10 @@ export class GCPMetricsHandler implements IMetricsHandler { attributes ); this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); } /** @@ -200,9 +204,5 @@ export class GCPMetricsHandler implements IMetricsHandler { attributes ); this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index acc2b88af..051b65394 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -27,6 +27,7 @@ import { * Metrics related to the completion of a Bigtable operation. */ export interface OnOperationCompleteMetrics { + firstResponseLatency?: number; operationLatency: number; retryCount?: number; } @@ -37,7 +38,6 @@ export interface OnOperationCompleteMetrics { export interface OnAttemptCompleteMetrics { attemptLatency: number; serverLatency?: number; - firstResponseLatency?: number; connectivityErrorCount: number; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 413d3b087..9a1b10568 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -213,6 +213,8 @@ export class OperationMetricsCollector { onOperationStart() { if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { this.operationStartTime = this.dateProvider.getDate(); + this.firstResponseLatency = null; + this.receivedFirstResponse = false; this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; } else { @@ -222,7 +224,7 @@ export class OperationMetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {AttemptOnlyAttributes} info Information about the completed attempt. + * @param {OnAttemptCompleteInfo} info Information about the completed attempt. */ onAttemptComplete(info: OnAttemptCompleteInfo) { if ( @@ -243,7 +245,6 @@ export class OperationMetricsCollector { attemptLatency: totalTime, serverLatency: this.serverTime ?? undefined, connectivityErrorCount: info.connectivityErrorCount, - firstResponseLatency: this.firstResponseLatency ?? 
undefined, }, attributes ); @@ -267,8 +268,6 @@ export class OperationMetricsCollector { this.attemptStartTime = this.dateProvider.getDate(); this.serverTime = null; this.serverTimeRead = false; - this.firstResponseLatency = null; - this.receivedFirstResponse = false; } else { console.warn('Invalid state transition attempted'); } @@ -313,6 +312,7 @@ export class OperationMetricsCollector { const metrics = { operationLatency: totalTime, retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index abf8de579..d8f9a142a 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 4000 ms 8. A transient error occurs. getDate call returns 5000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1,"firstResponseLatency":1000} +metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms @@ -27,9 +27,9 @@ getDate call returns 8000 ms 16. Stream ends, operation completes getDate call returns 9000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1,"firstResponseLatency":1000} +metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"retryCount":1} +metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":1000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 759e8292960e3beb01239643c377dde47c7f6775 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 12:06:46 -0500 Subject: [PATCH 124/289] Documentation for all the different interfaces --- common/client-side-metrics-attributes.ts | 72 ++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 86a70ef68..84cdf9c74 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -14,6 +14,10 @@ import {grpc} from 'google-gax'; +/** + * Standard attributes common to various Bigtable client-side metrics. These attributes provide + * contextual information about the Bigtable environment and operation. 
+ */ interface StandardAttributes { projectId: string; instanceId: string; @@ -25,37 +29,73 @@ interface StandardAttributes { clientName: string; } +/** + * Attributes associated with operation latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the completed operation. + */ interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; StreamingOperation: boolean; } +/** + * Attributes associated with attempt latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. + */ interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Attributes associated with retry count metrics for Bigtable client operations. These attributes + * provide context about the Bigtable environment and the final status of the operation. + */ interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } +/** + * Attributes associated with application blocking latencies for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the operation being performed. + */ type ApplicationBlockingLatenciesAttributes = StandardAttributes; +/** + * Attributes associated with first response latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the final status of the operation. + */ interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } +/** + * Attributes associated with server latency metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. + */ interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Attributes associated with connectivity error count metrics for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the status of the attempt. + */ interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } +/** + * Attributes associated with client blocking latencies for Bigtable client operations. + * These attributes provide context about the Bigtable environment and the operation being performed. + */ type ClientBlockingLatenciesAttributes = StandardAttributes; +/** + * Attributes specific to a single attempt of a Bigtable operation. These attributes + * provide information about the attempt's status and whether it was part of a streaming operation. + */ export interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; @@ -72,21 +112,49 @@ export interface OperationOnlyAttributes { streamingOperation: boolean; } +/** + * The final status of a Bigtable operation. This represents the ultimate result + * of the operation, regardless of individual attempt statuses. It's represented + * as a gRPC status code. See the `google-gax` library's documentation on + * gRPC status codes for more information on specific values. + */ export type FinalOperationStatus = grpc.status; +/** + * The status of a single attempt of a Bigtable operation. This is represented as a + * gRPC status code. 
See the `google-gax` library's documentation on gRPC status + * codes for more information on specific values. + */ export type AttemptStatus = grpc.status; +/** + * Attributes associated with the completion of a Bigtable operation. These + * attributes provide context about the Bigtable environment, the completed + * operation, and its final status. They are used for recording metrics such as + * operation latency, first response latency, and retry count. + */ export type OnOperationCompleteAttributes = | OperationLatencyAttributes | FirstResponseLatencyAttributes | RetryCountAttributes; +/** + * Attributes associated with the completion of a single attempt of a Bigtable + * operation. These attributes provide context about the Bigtable environment, + * the specific attempt, its status, and whether the operation was streaming. They + * are used for recording metrics such as attempt latency, server latency, and + * connectivity errors. + */ export type OnAttemptCompleteAttributes = | AttemptLatencyAttributes | ConnectivityErrorCountAttributes | ServerLatenciesAttributes | ClientBlockingLatenciesAttributes; +/** + * Information about the completion of a single attempt of a Bigtable operation. + * This information is used for recording metrics. + */ export interface OnAttemptCompleteInfo { connectivityErrorCount: number; /** @@ -103,6 +171,10 @@ export interface OnAttemptCompleteInfo { attemptStatus: AttemptStatus; } +/** + * Represents the names of Bigtable methods. These are used as attributes for + * metrics, allowing for differentiation of performance by method. + */ export enum MethodName { READ_ROWS = 'readRows', MUTATE_ROW = 'mutateRow', From 76b6f5af37049c17cdbdd83cc3a3c7a98e2b81ae Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 13:23:55 -0500 Subject: [PATCH 125/289] use operation start time as the benchmark --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- test/metrics-collector/typical-method-call.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 9a1b10568..cc9d10d3f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -279,8 +279,8 @@ export class OperationMetricsCollector { onResponse() { const endTime = this.dateProvider.getDate(); const projectId = this.projectId; - if (projectId && this.attemptStartTime) { - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); + if (projectId && this.operationStartTime) { + const totalTime = endTime.getTime() - this.operationStartTime.getTime(); if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; this.firstResponseLatency = totalTime; diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index d8f9a142a..f261fcf55 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -31,5 +31,5 @@ metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: -metrics: 
{"operationLatency":9000,"retryCount":1,"firstResponseLatency":1000} +metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 1e840a4ae5fbd08cd514fdef0f6bbd125dc96afa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 30 Jan 2025 14:04:06 -0500 Subject: [PATCH 126/289] =?UTF-8?q?Final=20operation=20status=20shouldn?= =?UTF-8?q?=E2=80=99t=20be=20included=20per=20a?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- common/client-side-metrics-attributes.ts | 4 ---- test/metrics-collector/metrics-collector.ts | 2 -- test/metrics-collector/typical-method-call.txt | 4 ++-- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 84cdf9c74..d0cd2022c 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -157,10 +157,6 @@ export type OnAttemptCompleteAttributes = */ export interface OnAttemptCompleteInfo { connectivityErrorCount: number; - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: FinalOperationStatus; /** * Whether the operation is a streaming operation or not. */ diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 24ffa2645..342648163 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -135,7 +135,6 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - finalOperationStatus: grpc.status.DEADLINE_EXCEEDED, streamingOperation: true, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, @@ -155,7 +154,6 @@ describe('Bigtable/MetricsCollector', () => { logger.log('15. User reads row 1'); logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ - finalOperationStatus: grpc.status.OK, attemptStatus: grpc.status.OK, streamingOperation: true, connectivityErrorCount: 1, diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index f261fcf55..4014b0125 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -13,7 +13,7 @@ getDate call returns 4000 ms getDate call returns 5000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":4,"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 6000 ms 10. Client receives status information. 
@@ -28,7 +28,7 @@ getDate call returns 8000 ms getDate call returns 9000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"finalOperationStatus":0,"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 10000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} From 7bf62e91a171737ff9b048766624eec7125bfb7b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:20:04 -0500 Subject: [PATCH 127/289] Move OnAttemptCompleteInfo Remove unused imports too --- common/client-side-metrics-attributes.ts | 16 ---------------- .../operation-metrics-collector.ts | 18 +++++++++++++++++- test/metrics-collector/metrics-collector.ts | 6 +----- 3 files changed, 18 insertions(+), 22 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d0cd2022c..d8b5acacf 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -151,22 +151,6 @@ export type OnAttemptCompleteAttributes = | ServerLatenciesAttributes | ClientBlockingLatenciesAttributes; -/** - * Information about the completion of a single attempt of a Bigtable operation. - * This information is used for recording metrics. - */ -export interface OnAttemptCompleteInfo { - connectivityErrorCount: number; - /** - * Whether the operation is a streaming operation or not. - */ - streamingOperation: boolean; - /** - * The attempt status of the operation. - */ - attemptStatus: AttemptStatus; -} - /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index cc9d10d3f..0b5a2c610 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -16,8 +16,8 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptOnlyAttributes, + AttemptStatus, MethodName, - OnAttemptCompleteInfo, OnOperationCompleteAttributes, OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; @@ -72,6 +72,22 @@ export interface ITabularApiSurface { }; } +/** + * Information about the completion of a single attempt of a Bigtable operation. + * This information is used for recording metrics. + */ +interface OnAttemptCompleteInfo { + connectivityErrorCount: number; + /** + * Whether the operation is a streaming operation or not. + */ + streamingOperation: boolean; + /** + * The attempt status of the operation. 
+ */ + attemptStatus: AttemptStatus; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 342648163..0d7901698 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,11 +18,7 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import { - AttemptStatus, - FinalOperationStatus, - MethodName, -} from '../../common/client-side-metrics-attributes'; +import {MethodName} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From fca55b7b95d0ef30bb5721fca42fb27cc871bd3b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:37:48 -0500 Subject: [PATCH 128/289] Provide AttemptOnlyAttributes in the only file In the only file that it is needed --- common/client-side-metrics-attributes.ts | 9 --------- src/client-side-metrics/operation-metrics-collector.ts | 10 +++++++++- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index d8b5acacf..4b5be0e18 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -92,15 +92,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * Attributes specific to a single attempt of a Bigtable operation. These attributes - * provide information about the attempt's status and whether it was part of a streaming operation. - */ -export interface AttemptOnlyAttributes { - attemptStatus: AttemptStatus; - streamingOperation: boolean; -} - /** * Information about a Bigtable operation. */ diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0b5a2c610..119e38ac1 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -15,7 +15,6 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { - AttemptOnlyAttributes, AttemptStatus, MethodName, OnOperationCompleteAttributes, @@ -88,6 +87,15 @@ interface OnAttemptCompleteInfo { attemptStatus: AttemptStatus; } +/** + * Attributes specific to a single attempt of a Bigtable operation. These attributes + * provide information about the attempt's status and whether it was part of a streaming operation. 
+ */ +interface AttemptOnlyAttributes { + attemptStatus: AttemptStatus; + streamingOperation: boolean; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; From 51afdce11ef0ce53753af0e7a86755bf124c2c3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 11:45:50 -0500 Subject: [PATCH 129/289] Move over the OperationOnlyAttributes --- common/client-side-metrics-attributes.ts | 11 ----------- .../operation-metrics-collector.ts | 15 ++++++++++++--- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 4b5be0e18..64772019b 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -92,17 +92,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * Information about a Bigtable operation. - */ -export interface OperationOnlyAttributes { - /** - * The final status of the operation (e.g., 'OK', 'ERROR'). - */ - finalOperationStatus: FinalOperationStatus; - streamingOperation: boolean; -} - /** * The final status of a Bigtable operation. This represents the ultimate result * of the operation, regardless of individual attempt statuses. It's represented diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 119e38ac1..0583d6c3f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -16,9 +16,9 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { AttemptStatus, + FinalOperationStatus, MethodName, OnOperationCompleteAttributes, - OperationOnlyAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -88,14 +88,23 @@ interface OnAttemptCompleteInfo { } /** - * Attributes specific to a single attempt of a Bigtable operation. These attributes - * provide information about the attempt's status and whether it was part of a streaming operation. + * Attributes specific to a single attempt of a Bigtable operation. These + * attributes provide information about the attempt's status and whether it was + * part of a streaming operation. */ interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; streamingOperation: boolean; } +/** + * Information about a Bigtable operation to be recorded in client side metrics. + */ +interface OperationOnlyAttributes { + finalOperationStatus: FinalOperationStatus; + streamingOperation: boolean; +} + const packageJSON = fs.readFileSync('package.json'); const version = JSON.parse(packageJSON.toString()).version; From 57b1dc17947a3714218a0f561103ea70b5d5635f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 13:47:33 -0500 Subject: [PATCH 130/289] Adjust the guard so that it is earlier --- .../operation-metrics-collector.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 0583d6c3f..ddefa0564 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -310,13 +310,13 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. 
*/ onResponse() { - const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - this.firstResponseLatency = totalTime; + if (!this.receivedFirstResponse) { + this.receivedFirstResponse = true; + const endTime = this.dateProvider.getDate(); + const projectId = this.projectId; + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); } } } From 0f850b79d6a827424e1978edf1c7df16c424227f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 14:11:03 -0500 Subject: [PATCH 131/289] Adjust the test output file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dates don’t get read for rows after the first row anymore. --- test/metrics-collector/typical-method-call.txt | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 4014b0125..d08628efa 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -8,28 +8,25 @@ getDate call returns 2000 ms getDate call returns 3000 ms 6. Client receives metadata. 7. Client receives second row. -getDate call returns 4000 ms 8. A transient error occurs. -getDate call returns 5000 ms +getDate call returns 4000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":101,"connectivityErrorCount":1} +metrics: {"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":1} attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. -getDate call returns 6000 ms +getDate call returns 5000 ms 10. Client receives status information. 11. Client receives metadata. 12. Client receives third row. -getDate call returns 7000 ms 13. Client receives metadata. 14. Client receives fourth row. -getDate call returns 8000 ms 15. User reads row 1 16. 
Stream ends, operation completes -getDate call returns 9000 ms +getDate call returns 6000 ms Recording parameters for onAttemptComplete: -metrics: {"attemptLatency":3000,"serverLatency":103,"connectivityErrorCount":1} +metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} -getDate call returns 10000 ms +getDate call returns 7000 ms Recording parameters for onOperationComplete: -metrics: {"operationLatency":9000,"retryCount":1,"firstResponseLatency":2000} +metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 6c1e01b0b9fb1f590c1945cf0ec64192679b9651 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 14:37:14 -0500 Subject: [PATCH 132/289] Change streaming back to STREAMING/UNARY --- common/client-side-metrics-attributes.ts | 11 ++++++++--- .../operation-metrics-collector.ts | 7 ++++--- test/metrics-collector/metrics-collector.ts | 8 ++++---- test/metrics-collector/typical-method-call.txt | 6 +++--- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 64772019b..5f2adf7e2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -29,13 +29,18 @@ interface StandardAttributes { clientName: string; } +export enum StreamingState { + STREAMING = 'streaming', + UNARY = 'unary', +} + /** * Attributes associated with operation latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the completed operation. */ interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; - StreamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -44,7 +49,7 @@ interface OperationLatencyAttributes extends StandardAttributes { */ interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -75,7 +80,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { */ interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index ddefa0564..e120e5cf9 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -19,6 +19,7 @@ import { FinalOperationStatus, MethodName, OnOperationCompleteAttributes, + StreamingState, } from '../../common/client-side-metrics-attributes'; /** @@ -80,7 +81,7 @@ interface OnAttemptCompleteInfo { /** * Whether the operation is a streaming operation or not. */ - streamingOperation: boolean; + streamingOperation: StreamingState; /** * The attempt status of the operation. 
*/ @@ -94,7 +95,7 @@ interface OnAttemptCompleteInfo { */ interface AttemptOnlyAttributes { attemptStatus: AttemptStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } /** @@ -102,7 +103,7 @@ interface AttemptOnlyAttributes { */ interface OperationOnlyAttributes { finalOperationStatus: FinalOperationStatus; - streamingOperation: boolean; + streamingOperation: StreamingState; } const packageJSON = fs.readFileSync('package.json'); diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 0d7901698..7ba8895ea 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,7 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {MethodName} from '../../common/client-side-metrics-attributes'; +import {MethodName, StreamingState} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** @@ -131,7 +131,7 @@ describe('Bigtable/MetricsCollector', () => { metricsCollector.onResponse(); logger.log('8. A transient error occurs.'); metricsCollector.onAttemptComplete({ - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); @@ -151,12 +151,12 @@ describe('Bigtable/MetricsCollector', () => { logger.log('16. Stream ends, operation completes'); metricsCollector.onAttemptComplete({ attemptStatus: grpc.status.OK, - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, connectivityErrorCount: 1, }); metricsCollector.onOperationComplete({ finalOperationStatus: grpc.status.OK, - streamingOperation: true, + streamingOperation: StreamingState.STREAMING, }); resolve(); }); diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index d08628efa..921270cf4 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -12,7 +12,7 @@ getDate call returns 3000 ms getDate call returns 4000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":1} -attributes: {"streamingOperation":true,"attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":"streaming","attemptStatus":4,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. 
@@ -25,8 +25,8 @@ getDate call returns 5000 ms getDate call returns 6000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"attemptStatus":0,"streamingOperation":true,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"attemptStatus":0,"streamingOperation":"streaming","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 7000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} -attributes: {"finalOperationStatus":0,"streamingOperation":true,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"finalOperationStatus":0,"streamingOperation":"streaming","projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} From 2910408d4012859d65e4788d257f34ff75c60268 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 31 Jan 2025 19:40:51 +0000 Subject: [PATCH 133/289] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= =?UTF-8?q?=20post-processor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- test/metrics-collector/metrics-collector.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 7ba8895ea..7983d99bb 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -18,7 +18,10 @@ import * as assert from 'assert'; import * as fs from 'fs'; import {TestMetricsHandler} from '../../common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import {MethodName, StreamingState} from '../../common/client-side-metrics-attributes'; +import { + MethodName, + StreamingState, +} from '../../common/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From 2781561f3791868855e0daf76d90d4f80736beca Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 16:21:23 -0500 Subject: [PATCH 134/289] Change metrics handler interface to support each metric --- common/client-side-metrics-attributes.ts | 28 +++--- common/test-metrics-handler.ts | 79 +++++++++++------ .../gcp-metrics-handler.ts | 86 ++++++++++--------- src/client-side-metrics/metrics-handler.ts | 51 +++++++---- .../operation-metrics-collector.ts | 42 +++++---- 5 files changed, 170 insertions(+), 116 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index 5f2adf7e2..cb7430906 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -38,7 +38,7 @@ export enum StreamingState { * Attributes associated with operation latency metrics for Bigtable client operations. 
* These attributes provide context about the Bigtable environment and the completed operation. */ -interface OperationLatencyAttributes extends StandardAttributes { +export interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; streamingOperation: StreamingState; } @@ -47,7 +47,7 @@ interface OperationLatencyAttributes extends StandardAttributes { * Attributes associated with attempt latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -interface AttemptLatencyAttributes extends StandardAttributes { +export interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -56,7 +56,7 @@ interface AttemptLatencyAttributes extends StandardAttributes { * Attributes associated with retry count metrics for Bigtable client operations. These attributes * provide context about the Bigtable environment and the final status of the operation. */ -interface RetryCountAttributes extends StandardAttributes { +export interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -70,7 +70,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * Attributes associated with first response latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the final status of the operation. */ -interface FirstResponseLatencyAttributes extends StandardAttributes { +export interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -78,7 +78,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { * Attributes associated with server latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -interface ServerLatenciesAttributes extends StandardAttributes { +export interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -87,7 +87,7 @@ interface ServerLatenciesAttributes extends StandardAttributes { * Attributes associated with connectivity error count metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the status of the attempt. */ -interface ConnectivityErrorCountAttributes extends StandardAttributes { +export interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } @@ -118,10 +118,9 @@ export type AttemptStatus = grpc.status; * operation, and its final status. They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = - | OperationLatencyAttributes - | FirstResponseLatencyAttributes - | RetryCountAttributes; +export type OnOperationCompleteAttributes = OperationLatencyAttributes & + FirstResponseLatencyAttributes & + RetryCountAttributes; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -130,11 +129,10 @@ export type OnOperationCompleteAttributes = * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. 
*/ -export type OnAttemptCompleteAttributes = - | AttemptLatencyAttributes - | ConnectivityErrorCountAttributes - | ServerLatenciesAttributes - | ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = AttemptLatencyAttributes & + ConnectivityErrorCountAttributes & + ServerLatenciesAttributes & + ClientBlockingLatenciesAttributes; /** * Represents the names of Bigtable methods. These are used as attributes for diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 9ffabd7a0..07c8c1661 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -14,12 +14,12 @@ import {WithLogger} from './logger'; import { - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from '../src/client-side-metrics/metrics-handler'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from './client-side-metrics-attributes'; /** @@ -27,33 +27,56 @@ import { * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ export class TestMetricsHandler extends WithLogger { - /** - * Logs the metrics and attributes received for an operation completion. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency( + attemptLatency: number, + attributes: AttemptLatencyAttributes + ) { + this.logger.log( + `Recording parameters for AttemptLatency: ${attemptLatency}:` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordConnectivityErrorCount( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes + ) { + this.logger.log( + `Recording parameters for ConnectivityErrorCount: ${connectivityErrorCount}:` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordServerLatency( + serverLatency: number, + attributes: ServerLatenciesAttributes + ) { + this.logger.log(`Recording parameters for ServerLatency: ${serverLatency}`); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordOperationLatency( + operationLatency: number, + attributes: OperationLatencyAttributes ) { - attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onOperationComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log( + `Recording parameters for OperationLatency: ${operationLatency}` + ); + this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + } + + onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { + this.logger.log(`Recording parameters for RetryCount: ${retryCount}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } - /** - * Logs the metrics and attributes received for an attempt completion. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + onRecordFirstResponseLatency( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes ) { - attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onAttemptComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); + this.logger.log( + `Recording parameters for FirstResponseLatency: ${firstResponseLatency}` + ); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e1a462bb9..1192e221d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -import { - IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from './metrics-handler'; +import {IMetricsHandler} from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from '../../common/client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { @@ -161,48 +161,54 @@ export class GCPMetricsHandler implements IMetricsHandler { } } - /** - * Records metrics for a completed Bigtable operation. - * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency( + attemptLatency: number, + attributes: AttemptLatencyAttributes ) { this.initialize(); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); + this.otelMetrics?.attemptLatencies.record(attemptLatency, attributes); } - /** - * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, - * along with the provided attributes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + onRecordConnectivityErrorCount( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes ) { this.initialize(); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, + this.otelMetrics?.connectivityErrorCount.record( + connectivityErrorCount, attributes ); - this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, + } + + onRecordServerLatency( + serverLatency: number, + attributes: ServerLatenciesAttributes + ) { + this.initialize(); + this.otelMetrics?.serverLatencies.record(serverLatency, attributes); + } + + onRecordOperationLatency( + operationLatency: number, + attributes: OperationLatencyAttributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record(operationLatency, attributes); + } + + onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { + this.initialize(); + this.otelMetrics?.retryCount.add(retryCount, attributes); + } + + onRecordFirstResponseLatency( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes + ) { + this.initialize(); + this.otelMetrics?.firstResponseLatencies.record( + firstResponseLatency, attributes ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 051b65394..fa3cfda72 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -13,8 +13,12 @@ // limitations under the License. import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, + AttemptLatencyAttributes, + ConnectivityErrorCountAttributes, + FirstResponseLatencyAttributes, + OperationLatencyAttributes, + RetryCountAttributes, + ServerLatenciesAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -46,22 +50,33 @@ export interface OnAttemptCompleteMetrics { * Implementations of this interface can define how metrics are recorded and processed. */ export interface IMetricsHandler { - /** - * Called when an operation completes (successfully or unsuccessfully). - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete?( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes + onRecordAttemptLatency?( + attemptLatency: number, + attributes: AttemptLatencyAttributes ): void; - /** - * Called when an attempt (e.g., an RPC attempt) completes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete?( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes + + onRecordConnectivityErrorCount?( + connectivityErrorCount: number, + attributes: ConnectivityErrorCountAttributes + ): void; + + onRecordServerLatency?( + serverLatency: number, + attributes: ServerLatenciesAttributes + ): void; + + onRecordOperationLatency?( + operationLatency: number, + attributes: OperationLatencyAttributes + ): void; + + onRecordRetryCount?( + retryCount: number, + attributes: RetryCountAttributes + ): void; + + onRecordFirstResponseLatency?( + firstResponseLatency: number, + attributes: FirstResponseLatencyAttributes ): void; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e120e5cf9..59c859e35 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -273,13 +273,15 @@ export class OperationMetricsCollector { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? undefined, - connectivityErrorCount: info.connectivityErrorCount, - }, + if (metricsHandler.onRecordAttemptLatency) { + metricsHandler.onRecordAttemptLatency(totalTime, attributes); + } + if (metricsHandler.onRecordServerLatency && this.serverTime) { + metricsHandler.onRecordServerLatency(this.serverTime, attributes); + } + if (metricsHandler.onRecordConnectivityErrorCount) { + metricsHandler.onRecordConnectivityErrorCount( + info.connectivityErrorCount, attributes ); } @@ -343,15 +345,25 @@ export class OperationMetricsCollector { projectId, info ); - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }; this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, + if (metricsHandler.onRecordOperationLatency) { + metricsHandler.onRecordOperationLatency( + totalTime, + operationLatencyAttributes + ); + } + if (metricsHandler.onRecordRetryCount) { + metricsHandler.onRecordRetryCount( + this.attemptCount - 1, + operationLatencyAttributes + ); + } + if ( + metricsHandler.onRecordFirstResponseLatency && + this.firstResponseLatency + ) { + metricsHandler.onRecordFirstResponseLatency( + this.firstResponseLatency ?? undefined, operationLatencyAttributes ); } From 0b4d93edaaa6298b4fa3797867d897b71baadc92 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 31 Jan 2025 16:22:36 -0500 Subject: [PATCH 135/289] Revert "Change metrics handler interface to support each metric" This reverts commit 2781561f3791868855e0daf76d90d4f80736beca. 
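
For context, the aggregate callback shape restored by this revert is consumed by implementing at most the two optional methods on IMetricsHandler. A minimal sketch of such a handler follows; the class name and console logging are illustrative only, and the import paths assume the repo layout at this point in the series (attribute types still under common/):

    import {
      IMetricsHandler,
      OnAttemptCompleteMetrics,
      OnOperationCompleteMetrics,
    } from '../src/client-side-metrics/metrics-handler';
    import {
      OnAttemptCompleteAttributes,
      OnOperationCompleteAttributes,
    } from '../common/client-side-metrics-attributes';

    // Minimal sketch: log each bundle of metrics together with its attributes.
    export class ConsoleMetricsHandler implements IMetricsHandler {
      onOperationComplete(
        metrics: OnOperationCompleteMetrics,
        attributes: OnOperationCompleteAttributes
      ): void {
        console.log('operation complete', metrics, attributes);
      }

      onAttemptComplete(
        metrics: OnAttemptCompleteMetrics,
        attributes: OnAttemptCompleteAttributes
      ): void {
        console.log('attempt complete', metrics, attributes);
      }
    }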
--- common/client-side-metrics-attributes.ts | 28 +++--- common/test-metrics-handler.ts | 79 ++++++----------- .../gcp-metrics-handler.ts | 86 +++++++++---------- src/client-side-metrics/metrics-handler.ts | 51 ++++------- .../operation-metrics-collector.ts | 42 ++++----- 5 files changed, 116 insertions(+), 170 deletions(-) diff --git a/common/client-side-metrics-attributes.ts b/common/client-side-metrics-attributes.ts index cb7430906..5f2adf7e2 100644 --- a/common/client-side-metrics-attributes.ts +++ b/common/client-side-metrics-attributes.ts @@ -38,7 +38,7 @@ export enum StreamingState { * Attributes associated with operation latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the completed operation. */ -export interface OperationLatencyAttributes extends StandardAttributes { +interface OperationLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; streamingOperation: StreamingState; } @@ -47,7 +47,7 @@ export interface OperationLatencyAttributes extends StandardAttributes { * Attributes associated with attempt latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -export interface AttemptLatencyAttributes extends StandardAttributes { +interface AttemptLatencyAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -56,7 +56,7 @@ export interface AttemptLatencyAttributes extends StandardAttributes { * Attributes associated with retry count metrics for Bigtable client operations. These attributes * provide context about the Bigtable environment and the final status of the operation. */ -export interface RetryCountAttributes extends StandardAttributes { +interface RetryCountAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -70,7 +70,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * Attributes associated with first response latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the final status of the operation. */ -export interface FirstResponseLatencyAttributes extends StandardAttributes { +interface FirstResponseLatencyAttributes extends StandardAttributes { finalOperationStatus: FinalOperationStatus; } @@ -78,7 +78,7 @@ export interface FirstResponseLatencyAttributes extends StandardAttributes { * Attributes associated with server latency metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ -export interface ServerLatenciesAttributes extends StandardAttributes { +interface ServerLatenciesAttributes extends StandardAttributes { attemptStatus: AttemptStatus; streamingOperation: StreamingState; } @@ -87,7 +87,7 @@ export interface ServerLatenciesAttributes extends StandardAttributes { * Attributes associated with connectivity error count metrics for Bigtable client operations. * These attributes provide context about the Bigtable environment and the status of the attempt. */ -export interface ConnectivityErrorCountAttributes extends StandardAttributes { +interface ConnectivityErrorCountAttributes extends StandardAttributes { attemptStatus: AttemptStatus; } @@ -118,9 +118,10 @@ export type AttemptStatus = grpc.status; * operation, and its final status. 
They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = OperationLatencyAttributes & - FirstResponseLatencyAttributes & - RetryCountAttributes; +export type OnOperationCompleteAttributes = + | OperationLatencyAttributes + | FirstResponseLatencyAttributes + | RetryCountAttributes; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -129,10 +130,11 @@ export type OnOperationCompleteAttributes = OperationLatencyAttributes & * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. */ -export type OnAttemptCompleteAttributes = AttemptLatencyAttributes & - ConnectivityErrorCountAttributes & - ServerLatenciesAttributes & - ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = + | AttemptLatencyAttributes + | ConnectivityErrorCountAttributes + | ServerLatenciesAttributes + | ClientBlockingLatenciesAttributes; /** * Represents the names of Bigtable methods. These are used as attributes for diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 07c8c1661..9ffabd7a0 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -14,12 +14,12 @@ import {WithLogger} from './logger'; import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, +} from '../src/client-side-metrics/metrics-handler'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from './client-side-metrics-attributes'; /** @@ -27,56 +27,33 @@ import { * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ export class TestMetricsHandler extends WithLogger { - onRecordAttemptLatency( - attemptLatency: number, - attributes: AttemptLatencyAttributes - ) { - this.logger.log( - `Recording parameters for AttemptLatency: ${attemptLatency}:` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordConnectivityErrorCount( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes - ) { - this.logger.log( - `Recording parameters for ConnectivityErrorCount: ${connectivityErrorCount}:` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordServerLatency( - serverLatency: number, - attributes: ServerLatenciesAttributes - ) { - this.logger.log(`Recording parameters for ServerLatency: ${serverLatency}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordOperationLatency( - operationLatency: number, - attributes: OperationLatencyAttributes + /** + * Logs the metrics and attributes received for an operation completion. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {Attributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { - this.logger.log( - `Recording parameters for OperationLatency: ${operationLatency}` - ); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); - } - - onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { - this.logger.log(`Recording parameters for RetryCount: ${retryCount}`); + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onOperationComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } - onRecordFirstResponseLatency( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Logs the metrics and attributes received for an attempt completion. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {Attributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ) { - this.logger.log( - `Recording parameters for FirstResponseLatency: ${firstResponseLatency}` - ); + attributes.clientName = 'nodejs-bigtable'; + this.logger.log('Recording parameters for onAttemptComplete:'); + this.logger.log(`metrics: ${JSON.stringify(metrics)}`); this.logger.log(`attributes: ${JSON.stringify(attributes)}`); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 1192e221d..e1a462bb9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {IMetricsHandler} from './metrics-handler'; +import { + IMetricsHandler, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, +} from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { @@ -161,54 +161,48 @@ export class GCPMetricsHandler implements IMetricsHandler { } } - onRecordAttemptLatency( - attemptLatency: number, - attributes: AttemptLatencyAttributes - ) { - this.initialize(); - this.otelMetrics?.attemptLatencies.record(attemptLatency, attributes); - } - - onRecordConnectivityErrorCount( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ) { this.initialize(); - this.otelMetrics?.connectivityErrorCount.record( - connectivityErrorCount, + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, attributes ); } - onRecordServerLatency( - serverLatency: number, - attributes: ServerLatenciesAttributes - ) { - this.initialize(); - this.otelMetrics?.serverLatencies.record(serverLatency, attributes); - } - - onRecordOperationLatency( - operationLatency: number, - attributes: OperationLatencyAttributes - ) { - this.initialize(); - this.otelMetrics?.operationLatencies.record(operationLatency, attributes); - } - - onRecordRetryCount(retryCount: number, attributes: RetryCountAttributes) { - this.initialize(); - this.otelMetrics?.retryCount.add(retryCount, attributes); - } - - onRecordFirstResponseLatency( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ) { this.initialize(); - this.otelMetrics?.firstResponseLatencies.record( - firstResponseLatency, + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, attributes ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index fa3cfda72..051b65394 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -13,12 +13,8 @@ // limitations under the License. import { - AttemptLatencyAttributes, - ConnectivityErrorCountAttributes, - FirstResponseLatencyAttributes, - OperationLatencyAttributes, - RetryCountAttributes, - ServerLatenciesAttributes, + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, } from '../../common/client-side-metrics-attributes'; /** @@ -50,33 +46,22 @@ export interface OnAttemptCompleteMetrics { * Implementations of this interface can define how metrics are recorded and processed. */ export interface IMetricsHandler { - onRecordAttemptLatency?( - attemptLatency: number, - attributes: AttemptLatencyAttributes + /** + * Called when an operation completes (successfully or unsuccessfully). + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. 
+ */ + onOperationComplete?( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes ): void; - - onRecordConnectivityErrorCount?( - connectivityErrorCount: number, - attributes: ConnectivityErrorCountAttributes - ): void; - - onRecordServerLatency?( - serverLatency: number, - attributes: ServerLatenciesAttributes - ): void; - - onRecordOperationLatency?( - operationLatency: number, - attributes: OperationLatencyAttributes - ): void; - - onRecordRetryCount?( - retryCount: number, - attributes: RetryCountAttributes - ): void; - - onRecordFirstResponseLatency?( - firstResponseLatency: number, - attributes: FirstResponseLatencyAttributes + /** + * Called when an attempt (e.g., an RPC attempt) completes. + * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete?( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes ): void; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 59c859e35..e120e5cf9 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -273,15 +273,13 @@ export class OperationMetricsCollector { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRecordAttemptLatency) { - metricsHandler.onRecordAttemptLatency(totalTime, attributes); - } - if (metricsHandler.onRecordServerLatency && this.serverTime) { - metricsHandler.onRecordServerLatency(this.serverTime, attributes); - } - if (metricsHandler.onRecordConnectivityErrorCount) { - metricsHandler.onRecordConnectivityErrorCount( - info.connectivityErrorCount, + if (metricsHandler.onAttemptComplete) { + metricsHandler.onAttemptComplete( + { + attemptLatency: totalTime, + serverLatency: this.serverTime ?? undefined, + connectivityErrorCount: info.connectivityErrorCount, + }, attributes ); } @@ -345,25 +343,15 @@ export class OperationMetricsCollector { projectId, info ); + const metrics = { + operationLatency: totalTime, + retryCount: this.attemptCount - 1, + firstResponseLatency: this.firstResponseLatency ?? undefined, + }; this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onRecordOperationLatency) { - metricsHandler.onRecordOperationLatency( - totalTime, - operationLatencyAttributes - ); - } - if (metricsHandler.onRecordRetryCount) { - metricsHandler.onRecordRetryCount( - this.attemptCount - 1, - operationLatencyAttributes - ); - } - if ( - metricsHandler.onRecordFirstResponseLatency && - this.firstResponseLatency - ) { - metricsHandler.onRecordFirstResponseLatency( - this.firstResponseLatency ?? 
undefined, + if (metricsHandler.onOperationComplete) { + metricsHandler.onOperationComplete( + metrics, operationLatencyAttributes ); } From 1b6681b7aabde340a1fad04208c1cb52c23440c9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Feb 2025 15:23:24 -0500 Subject: [PATCH 136/289] Supply the projectId later in the client side metrics lifecycle --- .../operation-metrics-collector.ts | 28 ++- test/metrics-collector/metrics-collector.ts | 175 +++++++++--------- 2 files changed, 99 insertions(+), 104 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e120e5cf9..e2893de5f 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -138,7 +138,6 @@ export class OperationMetricsCollector { private cluster: string | undefined; private tabularApiSurface: ITabularApiSurface; private methodName: MethodName; - private projectId?: string; private attemptCount = 0; private receivedFirstResponse: boolean; private metricsHandlers: IMetricsHandler[]; @@ -151,14 +150,12 @@ export class OperationMetricsCollector { * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. * @param {MethodName} methodName The name of the method being traced. - * @param {string} projectId The id of the project. * @param {DateProvider} dateProvider A provider for date/time information (for testing). */ constructor( tabularApiSurface: ITabularApiSurface, metricsHandlers: IMetricsHandler[], methodName: MethodName, - projectId?: string, dateProvider?: DateProvider ) { this.state = MetricsCollectorState.OPERATION_NOT_STARTED; @@ -173,7 +170,6 @@ export class OperationMetricsCollector { this.firstResponseLatency = null; this.serverTimeRead = false; this.serverTime = null; - this.projectId = projectId; if (dateProvider) { this.dateProvider = dateProvider; } else { @@ -258,9 +254,10 @@ export class OperationMetricsCollector { /** * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. + * @param {string} projectId The id of the project. * @param {OnAttemptCompleteInfo} info Information about the completed attempt. */ - onAttemptComplete(info: OnAttemptCompleteInfo) { + onAttemptComplete(projectId: string, info: OnAttemptCompleteInfo) { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS ) { @@ -268,7 +265,6 @@ export class OperationMetricsCollector { MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; this.attemptCount++; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.attemptStartTime) { const attributes = this.getAttemptAttributes(projectId, info); const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); @@ -310,11 +306,10 @@ export class OperationMetricsCollector { /** * Called when the first response is received. Records first response latencies. 
*/ - onResponse() { + onResponse(projectId: string) { if (!this.receivedFirstResponse) { this.receivedFirstResponse = true; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.operationStartTime) { this.firstResponseLatency = endTime.getTime() - this.operationStartTime.getTime(); @@ -325,16 +320,16 @@ export class OperationMetricsCollector { /** * Called when an operation completes (successfully or unsuccessfully). * Records operation latencies, retry counts, and connectivity error counts. + * @param {string} projectId The id of the project. * @param {OperationOnlyAttributes} info Information about the completed operation. */ - onOperationComplete(info: OperationOnlyAttributes) { + onOperationComplete(projectId: string, info: OperationOnlyAttributes) { if ( this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS ) { this.state = MetricsCollectorState.OPERATION_COMPLETE; const endTime = this.dateProvider.getDate(); - const projectId = this.projectId; if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { @@ -365,12 +360,16 @@ export class OperationMetricsCollector { /** * Called when metadata is received. Extracts server timing information if available. + * @param {string} projectId The id of the project. * @param {object} metadata The received metadata. */ - onMetadataReceived(metadata: { - internalRepr: Map; - options: {}; - }) { + onMetadataReceived( + projectId: string, + metadata: { + internalRepr: Map; + options: {}; + } + ) { const mappedEntries = new Map( Array.from(metadata.internalRepr.entries(), ([key, value]) => [ key, @@ -382,7 +381,6 @@ export class OperationMetricsCollector { if (!this.serverTimeRead) { this.serverTimeRead = true; const serverTime = parseInt(durationValues[1]); - const projectId = this.projectId; if (projectId) { this.serverTime = serverTime; } diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 7983d99bb..dd8130389 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -49,21 +49,11 @@ class Logger { /** * A fake implementation of the Bigtable client for testing purposes. Provides a - * metricsTracerFactory and a stubbed getProjectId_ method. + * metricsTracerFactory and a stubbed projectId method. */ class FakeBigtable { appProfileId?: string; - - /** - * A stubbed method that simulates retrieving the project ID. Always returns - * 'my-project'. - * @param {function} callback A callback function that receives the project ID (or an error). 
- */ - getProjectId_( - callback: (err: Error | null, projectId?: string) => void - ): void { - callback(null, 'my-project'); - } + projectId = 'my-project'; } /** @@ -86,84 +76,91 @@ describe('Bigtable/MetricsCollector', () => { bigtable = new FakeBigtable(); async fakeMethod(): Promise { - return new Promise(resolve => { - this.bigtable.getProjectId_((err, projectId) => { - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsCollector = new OperationMetricsCollector( - this, - metricsHandlers, - MethodName.READ_ROWS, - projectId, - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); - metricsCollector.onOperationStart(); - logger.log('2. The attempt starts.'); - metricsCollector.onAttemptStart(); - logger.log('3. Client receives status information.'); - metricsCollector.onStatusReceived(status); - logger.log('4. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('101')); - logger.log('5. Client receives first row.'); - metricsCollector.onResponse(); - logger.log('6. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('102')); - logger.log('7. Client receives second row.'); - metricsCollector.onResponse(); - logger.log('8. A transient error occurs.'); - metricsCollector.onAttemptComplete({ - streamingOperation: StreamingState.STREAMING, - attemptStatus: grpc.status.DEADLINE_EXCEEDED, - connectivityErrorCount: 1, - }); - logger.log('9. After a timeout, the second attempt is made.'); - metricsCollector.onAttemptStart(); - logger.log('10. Client receives status information.'); - metricsCollector.onStatusReceived(status); - logger.log('11. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('103')); - logger.log('12. Client receives third row.'); - metricsCollector.onResponse(); - logger.log('13. Client receives metadata.'); - metricsCollector.onMetadataReceived(createMetadata('104')); - logger.log('14. Client receives fourth row.'); - metricsCollector.onResponse(); - logger.log('15. User reads row 1'); - logger.log('16. Stream ends, operation completes'); - metricsCollector.onAttemptComplete({ - attemptStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - connectivityErrorCount: 1, - }); - metricsCollector.onOperationComplete({ - finalOperationStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - }); - resolve(); + function createMetadata(duration: string) { + return { + internalRepr: new Map([ + ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], + ]), + options: {}, + }; + } + if (this.bigtable.projectId) { + const status = { + metadata: { + internalRepr: new Map([ + [ + 'x-goog-ext-425905942-bin', + Buffer.from('\n\nus-west1-c \rfake-cluster3'), + ], + ]), + options: {}, + }, + }; + const metricsCollector = new OperationMetricsCollector( + this, + metricsHandlers, + MethodName.READ_ROWS, + new TestDateProvider(logger) + ); + // In this method we simulate a series of events that might happen + // when a user calls one of the Table methods. 
+ // Here is an example of what might happen in a method call: + logger.log('1. The operation starts'); + metricsCollector.onOperationStart(); + logger.log('2. The attempt starts.'); + metricsCollector.onAttemptStart(); + logger.log('3. Client receives status information.'); + metricsCollector.onStatusReceived(status); + logger.log('4. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('101') + ); + logger.log('5. Client receives first row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('6. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('102') + ); + logger.log('7. Client receives second row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('8. A transient error occurs.'); + metricsCollector.onAttemptComplete(this.bigtable.projectId, { + streamingOperation: StreamingState.STREAMING, + attemptStatus: grpc.status.DEADLINE_EXCEEDED, + connectivityErrorCount: 1, + }); + logger.log('9. After a timeout, the second attempt is made.'); + metricsCollector.onAttemptStart(); + logger.log('10. Client receives status information.'); + metricsCollector.onStatusReceived(status); + logger.log('11. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('103') + ); + logger.log('12. Client receives third row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('13. Client receives metadata.'); + metricsCollector.onMetadataReceived( + this.bigtable.projectId, + createMetadata('104') + ); + logger.log('14. Client receives fourth row.'); + metricsCollector.onResponse(this.bigtable.projectId); + logger.log('15. User reads row 1'); + logger.log('16. Stream ends, operation completes'); + metricsCollector.onAttemptComplete(this.bigtable.projectId, { + attemptStatus: grpc.status.OK, + streamingOperation: StreamingState.STREAMING, + connectivityErrorCount: 1, + }); + metricsCollector.onOperationComplete(this.bigtable.projectId, { + finalOperationStatus: grpc.status.OK, + streamingOperation: StreamingState.STREAMING, }); - }); + } } } const table = new FakeTable(); From b6f130258101446a7fc65c02a36039184ad9d5ed Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 5 Feb 2025 16:44:05 -0500 Subject: [PATCH 137/289] Remove the GCPMetricsHandler file --- .../gcp-metrics-handler.ts | 208 ------------------ 1 file changed, 208 deletions(-) delete mode 100644 src/client-side-metrics/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts deleted file mode 100644 index e1a462bb9..000000000 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -import { - IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from './metrics-handler'; -import * as Resources from '@opentelemetry/resources'; -import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; -import {View} from '@opentelemetry/sdk-metrics'; -const { - Aggregation, - ExplicitBucketHistogramAggregation, - MeterProvider, - Histogram, - Counter, - PeriodicExportingMetricReader, -} = require('@opentelemetry/sdk-metrics'); - -/** - * A collection of OpenTelemetry metric instruments used to record - * Bigtable client-side metrics. - */ -interface Metrics { - operationLatencies: typeof Histogram; - attemptLatencies: typeof Histogram; - retryCount: typeof Counter; - applicationBlockingLatencies: typeof Histogram; - firstResponseLatencies: typeof Histogram; - serverLatencies: typeof Histogram; - connectivityErrorCount: typeof Histogram; - clientBlockingLatencies: typeof Histogram; -} - -/** - * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. - * This handler records metrics such as operation latency, attempt latency, retry count, and more, - * associating them with relevant attributes for detailed analysis in Cloud Monitoring. - */ -export class GCPMetricsHandler implements IMetricsHandler { - private initialized = false; - private otelMetrics?: Metrics; - - /** - * Initializes the OpenTelemetry metrics instruments if they haven't been already. - * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. - * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. - * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. - */ - private initialize(projectId?: string) { - if (!this.initialized) { - this.initialized = true; - const sumAggregation = Aggregation.Sum(); - const histogramAggregation = new ExplicitBucketHistogramAggregation([ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, - ]); - const viewList = [ - 'operation_latencies', - 'first_response_latencies', - 'attempt_latencies', - 'retry_count', - 'server_latencies', - 'connectivity_error_count', - 'application_latencies', - 'throttling_latencies', - ].map( - name => - new View({ - instrumentName: name, - name, - aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, - }) - ); - const meterProvider = new MeterProvider({ - views: viewList, - resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', - }).merge(new ResourceUtil.GcpDetectorSync().detect()), - readers: [ - // Register the exporter - new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. 
- exportIntervalMillis: 100_000, - exporter: new MetricExporter({ - projectId, - }), - }), - ], - }); - const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.otelMetrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createCounter('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), - applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', - { - description: - 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', - unit: 'ms', - } - ), - firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', - { - description: - 'Latencies from when a client sends a request and receives the first row of the response.', - unit: 'ms', - } - ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - }), - connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', - { - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - } - ), - clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', - { - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - } - ), - }; - } - } - - /** - * Records metrics for a completed Bigtable operation. - * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - this.initialize(); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); - } - - /** - * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, - * along with the provided attributes. 
- * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. - */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - this.initialize(); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, - attributes - ); - this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, - attributes - ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); - } -} From 1ae82ff00e12af0b306175bd7133ffa4d08fe871 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 09:41:14 -0500 Subject: [PATCH 138/289] Change location of the client-side-metrics-attribu file --- common/test-metrics-handler.ts | 2 +- .../client-side-metrics}/client-side-metrics-attributes.ts | 0 src/client-side-metrics/metrics-handler.ts | 2 +- src/client-side-metrics/operation-metrics-collector.ts | 2 +- test/metrics-collector/metrics-collector.ts | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) rename {common => src/client-side-metrics}/client-side-metrics-attributes.ts (100%) diff --git a/common/test-metrics-handler.ts b/common/test-metrics-handler.ts index 9ffabd7a0..c9c5507df 100644 --- a/common/test-metrics-handler.ts +++ b/common/test-metrics-handler.ts @@ -20,7 +20,7 @@ import { import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; +} from '../src/client-side-metrics/client-side-metrics-attributes'; /** * A test implementation of the IMetricsHandler interface. Used for testing purposes. diff --git a/common/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts similarity index 100% rename from common/client-side-metrics-attributes.ts rename to src/client-side-metrics/client-side-metrics-attributes.ts diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 051b65394..38a98ae59 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -15,7 +15,7 @@ import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; /** * The interfaces below use undefined instead of null to indicate a metric is diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e2893de5f..502a11ad0 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -20,7 +20,7 @@ import { MethodName, OnOperationCompleteAttributes, StreamingState, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; /** * An interface representing a Date-like object. 
Provides a `getTime` method diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index dd8130389..335a288cb 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -21,7 +21,7 @@ import {OperationMetricsCollector} from '../../src/client-side-metrics/operation import { MethodName, StreamingState, -} from '../../common/client-side-metrics-attributes'; +} from '../../src/client-side-metrics/client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From 3ee5604464d250060bf44541ecfe86f54a430b90 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 09:42:57 -0500 Subject: [PATCH 139/289] Change common test utilities folder name --- {common => test-common}/logger.ts | 0 {common => test-common}/test-date-provider.ts | 0 {common => test-common}/test-metrics-handler.ts | 0 test/metrics-collector/metrics-collector.ts | 4 ++-- 4 files changed, 2 insertions(+), 2 deletions(-) rename {common => test-common}/logger.ts (100%) rename {common => test-common}/test-date-provider.ts (100%) rename {common => test-common}/test-metrics-handler.ts (100%) diff --git a/common/logger.ts b/test-common/logger.ts similarity index 100% rename from common/logger.ts rename to test-common/logger.ts diff --git a/common/test-date-provider.ts b/test-common/test-date-provider.ts similarity index 100% rename from common/test-date-provider.ts rename to test-common/test-date-provider.ts diff --git a/common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts similarity index 100% rename from common/test-metrics-handler.ts rename to test-common/test-metrics-handler.ts diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 335a288cb..48f1327fd 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -13,10 +13,10 @@ // limitations under the License. import {describe} from 'mocha'; -import {TestDateProvider} from '../../common/test-date-provider'; +import {TestDateProvider} from '../../test-common/test-date-provider'; import * as assert from 'assert'; import * as fs from 'fs'; -import {TestMetricsHandler} from '../../common/test-metrics-handler'; +import {TestMetricsHandler} from '../../test-common/test-metrics-handler'; import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; import { MethodName, From 124ed30cbb79768bc293d55210362fed0c618531 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:04:05 -0500 Subject: [PATCH 140/289] Remove aliases for grpc status --- .../client-side-metrics-attributes.ts | 27 +++++-------------- .../operation-metrics-collector.ts | 9 +++---- 2 files changed, 10 insertions(+), 26 deletions(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 5f2adf7e2..1e5c04119 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -39,7 +39,7 @@ export enum StreamingState { * These attributes provide context about the Bigtable environment and the completed operation. 
*/ interface OperationLatencyAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; streamingOperation: StreamingState; } @@ -48,7 +48,7 @@ interface OperationLatencyAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ interface AttemptLatencyAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -57,7 +57,7 @@ interface AttemptLatencyAttributes extends StandardAttributes { * provide context about the Bigtable environment and the final status of the operation. */ interface RetryCountAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; } /** @@ -71,7 +71,7 @@ type ApplicationBlockingLatenciesAttributes = StandardAttributes; * These attributes provide context about the Bigtable environment and the final status of the operation. */ interface FirstResponseLatencyAttributes extends StandardAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; } /** @@ -79,7 +79,7 @@ interface FirstResponseLatencyAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. */ interface ServerLatenciesAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -88,7 +88,7 @@ interface ServerLatenciesAttributes extends StandardAttributes { * These attributes provide context about the Bigtable environment and the status of the attempt. */ interface ConnectivityErrorCountAttributes extends StandardAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; } /** @@ -97,21 +97,6 @@ interface ConnectivityErrorCountAttributes extends StandardAttributes { */ type ClientBlockingLatenciesAttributes = StandardAttributes; -/** - * The final status of a Bigtable operation. This represents the ultimate result - * of the operation, regardless of individual attempt statuses. It's represented - * as a gRPC status code. See the `google-gax` library's documentation on - * gRPC status codes for more information on specific values. - */ -export type FinalOperationStatus = grpc.status; - -/** - * The status of a single attempt of a Bigtable operation. This is represented as a - * gRPC status code. See the `google-gax` library's documentation on gRPC status - * codes for more information on specific values. - */ -export type AttemptStatus = grpc.status; - /** * Attributes associated with the completion of a Bigtable operation. These * attributes provide context about the Bigtable environment, the completed diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 502a11ad0..1034c826b 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -15,12 +15,11 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; import { - AttemptStatus, - FinalOperationStatus, MethodName, OnOperationCompleteAttributes, StreamingState, } from './client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * An interface representing a Date-like object. 
Provides a `getTime` method @@ -85,7 +84,7 @@ interface OnAttemptCompleteInfo { /** * The attempt status of the operation. */ - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; } /** @@ -94,7 +93,7 @@ interface OnAttemptCompleteInfo { * part of a streaming operation. */ interface AttemptOnlyAttributes { - attemptStatus: AttemptStatus; + attemptStatus: grpc.status; streamingOperation: StreamingState; } @@ -102,7 +101,7 @@ interface AttemptOnlyAttributes { * Information about a Bigtable operation to be recorded in client side metrics. */ interface OperationOnlyAttributes { - finalOperationStatus: FinalOperationStatus; + finalOperationStatus: grpc.status; streamingOperation: StreamingState; } From ef36a6fa056be739ebfadee67c52f22dcb01b9a5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:14:17 -0500 Subject: [PATCH 141/289] Should be MethodName type --- src/client-side-metrics/client-side-metrics-attributes.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 1e5c04119..0672f6f1c 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -25,7 +25,7 @@ interface StandardAttributes { cluster?: string; zone?: string; appProfileId?: string; - methodName: string; + methodName: MethodName; clientName: string; } From 68292248bbf9549f9a843eed3cb45a84ecc07a2d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:38:38 -0500 Subject: [PATCH 142/289] Rename variable as it expands beyond latency --- src/client-side-metrics/operation-metrics-collector.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 1034c826b..cedb67c2e 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -333,7 +333,7 @@ export class OperationMetricsCollector { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { // This block records operation latency metrics. - const operationLatencyAttributes = this.getOperationAttributes( + const operationAttributes = this.getOperationAttributes( projectId, info ); @@ -346,7 +346,7 @@ export class OperationMetricsCollector { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete( metrics, - operationLatencyAttributes + operationAttributes ); } }); From dd603f180e013ebd45f842277166257365351d2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 10:59:42 -0500 Subject: [PATCH 143/289] Remove private methods for building attributes --- .../operation-metrics-collector.ts | 94 +++++-------------- .../metrics-collector/typical-method-call.txt | 2 +- 2 files changed, 26 insertions(+), 70 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index cedb67c2e..488748329 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -176,66 +176,6 @@ export class OperationMetricsCollector { } } - /** - * Assembles the basic attributes for metrics. These attributes provide - * context about the Bigtable environment and the operation being performed. - * @param {string} projectId The Google Cloud project ID. 
- * @returns {Attributes} An object containing the basic attributes. - */ - private getBasicAttributes(projectId: string) { - return { - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - } - - /** - * Assembles the attributes for an entire operation. These attributes - * provide context about the Bigtable environment, the operation being - * performed, and the final status of the operation. Includes whether the - * operation was a streaming operation or not. - * - * @param {string} projectId The Google Cloud project ID. - * @param {OperationOnlyAttributes} operationOnlyAttributes The attributes of the operation. - * @returns {OnOperationCompleteAttributes} An object containing the attributes - * for operation latency metrics. - */ - private getOperationAttributes( - projectId: string, - operationOnlyAttributes: OperationOnlyAttributes - ): OnOperationCompleteAttributes { - return Object.assign( - operationOnlyAttributes, - this.getBasicAttributes(projectId) - ); - } - - /** - * Assembles the attributes for attempt metrics. These attributes provide context - * about the Bigtable environment, the operation being performed, the status - * of the attempt and whether the operation was a streaming operation or not. - * - * @param {string} projectId The Google Cloud project ID. - * @param {AttemptOnlyAttributes} attemptOnlyAttributes The attributes of the attempt. - * @returns {OnAttemptCompleteAttributes} The attributes all metrics recorded - * in the onAttemptComplete handler. - */ - private getAttemptAttributes( - projectId: string, - attemptOnlyAttributes: AttemptOnlyAttributes - ) { - return Object.assign( - attemptOnlyAttributes, - this.getBasicAttributes(projectId) - ); - } - /** * Called when the operation starts. Records the start time. */ @@ -265,7 +205,19 @@ export class OperationMetricsCollector { this.attemptCount++; const endTime = this.dateProvider.getDate(); if (projectId && this.attemptStartTime) { - const attributes = this.getAttemptAttributes(projectId, info); + const attributes = { + streamingOperation: info.streamingOperation, + attemptStatus: info.attemptStatus, + connectivityErrorCount: info.connectivityErrorCount, + projectId, + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onAttemptComplete) { @@ -332,11 +284,18 @@ export class OperationMetricsCollector { if (projectId && this.operationStartTime) { const totalTime = endTime.getTime() - this.operationStartTime.getTime(); { - // This block records operation latency metrics. 
- const operationAttributes = this.getOperationAttributes( + const operationAttributes = { + finalOperationStatus: info.finalOperationStatus, + streamingOperation: info.streamingOperation, projectId, - info - ); + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + appProfileId: this.tabularApiSurface.bigtable.appProfileId, + methodName: this.methodName, + clientName: `nodejs-bigtable/${version}`, + }; const metrics = { operationLatency: totalTime, retryCount: this.attemptCount - 1, @@ -344,10 +303,7 @@ export class OperationMetricsCollector { }; this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete( - metrics, - operationAttributes - ); + metricsHandler.onOperationComplete(metrics, operationAttributes); } }); } diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 921270cf4..28bde2266 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -25,7 +25,7 @@ getDate call returns 5000 ms getDate call returns 6000 ms Recording parameters for onAttemptComplete: metrics: {"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":1} -attributes: {"attemptStatus":0,"streamingOperation":"streaming","connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} +attributes: {"streamingOperation":"streaming","attemptStatus":0,"connectivityErrorCount":1,"projectId":"my-project","instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c ","methodName":"readRows","clientName":"nodejs-bigtable"} getDate call returns 7000 ms Recording parameters for onOperationComplete: metrics: {"operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} From b493c0defd70fdede128d3ed10f1a8679bc60382 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:33:20 -0500 Subject: [PATCH 144/289] Replace the logger class with a simple object --- test-common/test-date-provider.ts | 9 ++++- test-common/test-metrics-handler.ts | 19 ++++++---- test/metrics-collector/metrics-collector.ts | 41 ++++++++++----------- 3 files changed, 38 insertions(+), 31 deletions(-) diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts index 71ef66aee..533b6b148 100644 --- a/test-common/test-date-provider.ts +++ b/test-common/test-date-provider.ts @@ -41,8 +41,13 @@ class TestDateLike { * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. * Logs each date value returned for verification purposes. */ -export class TestDateProvider extends WithLogger { +export class TestDateProvider { private dateCounter = 0; + private messages: {value: string}; + + constructor(messages: {value: string}) { + this.messages = messages; + } /** * Returns a new fake date 1000ms later than the last. Logs the date for test verification. * @returns {TestDateLike} A fake date object. @@ -50,7 +55,7 @@ export class TestDateProvider extends WithLogger { getDate() { // The test assumes exactly 1s passes between each getDate call. 
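    // For example, three consecutive getDate() calls return fake times of
    // 1000 ms, 2000 ms and 3000 ms, so tests can assert on exact latency values.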
this.dateCounter = this.dateCounter + 1000; - this.logger.log(`getDate call returns ${this.dateCounter.toString()} ms`); + this.messages.value += `getDate call returns ${this.dateCounter.toString()} ms\n`; return new TestDateLike(this.dateCounter); } } diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index c9c5507df..970459807 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -26,7 +26,12 @@ import { * A test implementation of the IMetricsHandler interface. Used for testing purposes. * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. */ -export class TestMetricsHandler extends WithLogger { +export class TestMetricsHandler { + private messages: {value: string}; + + constructor(messages: {value: string}) { + this.messages = messages; + } /** * Logs the metrics and attributes received for an operation completion. * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. @@ -37,9 +42,9 @@ export class TestMetricsHandler extends WithLogger { attributes: OnOperationCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onOperationComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + this.messages.value += 'Recording parameters for onOperationComplete:\n'; + this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; + this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; } /** @@ -52,8 +57,8 @@ export class TestMetricsHandler extends WithLogger { attributes: OnAttemptCompleteAttributes ) { attributes.clientName = 'nodejs-bigtable'; - this.logger.log('Recording parameters for onAttemptComplete:'); - this.logger.log(`metrics: ${JSON.stringify(metrics)}`); - this.logger.log(`attributes: ${JSON.stringify(attributes)}`); + this.messages.value += 'Recording parameters for onAttemptComplete:\n'; + this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; + this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; } } diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 48f1327fd..4f933d007 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -66,9 +66,9 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe('Bigtable/MetricsCollector', () => { +describe.only('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { - const logger = new Logger(); + const logger = {value: ''}; const metricsHandlers = [new TestMetricsHandler(logger)]; class FakeTable { id = 'fakeTableId'; @@ -105,52 +105,52 @@ describe('Bigtable/MetricsCollector', () => { // In this method we simulate a series of events that might happen // when a user calls one of the Table methods. // Here is an example of what might happen in a method call: - logger.log('1. The operation starts'); + logger.value += '1. The operation starts\n'; metricsCollector.onOperationStart(); - logger.log('2. The attempt starts.'); + logger.value += '2. The attempt starts.\n'; metricsCollector.onAttemptStart(); - logger.log('3. Client receives status information.'); + logger.value += '3. Client receives status information.\n'; metricsCollector.onStatusReceived(status); - logger.log('4. Client receives metadata.'); + logger.value += '4. 
Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('101') ); - logger.log('5. Client receives first row.'); + logger.value += '5. Client receives first row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('6. Client receives metadata.'); + logger.value += '6. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('102') ); - logger.log('7. Client receives second row.'); + logger.value += '7. Client receives second row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('8. A transient error occurs.'); + logger.value += '8. A transient error occurs.\n'; metricsCollector.onAttemptComplete(this.bigtable.projectId, { streamingOperation: StreamingState.STREAMING, attemptStatus: grpc.status.DEADLINE_EXCEEDED, connectivityErrorCount: 1, }); - logger.log('9. After a timeout, the second attempt is made.'); + logger.value += '9. After a timeout, the second attempt is made.\n'; metricsCollector.onAttemptStart(); - logger.log('10. Client receives status information.'); + logger.value += '10. Client receives status information.\n'; metricsCollector.onStatusReceived(status); - logger.log('11. Client receives metadata.'); + logger.value += '11. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('103') ); - logger.log('12. Client receives third row.'); + logger.value += '12. Client receives third row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('13. Client receives metadata.'); + logger.value += '13. Client receives metadata.\n'; metricsCollector.onMetadataReceived( this.bigtable.projectId, createMetadata('104') ); - logger.log('14. Client receives fourth row.'); + logger.value += '14. Client receives fourth row.\n'; metricsCollector.onResponse(this.bigtable.projectId); - logger.log('15. User reads row 1'); - logger.log('16. Stream ends, operation completes'); + logger.value += '15. User reads row 1\n'; + logger.value += '16. 
Stream ends, operation completes\n'; metricsCollector.onAttemptComplete(this.bigtable.projectId, { attemptStatus: grpc.status.OK, streamingOperation: StreamingState.STREAMING, @@ -170,9 +170,6 @@ describe('Bigtable/MetricsCollector', () => { 'utf8' ); // Ensure events occurred in the right order here: - assert.strictEqual( - logger.getMessages().join('\n') + '\n', - expectedOutput.replace(/\r/g, '') - ); + assert.strictEqual(logger.value, expectedOutput.replace(/\r/g, '')); }); }); From 2f19f31b08ed250ac587491f6108e91d670125d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:33:52 -0500 Subject: [PATCH 145/289] Remove only --- test/metrics-collector/metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 4f933d007..0ce90962e 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -66,7 +66,7 @@ class FakeInstance { id = 'fakeInstanceId'; } -describe.only('Bigtable/MetricsCollector', () => { +describe('Bigtable/MetricsCollector', () => { it('should record the right metrics with a typical method call', async () => { const logger = {value: ''}; const metricsHandlers = [new TestMetricsHandler(logger)]; From dfe7d579e582fbc7f785c549fa1df76cc3f4e821 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 11:39:20 -0500 Subject: [PATCH 146/289] Remove the logger classes Replace them with a simpler object --- test-common/logger.ts | 35 --------------------- test-common/test-date-provider.ts | 2 -- test-common/test-metrics-handler.ts | 1 - test/metrics-collector/metrics-collector.ts | 23 -------------- 4 files changed, 61 deletions(-) delete mode 100644 test-common/logger.ts diff --git a/test-common/logger.ts b/test-common/logger.ts deleted file mode 100644 index 284005350..000000000 --- a/test-common/logger.ts +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * A simple logger interface for logging messages. Implementations of this interface - * can provide various logging mechanisms (e.g., console logging, file logging, etc.). - */ -interface ILogger { - log(message: string): void; -} - -/** - * An abstract base class that provides a logger instance. Subclasses can use this logger - * for logging messages. - */ -export abstract class WithLogger { - protected logger: ILogger; - /** - * @param {ILogger} logger The logger instance to be used by this object. - */ - constructor(logger: ILogger) { - this.logger = logger; - } -} diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts index 533b6b148..8eaa7b38c 100644 --- a/test-common/test-date-provider.ts +++ b/test-common/test-date-provider.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
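// A minimal usage sketch of the pattern this commit introduces (illustrative
// only; it assumes TestDateProvider is imported with the same relative path
// the tests above use): the Logger/WithLogger classes are gone, and the test
// helpers now append to a single shared mutable {value: string} object, so a
// test can assert on the accumulated log with one string comparison.
import * as assert from 'assert';
import {TestDateProvider} from '../../test-common/test-date-provider';

const messages = {value: ''};
const dateProvider = new TestDateProvider(messages);
dateProvider.getDate(); // appends 'getDate call returns 1000 ms\n'
dateProvider.getDate(); // appends 'getDate call returns 2000 ms\n'
assert.strictEqual(
  messages.value,
  'getDate call returns 1000 ms\ngetDate call returns 2000 ms\n'
);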
-import {WithLogger} from './logger'; - /** * A test implementation of a Date-like object. Used for testing purposes. It provides a * getTime method that returns a pre-determined fake date value, allowing for diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index 970459807..8166155b9 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {WithLogger} from './logger'; import { OnAttemptCompleteMetrics, OnOperationCompleteMetrics, diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts index 0ce90962e..5c158d28c 100644 --- a/test/metrics-collector/metrics-collector.ts +++ b/test/metrics-collector/metrics-collector.ts @@ -24,29 +24,6 @@ import { } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {grpc} from 'google-gax'; -/** - * A basic logger class that stores log messages in an array. Useful for testing. - */ -class Logger { - private messages: string[] = []; - - /** - * Logs a message by adding it to the internal message array. - * @param {string} message The message to be logged. - */ - log(message: string) { - this.messages.push(message); - } - - /** - * Retrieves all logged messages. - * @returns {string[]} An array of logged messages. - */ - getMessages() { - return this.messages; - } -} - /** * A fake implementation of the Bigtable client for testing purposes. Provides a * metricsTracerFactory and a stubbed projectId method. From 02d752ab4a27d72f821e9f5184f77e5d3c2e1765 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 16:25:04 -0500 Subject: [PATCH 147/289] Add stubs --- src/client-side-metrics/exporter.ts | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 src/client-side-metrics/exporter.ts diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts new file mode 100644 index 000000000..107a1d700 --- /dev/null +++ b/src/client-side-metrics/exporter.ts @@ -0,0 +1,9 @@ +export function transformInExport(args: {}) { + +} + +export class CloudMonitoringExporter { + export() { + + } +} From 19d1d81b94d4eb0c571005d5652a495c35bcbafa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Feb 2025 16:26:13 -0500 Subject: [PATCH 148/289] Revert "Remove the GCPMetricsHandler file" This reverts commit b6f130258101446a7fc65c02a36039184ad9d5ed. --- .../gcp-metrics-handler.ts | 208 ++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 src/client-side-metrics/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..e1a462bb9 --- /dev/null +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,208 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import { + IMetricsHandler, + OnAttemptCompleteMetrics, + OnOperationCompleteMetrics, +} from './metrics-handler'; +import * as Resources from '@opentelemetry/resources'; +import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../common/client-side-metrics-attributes'; +import {View} from '@opentelemetry/sdk-metrics'; +const { + Aggregation, + ExplicitBucketHistogramAggregation, + MeterProvider, + Histogram, + Counter, + PeriodicExportingMetricReader, +} = require('@opentelemetry/sdk-metrics'); + +/** + * A collection of OpenTelemetry metric instruments used to record + * Bigtable client-side metrics. + */ +interface Metrics { + operationLatencies: typeof Histogram; + attemptLatencies: typeof Histogram; + retryCount: typeof Counter; + applicationBlockingLatencies: typeof Histogram; + firstResponseLatencies: typeof Histogram; + serverLatencies: typeof Histogram; + connectivityErrorCount: typeof Histogram; + clientBlockingLatencies: typeof Histogram; +} + +/** + * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. + * This handler records metrics such as operation latency, attempt latency, retry count, and more, + * associating them with relevant attributes for detailed analysis in Cloud Monitoring. + */ +export class GCPMetricsHandler implements IMetricsHandler { + private initialized = false; + private otelMetrics?: Metrics; + + /** + * Initializes the OpenTelemetry metrics instruments if they haven't been already. + * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. + * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. + * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. + */ + private initialize(projectId?: string) { + if (!this.initialized) { + this.initialized = true; + const sumAggregation = Aggregation.Sum(); + const histogramAggregation = new ExplicitBucketHistogramAggregation([ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, + 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + ]); + const viewList = [ + 'operation_latencies', + 'first_response_latencies', + 'attempt_latencies', + 'retry_count', + 'server_latencies', + 'connectivity_error_count', + 'application_latencies', + 'throttling_latencies', + ].map( + name => + new View({ + instrumentName: name, + name, + aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, + }) + ); + const meterProvider = new MeterProvider({ + views: viewList, + resource: new Resources.Resource({ + 'service.name': 'bigtable-metrics', + }).merge(new ResourceUtil.GcpDetectorSync().detect()), + readers: [ + // Register the exporter + new PeriodicExportingMetricReader({ + // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by + // Cloud Monitoring. 
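+          // (Note: as configured below, exportIntervalMillis is 100_000 ms, i.e. one export every 100 seconds.)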
+ exportIntervalMillis: 100_000, + exporter: new MetricExporter({ + projectId, + }), + }), + ], + }); + const meter = meterProvider.getMeter('bigtable.googleapis.com'); + this.otelMetrics = { + operationLatencies: meter.createHistogram('operation_latencies', { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + }), + attemptLatencies: meter.createHistogram('attempt_latencies', { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + }), + retryCount: meter.createCounter('retry_count', { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + }), + applicationBlockingLatencies: meter.createHistogram( + 'application_blocking_latencies', + { + description: + 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', + unit: 'ms', + } + ), + firstResponseLatencies: meter.createHistogram( + 'first_response_latencies', + { + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + } + ), + serverLatencies: meter.createHistogram('server_latencies', { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + }), + connectivityErrorCount: meter.createHistogram( + 'connectivity_error_count', + { + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + } + ), + clientBlockingLatencies: meter.createHistogram( + 'client_blocking_latencies', + { + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + } + ), + }; + } + } + + /** + * Records metrics for a completed Bigtable operation. + * This method records the operation latency and retry count, associating them with provided attributes. + * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. + * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. + */ + onOperationComplete( + metrics: OnOperationCompleteMetrics, + attributes: OnOperationCompleteAttributes + ) { + this.initialize(); + this.otelMetrics?.operationLatencies.record( + metrics.operationLatency, + attributes + ); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.firstResponseLatencies.record( + metrics.firstResponseLatency, + attributes + ); + } + + /** + * Records metrics for a completed attempt of a Bigtable operation. + * This method records attempt latency, connectivity error count, server latency, and first response latency, + * along with the provided attributes. 
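+   * The attempt latency recorded here is the elapsed wall-clock time in milliseconds between onAttemptStart and onAttemptComplete, as measured by the OperationMetricsCollector.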
+ * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. + * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + */ + onAttemptComplete( + metrics: OnAttemptCompleteMetrics, + attributes: OnAttemptCompleteAttributes + ) { + this.initialize(); + this.otelMetrics?.attemptLatencies.record( + metrics.attemptLatency, + attributes + ); + this.otelMetrics?.connectivityErrorCount.record( + metrics.connectivityErrorCount, + attributes + ); + this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + } +} From 48ff70603ad241e864c1296254052385e471bb5e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 09:51:30 -0500 Subject: [PATCH 149/289] Add exporter and helper function --- package.json | 1 + src/client-side-metrics/exporter.ts | 30 ++- .../gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 209 ++++++++++++++++++ 4 files changed, 238 insertions(+), 4 deletions(-) create mode 100644 test/metrics-collector/metricsToRequest.ts diff --git a/package.json b/package.json index 2dc24800e..903a4b89c 100644 --- a/package.json +++ b/package.json @@ -47,6 +47,7 @@ "precompile": "gts clean" }, "dependencies": { + "@google-cloud/monitoring": "^4.1.0", "@google-cloud/opentelemetry-cloud-monitoring-exporter": "^0.20.0", "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 107a1d700..784c9c078 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -1,9 +1,33 @@ -export function transformInExport(args: {}) { +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ServiceError} from 'google-gax'; +import {MetricServiceClient} from '@google-cloud/monitoring'; +interface ExportResult { + code: number; } -export class CloudMonitoringExporter { - export() { +export function metricsToRequest(metrics: ResourceMetrics) { + return {}; +} + +export class CloudMonitoringExporter extends MetricExporter { + private monitoringClient = new MetricServiceClient(); + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + (async () => { + try { + const request = metricsToRequest(metrics); + await this.monitoringClient.createTimeSeries(request); + const exportResult = {code: 0}; + resultCallback(exportResult); + } catch (error) { + const exportResult = {code: (error as ServiceError).code as number}; + resultCallback(exportResult); + } + })(); } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index e1a462bb9..b947d8f88 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -23,7 +23,7 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import { OnAttemptCompleteAttributes, OnOperationCompleteAttributes, -} from '../../common/client-side-metrics-attributes'; +} from './client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { Aggregation, diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts new file mode 100644 index 000000000..97857a704 --- /dev/null +++ b/test/metrics-collector/metricsToRequest.ts @@ -0,0 +1,209 @@ +import {describe} from 'mocha'; + +// 
TODO: Generate the export code +describe('Bigtable/metricsToRequest', () => { + it('Converts a counter and a histogram to the cloud monitoring format', () => { + const exportArgs = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', + 'monitored_resource.labels.table_id': 'events-table', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', + 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', + 'monitored_resource.labels.table_id': 'events-table', + }, + }, + scopeMetrics: [ + { + scope: { + name: 'sample_metric', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/metric91', + type: 'COUNTER', + description: '', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + key: 'value', + }, + startTime: [1738789130, 855000000], + endTime: [1738789140, 857000000], + value: 15, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/metric92', + type: 'HISTOGRAM', + description: + 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + key: 'value', + }, + startTime: [1738789130, 855000000], + endTime: [1738789140, 857000000], + value: { + min: 7, + max: 7, + sum: 7, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 5000, 7500, 10000, + ], + counts: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + ], + }, + ], + }; + const expectedRequest = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: Math.floor(Date.now() / 1000), + }, + startTime: { + seconds: Math.floor(Date.now() / 1000) - 1000, + }, + }, + value: { + distributionValue: { 
+ count: '1', + mean: 376.177845, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, + 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 200000, 400000, 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], + }; + }); +}); From 883ea1a04536e87537113b4079041ae8a37cea2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 10:58:40 -0500 Subject: [PATCH 150/289] Add headers and fixture --- .../export-input-fixture.ts | 202 ++++++++++++++++++ src/client-side-metrics/exporter.ts | 14 ++ 2 files changed, 216 insertions(+) create mode 100644 src/client-side-metrics/export-input-fixture.ts diff --git a/src/client-side-metrics/export-input-fixture.ts b/src/client-side-metrics/export-input-fixture.ts new file mode 100644 index 000000000..833f0c36b --- /dev/null +++ b/src/client-side-metrics/export-input-fixture.ts @@ -0,0 +1,202 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +export const exportInput = { + resource: { + _attributes: { + 'service.name': 'bigtable-metrics', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'bigtable-metrics', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'operation_latencies', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 11956, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'attempt_latencies', + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + type: 'HISTOGRAM', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 942000000], + endTime: [1738943383, 940000000], + value: 11830, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'retry_count', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + type: 'HISTOGRAM', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 0, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'server_latencies', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 7642, + }, + ], + isMonotonic: true, + }, + { + descriptor: { + name: 'connectivity_error_count', + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + type: 'HISTOGRAM', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 3, + dataPoints: [ + { + attributes: { + attemptStatus: 0, + streamingOperation: true, + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c\u0012', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [1738943373, 943000000], + endTime: [1738943383, 940000000], + value: 0, + }, + ], + isMonotonic: true, + }, + ], + }, + ], +}; diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 784c9c078..88bf4d771 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; From ad9c85bde5215c4b373445aa9fa0f8932f8d685e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:06:31 -0500 Subject: [PATCH 151/289] Add milliseconds unit --- src/client-side-metrics/gcp-metrics-handler.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b947d8f88..ea7a0ab72 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -112,6 +112,7 @@ export class GCPMetricsHandler implements IMetricsHandler { operationLatencies: meter.createHistogram('operation_latencies', { description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: 'ms', }), attemptLatencies: meter.createHistogram('attempt_latencies', { description: @@ -141,6 +142,7 @@ export class GCPMetricsHandler implements IMetricsHandler { serverLatencies: meter.createHistogram('server_latencies', { description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', }), connectivityErrorCount: meter.createHistogram( 'connectivity_error_count', From 5a3bac2f94454d4e69647831f6ce5732f929c3e0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:08:43 -0500 Subject: [PATCH 152/289] Record to a histogram --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index ea7a0ab72..6b97a52c9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -41,7 +41,7 @@ const { interface Metrics { operationLatencies: typeof Histogram; attemptLatencies: typeof Histogram; - retryCount: typeof Counter; + retryCount: typeof Histogram; applicationBlockingLatencies: typeof Histogram; firstResponseLatencies: typeof Histogram; serverLatencies: typeof Histogram; @@ -119,7 +119,7 @@ export class GCPMetricsHandler implements IMetricsHandler { 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', }), - retryCount: meter.createCounter('retry_count', { + retryCount: meter.createHistogram('retry_count', { description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', }), @@ -178,7 +178,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics.operationLatency, attributes ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); + this.otelMetrics?.retryCount.record(metrics.retryCount, attributes); this.otelMetrics?.firstResponseLatencies.record( metrics.firstResponseLatency, attributes From 4740c624565e938d1a604b95ae4d31fabbebef06 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:24:44 -0500 Subject: [PATCH 153/289] Add necessary elements to the GCPMetricsHandler # Conflicts: # src/client-side-metrics/gcp-metrics-handler.ts --- .../gcp-metrics-handler.ts | 68 +++++++++++-------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 6b97a52c9..0afa4b64d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -30,7 +30,6 @@ const { ExplicitBucketHistogramAggregation, MeterProvider, Histogram, - Counter, PeriodicExportingMetricReader, } = require('@opentelemetry/sdk-metrics'); @@ -93,7 +92,11 @@ export class GCPMetricsHandler implements IMetricsHandler { const meterProvider = new MeterProvider({ views: viewList, resource: new Resources.Resource({ - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': projectId, + 'monitored_resource.type': 'bigtable_client_raw', }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -109,22 +112,30 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); this.otelMetrics = { - operationLatencies: meter.createHistogram('operation_latencies', { - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: 'ms', - }), - attemptLatencies: meter.createHistogram('attempt_latencies', { - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - unit: 'ms', - }), - retryCount: meter.createHistogram('retry_count', { - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - }), + operationLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/operation_latencies', + { + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + } + ), + attemptLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/attempt_latencies', + { + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + } + ), + retryCount: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/retry_count', + { + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + } + ), applicationBlockingLatencies: meter.createHistogram( - 'application_blocking_latencies', + 'bigtable.googleapis.com/internal/client/application_blocking_latencies', { description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', @@ -132,27 +143,29 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), firstResponseLatencies: meter.createHistogram( - 'first_response_latencies', + 'bigtable.googleapis.com/internal/client/first_response_latencies', { description: 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', } ), - serverLatencies: meter.createHistogram('server_latencies', { - description: - 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - unit: 'ms', - }), + serverLatencies: meter.createHistogram( + 'bigtable.googleapis.com/internal/client/server_latencies', + { + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + } + ), connectivityErrorCount: meter.createHistogram( - 'connectivity_error_count', + 'bigtable.googleapis.com/internal/client/connectivity_error_count', { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", } ), clientBlockingLatencies: meter.createHistogram( - 'client_blocking_latencies', + 'bigtable.googleapis.com/internal/client/client_blocking_latencies', { description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', @@ -173,7 +186,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { - this.initialize(); + this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, attributes @@ -196,7 +209,8 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - this.initialize(); + console.log('onAttemptComplete handler'); + this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, attributes From ee404f1fe57191ba93818de8c36cca8b9aa534c2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:47:45 -0500 Subject: [PATCH 154/289] Pass an exporter into the GCPMetricsHandler --- src/client-side-metrics/gcp-metrics-handler.ts | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 0afa4b64d..afd2e12b4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -56,6 +56,11 @@ interface Metrics { export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; + private exporter: typeof MetricExporter; + + constructor(exporter: typeof MetricExporter) { + this.exporter = exporter; + } /** * Initializes the OpenTelemetry metrics instruments if they haven't been already. @@ -104,9 +109,7 @@ export class GCPMetricsHandler implements IMetricsHandler { // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. 
exportIntervalMillis: 100_000, - exporter: new MetricExporter({ - projectId, - }), + exporter: this.exporter, }), ], }); From c997f0fd22a36b2db3ed8dbab79190a386baf8d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:51:38 -0500 Subject: [PATCH 155/289] Move file to tests --- .../metrics-collector}/export-input-fixture.ts | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {src/client-side-metrics => test/metrics-collector}/export-input-fixture.ts (100%) diff --git a/src/client-side-metrics/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts similarity index 100% rename from src/client-side-metrics/export-input-fixture.ts rename to test/metrics-collector/export-input-fixture.ts From 3719257988a9a999af1e6bc7a2a6c65841daf5c7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:52:00 -0500 Subject: [PATCH 156/289] Remove unused import --- src/client-side-metrics/operation-metrics-collector.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 488748329..b9c3e90a2 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -14,11 +14,7 @@ import * as fs from 'fs'; import {IMetricsHandler} from './metrics-handler'; -import { - MethodName, - OnOperationCompleteAttributes, - StreamingState, -} from './client-side-metrics-attributes'; +import {MethodName, StreamingState} from './client-side-metrics-attributes'; import {grpc} from 'google-gax'; /** From a6498f2891add40f75682ff68c5c62521b17f4bc Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 11:57:03 -0500 Subject: [PATCH 157/289] Adapt the fixture to include the projectId --- .../metrics-collector/export-input-fixture.ts | 132 ++++++++++++------ 1 file changed, 92 insertions(+), 40 deletions(-) diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index 833f0c36b..bf3cb2106 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -15,17 +15,25 @@ export const exportInput = { resource: { _attributes: { - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', }, asyncAttributesPending: false, _syncAttributes: { - 'service.name': 'bigtable-metrics', + 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', }, _asyncAttributesPromise: {}, }, @@ -38,11 +46,11 @@ export const exportInput = { metrics: [ { descriptor: { - name: 'operation_latencies', + name: 'attempt_latencies', description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + 'The latencies of a client RPC attempt. 
Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', type: 'HISTOGRAM', - unit: '', + unit: 'ms', valueType: 1, advice: {}, }, @@ -51,7 +59,7 @@ export const exportInput = { dataPoints: [ { attributes: { - finalOperationStatus: 0, + attemptStatus: 0, streamingOperation: true, projectId: 'some-project', instanceId: 'emulator-test-instance', @@ -61,29 +69,29 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 11956, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: 10944, }, ], isMonotonic: true, }, { descriptor: { - name: 'attempt_latencies', - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + name: 'bigtable.googleapis.com/internal/client/operation_latencies', type: 'HISTOGRAM', - unit: 'ms', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { - attemptStatus: 0, + finalOperationStatus: 0, streamingOperation: true, projectId: 'some-project', instanceId: 'emulator-test-instance', @@ -93,25 +101,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 942000000], - endTime: [1738943383, 940000000], - value: 11830, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 76, + max: 1337, + sum: 11027, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'retry_count', + name: 'bigtable.googleapis.com/internal/client/retry_count', + type: 'HISTOGRAM', description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', - type: 'HISTOGRAM', unit: 'ms', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -125,25 +144,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 0, + startTime: [1738946024, 951000000], + endTime: [1738946034, 948000000], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'server_latencies', + name: 'bigtable.googleapis.com/internal/client/server_latencies', + type: 'HISTOGRAM', description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', - type: 'HISTOGRAM', unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -157,25 +187,36 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 7642, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 57, + max: 379, + sum: 7271, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 94, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, { descriptor: { - name: 'connectivity_error_count', + name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + type: 'HISTOGRAM', description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - type: 'HISTOGRAM', unit: '', valueType: 1, advice: {}, }, aggregationTemporality: 1, - dataPointType: 3, + dataPointType: 0, dataPoints: [ { attributes: { @@ -189,12 +230,23 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738943373, 943000000], - endTime: [1738943383, 940000000], - value: 0, + startTime: [1738946024, 950000000], + endTime: [1738946034, 948000000], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 99, + }, }, ], - isMonotonic: true, }, ], }, From e7d631d74d07794ea76123f1afa11024d712fb20 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 14:51:55 -0500 Subject: [PATCH 158/289] More exporter code fixes --- src/client-side-metrics/exporter.ts | 128 ++++++- .../metrics-collector/export-input-fixture.ts | 127 +------ test/metrics-collector/metricsToRequest.ts | 321 ++++++++---------- 3 files changed, 263 insertions(+), 313 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 88bf4d771..964cb3780 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -21,8 +21,129 @@ interface ExportResult { code: number; } -export function metricsToRequest(metrics: ResourceMetrics) { - return {}; +// TODO: Only involves the values that we care about +interface ExportInput { + resource: { + _attributes: { + 'cloud.resource_manager.project_id': string; + }; + _syncAttributes: { + 'monitored_resource.type': string; + }; + }; + scopeMetrics: [ + { + metrics: [ + { + descriptor: { + name: string; + unit: string; + }; + dataPoints: [ + { + attributes: { + appProfileId: string; + finalOperationStatus: number; + streamingOperation: string; + projectId: string; + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientName: string; + }; + startTime: [number, number]; + endTime: [number, number]; + value: { + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; + }; + }, + ]; + }, + ]; + }, + ]; +} + +export function metricsToRequest(exportArgs: ExportInput) { + const request = { + name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + timeSeries: [], + }; + + for (const scopeMetrics of exportArgs.scopeMetrics) { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + + for (const dataPoint of metric.dataPoints) { + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + const metricLabels = { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + finalOperationStatus: allAttributes.finalOperationStatus, + streaming: allAttributes.streamingOperation, + }; + const resourceLabels = { + cluster: allAttributes.cluster, + instance: allAttributes.instanceId, + project_id: allAttributes.projectId, + table: allAttributes.table, + zone: allAttributes.zone, + }; + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, 
+ metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + nanos: dataPoint.endTime[1], + }, + startTime: { + seconds: dataPoint.startTime[0], + nanos: dataPoint.startTime[1], + }, + }, + value: { + distributionValue: { + count: String(dataPoint.value.count), + mean: dataPoint.value.sum / dataPoint.value.count, + bucketOptions: { + explicitBuckets: { + bounds: dataPoint.value.buckets.boundaries, + }, + }, + bucketCounts: dataPoint.value.buckets.counts.map(String), + }, + }, + }, + ], + unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + request.timeSeries.push(timeSeries); + } + } + } + return request; } export class CloudMonitoringExporter extends MetricExporter { @@ -34,7 +155,8 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - const request = metricsToRequest(metrics); + // TODO: Remove casting. + const request = metricsToRequest(metrics as unknown as ExportInput); await this.monitoringClient.createTimeSeries(request); const exportResult = {code: 0}; resultCallback(exportResult); diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index bf3cb2106..bd5b290c7 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -44,38 +44,6 @@ export const exportInput = { version: '', }, metrics: [ - { - descriptor: { - name: 'attempt_latencies', - description: - 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', - type: 'HISTOGRAM', - unit: 'ms', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 3, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: 10944, - }, - ], - isMonotonic: true, - }, { descriptor: { name: 'bigtable.googleapis.com/internal/client/operation_latencies', @@ -91,8 +59,9 @@ export const exportInput = { dataPoints: [ { attributes: { + appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, - streamingOperation: true, + streamingOperation: 'STREAMING', projectId: 'some-project', instanceId: 'emulator-test-instance', table: 'my-table', @@ -106,7 +75,7 @@ export const exportInput = { value: { min: 76, max: 1337, - sum: 11027, + sum: 11979, buckets: { boundaries: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, @@ -114,7 +83,7 @@ export const exportInput = { ], counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 99, + count: 100, }, }, ], @@ -135,7 +104,7 @@ export const exportInput = { { attributes: { finalOperationStatus: 0, - streamingOperation: true, + streamingOperation: 'STREAMING', projectId: 'some-project', instanceId: 'emulator-test-instance', table: 'my-table', @@ -162,92 +131,6 @@ export const exportInput = { }, ], }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/server_latencies', - type: 'HISTOGRAM', - description: - 'Latencies between the time when the Google frontend receives an RPC 
and when it sends the first byte of the response.', - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: { - min: 57, - max: 379, - sum: 7271, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 94, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', - type: 'HISTOGRAM', - description: - "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - attemptStatus: 0, - streamingOperation: true, - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 950000000], - endTime: [1738946034, 948000000], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, ], }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 97857a704..fd5804820 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,209 +1,154 @@ import {describe} from 'mocha'; +import {exportInput} from './export-input-fixture'; // TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { - const exportArgs = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', - 'monitored_resource.labels.table_id': 'events-table', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.labels.project_id': 'cloud-native-db-dpes-shared', - 'monitored_resource.labels.instance_id': 'dan-bigtable-instance', - 'monitored_resource.labels.table_id': 'events-table', - }, - }, 
- scopeMetrics: [ - { - scope: { - name: 'sample_metric', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/metric91', - type: 'COUNTER', - description: '', - unit: '', - valueType: 1, - advice: {}, + const exportArgs = exportInput; + const expectedRequests = [ + { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - aggregationTemporality: 1, - dataPointType: 3, - dataPoints: [ - { - attributes: { - key: 'value', - }, - startTime: [1738789130, 855000000], - endTime: [1738789140, 857000000], - value: 15, - }, - ], - isMonotonic: true, }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/metric92', - type: 'HISTOGRAM', - description: - 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', - unit: 'ms', - valueType: 1, - advice: {}, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - key: 'value', + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - startTime: [1738789130, 855000000], - endTime: [1738789140, 857000000], - value: { - min: 7, - max: 7, - sum: 7, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - counts: [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], + }, }, - count: 1, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, }, - ], - }, - ], - }, - ], - }; - const expectedRequest = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - streaming: 'true', - }, + }, + ], + unit: 'ms', }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', + }, }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: Math.floor(Date.now() / 1000), - }, - startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, - }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 
'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - value: { - distributionValue: { - count: '1', - mean: 376.177845, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, - 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, - 200000, 400000, 800000, 1600000, 3200000, - ], + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, + }, + }, + value: { + distributionValue: { + count: '100', + mean: 110.27, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], + }, }, + bucketCounts: [ + '99', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], }, }, - }, - ], - unit: 'ms', - }, - ], - }; + ], + unit: 'ms', + }, + ], + }, + ]; }); }); From 382ebef14c0604a93b6bc39a3fe228a04a1ad2b6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 15:40:03 -0500 Subject: [PATCH 159/289] Add the fixture. We are going to use it later --- .../metrics-handler-fixture.ts | 75 +++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 test/metrics-collector/metrics-handler-fixture.ts diff --git a/test/metrics-collector/metrics-handler-fixture.ts b/test/metrics-collector/metrics-handler-fixture.ts new file mode 100644 index 000000000..b9280bfa0 --- /dev/null +++ b/test/metrics-collector/metrics-handler-fixture.ts @@ -0,0 +1,75 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +export const expectedRequestsHandled = [ + { + metrics: { + attemptLatency: 2000, + serverLatency: 101, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'streaming', + attemptStatus: 4, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + attemptLatency: 1000, + serverLatency: 103, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'streaming', + attemptStatus: 0, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + operationLatency: 6000, + retryCount: 1, + firstResponseLatency: 2000, + }, + attributes: { + appProfileId: undefined, + finalOperationStatus: 0, + streamingOperation: 'streaming', + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c ', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, +]; From 8ba3347c41bb743126c70abece16b0a0c8cc6b53 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Feb 2025 16:50:23 -0500 Subject: [PATCH 160/289] Solved compiler errors. Test almost passes --- src/client-side-metrics/exporter.ts | 103 ++++---- test/metrics-collector/metricsToRequest.ts | 270 +++++++++++---------- 2 files changed, 193 insertions(+), 180 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 964cb3780..b1fd41a92 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -16,13 +16,15 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-expor import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; +import {google} from '@google-cloud/monitoring/build/protos/protos'; +import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; interface ExportResult { code: number; } // TODO: Only involves the values that we care about -interface ExportInput { +export interface ExportInput { resource: { _attributes: { 'cloud.resource_manager.project_id': string; @@ -31,52 +33,54 @@ interface ExportInput { 'monitored_resource.type': string; }; }; - scopeMetrics: [ - { - metrics: [ - { - descriptor: { - name: string; - unit: string; + scopeMetrics: { + scope: { + name: string; + version: string; + }; + metrics: { + descriptor: { + name: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; + }; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: { + appProfileId?: string; + finalOperationStatus: number; + streamingOperation: string; + projectId: string; + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientName: string; + }; + startTime: number[]; + endTime: number[]; + value: { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; }; - dataPoints: [ - { - attributes: { - appProfileId: string; - finalOperationStatus: number; - streamingOperation: string; - projectId: string; 
- instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; - clientName: string; - }; - startTime: [number, number]; - endTime: [number, number]; - value: { - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - }; - }, - ]; - }, - ]; - }, - ]; + }; + }[]; + }[]; + }[]; } export function metricsToRequest(exportArgs: ExportInput) { - const request = { - name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, - timeSeries: [], - }; - + const timeSeriesArray = []; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; @@ -88,7 +92,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - finalOperationStatus: allAttributes.finalOperationStatus, + finalOperationStatus: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, }; const resourceLabels = { @@ -139,11 +143,14 @@ export function metricsToRequest(exportArgs: ExportInput) { ], unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified }; - request.timeSeries.push(timeSeries); + timeSeriesArray.push(timeSeries); } } } - return request; + return { + name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + timeSeries: timeSeriesArray, + }; } export class CloudMonitoringExporter extends MetricExporter { @@ -157,7 +164,9 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); - await this.monitoringClient.createTimeSeries(request); + await this.monitoringClient.createTimeSeries( + request as ICreateTimeSeriesRequest + ); const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index fd5804820..3adbd6369 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,154 +1,158 @@ +import * as assert from 'assert'; import {describe} from 'mocha'; import {exportInput} from './export-input-fixture'; +import { + ExportInput, + metricsToRequest, +} from '../../src/client-side-metrics/exporter'; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { - const exportArgs = exportInput; - const expectedRequests = [ - { - name: 'projects/some-project', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: 0, - streaming: 'STREAMING', - }, + const expectedRequest = { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 
'my-table', - zone: 'us-west1-c\u0012', - }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 1738946034, - }, - startTime: { - seconds: 1738946024, - }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, - ], - }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/retry_count', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: 0, - streaming: 'STREAMING', - }, }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c\u0012', - }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + finalOperationStatus: 0, + streaming: 'STREAMING', }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 1738946034, - }, - startTime: { - seconds: 1738946024, - }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c\u0012', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 1738946034, + }, + startTime: { + seconds: 1738946024, }, - value: { - distributionValue: { - count: '100', - mean: 110.27, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, - ], - }, + }, + value: { + distributionValue: { + count: '100', + mean: 110.27, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, + 7500, 10000, + ], }, - bucketCounts: [ - '99', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], }, + bucketCounts: [ + '99', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], }, }, - ], - unit: 'ms', - }, - ], - }, - ]; + }, + ], + unit: 'ms', + }, + ], + }; + const actualRequest = metricsToRequest(exportInput); + assert.deepStrictEqual(actualRequest, expectedRequest); }); }); From 13382d2383e29459326f5267079ea63f5b7886f4 Mon Sep 17 00:00:00 2001 From: Daniel 
Bruce Date: Fri, 7 Feb 2025 17:18:47 -0500 Subject: [PATCH 161/289] Address the failing export test --- test/metrics-collector/export-input-fixture.ts | 5 +++-- test/metrics-collector/metricsToRequest.ts | 17 ++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts index bd5b290c7..6017029ef 100644 --- a/test/metrics-collector/export-input-fixture.ts +++ b/test/metrics-collector/export-input-fixture.ts @@ -70,7 +70,7 @@ export const exportInput = { methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', }, - startTime: [1738946024, 950000000], + startTime: [1738946024, 951000000], endTime: [1738946034, 948000000], value: { min: 76, @@ -83,7 +83,7 @@ export const exportInput = { ], counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 100, + count: 99, }, }, ], @@ -103,6 +103,7 @@ export const exportInput = { dataPoints: [ { attributes: { + appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, streamingOperation: 'STREAMING', projectId: 'some-project', diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 3adbd6369..dedb47f50 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -2,7 +2,6 @@ import * as assert from 'assert'; import {describe} from 'mocha'; import {exportInput} from './export-input-fixture'; import { - ExportInput, metricsToRequest, } from '../../src/client-side-metrics/exporter'; @@ -19,7 +18,7 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: 0, + finalOperationStatus: '0', streaming: 'STREAMING', }, }, @@ -40,9 +39,11 @@ describe.only('Bigtable/metricsToRequest', () => { interval: { endTime: { seconds: 1738946034, + nanos: 948000000, }, startTime: { seconds: 1738946024, + nanos: 951000000, }, }, value: { @@ -53,7 +54,7 @@ describe.only('Bigtable/metricsToRequest', () => { explicitBuckets: { bounds: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, + 5000, 7500, 10000, ], }, }, @@ -88,7 +89,7 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: 0, + finalOperationStatus: '0', streaming: 'STREAMING', }, }, @@ -108,21 +109,23 @@ describe.only('Bigtable/metricsToRequest', () => { { interval: { endTime: { + nanos: 948000000, seconds: 1738946034, }, startTime: { + nanos: 951000000, seconds: 1738946024, }, }, value: { distributionValue: { - count: '100', - mean: 110.27, + count: '99', + mean: 0, bucketOptions: { explicitBuckets: { bounds: [ 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 7500, 10000, + 5000, 7500, 10000, ], }, }, From 948a3a341549c73dcf476920b5de665a4d7b3257 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 13:32:53 -0500 Subject: [PATCH 162/289] Fixed the exporter. It now writes data properly. 
--- src/client-side-metrics/exporter.ts | 2 +- system-test/cloud-monitoring-exporter.ts | 33 ++ test-common/export-input-fixture.ts | 495 ++++++++++++++++++ .../metrics-collector/export-input-fixture.ts | 138 ----- test/metrics-collector/metricsToRequest.ts | 91 +--- 5 files changed, 540 insertions(+), 219 deletions(-) create mode 100644 system-test/cloud-monitoring-exporter.ts create mode 100644 test-common/export-input-fixture.ts delete mode 100644 test/metrics-collector/export-input-fixture.ts diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index b1fd41a92..fc2e480d4 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -92,7 +92,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - finalOperationStatus: allAttributes.finalOperationStatus.toString(), + status: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, }; const resourceLabels = { diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts new file mode 100644 index 000000000..08bc9bbbc --- /dev/null +++ b/system-test/cloud-monitoring-exporter.ts @@ -0,0 +1,33 @@ +import {describe} from 'mocha'; +import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; +import {exportInput} from '../test-common/export-input-fixture'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {Bigtable} from '../src'; + +describe.only('Bigtable/CloudMonitoringExporter', () => { + it('exports client side metrics to cloud monitoring', done => { + // When this test is run, metrics should be visible at the following link: + // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} + // This test will add metrics so that they are available in Pantheon + (async () => { + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const transformedExportInput = JSON.parse( + JSON.stringify(exportInput).replace(/some-project/g, projectId) + ); + const exporter = new CloudMonitoringExporter(); + exporter.export( + transformedExportInput as unknown as ResourceMetrics, + done + ); + })(); + }); +}); diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts new file mode 100644 index 000000000..6147565e2 --- /dev/null +++ b/test-common/export-input-fixture.ts @@ -0,0 +1,495 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; +export const fakeEndTime = fakeStartTime + 1000; + +export const exportInput = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'some-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, + ], + }, + ], +}; + +const serverLatencyExportOutput = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 376.103605, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', 
+ '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '5', + mean: 272.559932, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '2', + '2', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 331, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'bahaaiman-instance-01', + 
project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 2000, + }, + startTime: { + seconds: 1000, + }, + }, + value: { + distributionValue: { + count: '5', + mean: 230, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '2', + '2', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; diff --git a/test/metrics-collector/export-input-fixture.ts b/test/metrics-collector/export-input-fixture.ts deleted file mode 100644 index 6017029ef..000000000 --- a/test/metrics-collector/export-input-fixture.ts +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -export const exportInput = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - _asyncAttributesPromise: {}, - }, - scopeMetrics: [ - { - scope: { - name: 'bigtable.googleapis.com', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'STREAMING', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 951000000], - endTime: [1738946034, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/retry_count', - type: 'HISTOGRAM', - description: - 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', - unit: 'ms', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'STREAMING', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c\u0012', - methodName: 'readRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [1738946024, 951000000], - endTime: [1738946034, 948000000], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - ], - }, - ], -}; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index dedb47f50..6a0bc5919 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,12 +1,14 @@ import * as assert from 'assert'; import {describe} from 'mocha'; -import {exportInput} from './export-input-fixture'; import { - metricsToRequest, -} from '../../src/client-side-metrics/exporter'; + exportInput, + fakeEndTime, + fakeStartTime, +} from '../../test-common/export-input-fixture'; +import {metricsToRequest} from '../../src/client-side-metrics/exporter'; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const expectedRequest = { name: 'projects/some-project', @@ -18,8 +20,8 @@ describe.only('Bigtable/metricsToRequest', () => { app_profile: 'fake-app-profile-id', client_name: 'nodejs-bigtable/5.1.2', method: 'readRows', - finalOperationStatus: '0', - streaming: 'STREAMING', + status: '0', + streaming: 'true', }, }, resource: { @@ -29,7 +31,7 @@ describe.only('Bigtable/metricsToRequest', () => { instance: 'emulator-test-instance', project_id: 'some-project', table: 'my-table', - zone: 'us-west1-c\u0012', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -38,11 +40,11 @@ describe.only('Bigtable/metricsToRequest', () => { { interval: { endTime: { - seconds: 1738946034, + seconds: fakeEndTime, nanos: 948000000, }, startTime: { - seconds: 
1738946024, + seconds: fakeStartTime, nanos: 951000000, }, }, @@ -82,77 +84,6 @@ describe.only('Bigtable/metricsToRequest', () => { ], unit: 'ms', }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/retry_count', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - finalOperationStatus: '0', - streaming: 'STREAMING', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c\u0012', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - nanos: 948000000, - seconds: 1738946034, - }, - startTime: { - nanos: 951000000, - seconds: 1738946024, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - }, - }, - bucketCounts: [ - '99', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, ], }; const actualRequest = metricsToRequest(exportInput); From c8bb0a8b4ae135a821b95d3f20cf1970210da086 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 13:39:51 -0500 Subject: [PATCH 163/289] Test should complete when the export doesnt error --- src/client-side-metrics/exporter.ts | 1 + system-test/cloud-monitoring-exporter.ts | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index fc2e480d4..4c697d9fb 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -153,6 +153,7 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } +// TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 08bc9bbbc..3e78c77c6 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -26,7 +26,13 @@ describe.only('Bigtable/CloudMonitoringExporter', () => { const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, - done + (result: {code: number}) => { + if (result.code === 0) { + done(); + } else { + done(result.code); + } + } ); })(); }); From 78ec2e89cd32469024829bdfd8c122397bc5efb2 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 15:52:31 -0500 Subject: [PATCH 164/289] Add the fixture to the shared folder --- test-common/metrics-handler-fixture.ts | 75 ++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 test-common/metrics-handler-fixture.ts diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts new file mode 100644 index 000000000..f5899ce43 --- /dev/null +++ b/test-common/metrics-handler-fixture.ts @@ -0,0 +1,75 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +export const expectedRequestsHandled = [ + { + metrics: { + attemptLatency: 2000, + serverLatency: 101, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'true', + attemptStatus: 4, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + attemptLatency: 1000, + serverLatency: 103, + connectivityErrorCount: 1, + }, + attributes: { + appProfileId: undefined, + streamingOperation: 'true', + attemptStatus: 0, + connectivityErrorCount: 1, + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, + { + metrics: { + operationLatency: 6000, + retryCount: 1, + firstResponseLatency: 2000, + }, + attributes: { + appProfileId: undefined, + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'my-project', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientName: 'nodejs-bigtable', + }, + }, +]; From e5ec89f522b1ae39c63268a4f40e5e72705a4584 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 16:55:48 -0500 Subject: [PATCH 165/289] Remove two files not in this PR --- .../operation-metrics-collector.ts | 366 ------------------ src/something.js | 0 test/metrics-collector/metrics-collector.ts | 152 -------- 3 files changed, 518 deletions(-) delete mode 100644 src/client-side-metrics/operation-metrics-collector.ts create mode 100644 src/something.js delete mode 100644 test/metrics-collector/metrics-collector.ts diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts deleted file mode 100644 index b9c3e90a2..000000000 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import * as fs from 'fs'; -import {IMetricsHandler} from './metrics-handler'; -import {MethodName, StreamingState} from './client-side-metrics-attributes'; -import {grpc} from 'google-gax'; - -/** - * An interface representing a Date-like object. Provides a `getTime` method - * for retrieving the time value in milliseconds. Used for abstracting time - * in tests. 
- */ -interface DateLike { - /** - * Returns the time value in milliseconds. - * @returns The time value in milliseconds. - */ - getTime(): number; -} - -/** - * Interface for a provider that returns DateLike objects. Used for mocking dates in tests. - */ -interface DateProvider { - /** - * Returns a DateLike object. - * @returns A DateLike object representing the current time or a fake time value. - */ - getDate(): DateLike; -} - -/** - * The default DateProvider implementation. Returns the current date and time. - */ -class DefaultDateProvider { - /** - * Returns a new Date object representing the current time. - * @returns {Date} The current date and time. - */ - getDate() { - return new Date(); - } -} - -/** - * An interface representing a tabular API surface, such as a Bigtable table. - */ -export interface ITabularApiSurface { - instance: { - id: string; - }; - id: string; - bigtable: { - appProfileId?: string; - }; -} - -/** - * Information about the completion of a single attempt of a Bigtable operation. - * This information is used for recording metrics. - */ -interface OnAttemptCompleteInfo { - connectivityErrorCount: number; - /** - * Whether the operation is a streaming operation or not. - */ - streamingOperation: StreamingState; - /** - * The attempt status of the operation. - */ - attemptStatus: grpc.status; -} - -/** - * Attributes specific to a single attempt of a Bigtable operation. These - * attributes provide information about the attempt's status and whether it was - * part of a streaming operation. - */ -interface AttemptOnlyAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Information about a Bigtable operation to be recorded in client side metrics. - */ -interface OperationOnlyAttributes { - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -} - -const packageJSON = fs.readFileSync('package.json'); -const version = JSON.parse(packageJSON.toString()).version; - -// MetricsCollectorState is a list of states that the metrics collector can be in. -// Tracking the OperationMetricsCollector state is done so that the -// OperationMetricsCollector methods are not called in the wrong order. If the -// methods are called in the wrong order they will not execute and they will -// throw warnings. -// -// The following state transitions are allowed: -// OPERATION_NOT_STARTED -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS -// OPERATION_STARTED_ATTEMPT_IN_PROGRESS -> OPERATION_COMPLETE -enum MetricsCollectorState { - OPERATION_NOT_STARTED, - OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS, - OPERATION_STARTED_ATTEMPT_IN_PROGRESS, - OPERATION_COMPLETE, -} - -/** - * A class for tracing and recording client-side metrics related to Bigtable operations. 
- */ -export class OperationMetricsCollector { - private state: MetricsCollectorState; - private operationStartTime: DateLike | null; - private attemptStartTime: DateLike | null; - private zone: string | undefined; - private cluster: string | undefined; - private tabularApiSurface: ITabularApiSurface; - private methodName: MethodName; - private attemptCount = 0; - private receivedFirstResponse: boolean; - private metricsHandlers: IMetricsHandler[]; - private firstResponseLatency: number | null; - private serverTimeRead: boolean; - private serverTime: number | null; - private dateProvider: DateProvider; - - /** - * @param {ITabularApiSurface} tabularApiSurface Information about the Bigtable table being accessed. - * @param {IMetricsHandler[]} metricsHandlers The metrics handlers used for recording metrics. - * @param {MethodName} methodName The name of the method being traced. - * @param {DateProvider} dateProvider A provider for date/time information (for testing). - */ - constructor( - tabularApiSurface: ITabularApiSurface, - metricsHandlers: IMetricsHandler[], - methodName: MethodName, - dateProvider?: DateProvider - ) { - this.state = MetricsCollectorState.OPERATION_NOT_STARTED; - this.zone = undefined; - this.cluster = undefined; - this.tabularApiSurface = tabularApiSurface; - this.methodName = methodName; - this.operationStartTime = null; - this.attemptStartTime = null; - this.receivedFirstResponse = false; - this.metricsHandlers = metricsHandlers; - this.firstResponseLatency = null; - this.serverTimeRead = false; - this.serverTime = null; - if (dateProvider) { - this.dateProvider = dateProvider; - } else { - this.dateProvider = new DefaultDateProvider(); - } - } - - /** - * Called when the operation starts. Records the start time. - */ - onOperationStart() { - if (this.state === MetricsCollectorState.OPERATION_NOT_STARTED) { - this.operationStartTime = this.dateProvider.getDate(); - this.firstResponseLatency = null; - this.receivedFirstResponse = false; - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - } else { - console.warn('Invalid state transition'); - } - } - - /** - * Called when an attempt (e.g., an RPC attempt) completes. Records attempt latencies. - * @param {string} projectId The id of the project. - * @param {OnAttemptCompleteInfo} info Information about the completed attempt. - */ - onAttemptComplete(projectId: string, info: OnAttemptCompleteInfo) { - if ( - this.state === MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS; - this.attemptCount++; - const endTime = this.dateProvider.getDate(); - if (projectId && this.attemptStartTime) { - const attributes = { - streamingOperation: info.streamingOperation, - attemptStatus: info.attemptStatus, - connectivityErrorCount: info.connectivityErrorCount, - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - const totalTime = endTime.getTime() - this.attemptStartTime.getTime(); - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onAttemptComplete) { - metricsHandler.onAttemptComplete( - { - attemptLatency: totalTime, - serverLatency: this.serverTime ?? 
undefined, - connectivityErrorCount: info.connectivityErrorCount, - }, - attributes - ); - } - }); - } - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when a new attempt starts. Records the start time of the attempt. - */ - onAttemptStart() { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS; - this.attemptStartTime = this.dateProvider.getDate(); - this.serverTime = null; - this.serverTimeRead = false; - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when the first response is received. Records first response latencies. - */ - onResponse(projectId: string) { - if (!this.receivedFirstResponse) { - this.receivedFirstResponse = true; - const endTime = this.dateProvider.getDate(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); - } - } - } - - /** - * Called when an operation completes (successfully or unsuccessfully). - * Records operation latencies, retry counts, and connectivity error counts. - * @param {string} projectId The id of the project. - * @param {OperationOnlyAttributes} info Information about the completed operation. - */ - onOperationComplete(projectId: string, info: OperationOnlyAttributes) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_NOT_IN_PROGRESS - ) { - this.state = MetricsCollectorState.OPERATION_COMPLETE; - const endTime = this.dateProvider.getDate(); - if (projectId && this.operationStartTime) { - const totalTime = endTime.getTime() - this.operationStartTime.getTime(); - { - const operationAttributes = { - finalOperationStatus: info.finalOperationStatus, - streamingOperation: info.streamingOperation, - projectId, - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientName: `nodejs-bigtable/${version}`, - }; - const metrics = { - operationLatency: totalTime, - retryCount: this.attemptCount - 1, - firstResponseLatency: this.firstResponseLatency ?? undefined, - }; - this.metricsHandlers.forEach(metricsHandler => { - if (metricsHandler.onOperationComplete) { - metricsHandler.onOperationComplete(metrics, operationAttributes); - } - }); - } - } - } else { - console.warn('Invalid state transition attempted'); - } - } - - /** - * Called when metadata is received. Extracts server timing information if available. - * @param {string} projectId The id of the project. - * @param {object} metadata The received metadata. - */ - onMetadataReceived( - projectId: string, - metadata: { - internalRepr: Map; - options: {}; - } - ) { - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const durationValues = mappedEntries.get('server-timing')?.split('dur='); - if (durationValues && durationValues[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - const serverTime = parseInt(durationValues[1]); - if (projectId) { - this.serverTime = serverTime; - } - } - } - } - - /** - * Called when status information is received. Extracts zone and cluster information. - * @param {object} status The received status information. 
- */ - onStatusReceived(status: { - metadata: {internalRepr: Map; options: {}}; - }) { - const mappedEntries = new Map( - Array.from(status.metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const instanceInformation = mappedEntries - .get('x-goog-ext-425905942-bin') - ?.replace(new RegExp('\\n', 'g'), '') - .split('\r'); - if (instanceInformation && instanceInformation[0]) { - this.zone = instanceInformation[0]; - } - if (instanceInformation && instanceInformation[1]) { - this.cluster = instanceInformation[1]; - } - } -} diff --git a/src/something.js b/src/something.js new file mode 100644 index 000000000..e69de29bb diff --git a/test/metrics-collector/metrics-collector.ts b/test/metrics-collector/metrics-collector.ts deleted file mode 100644 index 5c158d28c..000000000 --- a/test/metrics-collector/metrics-collector.ts +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {describe} from 'mocha'; -import {TestDateProvider} from '../../test-common/test-date-provider'; -import * as assert from 'assert'; -import * as fs from 'fs'; -import {TestMetricsHandler} from '../../test-common/test-metrics-handler'; -import {OperationMetricsCollector} from '../../src/client-side-metrics/operation-metrics-collector'; -import { - MethodName, - StreamingState, -} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {grpc} from 'google-gax'; - -/** - * A fake implementation of the Bigtable client for testing purposes. Provides a - * metricsTracerFactory and a stubbed projectId method. - */ -class FakeBigtable { - appProfileId?: string; - projectId = 'my-project'; -} - -/** - * A fake implementation of a Bigtable instance for testing purposes. Provides only an ID. - */ -class FakeInstance { - /** - * The ID of the fake instance. - */ - id = 'fakeInstanceId'; -} - -describe('Bigtable/MetricsCollector', () => { - it('should record the right metrics with a typical method call', async () => { - const logger = {value: ''}; - const metricsHandlers = [new TestMetricsHandler(logger)]; - class FakeTable { - id = 'fakeTableId'; - instance = new FakeInstance(); - bigtable = new FakeBigtable(); - - async fakeMethod(): Promise { - function createMetadata(duration: string) { - return { - internalRepr: new Map([ - ['server-timing', Buffer.from(`gfet4t7; dur=${duration}`)], - ]), - options: {}, - }; - } - if (this.bigtable.projectId) { - const status = { - metadata: { - internalRepr: new Map([ - [ - 'x-goog-ext-425905942-bin', - Buffer.from('\n\nus-west1-c \rfake-cluster3'), - ], - ]), - options: {}, - }, - }; - const metricsCollector = new OperationMetricsCollector( - this, - metricsHandlers, - MethodName.READ_ROWS, - new TestDateProvider(logger) - ); - // In this method we simulate a series of events that might happen - // when a user calls one of the Table methods. - // Here is an example of what might happen in a method call: - logger.value += '1. 
The operation starts\n'; - metricsCollector.onOperationStart(); - logger.value += '2. The attempt starts.\n'; - metricsCollector.onAttemptStart(); - logger.value += '3. Client receives status information.\n'; - metricsCollector.onStatusReceived(status); - logger.value += '4. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('101') - ); - logger.value += '5. Client receives first row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '6. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('102') - ); - logger.value += '7. Client receives second row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '8. A transient error occurs.\n'; - metricsCollector.onAttemptComplete(this.bigtable.projectId, { - streamingOperation: StreamingState.STREAMING, - attemptStatus: grpc.status.DEADLINE_EXCEEDED, - connectivityErrorCount: 1, - }); - logger.value += '9. After a timeout, the second attempt is made.\n'; - metricsCollector.onAttemptStart(); - logger.value += '10. Client receives status information.\n'; - metricsCollector.onStatusReceived(status); - logger.value += '11. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('103') - ); - logger.value += '12. Client receives third row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '13. Client receives metadata.\n'; - metricsCollector.onMetadataReceived( - this.bigtable.projectId, - createMetadata('104') - ); - logger.value += '14. Client receives fourth row.\n'; - metricsCollector.onResponse(this.bigtable.projectId); - logger.value += '15. User reads row 1\n'; - logger.value += '16. 
Stream ends, operation completes\n'; - metricsCollector.onAttemptComplete(this.bigtable.projectId, { - attemptStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - connectivityErrorCount: 1, - }); - metricsCollector.onOperationComplete(this.bigtable.projectId, { - finalOperationStatus: grpc.status.OK, - streamingOperation: StreamingState.STREAMING, - }); - } - } - } - const table = new FakeTable(); - await table.fakeMethod(); - const expectedOutput = fs.readFileSync( - './test/metrics-collector/typical-method-call.txt', - 'utf8' - ); - // Ensure events occurred in the right order here: - assert.strictEqual(logger.value, expectedOutput.replace(/\r/g, '')); - }); -}); From 5403a1b9c34e62d864079e2f76780baed3f70466 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 16:57:15 -0500 Subject: [PATCH 166/289] delete empty file --- src/something.js | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 src/something.js diff --git a/src/something.js b/src/something.js deleted file mode 100644 index e69de29bb..000000000 From 176ed0278459a3a050a96b55d341b46240baee80 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 16:58:51 -0500 Subject: [PATCH 167/289] Remove files that are already in the other PR --- test-common/test-date-provider.ts | 59 --------------------------- test-common/test-metrics-handler.ts | 63 ----------------------------- 2 files changed, 122 deletions(-) delete mode 100644 test-common/test-date-provider.ts delete mode 100644 test-common/test-metrics-handler.ts diff --git a/test-common/test-date-provider.ts b/test-common/test-date-provider.ts deleted file mode 100644 index 8eaa7b38c..000000000 --- a/test-common/test-date-provider.ts +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * A test implementation of a Date-like object. Used for testing purposes. It provides a - * getTime method that returns a pre-determined fake date value, allowing for - * deterministic testing of time-dependent functionality. - */ -class TestDateLike { - private fakeDate; - /** - * @param {number} fakeDate The fake date value to be returned by getTime(), in milliseconds. - */ - constructor(fakeDate: number) { - this.fakeDate = fakeDate; - } - /** - * Returns the fake date value that this object was created with. - * @returns {number} The fake date, in milliseconds. - */ - getTime() { - return this.fakeDate; - } -} - -/** - * A test implementation of a DateProvider. Used for testing purposes. Provides - * a deterministic series of fake dates, with each call to getDate() returning a date 1000ms later than the last. - * Logs each date value returned for verification purposes. - */ -export class TestDateProvider { - private dateCounter = 0; - private messages: {value: string}; - - constructor(messages: {value: string}) { - this.messages = messages; - } - /** - * Returns a new fake date 1000ms later than the last. Logs the date for test verification. 
- * @returns {TestDateLike} A fake date object. - */ - getDate() { - // The test assumes exactly 1s passes between each getDate call. - this.dateCounter = this.dateCounter + 1000; - this.messages.value += `getDate call returns ${this.dateCounter.toString()} ms\n`; - return new TestDateLike(this.dateCounter); - } -} diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts deleted file mode 100644 index 8166155b9..000000000 --- a/test-common/test-metrics-handler.ts +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import { - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, -} from '../src/client-side-metrics/metrics-handler'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../src/client-side-metrics/client-side-metrics-attributes'; - -/** - * A test implementation of the IMetricsHandler interface. Used for testing purposes. - * It logs the metrics and attributes received by the onOperationComplete and onAttemptComplete methods. - */ -export class TestMetricsHandler { - private messages: {value: string}; - - constructor(messages: {value: string}) { - this.messages = messages; - } - /** - * Logs the metrics and attributes received for an operation completion. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {Attributes} attributes Attributes associated with the completed operation. - */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - attributes.clientName = 'nodejs-bigtable'; - this.messages.value += 'Recording parameters for onOperationComplete:\n'; - this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; - this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; - } - - /** - * Logs the metrics and attributes received for an attempt completion. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {Attributes} attributes Attributes associated with the completed attempt. 
- */ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - attributes.clientName = 'nodejs-bigtable'; - this.messages.value += 'Recording parameters for onAttemptComplete:\n'; - this.messages.value += `metrics: ${JSON.stringify(metrics)}\n`; - this.messages.value += `attributes: ${JSON.stringify(attributes)}\n`; - } -} From 08c1c1ba349e04cd012684ca475ac5b6d4429650 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 10 Feb 2025 17:02:17 -0500 Subject: [PATCH 168/289] Remove the metrics handler fixture --- .../metrics-handler-fixture.ts | 75 ------------------- 1 file changed, 75 deletions(-) delete mode 100644 test/metrics-collector/metrics-handler-fixture.ts diff --git a/test/metrics-collector/metrics-handler-fixture.ts b/test/metrics-collector/metrics-handler-fixture.ts deleted file mode 100644 index b9280bfa0..000000000 --- a/test/metrics-collector/metrics-handler-fixture.ts +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -export const expectedRequestsHandled = [ - { - metrics: { - attemptLatency: 2000, - serverLatency: 101, - connectivityErrorCount: 1, - }, - attributes: { - appProfileId: undefined, - streamingOperation: 'streaming', - attemptStatus: 4, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, - { - metrics: { - attemptLatency: 1000, - serverLatency: 103, - connectivityErrorCount: 1, - }, - attributes: { - appProfileId: undefined, - streamingOperation: 'streaming', - attemptStatus: 0, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, - { - metrics: { - operationLatency: 6000, - retryCount: 1, - firstResponseLatency: 2000, - }, - attributes: { - appProfileId: undefined, - finalOperationStatus: 0, - streamingOperation: 'streaming', - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c ', - methodName: 'readRows', - clientName: 'nodejs-bigtable', - }, - }, -]; From be7673fad26622fe16e0f771efa253dc48d95b30 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 11 Feb 2025 10:30:08 -0500 Subject: [PATCH 169/289] Use 3 metrics instead of 1 --- test-common/export-input-fixture.ts | 88 +++++++ test/metrics-collector/metricsToRequest.ts | 275 ++++++++++++++++----- 2 files changed, 297 insertions(+), 66 deletions(-) diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 6147565e2..2d29306b0 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -91,6 +91,94 @@ export const exportInput = { }, ], }, + { + descriptor: { + name: 
'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'mutateRows', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + appProfileId: 'fake-app-profile-id', + finalOperationStatus: 0, + streamingOperation: 'true', + projectId: 'some-project', + instanceId: 'emulator-test-instance', + table: 'my-table', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'sampleRowKeys', + clientName: 'nodejs-bigtable/5.1.2', + }, + startTime: [fakeStartTime, 951000000], + endTime: [fakeEndTime, 948000000], + value: { + min: 76, + max: 1337, + sum: 11979, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], + }, + count: 99, + }, + }, + ], + }, ], }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 6a0bc5919..368f62f48 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -7,85 +7,228 @@ import { } from '../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; -// TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { - it('Converts a counter and a histogram to the cloud monitoring format', () => { - const expectedRequest = { - name: 'projects/some-project', - timeSeries: [ +export const expectedRequest = { + name: 'projects/some-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'readRows', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: 
{ - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - status: '0', - streaming: 'true', + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', + startTime: { + seconds: fakeStartTime, + nanos: 951000000, }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'mutateRows', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, + }, + startTime: { + seconds: fakeStartTime, + nanos: 951000000, + }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], }, }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, - 5000, 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: 'fake-app-profile-id', + client_name: 'nodejs-bigtable/5.1.2', + method: 'sampleRowKeys', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'fake-cluster3', + instance: 'emulator-test-instance', + project_id: 'some-project', + table: 'my-table', + zone: 'us-west1-c', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: fakeEndTime, + nanos: 948000000, + }, + startTime: { + seconds: fakeStartTime, + nanos: 951000000, + }, + }, + value: { + distributionValue: { + count: '99', + mean: 121, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, ], }, }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '93', + '0', + '5', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], }, - ], - unit: 'ms', + }, }, ], - }; + unit: 'ms', + }, + ], +}; + +// TODO: Generate the 
export code +describe.only('Bigtable/metricsToRequest', () => { + it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); }); From e6d66064d8e6520b2597c3eaa05290f50606d5f7 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 11 Feb 2025 15:33:41 -0500 Subject: [PATCH 170/289] Replace with proper buckets --- src/client-side-metrics/exporter.ts | 124 +++++++++- .../gcp-metrics-handler.ts | 13 +- test/metrics-collector/metricsToRequest.ts | 225 ++++-------------- 3 files changed, 186 insertions(+), 176 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 4c697d9fb..598a38926 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,6 +18,8 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; +import {expectedRequest} from '../../test/metrics-collector/metricsToRequest'; +import {exportInput} from '../../test-common/export-input-fixture'; interface ExportResult { code: number; @@ -153,6 +155,110 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } +const goRequestToExporter = { + name: 'projects/cloud-native-db-dpes-shared', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + app_profile: '', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.MutateRows', + status: 'OK', + // "streaming": "true" + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + cluster: 'bahaaiman-instance-01-c1', + instance: 'interim-instance3', + project_id: 'cloud-native-db-dpes-shared', + table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', + zone: 'us-central1-f', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: Math.floor(Date.now() / 1000), + }, + startTime: { + seconds: Math.floor(Date.now() / 1000) - 1000, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 331, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; + // TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); @@ -165,9 +271,25 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. 
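// ---------------------------------------------------------------------------
// Editor's sketch (not part of this patch): how a caller is expected to drive
// this export path, mirroring the system test added in PATCH 163 above. The
// variable `metricsForTest` is a placeholder for whatever ResourceMetrics
// object the caller has collected; the other names come from this patch.
//
//   const exporter = new CloudMonitoringExporter();
//   exporter.export(metricsForTest as unknown as ResourceMetrics, result => {
//     // On success the exporter invokes the callback with {code: 0};
//     // otherwise the callback receives the error from createTimeSeries,
//     // whose code is non-zero (the system test treats that as a failure).
//     if (result.code === 0) {
//       console.log('metrics exported');
//     } else {
//       console.error('export failed', result);
//     }
//   });
// ---------------------------------------------------------------------------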
const request = metricsToRequest(metrics as unknown as ExportInput); - await this.monitoringClient.createTimeSeries( + /* + const result = await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); + */ + const usedRequest = JSON.parse( + JSON.stringify(expectedRequest).replace( + /some-project/g, + 'cloud-native-db-dpes-shared' + ) + ); + await this.monitoringClient.createTimeSeries( + usedRequest as ICreateTimeSeriesRequest + ); + /* + await this.monitoringClient.createTimeSeries( + goRequestToExporter as ICreateTimeSeriesRequest + ); + */ const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index afd2e12b4..2487d0c70 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -72,11 +72,12 @@ export class GCPMetricsHandler implements IMetricsHandler { if (!this.initialized) { this.initialized = true; const sumAggregation = Aggregation.Sum(); - const histogramAggregation = new ExplicitBucketHistogramAggregation([ + const buckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, - ]); + ]; + const histogramAggregation = new ExplicitBucketHistogramAggregation(); const viewList = [ 'operation_latencies', 'first_response_latencies', @@ -120,6 +121,10 @@ export class GCPMetricsHandler implements IMetricsHandler { { description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: 'ms', + advice: { + explicitBucketBoundaries: buckets, + }, } ), attemptLatencies: meter.createHistogram( @@ -128,6 +133,9 @@ export class GCPMetricsHandler implements IMetricsHandler { description: 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', + advice: { + explicitBucketBoundaries: buckets, + }, } ), retryCount: meter.createHistogram( @@ -158,6 +166,7 @@ export class GCPMetricsHandler implements IMetricsHandler { { description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', } ), connectivityErrorCount: meter.createHistogram( diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 368f62f48..cfa9cfe98 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -15,20 +15,21 @@ export const expectedRequest = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'readRows', - status: '0', - streaming: 'true', + client_name: 'go-bigtable/1.35.0', + client_uid: + 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + method: 'Bigtable.ReadRows', + status: 'OK', }, }, resource: { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance', + instance: 'emulator-test-instance2', project_id: 'some-project', table: 'my-table', - zone: 'us-west1-c', + zone: 'us-central1-f', }, }, metricKind: 'CUMULATIVE', @@ -37,185 +38,63 @@ export const expectedRequest = { { interval: { endTime: { - seconds: fakeEndTime, - nanos: 948000000, + seconds: Math.floor(Date.now() / 1000), }, startTime: { - seconds: fakeStartTime, - nanos: 951000000, + seconds: Math.floor(Date.now() / 1000) - 1000, }, }, value: { distributionValue: { - count: '99', - mean: 121, + count: '1', + mean: 376.103605, bucketOptions: { explicitBuckets: { bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, + 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ], }, }, bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - method: 'mutateRows', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, - }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 
'nodejs-bigtable/5.1.2', - method: 'sampleRowKeys', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'emulator-test-instance', - project_id: 'some-project', - table: 'my-table', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - nanos: 948000000, - }, - startTime: { - seconds: fakeStartTime, - nanos: 951000000, - }, - }, - value: { - distributionValue: { - count: '99', - mean: 121, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '93', - '0', - '5', - '0', - '0', - '1', - '0', - '0', - '0', - '0', + '0', //1 + '0', //2 + '0', //3 + '0', //4 + '0', //5 + '0', //6 + '0', //7 + '0', //8 + '0', //9 + '0', //10 + '0', //11 + '0', //12 + '0', //13 + '0', //14 + '0', //15 + '0', //16 + '0', //17 + '0', //18 + '0', //19 + '0', //20 + '0', //21 + '0', //22 + '0', //23 + '0', //24 + '1', //25 + '0', //26 + '0', //27 + '0', //28 + '0', //29 + '0', //30 + '0', //31 + '0', //32 + '0', //33 + '0', //34 + '0', //35 + '0', //36 + '0', //37 ], }, }, @@ -227,7 +106,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 285454f748b2ce1f1cf6bfe0de1f9dad6b83feb1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 12 Feb 2025 17:20:35 -0500 Subject: [PATCH 171/289] Change the metrics handler fixture --- test-common/metrics-handler-fixture.ts | 63 ++++++++++++++------------ 1 file changed, 35 insertions(+), 28 deletions(-) diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index f5899ce43..f008f86fd 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -17,59 +17,66 @@ export const expectedRequestsHandled = [ metrics: { attemptLatency: 2000, serverLatency: 101, - connectivityErrorCount: 1, + connectivityErrorCount: 0, }, attributes: { - appProfileId: undefined, streamingOperation: 'true', attemptStatus: 4, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', }, }, { metrics: { - attemptLatency: 1000, + attemptLatency: 2000, serverLatency: 103, - connectivityErrorCount: 1, + connectivityErrorCount: 0, }, attributes: { - appProfileId: undefined, streamingOperation: 'true', attemptStatus: 0, - connectivityErrorCount: 1, - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + 
methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', }, }, { metrics: { - operationLatency: 6000, + operationLatency: 7000, retryCount: 1, - firstResponseLatency: 2000, + firstResponseLatency: 5000, }, attributes: { - appProfileId: undefined, finalOperationStatus: 0, streamingOperation: 'true', - projectId: 'my-project', - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, clientName: 'nodejs-bigtable', + projectId: 'my-project', }, }, ]; From 12a5cc72ee7d4754bbf83067949108a0d031bffa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 12:53:38 -0500 Subject: [PATCH 172/289] Stop using stub in exporter --- src/client-side-metrics/exporter.ts | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 598a38926..ef9016fec 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -271,25 +271,9 @@ export class CloudMonitoringExporter extends MetricExporter { try { // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); - /* - const result = await this.monitoringClient.createTimeSeries( - request as ICreateTimeSeriesRequest - ); - */ - const usedRequest = JSON.parse( - JSON.stringify(expectedRequest).replace( - /some-project/g, - 'cloud-native-db-dpes-shared' - ) - ); await this.monitoringClient.createTimeSeries( - usedRequest as ICreateTimeSeriesRequest - ); - /* - await this.monitoringClient.createTimeSeries( - goRequestToExporter as ICreateTimeSeriesRequest + request as ICreateTimeSeriesRequest ); - */ const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { From 54239f3039a4a6357200d0c3f8ba0f418f726c4c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 13:42:53 -0500 Subject: [PATCH 173/289] Use more realistic buckets --- src/client-side-metrics/exporter.ts | 2 + test-common/export-input-fixture.ts | 487 +-------------------- test/metrics-collector/metricsToRequest.ts | 54 ++- 3 files changed, 51 insertions(+), 492 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index ef9016fec..7b8897acc 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -63,6 +63,7 @@ export interface ExportInput { zone: string; methodName: string; clientName: string; + clientUid: string; }; startTime: number[]; endTime: number[]; @@ -96,6 +97,7 @@ export function metricsToRequest(exportArgs: ExportInput) { method: allAttributes.methodName, status: allAttributes.finalOperationStatus.toString(), streaming: allAttributes.streamingOperation, + client_uid: allAttributes.clientUid, }; const resourceLabels = { cluster: allAttributes.cluster, diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 2d29306b0..028e9e7c0 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -72,6 +72,7 @@ export const exportInput = { zone: 'us-west1-c', methodName: 'readRows', clientName: 'nodejs-bigtable/5.1.2', + clientUid: 'fake-uuid', }, startTime: [fakeStartTime, 951000000], endTime: [fakeEndTime, 948000000], @@ -91,493 +92,7 
@@ export const exportInput = { }, ], }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'mutateRows', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'sampleRowKeys', - clientName: 'nodejs-bigtable/5.1.2', - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], - }, - count: 99, - }, - }, - ], - }, - ], - }, - ], -}; - -const serverLatencyExportOutput = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 376.103605, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - 
'0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/attempt_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.ReadRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '5', - mean: 272.559932, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '2', - '2', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 331, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - { - metric: { - type: 'bigtable.googleapis.com/internal/client/server_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.ReadRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { 
- type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'bahaaiman-instance-01', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: 2000, - }, - startTime: { - seconds: 1000, - }, - }, - value: { - distributionValue: { - count: '5', - mean: 230, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '2', - '2', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, ], - unit: 'ms', }, ], }; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index cfa9cfe98..ce04b8ad8 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -16,8 +16,7 @@ export const expectedRequest = { labels: { app_profile: 'fake-app-profile-id', client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', + client_uid: 'fake-client-uid', method: 'Bigtable.ReadRows', status: 'OK', }, @@ -51,9 +50,47 @@ export const expectedRequest = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 0, // 1 + 0.01, // 2 + 0.05, // 3 + 0.1, // 4 + 0.3, // 5 + 0.6, // 6 + 0.8, // 7 + 1, // 8 + 2, // 9 + 3, // 10 + 4, // 11 + 5, // 12 + 6, // 13 + 8, // 14 + 10, // 15 + 13, // 16 + 16, // 17 + 20, // 18 + 25, // 19 + 30, // 20 + 40, // 21 + 50, // 22 + 65, // 23 + 80, // 24 + 100, // 25 + 130, // 26 + 160, // 27 + 200, // 28 + 250, // 29 + 300, // 30 + 400, // 31 + 500, // 32 + 650, // 33 + 800, // 34 + 1000, // 35 + 2000, // 36 + 5000, // 37 + 10000, // 38 + 20000, // 39 + 50000, // 40 + 100000, // 41 ], }, }, @@ -95,6 +132,11 @@ export const expectedRequest = { '0', //35 '0', //36 '0', //37 + '0', //38 + '0', //39 + '0', //40 + '0', //41 + '0', //42 ], }, }, @@ -106,7 +148,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 86890aa20c5deab9634f49f02fe1e8bdfd2b9fd5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 13:43:49 -0500 Subject: [PATCH 174/289] Remove the go request to export --- src/client-side-metrics/exporter.ts | 106 ---------------------------- 1 file changed, 106 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 7b8897acc..09c9e0ec1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,8 +18,6 @@ import {ServiceError} from 'google-gax'; import 
{MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {expectedRequest} from '../../test/metrics-collector/metricsToRequest'; -import {exportInput} from '../../test-common/export-input-fixture'; interface ExportResult { code: number; @@ -157,110 +155,6 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } -const goRequestToExporter = { - name: 'projects/cloud-native-db-dpes-shared', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: '', - client_name: 'go-bigtable/1.35.0', - client_uid: - 'go-9f4f393d-c57f-457c-9445-550b8a6f7d00@bahaaiman-ct-01.c.googlers.com', - method: 'Bigtable.MutateRows', - status: 'OK', - // "streaming": "true" - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'bahaaiman-instance-01-c1', - instance: 'interim-instance3', - project_id: 'cloud-native-db-dpes-shared', - table: 'profile-b5e6f29d-2122-4d2c-8c12-cfb8e90ca05f', - zone: 'us-central1-f', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: Math.floor(Date.now() / 1000), - }, - startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 331, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, - 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, - 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, - 800000, 1600000, 3200000, - ], - }, - }, - bucketCounts: [ - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, - }, - }, - ], - unit: 'ms', - }, - ], -}; - // TODO: Add test for when the export fails export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); From a0fa7e42e951cf96455f413073b47bf52bed209b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:08:31 -0500 Subject: [PATCH 175/289] Modify the fixtures to be more realistic --- src/client-side-metrics/exporter.ts | 2 - test-common/export-input-fixture.ts | 94 ++++++++++++++++++++-- test/metrics-collector/metricsToRequest.ts | 15 ++-- 3 files changed, 96 insertions(+), 15 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 09c9e0ec1..462f80df0 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -122,11 +122,9 @@ export function metricsToRequest(exportArgs: ExportInput) { interval: { endTime: { seconds: dataPoint.endTime[0], - nanos: dataPoint.endTime[1], }, startTime: { seconds: dataPoint.startTime[0], - nanos: dataPoint.startTime[1], }, }, value: { diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 028e9e7c0..3a8495fe3 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -69,8 +69,8 @@ export const exportInput = { instanceId: 'emulator-test-instance', table: 'my-table', cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', + zone: 'us-central1-f', + methodName: 
'Bigtable.ReadRows', clientName: 'nodejs-bigtable/5.1.2', clientUid: 'fake-uuid', }, @@ -82,12 +82,94 @@ export const exportInput = { sum: 11979, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, // 1 + 0.01, // 2 + 0.05, // 3 + 0.1, // 4 + 0.3, // 5 + 0.6, // 6 + 0.8, // 7 + 1, // 8 + 2, // 9 + 3, // 10 + 4, // 11 + 5, // 12 + 6, // 13 + 8, // 14 + 10, // 15 + 13, // 16 + 16, // 17 + 20, // 18 + 25, // 19 + 30, // 20 + 40, // 21 + 50, // 22 + 65, // 23 + 80, // 24 + 100, // 25 + 130, // 26 + 160, // 27 + 200, // 28 + 250, // 29 + 300, // 30 + 400, // 31 + 500, // 32 + 650, // 33 + 800, // 34 + 1000, // 35 + 2000, // 36 + 5000, // 37 + 10000, // 38 + 20000, // 39 + 50000, // 40 + 100000, // 41 + ], + counts: [ + 0, //1 + 0, //2 + 0, //3 + 0, //4 + 0, //5 + 0, //6 + 0, //7 + 0, //8 + 0, //9 + 0, //10 + 0, //11 + 0, //12 + 0, //13 + 0, //14 + 0, //15 + 0, //16 + 0, //17 + 0, //18 + 0, //19 + 0, //20 + 0, //21 + 0, //22 + 0, //23 + 0, //24 + 1, //25 + 0, //26 + 0, //27 + 0, //28 + 0, //29 + 0, //30 + 0, //31 + 0, //32 + 0, //33 + 0, //34 + 0, //35 + 0, //36 + 0, //37 + 0, //38 + 0, //39 + 0, //40 + 0, //41 + 0, //42 ], - counts: [0, 0, 0, 0, 0, 0, 93, 0, 5, 0, 0, 1, 0, 0, 0, 0], }, - count: 99, + count: 1, }, }, ], diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index ce04b8ad8..97cf61124 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -15,17 +15,18 @@ export const expectedRequest = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { app_profile: 'fake-app-profile-id', - client_name: 'go-bigtable/1.35.0', - client_uid: 'fake-client-uid', + client_name: 'nodejs-bigtable/5.1.2', + client_uid: 'fake-uuid', method: 'Bigtable.ReadRows', - status: 'OK', + status: '0', + streaming: 'true', }, }, resource: { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance2', + instance: 'emulator-test-instance', project_id: 'some-project', table: 'my-table', zone: 'us-central1-f', @@ -37,16 +38,16 @@ export const expectedRequest = { { interval: { endTime: { - seconds: Math.floor(Date.now() / 1000), + seconds: fakeEndTime, }, startTime: { - seconds: Math.floor(Date.now() / 1000) - 1000, + seconds: fakeStartTime, }, }, value: { distributionValue: { count: '1', - mean: 376.103605, + mean: 121, bucketOptions: { explicitBuckets: { bounds: [ From f5267c1745717585c55efdaab43f3a12ceab7b3d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:15:23 -0500 Subject: [PATCH 176/289] Change the mean --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 97cf61124..c8162c766 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -47,7 +47,7 @@ export const expectedRequest = { value: { distributionValue: { count: '1', - mean: 121, + mean: 11979, bucketOptions: { explicitBuckets: { bounds: [ From 887d98887f432ff63cdff13d5f376921dd2e414b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 14:17:50 -0500 Subject: [PATCH 177/289] Remove only --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 
c8162c766..15b839969 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 075bf9b89d9cd8c83b1e962362e4f7753362ac09 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 16:26:29 -0500 Subject: [PATCH 178/289] Add the export input fixture --- .../client-side-metrics-attributes.ts | 122 ++---- src/client-side-metrics/exporter.ts | 2 +- .../gcp-metrics-handler.ts | 25 +- test-common/expected-otel-export-input.ts | 405 ++++++++++++++++++ test-common/metrics-handler-fixture.ts | 17 +- test/metrics-collector/gcp-metrics-handler.ts | 89 ++++ 6 files changed, 550 insertions(+), 110 deletions(-) create mode 100644 test-common/expected-otel-export-input.ts create mode 100644 test/metrics-collector/gcp-metrics-handler.ts diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index 0672f6f1c..ffc6dfa44 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -14,88 +14,23 @@ import {grpc} from 'google-gax'; -/** - * Standard attributes common to various Bigtable client-side metrics. These attributes provide - * contextual information about the Bigtable environment and operation. - */ -interface StandardAttributes { - projectId: string; +// The backend is expecting true/false and will fail if other values are provided. +// export in open telemetry is expecting string value attributes so we don't use boolean +// true/false. +export enum StreamingState { + STREAMING = 'true', + UNARY = 'false', +} + +type IMetricsCollectorData = { instanceId: string; table: string; cluster?: string; zone?: string; appProfileId?: string; methodName: MethodName; - clientName: string; -} - -export enum StreamingState { - STREAMING = 'streaming', - UNARY = 'unary', -} - -/** - * Attributes associated with operation latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the completed operation. - */ -interface OperationLatencyAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with attempt latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. - */ -interface AttemptLatencyAttributes extends StandardAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with retry count metrics for Bigtable client operations. These attributes - * provide context about the Bigtable environment and the final status of the operation. - */ -interface RetryCountAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; -} - -/** - * Attributes associated with application blocking latencies for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the operation being performed. 
- */ -type ApplicationBlockingLatenciesAttributes = StandardAttributes; - -/** - * Attributes associated with first response latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the final status of the operation. - */ -interface FirstResponseLatencyAttributes extends StandardAttributes { - finalOperationStatus: grpc.status; -} - -/** - * Attributes associated with server latency metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment, the specific attempt, and whether the operation was streaming. - */ -interface ServerLatenciesAttributes extends StandardAttributes { - attemptStatus: grpc.status; - streamingOperation: StreamingState; -} - -/** - * Attributes associated with connectivity error count metrics for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the status of the attempt. - */ -interface ConnectivityErrorCountAttributes extends StandardAttributes { - attemptStatus: grpc.status; -} - -/** - * Attributes associated with client blocking latencies for Bigtable client operations. - * These attributes provide context about the Bigtable environment and the operation being performed. - */ -type ClientBlockingLatenciesAttributes = StandardAttributes; + clientUid: string; +}; /** * Attributes associated with the completion of a Bigtable operation. These @@ -103,10 +38,13 @@ type ClientBlockingLatenciesAttributes = StandardAttributes; * operation, and its final status. They are used for recording metrics such as * operation latency, first response latency, and retry count. */ -export type OnOperationCompleteAttributes = - | OperationLatencyAttributes - | FirstResponseLatencyAttributes - | RetryCountAttributes; +export type OnOperationCompleteAttributes = { + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + finalOperationStatus: grpc.status; + streamingOperation: StreamingState; +}; /** * Attributes associated with the completion of a single attempt of a Bigtable @@ -115,21 +53,23 @@ export type OnOperationCompleteAttributes = * are used for recording metrics such as attempt latency, server latency, and * connectivity errors. */ -export type OnAttemptCompleteAttributes = - | AttemptLatencyAttributes - | ConnectivityErrorCountAttributes - | ServerLatenciesAttributes - | ClientBlockingLatenciesAttributes; +export type OnAttemptCompleteAttributes = { + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + attemptStatus: grpc.status; + streamingOperation: StreamingState; +}; /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. 
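 * For example, READ_ROWS resolves to 'Bigtable.ReadRows', the fully qualified value that
 * metricsToRequest writes into the method label of each exported time series.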
*/ export enum MethodName { - READ_ROWS = 'readRows', - MUTATE_ROW = 'mutateRow', - CHECK_AND_MUTATE_ROW = 'checkAndMutateRow', - READ_MODIFY_WRITE_ROW = 'readModifyWriteRow', - SAMPLE_ROW_KEYS = 'sampleRowKeys', - MUTATE_ROWS = 'mutateRows', + READ_ROWS = 'Bigtable.ReadRows', + MUTATE_ROW = 'Bigtable.MutateRow', + CHECK_AND_MUTATE_ROW = 'Bigtable.CheckAndMutateRow', + READ_MODIFY_WRITE_ROW = 'Bigtable.ReadModifyWriteRow', + SAMPLE_ROW_KEYS = 'Bigtable.SampleRowKeys', + MUTATE_ROWS = 'Bigtable.MutateRows', } diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 462f80df0..905003a57 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -19,7 +19,7 @@ import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -interface ExportResult { +export interface ExportResult { code: number; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 2487d0c70..46026ed8d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -53,12 +53,15 @@ interface Metrics { * This handler records metrics such as operation latency, attempt latency, retry count, and more, * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ -export class GCPMetricsHandler implements IMetricsHandler { +export class GCPMetricsHandler + implements IMetricsHandler +{ private initialized = false; private otelMetrics?: Metrics; - private exporter: typeof MetricExporter; + private exporter: T; - constructor(exporter: typeof MetricExporter) { + constructor(exporter: T) { + console.log('Passing in exporter'); this.exporter = exporter; } @@ -77,6 +80,7 @@ export class GCPMetricsHandler implements IMetricsHandler { 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ]; + /* const histogramAggregation = new ExplicitBucketHistogramAggregation(); const viewList = [ 'operation_latencies', @@ -95,8 +99,9 @@ export class GCPMetricsHandler implements IMetricsHandler { aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, }) ); + */ const meterProvider = new MeterProvider({ - views: viewList, + // views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', @@ -109,7 +114,7 @@ export class GCPMetricsHandler implements IMetricsHandler { new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. - exportIntervalMillis: 100_000, + exportIntervalMillis: 10_000, exporter: this.exporter, }), ], @@ -122,9 +127,6 @@ export class GCPMetricsHandler implements IMetricsHandler { description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', - advice: { - explicitBucketBoundaries: buckets, - }, } ), attemptLatencies: meter.createHistogram( @@ -133,9 +135,6 @@ export class GCPMetricsHandler implements IMetricsHandler { description: 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', - advice: { - explicitBucketBoundaries: buckets, - }, } ), retryCount: meter.createHistogram( @@ -185,6 +184,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), }; + console.log('Done initializing'); } } @@ -198,6 +198,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { + console.log('onOperationComplete'); this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, @@ -221,7 +222,7 @@ export class GCPMetricsHandler implements IMetricsHandler { metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - console.log('onAttemptComplete handler'); + console.log('onAttemptComplete'); this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts new file mode 100644 index 000000000..9cd3e30ad --- /dev/null +++ b/test-common/expected-otel-export-input.ts @@ -0,0 +1,405 @@ +export const expectedOtelExportInput = { + resource: { + _attributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'my-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + asyncAttributesPending: false, + _syncAttributes: { + 'service.name': 'Cloud Bigtable Table', + 'telemetry.sdk.language': 'nodejs', + 'telemetry.sdk.name': 'opentelemetry', + 'telemetry.sdk.version': '1.30.0', + 'cloud.provider': 'gcp', + 'cloud.platform': 'gce_instance', + 'cloud.resource_manager.project_id': 'my-project', + 'monitored_resource.type': 'bigtable_client_raw', + }, + _asyncAttributesPromise: {}, + }, + scopeMetrics: [ + { + scope: { + name: 'bigtable.googleapis.com', + version: '', + }, + metrics: [ + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/operation_latencies', + type: 'HISTOGRAM', + description: + "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 7000, + max: 7000, + sum: 7000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/attempt_latencies', + type: 'HISTOGRAM', + description: + 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 2000, + max: 2000, + sum: 2000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 2000, + max: 2000, + sum: 2000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/retry_count', + type: 'HISTOGRAM', + description: + 'A counter that records the number of attempts that an operation required to complete. Under normal circumstances, this value is empty.', + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 1, + max: 1, + sum: 1, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/first_response_latencies', + type: 'HISTOGRAM', + description: + 'Latencies from when a client sends a request and receives the first row of the response.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 5000, + max: 5000, + sum: 5000, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 
'bigtable.googleapis.com/internal/client/server_latencies', + type: 'HISTOGRAM', + description: + 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', + unit: 'ms', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 101, + max: 101, + sum: 101, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 103, + max: 103, + sum: 103, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + { + descriptor: { + name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + type: 'HISTOGRAM', + description: + "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + unit: '', + valueType: 1, + advice: {}, + }, + aggregationTemporality: 1, + dataPointType: 0, + dataPoints: [ + { + attributes: { + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + { + attributes: { + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'readRows', + clientUid: 'fake-uuid', + }, + projectId: 'my-project', + }, + startTime: [123, 789], + endTime: [456, 789], + value: { + min: 0, + max: 0, + sum: 0, + buckets: { + boundaries: [ + 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, + 7500, 10000, + ], + counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + }, + count: 1, + }, + }, + ], + }, + ], + }, + ], +}; diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index f008f86fd..8238d3150 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+import { + MethodName, + StreamingState, +} from '../src/client-side-metrics/client-side-metrics-attributes'; + export const expectedRequestsHandled = [ { metrics: { @@ -20,7 +25,7 @@ export const expectedRequestsHandled = [ connectivityErrorCount: 0, }, attributes: { - streamingOperation: 'true', + streamingOperation: StreamingState.STREAMING, attemptStatus: 4, clientName: 'nodejs-bigtable', metricsCollectorData: { @@ -29,7 +34,7 @@ export const expectedRequestsHandled = [ table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'readRows', + methodName: MethodName.READ_ROWS, clientUid: 'fake-uuid', }, projectId: 'my-project', @@ -42,7 +47,7 @@ export const expectedRequestsHandled = [ connectivityErrorCount: 0, }, attributes: { - streamingOperation: 'true', + streamingOperation: StreamingState.STREAMING, attemptStatus: 0, clientName: 'nodejs-bigtable', metricsCollectorData: { @@ -51,7 +56,7 @@ export const expectedRequestsHandled = [ table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'readRows', + methodName: MethodName.READ_ROWS, clientUid: 'fake-uuid', }, projectId: 'my-project', @@ -65,14 +70,14 @@ export const expectedRequestsHandled = [ }, attributes: { finalOperationStatus: 0, - streamingOperation: 'true', + streamingOperation: StreamingState.STREAMING, metricsCollectorData: { appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'readRows', + methodName: MethodName.READ_ROWS, clientUid: 'fake-uuid', }, clientName: 'nodejs-bigtable', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts new file mode 100644 index 000000000..7e24b41c1 --- /dev/null +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -0,0 +1,89 @@ +import {describe} from 'mocha'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ExportResult} from '../../src/client-side-metrics/exporter'; +import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; +import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; +import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../../src/client-side-metrics/client-side-metrics-attributes'; +import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; +import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; + +function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} + +// Example usage: +replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); + +// You can now use updatedInput with metricsToRequest, and it will have the new timestamps. 
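+// Concretely, replaceTimestamps overwrites every data point's hrtime-style [seconds, nanos]
+// startTime/endTime pair with the fixed values passed in, so a comparison along these lines
+// (a sketch, assuming the handler has already exported its ResourceMetrics into metrics):
+//   replaceTimestamps(metrics as unknown as typeof expectedOtelExportInput, [123, 789], [456, 789]);
+//   assert.deepStrictEqual(metrics, expectedOtelExportInput);
+// no longer depends on when the export actually ran.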
+ +describe.only('Bigtable/GCPMetricsHandler', () => { + it('Should export a value ready for sending to the CloudMonitoringExporter', done => { + let testDone = false; + let resolvePlaceholder: (arg: string) => void; + class TestExporter extends MetricExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ) { + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + // super.export(metrics, resultCallback); + console.log('in export'); + try { + // Add assert statement here. + if (!testDone) { + testDone = true; + resultCallback({code: 0}); + resolvePlaceholder('done'); + } + } catch (e) { + resolvePlaceholder('error'); + } + } + } + const handler = new GCPMetricsHandler( + new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + for (let i = 0; i < expectedRequestsHandled.length; i++) { + const request = expectedRequestsHandled[i]; + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + // TODO: Use a type guard here instead of casting. + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + // Wait for the metric to be exported + console.log('waiting'); + // This promise is needed because the test completes prematurely otherwise + // before the metric is exported. + // TODO: Try removing this promise + new Promise(resolve => { + resolvePlaceholder = resolve; + }); + }); +}); From 94f422c06c3bdc989e12772f1a699b8301729f96 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 16:41:58 -0500 Subject: [PATCH 179/289] Reducing export interval makes time complete --- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- test-common/metrics-handler-fixture.ts | 12 ++++++------ test/metrics-collector/gcp-metrics-handler.ts | 7 +++++-- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 46026ed8d..24e5c3b6c 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -114,7 +114,7 @@ export class GCPMetricsHandler new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. 
- exportIntervalMillis: 10_000, + exportIntervalMillis: 1_000, exporter: this.exporter, }), ], diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 8238d3150..3304fb13b 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -25,7 +25,7 @@ export const expectedRequestsHandled = [ connectivityErrorCount: 0, }, attributes: { - streamingOperation: StreamingState.STREAMING, + streamingOperation: 'true', attemptStatus: 4, clientName: 'nodejs-bigtable', metricsCollectorData: { @@ -34,7 +34,7 @@ export const expectedRequestsHandled = [ table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: MethodName.READ_ROWS, + methodName: 'readRows', clientUid: 'fake-uuid', }, projectId: 'my-project', @@ -47,7 +47,7 @@ export const expectedRequestsHandled = [ connectivityErrorCount: 0, }, attributes: { - streamingOperation: StreamingState.STREAMING, + streamingOperation: 'true', attemptStatus: 0, clientName: 'nodejs-bigtable', metricsCollectorData: { @@ -56,7 +56,7 @@ export const expectedRequestsHandled = [ table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: MethodName.READ_ROWS, + methodName: 'readRows', clientUid: 'fake-uuid', }, projectId: 'my-project', @@ -70,14 +70,14 @@ export const expectedRequestsHandled = [ }, attributes: { finalOperationStatus: 0, - streamingOperation: StreamingState.STREAMING, + streamingOperation: 'true', metricsCollectorData: { appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: MethodName.READ_ROWS, + methodName: 'readRows', clientUid: 'fake-uuid', }, clientName: 'nodejs-bigtable', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 7e24b41c1..9a3534221 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -32,7 +32,7 @@ replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); // You can now use updatedInput with metricsToRequest, and it will have the new timestamps. describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', done => { + it('Should export a value ready for sending to the CloudMonitoringExporter', async () => { let testDone = false; let resolvePlaceholder: (arg: string) => void; class TestExporter extends MetricExporter { @@ -40,11 +40,13 @@ describe.only('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ) { + /* replaceTimestamps( metrics as unknown as typeof expectedOtelExportInput, [123, 789], [456, 789] ); + */ // super.export(metrics, resultCallback); console.log('in export'); try { @@ -82,8 +84,9 @@ describe.only('Bigtable/GCPMetricsHandler', () => { // This promise is needed because the test completes prematurely otherwise // before the metric is exported. 
// TODO: Try removing this promise - new Promise(resolve => { + await new Promise(resolve => { resolvePlaceholder = resolve; }); + console.log('done waiting'); }); }); From 1c3c290df90e0b6d87d0ec940da82b9ed6cfae7f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Feb 2025 17:26:07 -0500 Subject: [PATCH 180/289] Add the GCPMetricsHandler test --- .../gcp-metrics-handler.ts | 2 +- test/metrics-collector/gcp-metrics-handler.ts | 78 +++++++++++++++++-- 2 files changed, 71 insertions(+), 9 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 24e5c3b6c..46026ed8d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -114,7 +114,7 @@ export class GCPMetricsHandler new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. - exportIntervalMillis: 1_000, + exportIntervalMillis: 10_000, exporter: this.exporter, }), ], diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 9a3534221..7c57abef9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -27,10 +27,70 @@ function replaceTimestamps( } // Example usage: -replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); +// replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); // You can now use updatedInput with metricsToRequest, and it will have the new timestamps. +describe.only('Bigtable/GCPMetricsHandler', () => { + it('Should export a value ready for sending to the CloudMonitoringExporter', done => { + (async () => { + // let exportPromiseResolve: (value: unknown) => void; + /* + const exportPromise = new Promise(resolve => { + setTimeout(() => { + resolve(undefined); + }, 30000); + }); + */ + + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 30000); + + class TestExporter extends MetricExporter { + async export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): Promise { + // Make export async + console.log('in export'); + // Perform your assertions here on the 'metrics' object + // ... 
(your assertion logic) + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + // exportPromiseResolve(undefined); // Resolve the promise after export + } + } + + const handler = new GCPMetricsHandler( + new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + + // await exportPromise; // Wait for the export to complete + + console.log('done waiting'); // This will now be reached + })(); + }); +}); +/* describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', async () => { let testDone = false; @@ -40,13 +100,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ) { - /* - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - */ // super.export(metrics, resultCallback); console.log('in export'); try { @@ -90,3 +143,12 @@ describe.only('Bigtable/GCPMetricsHandler', () => { console.log('done waiting'); }); }); +*/ + +/* +replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] +); + */ From b118bc17543f63328b297bc28f42dbf684b5ebd6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 15:10:36 -0500 Subject: [PATCH 181/289] Add a stub for the otel export --- .../gcp-metrics-handler.ts | 27 ++- test-common/expected-otel-export-input.ts | 160 +++++++++++++----- test/metrics-collector/gcp-metrics-handler.ts | 7 + 3 files changed, 147 insertions(+), 47 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 46026ed8d..c259290d7 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -75,7 +75,7 @@ export class GCPMetricsHandler if (!this.initialized) { this.initialized = true; const sumAggregation = Aggregation.Sum(); - const buckets = [ + const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, @@ -127,6 +127,9 @@ export class GCPMetricsHandler description: "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), attemptLatencies: meter.createHistogram( @@ -135,9 +138,12 @@ export class GCPMetricsHandler description: 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. 
If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), - retryCount: meter.createHistogram( + retryCount: meter.createCounter( 'bigtable.googleapis.com/internal/client/retry_count', { description: @@ -150,6 +156,9 @@ export class GCPMetricsHandler description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), firstResponseLatencies: meter.createHistogram( @@ -158,6 +167,9 @@ export class GCPMetricsHandler description: 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), serverLatencies: meter.createHistogram( @@ -166,6 +178,9 @@ export class GCPMetricsHandler description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), connectivityErrorCount: meter.createHistogram( @@ -173,6 +188,9 @@ export class GCPMetricsHandler { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), clientBlockingLatencies: meter.createHistogram( @@ -181,6 +199,9 @@ export class GCPMetricsHandler description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', unit: 'ms', + advice: { + explicitBucketBoundaries: latencyBuckets, + }, } ), }; @@ -204,7 +225,7 @@ export class GCPMetricsHandler metrics.operationLatency, attributes ); - this.otelMetrics?.retryCount.record(metrics.retryCount, attributes); + this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); this.otelMetrics?.firstResponseLatencies.record( metrics.firstResponseLatency, attributes diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 9cd3e30ad..0941ab51d 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -38,7 +38,14 @@ export const expectedOtelExportInput = { "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. 
This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -66,10 +73,16 @@ export const expectedOtelExportInput = { sum: 7000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], }, count: 1, }, @@ -84,7 +97,14 @@ export const expectedOtelExportInput = { 'The latencies of a client RPC attempt. Under normal circumstances, this value is identical to operation_latencies. If the client receives transient errors, however, then operation_latencies is the sum of all attempt_latencies and the exponential delays.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -112,10 +132,16 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], }, count: 1, }, @@ -143,10 +169,16 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], }, count: 1, }, @@ -156,7 +188,7 @@ export const expectedOtelExportInput = { { descriptor: { name: 'bigtable.googleapis.com/internal/client/retry_count', - type: 'HISTOGRAM', + type: 'COUNTER', description: 'A counter that records the number of attempts that an operation required to complete. 
Under normal circumstances, this value is empty.', unit: '', @@ -164,7 +196,7 @@ export const expectedOtelExportInput = { advice: {}, }, aggregationTemporality: 1, - dataPointType: 0, + dataPointType: 3, dataPoints: [ { attributes: { @@ -183,21 +215,10 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 1, - max: 1, - sum: 1, - buckets: { - boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, - ], - counts: [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - }, - count: 1, - }, + value: 1, }, ], + isMonotonic: true, }, { descriptor: { @@ -207,7 +228,14 @@ export const expectedOtelExportInput = { 'Latencies from when a client sends a request and receives the first row of the response.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -235,10 +263,16 @@ export const expectedOtelExportInput = { sum: 5000, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], }, count: 1, }, @@ -253,7 +287,14 @@ export const expectedOtelExportInput = { 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -281,10 +322,16 @@ export const expectedOtelExportInput = { sum: 101, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -312,10 +359,16 @@ export const expectedOtelExportInput = { sum: 103, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -330,7 +383,14 @@ export const expectedOtelExportInput = { "The number of requests 
that failed to reach Google's network. In normal cases, this number is 0. When the number is not 0, it can indicate connectivity issues between the application and the Google network.", unit: '', valueType: 1, - advice: {}, + advice: { + explicitBucketBoundaries: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, + 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, + 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, + 100000, + ], + }, }, aggregationTemporality: 1, dataPointType: 0, @@ -358,10 +418,16 @@ export const expectedOtelExportInput = { sum: 0, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, @@ -389,10 +455,16 @@ export const expectedOtelExportInput = { sum: 0, buckets: { boundaries: [ - 0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, - 7500, 10000, + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + counts: [ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, ], - counts: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], }, count: 1, }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 7c57abef9..b46c52d42 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -10,6 +10,7 @@ import { } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; +import * as assert from 'assert'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -56,6 +57,12 @@ describe.only('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): Promise { // Make export async + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual(metrics, expectedOtelExportInput); console.log('in export'); // Perform your assertions here on the 'metrics' object // ... 
(your assertion logic) From 8aefe114636b1a35fade6367f1045806f2c7a5f4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 16:14:09 -0500 Subject: [PATCH 182/289] Test calling export is now working --- src/client-side-metrics/exporter.ts | 101 ++- .../gcp-metrics-handler.ts | 1 + test-common/expected-otel-export-input.ts | 763 ++++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 46 +- 4 files changed, 862 insertions(+), 49 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 905003a57..2074929b5 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,6 +18,7 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; +import {RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; @@ -93,7 +94,7 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: allAttributes.finalOperationStatus.toString(), + status: allAttributes.finalOperationStatus?.toString(), streaming: allAttributes.streamingOperation, client_uid: allAttributes.clientUid, }; @@ -104,46 +105,78 @@ export function metricsToRequest(exportArgs: ExportInput) { table: allAttributes.table, zone: allAttributes.zone, }; - const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: dataPoint.endTime[0], + if (metricName === RETRY_COUNT_NAME) { + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, + valueType: 'INT64', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, }, - startTime: { - seconds: dataPoint.startTime[0], + value: { + int64Value: dataPoint.value, }, }, - value: { - distributionValue: { - count: String(dataPoint.value.count), - mean: dataPoint.value.sum / dataPoint.value.count, - bucketOptions: { - explicitBuckets: { - bounds: dataPoint.value.buckets.boundaries, + ], + }; + timeSeriesArray.push(timeSeries); + } else { + const timeSeries = { + metric: { + type: metricName, + labels: metricLabels, + }, + resource: { + type: exportArgs.resource._syncAttributes[ + 'monitored_resource.type' + ], + labels: resourceLabels, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, + }, + value: { + distributionValue: { + count: String(dataPoint.value.count), + mean: dataPoint.value.sum / dataPoint.value.count, + bucketOptions: { + explicitBuckets: { + bounds: dataPoint.value.buckets.boundaries, + }, }, + bucketCounts: dataPoint.value.buckets.counts.map(String), }, - bucketCounts: dataPoint.value.buckets.counts.map(String), }, }, - }, - ], - unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is 
specified - }; - timeSeriesArray.push(timeSeries); + ], + unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); + } } } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c259290d7..e5b2f3bec 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -178,6 +178,7 @@ export class GCPMetricsHandler description: 'Latencies between the time when the Google frontend receives an RPC and when it sends the first byte of the response.', unit: 'ms', + advice: { explicitBucketBoundaries: latencyBuckets, }, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 0941ab51d..3159f35d8 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,3 +1,766 @@ +export const RETRY_COUNT_NAME = + 'bigtable.googleapis.com/internal/client/retry_count'; + +const expectedOtelExportConvertedValue = { + name: 'projects/my-project', + timeSeries: [ + { + metric: { + type: 'bigtable.googleapis.com/internal/client/operation_latencies', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 7000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 2000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/attempt_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 
'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 2000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/retry_count', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + valueType: 'INT64', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + int64Value: 1, + }, + }, + ], + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/first_response_latencies', + labels: { + client_name: 'nodejs-bigtable', + status: '0', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 5000, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 101, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + 
}, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/server_latencies', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 103, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 0, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + { + metric: { + type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', + labels: { + client_name: 'nodejs-bigtable', + streaming: 'true', + }, + }, + resource: { + type: 'bigtable_client_raw', + labels: { + project_id: 'my-project', + }, + }, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval: { + endTime: { + seconds: 456, + }, + startTime: { + seconds: 123, + }, + }, + value: { + distributionValue: { + count: '1', + mean: 0, + bucketOptions: { + explicitBuckets: { + bounds: [ + 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, + 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, + 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, + 50000, 100000, + ], + }, + }, + bucketCounts: [ + '1', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + '0', + ], + }, + }, + }, + ], + unit: 'ms', + }, + ], +}; + export const expectedOtelExportInput = { resource: { _attributes: { diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 
b46c52d42..231b75b83 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -1,6 +1,10 @@ import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; -import {ExportResult} from '../../src/client-side-metrics/exporter'; +import { + ExportInput, + ExportResult, + metricsToRequest, +} from '../../src/client-side-metrics/exporter'; import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; @@ -11,6 +15,7 @@ import { import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; +import {exportInput} from '../../test-common/export-input-fixture'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -56,20 +61,31 @@ describe.only('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): Promise { - // Make export async - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - assert.deepStrictEqual(metrics, expectedOtelExportInput); - console.log('in export'); - // Perform your assertions here on the 'metrics' object - // ... (your assertion logic) - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - // exportPromiseResolve(undefined); // Resolve the promise after export + try { + console.log('in exporter'); + // Make export async + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + console.log('in export'); + // Perform your assertions here on the 'metrics' object + // ... (your assertion logic) + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + // exportPromiseResolve(undefined); // Resolve the promise after export + } catch (e) { + done(e); + } } } From a30d3ec457bbbd2e7e1487922ab8340b45a9bb1b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 16:15:06 -0500 Subject: [PATCH 183/289] Remove old unused code --- test/metrics-collector/gcp-metrics-handler.ts | 62 ------------------- 1 file changed, 62 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 231b75b83..72e5a2e24 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -113,65 +113,3 @@ describe.only('Bigtable/GCPMetricsHandler', () => { })(); }); }); -/* -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', async () => { - let testDone = false; - let resolvePlaceholder: (arg: string) => void; - class TestExporter extends MetricExporter { - export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void - ) { - // super.export(metrics, resultCallback); - console.log('in export'); - try { - // Add assert statement here. 
- if (!testDone) { - testDone = true; - resultCallback({code: 0}); - resolvePlaceholder('done'); - } - } catch (e) { - resolvePlaceholder('error'); - } - } - } - const handler = new GCPMetricsHandler( - new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) - ); - for (let i = 0; i < expectedRequestsHandled.length; i++) { - const request = expectedRequestsHandled[i]; - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); - } else { - // TODO: Use a type guard here instead of casting. - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); - } - } - // Wait for the metric to be exported - console.log('waiting'); - // This promise is needed because the test completes prematurely otherwise - // before the metric is exported. - // TODO: Try removing this promise - await new Promise(resolve => { - resolvePlaceholder = resolve; - }); - console.log('done waiting'); - }); -}); -*/ - -/* -replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] -); - */ From c3f296300112602466ae85b6bcd5e6bfc98b2d46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 17:05:08 -0500 Subject: [PATCH 184/289] Change tests and json structure to work with metricsCollectorData structure --- src/client-side-metrics/exporter.ts | 26 +++++++------- .../gcp-metrics-handler.ts | 1 - system-test/cloud-monitoring-exporter.ts | 2 +- test-common/expected-otel-export-input.ts | 2 +- test-common/export-input-fixture.ts | 16 +++++---- test/metrics-collector/gcp-metrics-handler.ts | 36 +++++-------------- test/metrics-collector/metricsToRequest.ts | 8 ++--- 7 files changed, 38 insertions(+), 53 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2074929b5..eb960dfcf 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -56,13 +56,15 @@ export interface ExportInput { finalOperationStatus: number; streamingOperation: string; projectId: string; - instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; clientName: string; - clientUid: string; + metricsCollectorData: { + instanceId: string; + table: string; + cluster: string; + zone: string; + methodName: string; + clientUid: string; + }; }; startTime: number[]; endTime: number[]; @@ -93,17 +95,17 @@ export function metricsToRequest(exportArgs: ExportInput) { const metricLabels = { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, - method: allAttributes.methodName, + method: allAttributes.metricsCollectorData.methodName, status: allAttributes.finalOperationStatus?.toString(), streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, + client_uid: allAttributes.metricsCollectorData.clientUid, }; const resourceLabels = { - cluster: allAttributes.cluster, - instance: allAttributes.instanceId, + cluster: allAttributes.metricsCollectorData.cluster, + instance: allAttributes.metricsCollectorData.instanceId, project_id: allAttributes.projectId, - table: allAttributes.table, - zone: allAttributes.zone, + table: allAttributes.metricsCollectorData.table, + zone: allAttributes.metricsCollectorData.zone, }; if (metricName === RETRY_COUNT_NAME) { const timeSeries = { diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 
e5b2f3bec..48d8286b1 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -206,7 +206,6 @@ export class GCPMetricsHandler } ), }; - console.log('Done initializing'); } } diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 3e78c77c6..75381025f 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -4,7 +4,7 @@ import {exportInput} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; -describe.only('Bigtable/CloudMonitoringExporter', () => { +describe('Bigtable/CloudMonitoringExporter', () => { it('exports client side metrics to cloud monitoring', done => { // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 3159f35d8..5212349e8 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,7 +1,7 @@ export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; -const expectedOtelExportConvertedValue = { +export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ { diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 3a8495fe3..17717aea1 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -65,14 +65,16 @@ export const exportInput = { appProfileId: 'fake-app-profile-id', finalOperationStatus: 0, streamingOperation: 'true', - projectId: 'some-project', - instanceId: 'emulator-test-instance', - table: 'my-table', - cluster: 'fake-cluster3', - zone: 'us-central1-f', - methodName: 'Bigtable.ReadRows', clientName: 'nodejs-bigtable/5.1.2', - clientUid: 'fake-uuid', + projectId: 'some-project', + metricsCollectorData: { + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', + }, }, startTime: [fakeStartTime, 951000000], endTime: [fakeEndTime, 948000000], diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 72e5a2e24..4aabba8be 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -13,9 +13,11 @@ import { OnOperationCompleteAttributes, } from '../../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; -import {expectedOtelExportInput} from '../../test-common/expected-otel-export-input'; +import { + expectedOtelExportConvertedValue, + expectedOtelExportInput, +} from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; -import {exportInput} from '../../test-common/export-input-fixture'; function replaceTimestamps( request: typeof expectedOtelExportInput, @@ -32,23 +34,9 @@ function replaceTimestamps( }); } -// Example usage: -// replaceTimestamps(expectedOtelExportInput, [123, 789], [456, 789]); - -// You can now use updatedInput with metricsToRequest, and it will have the new timestamps. 
- -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { - // let exportPromiseResolve: (value: unknown) => void; - /* - const exportPromise = new Promise(resolve => { - setTimeout(() => { - resolve(undefined); - }, 30000); - }); - */ - /* We need to create a timeout here because if we don't then mocha shuts down the test as it is sleeping before the GCPMetricsHandler has a chance to @@ -62,8 +50,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): Promise { try { - console.log('in exporter'); - // Make export async replaceTimestamps( metrics as unknown as typeof expectedOtelExportInput, [123, 789], @@ -76,13 +62,13 @@ describe.only('Bigtable/GCPMetricsHandler', () => { const convertedRequest = metricsToRequest( expectedOtelExportInput as unknown as ExportInput ); - console.log('in export'); - // Perform your assertions here on the 'metrics' object - // ... (your assertion logic) + assert.deepStrictEqual( + JSON.parse(JSON.stringify(convertedRequest)), + expectedOtelExportConvertedValue + ); clearTimeout(timeout); resultCallback({code: 0}); done(); - // exportPromiseResolve(undefined); // Resolve the promise after export } catch (e) { done(e); } @@ -106,10 +92,6 @@ describe.only('Bigtable/GCPMetricsHandler', () => { ); } } - - // await exportPromise; // Wait for the export to complete - - console.log('done waiting'); // This will now be reached })(); }); }); diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 15b839969..2042d895d 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -26,10 +26,10 @@ export const expectedRequest = { type: 'bigtable_client_raw', labels: { cluster: 'fake-cluster3', - instance: 'emulator-test-instance', + instance: 'fakeInstanceId', project_id: 'some-project', - table: 'my-table', - zone: 'us-central1-f', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe('Bigtable/metricsToRequest', () => { +describe.only('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 67b647852b1f9408d126bbee58c2a6b75f9fa637 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Feb 2025 17:15:17 -0500 Subject: [PATCH 185/289] Update all the fixtures --- .../gcp-metrics-handler.ts | 3 -- test-common/expected-otel-export-input.ts | 54 +++++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 2 +- 4 files changed, 56 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 48d8286b1..21d7704fb 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -61,7 +61,6 @@ export class GCPMetricsHandler private exporter: T; constructor(exporter: T) { - console.log('Passing in exporter'); this.exporter = exporter; } @@ -219,7 +218,6 @@ export class GCPMetricsHandler metrics: OnOperationCompleteMetrics, attributes: OnOperationCompleteAttributes ) { - console.log('onOperationComplete'); 
this.initialize(attributes.projectId); this.otelMetrics?.operationLatencies.record( metrics.operationLatency, @@ -243,7 +241,6 @@ export class GCPMetricsHandler metrics: OnAttemptCompleteMetrics, attributes: OnAttemptCompleteAttributes ) { - console.log('onAttemptComplete'); this.initialize(attributes.projectId); this.otelMetrics?.attemptLatencies.record( metrics.attemptLatency, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 5212349e8..3f35b9c0a 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -9,14 +9,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -100,13 +106,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -190,13 +202,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -280,14 +298,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, valueType: 'INT64', @@ -312,14 +336,20 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', status: '0', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -403,13 +433,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -493,13 +529,19 @@ export const expectedOtelExportConvertedValue = { type: 
'bigtable.googleapis.com/internal/client/server_latencies', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -583,13 +625,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', @@ -673,13 +721,19 @@ export const expectedOtelExportConvertedValue = { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { client_name: 'nodejs-bigtable', + method: 'readRows', streaming: 'true', + client_uid: 'fake-uuid', }, }, resource: { type: 'bigtable_client_raw', labels: { + cluster: 'fake-cluster3', + instance: 'fakeInstanceId', project_id: 'my-project', + table: 'fakeTableId', + zone: 'us-west1-c', }, }, metricKind: 'CUMULATIVE', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 4aabba8be..1ed11d63e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,7 @@ function replaceTimestamps( }); } -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2042d895d..2123384d1 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -149,7 +149,7 @@ export const expectedRequest = { }; // TODO: Generate the export code -describe.only('Bigtable/metricsToRequest', () => { +describe('Bigtable/metricsToRequest', () => { it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); From 8544410a1fb2845606b3d51c632ec3c4367e64eb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 11:28:31 -0500 Subject: [PATCH 186/289] Fix the view creation code Pass in latency buckets --- src/client-side-metrics/gcp-metrics-handler.ts | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 21d7704fb..fb4fd9406 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -73,14 +73,11 @@ export class GCPMetricsHandler private initialize(projectId?: string) { if (!this.initialized) { this.initialized = true; - const sumAggregation = Aggregation.Sum(); const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, ]; - /* - const histogramAggregation = new ExplicitBucketHistogramAggregation(); const viewList = [ 'operation_latencies', 
'first_response_latencies', @@ -95,12 +92,14 @@ export class GCPMetricsHandler new View({ instrumentName: name, name, - aggregation: name.slice(-9) ? sumAggregation : histogramAggregation, + aggregation: + name === 'retry_count' + ? Aggregation.Sum() + : new ExplicitBucketHistogramAggregation(latencyBuckets), }) ); - */ const meterProvider = new MeterProvider({ - // views: viewList, + views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', From 96dbc1c524c5b49d7684d5228c469a1542b44ec8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 12:58:16 -0500 Subject: [PATCH 187/289] Starting test for gcp-metrics-handler --- .../gcp-metrics-handler.ts | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 system-test/client-side-metrics/gcp-metrics-handler.ts diff --git a/system-test/client-side-metrics/gcp-metrics-handler.ts b/system-test/client-side-metrics/gcp-metrics-handler.ts new file mode 100644 index 000000000..18d3513cf --- /dev/null +++ b/system-test/client-side-metrics/gcp-metrics-handler.ts @@ -0,0 +1,36 @@ +import {describe} from 'mocha'; +import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; +import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; +import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../src/client-side-metrics/client-side-metrics-attributes'; +import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; +import {CloudMonitoringExporter} from '../../src/client-side-metrics/exporter'; + +// TODO: Test that calls export. +// TODO: Test whole process. +describe.only('Bigtable/GCPMetricsHandler', () => { + it('Should export a value to the CloudMonitoringExporter', done => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. 
+ */ + const timeout = setTimeout(() => {}, 30000); + const handler = new GCPMetricsHandler( + new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + }); +}); From 416e18cd9cf1d3250978b1624fc14b520b42b3b3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 15:30:11 -0500 Subject: [PATCH 188/289] Put tests in the proper places --- .../gcp-metrics-handler.ts | 36 --------------- system-test/cloud-monitoring-exporter.ts | 42 ++++++++++++++++- system-test/gcp-metrics-handler.ts | 45 +++++++++++++++++++ 3 files changed, 86 insertions(+), 37 deletions(-) delete mode 100644 system-test/client-side-metrics/gcp-metrics-handler.ts create mode 100644 system-test/gcp-metrics-handler.ts diff --git a/system-test/client-side-metrics/gcp-metrics-handler.ts b/system-test/client-side-metrics/gcp-metrics-handler.ts deleted file mode 100644 index 18d3513cf..000000000 --- a/system-test/client-side-metrics/gcp-metrics-handler.ts +++ /dev/null @@ -1,36 +0,0 @@ -import {describe} from 'mocha'; -import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handler'; -import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; -import {OnAttemptCompleteAttributes, OnOperationCompleteAttributes} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; -import {CloudMonitoringExporter} from '../../src/client-side-metrics/exporter'; - -// TODO: Test that calls export. -// TODO: Test whole process. -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the CloudMonitoringExporter', done => { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. 
- */ - const timeout = setTimeout(() => {}, 30000); - const handler = new GCPMetricsHandler( - new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) - ); - - for (const request of expectedRequestsHandled) { - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); - } else { - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); - } - } - }); -}); diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 75381025f..48c3f537c 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -1,8 +1,13 @@ import {describe} from 'mocha'; -import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; import {exportInput} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; +import * as assert from 'assert'; +import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; describe('Bigtable/CloudMonitoringExporter', () => { it('exports client side metrics to cloud monitoring', done => { @@ -36,4 +41,39 @@ describe('Bigtable/CloudMonitoringExporter', () => { ); })(); }); + it.only('Should send an otel exported value to the CloudMonitoringExporter', done => { + (async () => { + const resultCallback: (result: ExportResult) => void = ( + result: ExportResult + ) => { + try { + assert.deepStrictEqual(result, {code: 0}); + done(); + } catch (error) { + done(error); + } + }; + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const transformedExportInput = JSON.parse( + JSON.stringify(expectedOtelExportInput).replace( + /my-project/g, + projectId + ) + ); + const exporter = new CloudMonitoringExporter(); + exporter.export( + transformedExportInput as unknown as ResourceMetrics, + resultCallback + ); + })(); + }); }); diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts new file mode 100644 index 000000000..9e7f3cf1b --- /dev/null +++ b/system-test/gcp-metrics-handler.ts @@ -0,0 +1,45 @@ +import {describe} from 'mocha'; +import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; +import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; +import { + OnAttemptCompleteAttributes, + OnOperationCompleteAttributes, +} from '../src/client-side-metrics/client-side-metrics-attributes'; +import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler'; +import * as assert from 'assert'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; + +// TODO: Test that calls export. +// TODO: Test whole process. 
+describe('Bigtable/GCPMetricsHandler', () => { + it('Should export a value to the CloudMonitoringExporter', done => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 30000); + const handler = new GCPMetricsHandler( + new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) + ); + + for (const request of expectedRequestsHandled) { + if (request.metrics.attemptLatency) { + handler.onAttemptComplete( + request.metrics, + request.attributes as OnAttemptCompleteAttributes + ); + } else { + handler.onOperationComplete( + request.metrics as OnOperationCompleteMetrics, + request.attributes as OnOperationCompleteAttributes + ); + } + } + }); +}); From 991f5c8be2a110f172107ba6218b6fd31ee7e392 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 19 Feb 2025 16:25:36 -0500 Subject: [PATCH 189/289] Replace start and end time with more recent values --- system-test/cloud-monitoring-exporter.ts | 12 +++++++++++- system-test/gcp-metrics-handler.ts | 8 +------- test-common/replace-timestamps.ts | 16 ++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 16 +--------------- 4 files changed, 29 insertions(+), 23 deletions(-) create mode 100644 test-common/replace-timestamps.ts diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 48c3f537c..c8464457e 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -3,11 +3,16 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import {exportInput} from '../test-common/export-input-fixture'; +import { + exportInput, + fakeEndTime, + fakeStartTime, +} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; +import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { it('exports client side metrics to cloud monitoring', done => { @@ -69,6 +74,11 @@ describe('Bigtable/CloudMonitoringExporter', () => { projectId ) ); + replaceTimestamps( + transformedExportInput as unknown as typeof expectedOtelExportInput, + [fakeStartTime, 0], + [fakeEndTime, 0] + ); const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 9e7f3cf1b..b1fe47d83 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -6,13 +6,7 @@ import { OnOperationCompleteAttributes, } from '../src/client-side-metrics/client-side-metrics-attributes'; import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler'; -import * as assert from 'assert'; -import { - CloudMonitoringExporter, - ExportResult, -} from '../src/client-side-metrics/exporter'; -import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; // TODO: Test that calls export. // TODO: Test whole process. 
diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts new file mode 100644 index 000000000..ea081cd81 --- /dev/null +++ b/test-common/replace-timestamps.ts @@ -0,0 +1,16 @@ +import {expectedOtelExportInput} from './expected-otel-export-input'; + +export function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 1ed11d63e..7c1f93f0f 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -18,21 +18,7 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; - -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { From 09389eeffcbfe85994c75408835a1d80aca06eb1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 14:12:55 -0500 Subject: [PATCH 190/289] Utilize the new metrics handler interface --- .../gcp-metrics-handler.ts | 115 ++++++++++++------ src/client-side-metrics/metrics-handler.ts | 53 ++++---- test-common/metrics-handler-fixture.ts | 107 +++++++--------- 3 files changed, 155 insertions(+), 120 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index fb4fd9406..cd896f6e9 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -14,8 +14,8 @@ import { IMetricsHandler, - OnAttemptCompleteMetrics, - OnOperationCompleteMetrics, + OnAttemptCompleteData, + OnOperationCompleteData, } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; @@ -48,6 +48,14 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +interface MonitoredResourceData { + projectId: string; + instanceId: string; + table: string; + cluster?: string; + zone?: string; +} + /** * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. * This handler records metrics such as operation latency, attempt latency, retry count, and more, @@ -70,7 +78,7 @@ export class GCPMetricsHandler * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. 
*/ - private initialize(projectId?: string) { + private initialize(data: MonitoredResourceData) { if (!this.initialized) { this.initialized = true; const latencyBuckets = [ @@ -104,8 +112,13 @@ export class GCPMetricsHandler 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': projectId, + 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': data.projectId, + 'monitored_resource.instance_id': data.instanceId, + 'monitored_resource.table': data.table, + 'monitored_resource.cluster': data.cluster, + 'monitored_resource.zone': data.zone, }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -210,45 +223,79 @@ export class GCPMetricsHandler /** * Records metrics for a completed Bigtable operation. * This method records the operation latency and retry count, associating them with provided attributes. - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteData} data Data related to the completed operation. */ - onOperationComplete( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ) { - this.initialize(attributes.projectId); - this.otelMetrics?.operationLatencies.record( - metrics.operationLatency, - attributes - ); - this.otelMetrics?.retryCount.add(metrics.retryCount, attributes); - this.otelMetrics?.firstResponseLatencies.record( - metrics.firstResponseLatency, - attributes - ); + onOperationComplete(data: OnOperationCompleteData) { + this.initialize({ + projectId: data.projectId, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, + }); + this.otelMetrics?.operationLatencies.record(data.operationLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); + this.otelMetrics?.retryCount.add(data.retryCount, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + clientName: data.clientName, + }); + this.otelMetrics?.firstResponseLatencies.record(data.firstResponseLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + finalOperationStatus: data.finalOperationStatus, + clientName: data.clientName, + }); } /** * Records metrics for a completed attempt of a Bigtable operation. * This method records attempt latency, connectivity error count, server latency, and first response latency, * along with the provided attributes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ - onAttemptComplete( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ) { - this.initialize(attributes.projectId); - this.otelMetrics?.attemptLatencies.record( - metrics.attemptLatency, - attributes - ); + onAttemptComplete(data: OnAttemptCompleteData) { + this.initialize({ + projectId: data.projectId, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, + }); + this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); this.otelMetrics?.connectivityErrorCount.record( - metrics.connectivityErrorCount, - attributes + data.connectivityErrorCount, + { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + clientName: data.clientName, + } ); - this.otelMetrics?.serverLatencies.record(metrics.serverLatency, attributes); + this.otelMetrics?.serverLatencies.record(data.serverLatency, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + streamingOperation: data.streamingOperation, + clientName: data.clientName, + }); } } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 38a98ae59..0a701d3a0 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -12,10 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; +import {MethodName, StreamingState} from './client-side-metrics-attributes'; +import {grpc} from 'google-gax'; /** * The interfaces below use undefined instead of null to indicate a metric is @@ -23,22 +21,36 @@ import { * without requiring users to change the methods in their metrics handler. */ -/** - * Metrics related to the completion of a Bigtable operation. - */ -export interface OnOperationCompleteMetrics { +type IMetricsCollectorData = { + instanceId: string; + table: string; + cluster?: string; + zone?: string; + appProfileId?: string; + methodName: MethodName; + clientUid: string; +}; + +export interface OnOperationCompleteData { firstResponseLatency?: number; operationLatency: number; retryCount?: number; + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + finalOperationStatus: grpc.status; + streamingOperation: StreamingState; } -/** - * Metrics related to the completion of a single attempt of a Bigtable operation. 
- */ -export interface OnAttemptCompleteMetrics { +export interface OnAttemptCompleteData { attemptLatency: number; serverLatency?: number; connectivityErrorCount: number; + projectId: string; + metricsCollectorData: IMetricsCollectorData; + clientName: string; + attemptStatus: grpc.status; + streamingOperation: StreamingState; } /** @@ -48,20 +60,13 @@ export interface OnAttemptCompleteMetrics { export interface IMetricsHandler { /** * Called when an operation completes (successfully or unsuccessfully). - * @param {OnOperationCompleteMetrics} metrics Metrics related to the completed operation. - * @param {OnOperationCompleteAttributes} attributes Attributes associated with the completed operation. + * @param {OnOperationCompleteData} data Metrics and attributes related to the completed operation. */ - onOperationComplete?( - metrics: OnOperationCompleteMetrics, - attributes: OnOperationCompleteAttributes - ): void; + onOperationComplete?(data: OnOperationCompleteData): void; + /** * Called when an attempt (e.g., an RPC attempt) completes. - * @param {OnAttemptCompleteMetrics} metrics Metrics related to the completed attempt. - * @param {OnAttemptCompleteAttributes} attributes Attributes associated with the completed attempt. + * @param {OnAttemptCompleteData} data Metrics and attributes related to the completed attempt. */ - onAttemptComplete?( - metrics: OnAttemptCompleteMetrics, - attributes: OnAttemptCompleteAttributes - ): void; + onAttemptComplete?(data: OnAttemptCompleteData): void; } diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 3304fb13b..a49a91158 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -12,76 +12,59 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-import { - MethodName, - StreamingState, -} from '../src/client-side-metrics/client-side-metrics-attributes'; - export const expectedRequestsHandled = [ { - metrics: { - attemptLatency: 2000, - serverLatency: 101, - connectivityErrorCount: 0, - }, - attributes: { - streamingOperation: 'true', - attemptStatus: 4, - clientName: 'nodejs-bigtable', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', + attemptLatency: 2000, + serverLatency: 101, + connectivityErrorCount: 0, + streamingOperation: 'true', + attemptStatus: 4, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + projectId: 'my-project', }, { - metrics: { - attemptLatency: 2000, - serverLatency: 103, - connectivityErrorCount: 0, - }, - attributes: { - streamingOperation: 'true', - attemptStatus: 0, - clientName: 'nodejs-bigtable', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', + attemptLatency: 2000, + serverLatency: 103, + connectivityErrorCount: 0, + streamingOperation: 'true', + attemptStatus: 0, + clientName: 'nodejs-bigtable', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + projectId: 'my-project', }, { - metrics: { - operationLatency: 7000, - retryCount: 1, - firstResponseLatency: 5000, - }, - attributes: { - finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - appProfileId: undefined, - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - clientName: 'nodejs-bigtable', - projectId: 'my-project', + finalOperationStatus: 0, + streamingOperation: 'true', + metricsCollectorData: { + appProfileId: undefined, + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', }, + clientName: 'nodejs-bigtable', + projectId: 'my-project', + operationLatency: 7000, + retryCount: 1, + firstResponseLatency: 5000, }, ]; From 87d5592d7c8f18d76afd5384cad0756b21e8e8aa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 16:10:34 -0500 Subject: [PATCH 191/289] Solve compiler errors resulting from metrics handl --- system-test/gcp-metrics-handler.ts | 19 ++++++------------- test/metrics-collector/gcp-metrics-handler.ts | 19 ++++++------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index b1fe47d83..07b08e36f 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -2,10 +2,9 @@ import {describe} from 'mocha'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; import { - OnAttemptCompleteAttributes, - 
OnOperationCompleteAttributes, -} from '../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../src/client-side-metrics/metrics-handler'; + OnAttemptCompleteData, + OnOperationCompleteData, +} from '../src/client-side-metrics/metrics-handler'; import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; // TODO: Test that calls export. @@ -23,16 +22,10 @@ describe('Bigtable/GCPMetricsHandler', () => { ); for (const request of expectedRequestsHandled) { - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); } else { - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); + handler.onOperationComplete(request as OnOperationCompleteData); } } }); diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 7c1f93f0f..b73082404 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -9,10 +9,9 @@ import {GCPMetricsHandler} from '../../src/client-side-metrics/gcp-metrics-handl import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import {expectedRequestsHandled} from '../../test-common/metrics-handler-fixture'; import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from '../../src/client-side-metrics/client-side-metrics-attributes'; -import {OnOperationCompleteMetrics} from '../../src/client-side-metrics/metrics-handler'; + OnAttemptCompleteData, + OnOperationCompleteData, +} from '../../src/client-side-metrics/metrics-handler'; import { expectedOtelExportConvertedValue, expectedOtelExportInput, @@ -66,16 +65,10 @@ describe.only('Bigtable/GCPMetricsHandler', () => { ); for (const request of expectedRequestsHandled) { - if (request.metrics.attemptLatency) { - handler.onAttemptComplete( - request.metrics, - request.attributes as OnAttemptCompleteAttributes - ); + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); } else { - handler.onOperationComplete( - request.metrics as OnOperationCompleteMetrics, - request.attributes as OnOperationCompleteAttributes - ); + handler.onOperationComplete(request as OnOperationCompleteData); } } })(); From 9ad2ef8c2ebe2c8e44011e095b133894248b5a08 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 17:01:01 -0500 Subject: [PATCH 192/289] Update the fixture --- test-common/expected-otel-export-input.ts | 125 ++++++---------------- 1 file changed, 34 insertions(+), 91 deletions(-) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 3f35b9c0a..c8ff81c87 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -821,22 +821,32 @@ export const expectedOtelExportInput = { 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', + 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': 'my-project', + 'monitored_resource.instance_id': 'fakeInstanceId', + 
'monitored_resource.table': 'fakeTableId', + 'monitored_resource.cluster': 'fake-cluster3', + 'monitored_resource.zone': 'us-west1-c', }, asyncAttributesPending: false, _syncAttributes: { 'service.name': 'Cloud Bigtable Table', 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', + 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', + 'monitored_resource.project_id': 'my-project', + 'monitored_resource.instance_id': 'fakeInstanceId', + 'monitored_resource.table': 'fakeTableId', + 'monitored_resource.cluster': 'fake-cluster3', + 'monitored_resource.zone': 'us-west1-c', }, _asyncAttributesPromise: {}, }, @@ -869,18 +879,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -928,18 +931,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 4, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -965,18 +961,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1017,18 +1006,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1059,18 +1040,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, clientName: 'nodejs-bigtable', - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1118,18 +1091,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', 
attemptStatus: 4, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1155,18 +1121,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, + streamingOperation: 'true', clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1214,18 +1173,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 4, clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], @@ -1251,18 +1202,10 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', + methodName: 'Bigtable.ReadRows', + clientUid: 'fake-uuid', attemptStatus: 0, clientName: 'nodejs-bigtable', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'readRows', - clientUid: 'fake-uuid', - }, - projectId: 'my-project', }, startTime: [123, 789], endTime: [456, 789], From c59fcab6c3235887e4d42bc2881131156e33a1fa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Feb 2025 17:32:51 -0500 Subject: [PATCH 193/289] rewrite the metric to request method --- src/client-side-metrics/exporter.ts | 62 ++++++++++++++-------- test/metrics-collector/metricsToRequest.ts | 12 +++++ 2 files changed, 51 insertions(+), 23 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index eb960dfcf..4f973d0e1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -32,6 +32,11 @@ export interface ExportInput { }; _syncAttributes: { 'monitored_resource.type': string; + 'monitored_resource.project_id': string; + 'monitored_resource.instance_id': string; + 'monitored_resource.table': string; + 'monitored_resource.cluster': string; + 'monitored_resource.zone': string; }; }; scopeMetrics: { @@ -51,21 +56,23 @@ export interface ExportInput { aggregationTemporality?: number; dataPointType?: number; dataPoints: { - attributes: { - appProfileId?: string; - finalOperationStatus: number; - streamingOperation: string; - projectId: string; - clientName: string; - metricsCollectorData: { - instanceId: string; - table: string; - cluster: string; - zone: string; - methodName: string; - clientUid: string; - }; - }; + attributes: + | { + methodName: string; + clientUid: string; + appProfileId?: string; + finalOperationStatus: number; + streamingOperation?: string; + clientName: string; + } + | { + methodName: string; + clientUid: string; + appProfileId?: string; + attemptStatus: number; + streamingOperation?: string; + clientName: string; + }; startTime: number[]; endTime: number[]; value: { @@ 
-92,20 +99,29 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; + // TODO: Type guard for final operation status / attempt status const metricLabels = { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, - method: allAttributes.metricsCollectorData.methodName, - status: allAttributes.finalOperationStatus?.toString(), + method: allAttributes.methodName, + status: '0', streaming: allAttributes.streamingOperation, - client_uid: allAttributes.metricsCollectorData.clientUid, + client_uid: allAttributes.clientUid, }; const resourceLabels = { - cluster: allAttributes.metricsCollectorData.cluster, - instance: allAttributes.metricsCollectorData.instanceId, - project_id: allAttributes.projectId, - table: allAttributes.metricsCollectorData.table, - zone: allAttributes.metricsCollectorData.zone, + cluster: + exportArgs.resource._syncAttributes['monitored_resource.cluster'], + instance: + exportArgs.resource._syncAttributes[ + 'monitored_resource.instance_id' + ], + project_id: + exportArgs.resource._syncAttributes[ + 'monitored_resource.project_id' + ], + table: + exportArgs.resource._syncAttributes['monitored_resource.table'], + zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; if (metricName === RETRY_COUNT_NAME) { const timeSeries = { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2123384d1..2fb8458bb 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -6,6 +6,10 @@ import { fakeStartTime, } from '../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; +import { + expectedOtelExportConvertedValue, + expectedOtelExportInput, +} from '../../test-common/expected-otel-export-input'; export const expectedRequest = { name: 'projects/some-project', @@ -150,8 +154,16 @@ export const expectedRequest = { // TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { + /* it('Converts a counter and a histogram to the cloud monitoring format', () => { const actualRequest = metricsToRequest(exportInput); assert.deepStrictEqual(actualRequest, expectedRequest); }); + */ + it('Converts an otel request to a request ready for the metric service client', () => { + assert.deepStrictEqual( + metricsToRequest(expectedOtelExportInput), + expectedOtelExportConvertedValue + ); + }); }); From 5848588743eb43e77eade489c276aee74ca54c46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 10:18:32 -0500 Subject: [PATCH 194/289] Add interfaces to work with type guards --- src/client-side-metrics/exporter.ts | 189 ++++++++++++++++------------ 1 file changed, 108 insertions(+), 81 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 4f973d0e1..9aa9e5b90 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -24,7 +24,65 @@ export interface ExportResult { code: number; } -// TODO: Only involves the values that we care about +interface OnAttemptAttribute { + methodName: string; + clientUid: string; + appProfileId?: string; + attemptStatus: number; + streamingOperation?: string; + clientName: string; +} + +interface OnOperationAttribute { + methodName: string; + clientUid: string; + appProfileId?: string; + 
finalOperationStatus: number; + streamingOperation?: string; + clientName: string; +} + +interface ScopeMetric { + scope: { + name: string; + version: string; + }; + metrics: { + descriptor: { + name: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; + }; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: Attributes; + startTime: number[]; + endTime: number[]; + value: Value; + }[]; + }[]; +} + +type OtherMetric = ScopeMetric< + OnAttemptAttribute | OnOperationAttribute, + { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; + } +>; + +type RetryMetric = ScopeMetric; + export interface ExportInput { resource: { _attributes: { @@ -39,91 +97,43 @@ export interface ExportInput { 'monitored_resource.zone': string; }; }; - scopeMetrics: { - scope: { - name: string; - version: string; - }; - metrics: { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: - | { - methodName: string; - clientUid: string; - appProfileId?: string; - finalOperationStatus: number; - streamingOperation?: string; - clientName: string; - } - | { - methodName: string; - clientUid: string; - appProfileId?: string; - attemptStatus: number; - streamingOperation?: string; - clientName: string; - }; - startTime: number[]; - endTime: number[]; - value: { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - }; - }[]; - }[]; - }[]; + scopeMetrics: (OtherMetric | RetryMetric)[]; +} + +function isRetryMetric( + scopeMetric: OtherMetric | RetryMetric +): scopeMetric is RetryMetric { + return scopeMetric.scope.name === RETRY_COUNT_NAME; } export function metricsToRequest(exportArgs: ExportInput) { const timeSeriesArray = []; + const resourceLabels = { + cluster: exportArgs.resource._syncAttributes['monitored_resource.cluster'], + instance: + exportArgs.resource._syncAttributes['monitored_resource.instance_id'], + project_id: + exportArgs.resource._syncAttributes['monitored_resource.project_id'], + table: exportArgs.resource._syncAttributes['monitored_resource.table'], + zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], + }; for (const scopeMetrics of exportArgs.scopeMetrics) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; + if (isRetryMetric(scopeMetrics)) { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; - for (const dataPoint of metric.dataPoints) { - // Extract attributes to labels based on their intended target (resource or metric) - const allAttributes = dataPoint.attributes; - // TODO: Type guard for final operation status / attempt status - const metricLabels = { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: '0', - streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, - }; - const resourceLabels = { - cluster: - exportArgs.resource._syncAttributes['monitored_resource.cluster'], - instance: - exportArgs.resource._syncAttributes[ - 'monitored_resource.instance_id' - ], - project_id: - exportArgs.resource._syncAttributes[ - 'monitored_resource.project_id' - ], - table: - 
exportArgs.resource._syncAttributes['monitored_resource.table'], - zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], - }; - if (metricName === RETRY_COUNT_NAME) { + for (const dataPoint of metric.dataPoints) { + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + // TODO: Type guard for final operation status / attempt status + const metricLabels = { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: '0', + streaming: allAttributes.streamingOperation, + client_uid: allAttributes.clientUid, + }; const timeSeries = { metric: { type: metricName, @@ -153,7 +163,24 @@ export function metricsToRequest(exportArgs: ExportInput) { ], }; timeSeriesArray.push(timeSeries); - } else { + } + } + } else { + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + + for (const dataPoint of metric.dataPoints) { + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + // TODO: Type guard for final operation status / attempt status + const metricLabels = { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: '0', + streaming: allAttributes.streamingOperation, + client_uid: allAttributes.clientUid, + }; const timeSeries = { metric: { type: metricName, From 9c35dfbd04dacb4d66af6e8e97c596e8012a0b9b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 10:42:38 -0500 Subject: [PATCH 195/289] Correct the compile error problems different metric types are under metrics not scope metrics --- src/client-side-metrics/exporter.ts | 65 +++++++++++++---------------- 1 file changed, 30 insertions(+), 35 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 9aa9e5b90..886fa9b43 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -42,32 +42,26 @@ interface OnOperationAttribute { clientName: string; } -interface ScopeMetric { - scope: { +interface Metric { + descriptor: { name: string; - version: string; + unit: string; + description?: string; + type?: string; + valueType?: number; + advice?: {}; }; - metrics: { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: Attributes; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; + aggregationTemporality?: number; + dataPointType?: number; + dataPoints: { + attributes: Attributes; + startTime: number[]; + endTime: number[]; + value: Value; }[]; } -type OtherMetric = ScopeMetric< +type OtherMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { min?: number; @@ -81,7 +75,7 @@ type OtherMetric = ScopeMetric< } >; -type RetryMetric = ScopeMetric; +type RetryMetric = Metric; export interface ExportInput { resource: { @@ -97,13 +91,19 @@ export interface ExportInput { 'monitored_resource.zone': string; }; }; - scopeMetrics: (OtherMetric | RetryMetric)[]; + scopeMetrics: { + scope: { + name: string; + version: string; + }; + metrics: (RetryMetric | OtherMetric)[]; + }[]; } function isRetryMetric( - scopeMetric: OtherMetric | RetryMetric -): scopeMetric is RetryMetric { - return scopeMetric.scope.name === RETRY_COUNT_NAME; + 
metric: OtherMetric | RetryMetric +): metric is RetryMetric { + return metric.descriptor.name === RETRY_COUNT_NAME; } export function metricsToRequest(exportArgs: ExportInput) { @@ -118,10 +118,9 @@ export function metricsToRequest(exportArgs: ExportInput) { zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; for (const scopeMetrics of exportArgs.scopeMetrics) { - if (isRetryMetric(scopeMetrics)) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - + for (const metric of scopeMetrics.metrics) { + const metricName = metric.descriptor.name; + if (isRetryMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -164,11 +163,7 @@ export function metricsToRequest(exportArgs: ExportInput) { }; timeSeriesArray.push(timeSeries); } - } - } else { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - + } else { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; From 07ec90dea7242f10b0f394d46820f8d4d7172354 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:39:36 -0500 Subject: [PATCH 196/289] Fix expected OTEL output test --- src/client-side-metrics/exporter.ts | 44 ++++++++++++------- test-common/expected-otel-export-input.ts | 37 ++++++++++------ test/metrics-collector/gcp-metrics-handler.ts | 2 +- test/metrics-collector/metricsToRequest.ts | 14 ++++-- 4 files changed, 64 insertions(+), 33 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 886fa9b43..227536f65 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -125,14 +125,17 @@ export function metricsToRequest(exportArgs: ExportInput) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; // TODO: Type guard for final operation status / attempt status - const metricLabels = { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: '0', - streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, - }; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: allAttributes.finalOperationStatus.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? 
{streaming} : null + ); const timeSeries = { metric: { type: metricName, @@ -168,14 +171,23 @@ export function metricsToRequest(exportArgs: ExportInput) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; // TODO: Type guard for final operation status / attempt status - const metricLabels = { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: '0', - streaming: allAttributes.streamingOperation, - client_uid: allAttributes.clientUid, - }; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: + ( + allAttributes as OnAttemptAttribute + ).attemptStatus?.toString() ?? + ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? {streaming} : null + ); const timeSeries = { metric: { type: metricName, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index c8ff81c87..ea5ff41e9 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -8,8 +8,9 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', streaming: 'true', client_uid: 'fake-uuid', @@ -105,8 +106,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '4', streaming: 'true', client_uid: 'fake-uuid', }, @@ -201,8 +204,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '0', streaming: 'true', client_uid: 'fake-uuid', }, @@ -297,10 +302,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', }, }, @@ -335,10 +340,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', }, }, @@ -432,8 +437,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', + method: 'Bigtable.ReadRows', + status: '4', streaming: 'true', client_uid: 'fake-uuid', }, @@ -528,8 +535,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 
'readRows', + method: 'Bigtable.ReadRows', + status: '0', streaming: 'true', client_uid: 'fake-uuid', }, @@ -624,9 +633,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', - streaming: 'true', + method: 'Bigtable.ReadRows', + status: '4', client_uid: 'fake-uuid', }, }, @@ -720,9 +730,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { + app_profile: undefined, client_name: 'nodejs-bigtable', - method: 'readRows', - streaming: 'true', + method: 'Bigtable.ReadRows', + status: '0', client_uid: 'fake-uuid', }, }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index b73082404..91eabd82e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,7 +19,7 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 2fb8458bb..c26ad0085 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -160,10 +160,18 @@ describe('Bigtable/metricsToRequest', () => { assert.deepStrictEqual(actualRequest, expectedRequest); }); */ - it('Converts an otel request to a request ready for the metric service client', () => { + it.only('Converts an otel request to a request ready for the metric service client', () => { + const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( - metricsToRequest(expectedOtelExportInput), - expectedOtelExportConvertedValue + convertedValue.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length ); + for (let index = 0; index < convertedValue.timeSeries.length; index++) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + convertedValue.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } }); }); From bf54c8c2090c927ba7c1d16b607c08cfb14c7000 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:41:20 -0500 Subject: [PATCH 197/289] Remove TODOs --- src/client-side-metrics/exporter.ts | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 227536f65..2b17dee6d 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -124,7 +124,6 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; - // TODO: Type guard for final operation status / attempt status const streaming = allAttributes.streamingOperation; const metricLabels = Object.assign( { @@ -170,7 +169,6 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; - // TODO: Type guard for final operation status / attempt status const streaming = allAttributes.streamingOperation; const metricLabels = Object.assign( { From f226b5fdda88d7a69cab4d8dc4a7bce93f52f3b0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 11:50:37 -0500 Subject: [PATCH 198/289] Fix test to compare pointwise --- test/metrics-collector/gcp-metrics-handler.ts | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 91eabd82e..bf2ddb8d8 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,7 +19,7 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* @@ -48,9 +48,20 @@ describe('Bigtable/GCPMetricsHandler', () => { expectedOtelExportInput as unknown as ExportInput ); assert.deepStrictEqual( - JSON.parse(JSON.stringify(convertedRequest)), - expectedOtelExportConvertedValue + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } clearTimeout(timeout); resultCallback({code: 0}); done(); From b42b4f4e912045d0008b1c95cc5a2f7935d44d7e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 13:46:20 -0500 Subject: [PATCH 199/289] connectivity error count corrections --- src/client-side-metrics/exporter.ts | 21 ++- .../gcp-metrics-handler.ts | 22 +-- test-common/expected-otel-export-input.ts | 168 +----------------- 3 files changed, 31 insertions(+), 180 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2b17dee6d..7a76366a4 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,7 +18,7 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; +import {CONNECTIIVTY_ERROR_COUNT, RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; @@ -75,7 +75,7 @@ type OtherMetric = Metric< } >; -type RetryMetric = Metric; +type RetryMetric = Metric; export interface ExportInput { resource: { @@ -100,10 +100,13 @@ export interface ExportInput { }[]; } -function isRetryMetric( +function isIntegerMetric( metric: OtherMetric | RetryMetric ): metric is RetryMetric { - return metric.descriptor.name === RETRY_COUNT_NAME; + return ( + metric.descriptor.name === RETRY_COUNT_NAME || + metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT + ); } export function metricsToRequest(exportArgs: ExportInput) { @@ -120,7 +123,7 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isRetryMetric(metric)) { + if (isIntegerMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -130,7 +133,13 @@ export function metricsToRequest(exportArgs: ExportInput) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: allAttributes.finalOperationStatus.toString(), + status: + ( + allAttributes as OnAttemptAttribute + ).attemptStatus?.toString() ?? + ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), client_uid: allAttributes.clientUid, }, streaming ? {streaming} : null diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index cd896f6e9..70d8b1d35 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -195,14 +195,11 @@ export class GCPMetricsHandler }, } ), - connectivityErrorCount: meter.createHistogram( + connectivityErrorCount: meter.createCounter( 'bigtable.googleapis.com/internal/client/connectivity_error_count', { description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", - advice: { - explicitBucketBoundaries: latencyBuckets, - }, } ), clientBlockingLatencies: meter.createHistogram( @@ -279,16 +276,13 @@ export class GCPMetricsHandler streamingOperation: data.streamingOperation, clientName: data.clientName, }); - this.otelMetrics?.connectivityErrorCount.record( - data.connectivityErrorCount, - { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, - clientName: data.clientName, - } - ); + this.otelMetrics?.connectivityErrorCount.add(data.connectivityErrorCount, { + appProfileId: data.metricsCollectorData.appProfileId, + methodName: data.metricsCollectorData.methodName, + clientUid: data.metricsCollectorData.clientUid, + attemptStatus: data.attemptStatus, + clientName: data.clientName, + }); this.otelMetrics?.serverLatencies.record(data.serverLatency, { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index ea5ff41e9..25ff2ac51 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,5 +1,7 @@ export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; +export const CONNECTIIVTY_ERROR_COUNT = + 'bigtable.googleapis.com/internal/client/connectivity_error_count'; export const expectedOtelExportConvertedValue = { name: 'projects/my-project', @@ -650,8 +652,7 @@ export const expectedOtelExportConvertedValue = { zone: 'us-west1-c', }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', + valueType: 'INT64', points: [ { interval: { @@ -663,68 +664,10 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - distributionValue: { - count: '1', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - }, - }, - bucketCounts: [ - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - ], - }, + int64Value: 1, }, }, ], - unit: 'ms', }, { metric: { @@ -747,8 +690,7 @@ export const expectedOtelExportConvertedValue = { zone: 'us-west1-c', }, }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', + valueType: 'INT64', points: [ { interval: { @@ -760,68 +702,10 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - distributionValue: { - count: '1', - mean: 0, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - }, - }, - bucketCounts: [ - '1', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - '0', - 
'0', - '0', - '0', - ], - }, + int64Value: 1, }, }, ], - unit: 'ms', }, ], }; @@ -1191,25 +1075,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - counts: [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], - }, - count: 1, - }, + value: 1, }, { attributes: { @@ -1220,25 +1086,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: { - min: 0, - max: 0, - sum: 0, - buckets: { - boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, - ], - counts: [ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - ], - }, - count: 1, - }, + value: 1, }, ], }, From 54ac764485c31e3de68bcb4b660f9713f7c4dd64 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:21:28 -0500 Subject: [PATCH 200/289] Correct the fixtures --- test-common/expected-otel-export-input.ts | 22 +++++++----------- test/metrics-collector/gcp-metrics-handler.ts | 23 +++++++++++++++++-- test/metrics-collector/metricsToRequest.ts | 2 +- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 25ff2ac51..6dda2ca70 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -664,7 +664,7 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - int64Value: 1, + int64Value: 0, }, }, ], @@ -702,7 +702,7 @@ export const expectedOtelExportConvertedValue = { }, }, value: { - int64Value: 1, + int64Value: 0, }, }, ], @@ -1049,22 +1049,16 @@ export const expectedOtelExportInput = { { descriptor: { name: 'bigtable.googleapis.com/internal/client/connectivity_error_count', - type: 'HISTOGRAM', + type: 'COUNTER', description: "The number of requests that failed to reach Google's network. In normal cases, this number is 0. 
When the number is not 0, it can indicate connectivity issues between the application and the Google network.", unit: '', valueType: 1, - advice: { - explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, - ], - }, + advice: {}, }, aggregationTemporality: 1, - dataPointType: 0, + dataPointType: 3, + isMonotonic: true, dataPoints: [ { attributes: { @@ -1075,7 +1069,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: 1, + value: 0, }, { attributes: { @@ -1086,7 +1080,7 @@ export const expectedOtelExportInput = { }, startTime: [123, 789], endTime: [456, 789], - value: 1, + value: 0, }, ], }, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index bf2ddb8d8..0fab8c718 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -19,8 +19,8 @@ import { import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', done => { +describe('Bigtable/GCPMetricsHandler', () => { + it.only('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down @@ -40,6 +40,25 @@ describe.only('Bigtable/GCPMetricsHandler', () => { [123, 789], [456, 789] ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
+ assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } assert.deepStrictEqual( JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index c26ad0085..0fed23dc1 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -160,7 +160,7 @@ describe('Bigtable/metricsToRequest', () => { assert.deepStrictEqual(actualRequest, expectedRequest); }); */ - it.only('Converts an otel request to a request ready for the metric service client', () => { + it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From eb8f14b95cf7aee72cee8b20fbc5f2d2145f321e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:33:11 -0500 Subject: [PATCH 201/289] Eliminate tests using the old export input fixture --- system-test/cloud-monitoring-exporter.ts | 36 +---- test-common/export-input-fixture.ts | 166 --------------------- test/metrics-collector/metricsToRequest.ts | 153 ------------------- 3 files changed, 2 insertions(+), 353 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index c8464457e..0023898a7 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -3,11 +3,7 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import { - exportInput, - fakeEndTime, - fakeStartTime, -} from '../test-common/export-input-fixture'; +import {fakeEndTime, fakeStartTime} from '../test-common/export-input-fixture'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; @@ -15,38 +11,10 @@ import {expectedOtelExportInput} from '../test-common/expected-otel-export-input import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { - it('exports client side metrics to cloud monitoring', done => { + it('Should send an otel exported value to the CloudMonitoringExporter', done => { // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon - (async () => { - const bigtable = new Bigtable(); - const projectId: string = await new Promise((resolve, reject) => { - bigtable.getProjectId_((err, projectId) => { - if (err) { - reject(err); - } else { - resolve(projectId as string); - } - }); - }); - const transformedExportInput = JSON.parse( - JSON.stringify(exportInput).replace(/some-project/g, projectId) - ); - const exporter = new CloudMonitoringExporter(); - exporter.export( - transformedExportInput as unknown as ResourceMetrics, - (result: {code: number}) => { - if (result.code === 0) { - done(); - } else { - done(result.code); - } - } - ); - })(); - }); - it.only('Should send an otel exported value to the CloudMonitoringExporter', done => { (async () => { const resultCallback: (result: ExportResult) => void = ( result: ExportResult diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts index 
17717aea1..43b73980b 100644 --- a/test-common/export-input-fixture.ts +++ b/test-common/export-input-fixture.ts @@ -14,169 +14,3 @@ export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; export const fakeEndTime = fakeStartTime + 1000; - -export const exportInput = { - resource: { - _attributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - asyncAttributesPending: false, - _syncAttributes: { - 'service.name': 'Cloud Bigtable Table', - 'telemetry.sdk.language': 'nodejs', - 'telemetry.sdk.name': 'opentelemetry', - 'telemetry.sdk.version': '1.30.0', - 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', - 'cloud.resource_manager.project_id': 'some-project', - 'monitored_resource.type': 'bigtable_client_raw', - }, - _asyncAttributesPromise: {}, - }, - scopeMetrics: [ - { - scope: { - name: 'bigtable.googleapis.com', - version: '', - }, - metrics: [ - { - descriptor: { - name: 'bigtable.googleapis.com/internal/client/operation_latencies', - type: 'HISTOGRAM', - description: - "The total end-to-end latency across all RPC attempts associated with a Bigtable operation. This metric measures an operation's round trip from the client to Bigtable and back to the client and includes all retries.", - unit: '', - valueType: 1, - advice: {}, - }, - aggregationTemporality: 1, - dataPointType: 0, - dataPoints: [ - { - attributes: { - appProfileId: 'fake-app-profile-id', - finalOperationStatus: 0, - streamingOperation: 'true', - clientName: 'nodejs-bigtable/5.1.2', - projectId: 'some-project', - metricsCollectorData: { - instanceId: 'fakeInstanceId', - table: 'fakeTableId', - cluster: 'fake-cluster3', - zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', - }, - }, - startTime: [fakeStartTime, 951000000], - endTime: [fakeEndTime, 948000000], - value: { - min: 76, - max: 1337, - sum: 11979, - buckets: { - boundaries: [ - 0, // 1 - 0.01, // 2 - 0.05, // 3 - 0.1, // 4 - 0.3, // 5 - 0.6, // 6 - 0.8, // 7 - 1, // 8 - 2, // 9 - 3, // 10 - 4, // 11 - 5, // 12 - 6, // 13 - 8, // 14 - 10, // 15 - 13, // 16 - 16, // 17 - 20, // 18 - 25, // 19 - 30, // 20 - 40, // 21 - 50, // 22 - 65, // 23 - 80, // 24 - 100, // 25 - 130, // 26 - 160, // 27 - 200, // 28 - 250, // 29 - 300, // 30 - 400, // 31 - 500, // 32 - 650, // 33 - 800, // 34 - 1000, // 35 - 2000, // 36 - 5000, // 37 - 10000, // 38 - 20000, // 39 - 50000, // 40 - 100000, // 41 - ], - counts: [ - 0, //1 - 0, //2 - 0, //3 - 0, //4 - 0, //5 - 0, //6 - 0, //7 - 0, //8 - 0, //9 - 0, //10 - 0, //11 - 0, //12 - 0, //13 - 0, //14 - 0, //15 - 0, //16 - 0, //17 - 0, //18 - 0, //19 - 0, //20 - 0, //21 - 0, //22 - 0, //23 - 0, //24 - 1, //25 - 0, //26 - 0, //27 - 0, //28 - 0, //29 - 0, //30 - 0, //31 - 0, //32 - 0, //33 - 0, //34 - 0, //35 - 0, //36 - 0, //37 - 0, //38 - 0, //39 - 0, //40 - 0, //41 - 0, //42 - ], - }, - count: 1, - }, - }, - ], - }, - ], - }, - ], -}; diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 0fed23dc1..9832cf0e4 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,165 +1,12 @@ import * as assert from 'assert'; import {describe} from 'mocha'; -import { - exportInput, - fakeEndTime, - fakeStartTime, -} from 
'../../test-common/export-input-fixture'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; import { expectedOtelExportConvertedValue, expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; -export const expectedRequest = { - name: 'projects/some-project', - timeSeries: [ - { - metric: { - type: 'bigtable.googleapis.com/internal/client/operation_latencies', - labels: { - app_profile: 'fake-app-profile-id', - client_name: 'nodejs-bigtable/5.1.2', - client_uid: 'fake-uuid', - method: 'Bigtable.ReadRows', - status: '0', - streaming: 'true', - }, - }, - resource: { - type: 'bigtable_client_raw', - labels: { - cluster: 'fake-cluster3', - instance: 'fakeInstanceId', - project_id: 'some-project', - table: 'fakeTableId', - zone: 'us-west1-c', - }, - }, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval: { - endTime: { - seconds: fakeEndTime, - }, - startTime: { - seconds: fakeStartTime, - }, - }, - value: { - distributionValue: { - count: '1', - mean: 11979, - bucketOptions: { - explicitBuckets: { - bounds: [ - 0, // 1 - 0.01, // 2 - 0.05, // 3 - 0.1, // 4 - 0.3, // 5 - 0.6, // 6 - 0.8, // 7 - 1, // 8 - 2, // 9 - 3, // 10 - 4, // 11 - 5, // 12 - 6, // 13 - 8, // 14 - 10, // 15 - 13, // 16 - 16, // 17 - 20, // 18 - 25, // 19 - 30, // 20 - 40, // 21 - 50, // 22 - 65, // 23 - 80, // 24 - 100, // 25 - 130, // 26 - 160, // 27 - 200, // 28 - 250, // 29 - 300, // 30 - 400, // 31 - 500, // 32 - 650, // 33 - 800, // 34 - 1000, // 35 - 2000, // 36 - 5000, // 37 - 10000, // 38 - 20000, // 39 - 50000, // 40 - 100000, // 41 - ], - }, - }, - bucketCounts: [ - '0', //1 - '0', //2 - '0', //3 - '0', //4 - '0', //5 - '0', //6 - '0', //7 - '0', //8 - '0', //9 - '0', //10 - '0', //11 - '0', //12 - '0', //13 - '0', //14 - '0', //15 - '0', //16 - '0', //17 - '0', //18 - '0', //19 - '0', //20 - '0', //21 - '0', //22 - '0', //23 - '0', //24 - '1', //25 - '0', //26 - '0', //27 - '0', //28 - '0', //29 - '0', //30 - '0', //31 - '0', //32 - '0', //33 - '0', //34 - '0', //35 - '0', //36 - '0', //37 - '0', //38 - '0', //39 - '0', //40 - '0', //41 - '0', //42 - ], - }, - }, - }, - ], - unit: 'ms', - }, - ], -}; - -// TODO: Generate the export code describe('Bigtable/metricsToRequest', () => { - /* - it('Converts a counter and a histogram to the cloud monitoring format', () => { - const actualRequest = metricsToRequest(exportInput); - assert.deepStrictEqual(actualRequest, expectedRequest); - }); - */ it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( From 6ecb1a6bb5757d00bce489e7ecc09547d3f08579 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:36:01 -0500 Subject: [PATCH 202/289] Add headers --- system-test/cloud-monitoring-exporter.ts | 14 ++++++++++++++ system-test/gcp-metrics-handler.ts | 14 ++++++++++++++ test-common/expected-otel-export-input.ts | 14 ++++++++++++++ test-common/replace-timestamps.ts | 14 ++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 14 ++++++++++++++ test/metrics-collector/metricsToRequest.ts | 14 ++++++++++++++ 6 files changed, 84 insertions(+) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 0023898a7..5116e6b8f 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import { CloudMonitoringExporter, diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 07b08e36f..abbc55820 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler'; import {expectedRequestsHandled} from '../test-common/metrics-handler-fixture'; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6dda2ca70..6f3953e62 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + export const RETRY_COUNT_NAME = 'bigtable.googleapis.com/internal/client/retry_count'; export const CONNECTIIVTY_ERROR_COUNT = diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index ea081cd81..43e5c6c77 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import {expectedOtelExportInput} from './expected-otel-export-input'; export function replaceTimestamps( diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 0fab8c718..e2d3abd75 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index 9832cf0e4..e77917d79 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import * as assert from 'assert'; import {describe} from 'mocha'; import {metricsToRequest} from '../../src/client-side-metrics/exporter'; From fa0a56e34edb65e502cc19b836db1804d2b1175c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:37:51 -0500 Subject: [PATCH 203/289] run linter --- src/client-side-metrics/exporter.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 7a76366a4..b224afd4d 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,7 +18,10 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import {CONNECTIIVTY_ERROR_COUNT, RETRY_COUNT_NAME} from '../../test-common/expected-otel-export-input'; +import { + CONNECTIIVTY_ERROR_COUNT, + RETRY_COUNT_NAME, +} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; From fcef83d5dd5504aea02f7099eb3a14799732c59d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:55:45 -0500 Subject: [PATCH 204/289] Modify the gcp-metrics-handler and test the proces end to end --- src/client-side-metrics/exporter.ts | 1 + system-test/gcp-metrics-handler.ts | 77 ++++++++++++++++++++++------- 2 files changed, 59 insertions(+), 19 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index b224afd4d..dd1104276 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -263,6 +263,7 @@ export class CloudMonitoringExporter extends MetricExporter { await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); + // {code: 0} is typically the format the callback expects in the super class. const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index abbc55820..9d04d4258 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -19,28 +19,67 @@ import { OnAttemptCompleteData, OnOperationCompleteData, } from '../src/client-side-metrics/metrics-handler'; -import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter'; +import { + CloudMonitoringExporter, + ExportResult, +} from '../src/client-side-metrics/exporter'; +import {Bigtable} from '../src'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import * as assert from 'assert'; -// TODO: Test that calls export. -// TODO: Test whole process. -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the CloudMonitoringExporter', done => { - /* - We need to create a timeout here because if we don't then mocha shuts down - the test as it is sleeping before the GCPMetricsHandler has a chance to - export the data. - */ - const timeout = setTimeout(() => {}, 30000); - const handler = new GCPMetricsHandler( - new CloudMonitoringExporter({projectId: 'cloud-native-db-dpes-shared'}) - ); + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. 
+ */ + const timeout = setTimeout(() => {}, 30000); + const testResultCallback: (result: ExportResult) => void = ( + result: ExportResult + ) => { + try { + clearTimeout(timeout); + assert.deepStrictEqual(result, {code: 0}); + done(); + } catch (error) { + done(error); + } + }; + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + super.export(metrics, testResultCallback); + } + } - for (const request of expectedRequestsHandled) { - if (request.attemptLatency) { - handler.onAttemptComplete(request as OnAttemptCompleteData); - } else { - handler.onOperationComplete(request as OnOperationCompleteData); + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } } - } + })(); }); }); From cd2efacc08a65666bcd6853b7921668a35a61c2d Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 14:57:07 -0500 Subject: [PATCH 205/289] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 9d04d4258..1e1df78c1 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -27,7 +27,7 @@ import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the CloudMonitoringExporter', done => { (async () => { /* diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index e2d3abd75..5803ef119 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,7 @@ import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it.only('Should export a value ready for sending to the CloudMonitoringExporter', done => { + it('Should export a value ready for sending to the CloudMonitoringExporter', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From eba027c21997a559dbc07b50ff64b97521672304 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 15:02:51 -0500 Subject: [PATCH 206/289] Use a fake projectId --- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 5803ef119..fd24d33ad 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -105,7 +105,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } const handler 
= new GCPMetricsHandler( - new TestExporter({projectId: 'cloud-native-db-dpes-shared'}) + new TestExporter({projectId: 'some-project'}) ); for (const request of expectedRequestsHandled) { From 5929a9dda86afd894bf60e867eb3a923c2229f1f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 15:31:53 -0500 Subject: [PATCH 207/289] Only call export once --- .../gcp-metrics-handler.ts | 4 - test/metrics-collector/gcp-metrics-handler.ts | 122 ++++++++++-------- 2 files changed, 68 insertions(+), 58 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 70d8b1d35..c11286d32 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -20,10 +20,6 @@ import { import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import { - OnAttemptCompleteAttributes, - OnOperationCompleteAttributes, -} from './client-side-metrics-attributes'; import {View} from '@opentelemetry/sdk-metrics'; const { Aggregation, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index fd24d33ad..714a2dd17 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -42,64 +42,78 @@ describe('Bigtable/GCPMetricsHandler', () => { export the data. */ const timeout = setTimeout(() => {}, 30000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exporter ensures we only test the value export receives one time. + */ + let exported = false; class TestExporter extends MetricExporter { - async export( + export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void - ): Promise { - try { - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], - expectedOtelExportInput.scopeMetrics[0].metrics[index] - ); - } - assert.deepStrictEqual( - JSON.parse(JSON.stringify(metrics)), - expectedOtelExportInput - ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput - ); - assert.deepStrictEqual( - convertedRequest.timeSeries.length, - expectedOtelExportConvertedValue.timeSeries.length - ); - for ( - let index = 0; - index < convertedRequest.timeSeries.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. 
- assert.deepStrictEqual( - convertedRequest.timeSeries[index], - expectedOtelExportConvertedValue.timeSeries[index] - ); - } - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - } catch (e) { - done(e); + ): void { + if (!exported) { + exported = true; + (async () => { + try { + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + assert.deepStrictEqual( + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length + ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] + ); + } + clearTimeout(timeout); + await this.shutdown(); + resultCallback({code: 0}); + done(); + } catch (e) { + done(e); + } + })(); } } } From 3b48c8e353b91002a2bbc8e3784c35ca34f2fa6c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:01:55 -0500 Subject: [PATCH 208/289] Ensure test suite completes --- test/metrics-collector/gcp-metrics-handler.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 714a2dd17..6c4dbada9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -114,6 +114,10 @@ describe('Bigtable/GCPMetricsHandler', () => { done(e); } })(); + } else { + // The test suite will not complete if unanswered callbacks + // remain on subsequent export calls. 
+ resultCallback({code: 0}); } } } From 8edc4ab8ce82028daba71981fdd0016255f1ace8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:04:42 -0500 Subject: [PATCH 209/289] Remove shutdown --- test/metrics-collector/gcp-metrics-handler.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 6c4dbada9..633134d7e 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -107,7 +107,6 @@ describe('Bigtable/GCPMetricsHandler', () => { ); } clearTimeout(timeout); - await this.shutdown(); resultCallback({code: 0}); done(); } catch (e) { From 8c9d23f700e024709762a2d8c9e98368a436c0ba Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:07:45 -0500 Subject: [PATCH 210/289] remove async --- test/metrics-collector/gcp-metrics-handler.ts | 102 +++++++++--------- 1 file changed, 50 insertions(+), 52 deletions(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 633134d7e..ccad1b1a4 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -56,63 +56,61 @@ describe('Bigtable/GCPMetricsHandler', () => { ): void { if (!exported) { exported = true; - (async () => { - try { - replaceTimestamps( - metrics as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], - expectedOtelExportInput.scopeMetrics[0].metrics[index] - ); - } + try { + replaceTimestamps( + metrics as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics + .length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. assert.deepStrictEqual( - JSON.parse(JSON.stringify(metrics)), - expectedOtelExportInput - ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] ); + } + assert.deepStrictEqual( + JSON.parse(JSON.stringify(metrics)), + expectedOtelExportInput + ); + const convertedRequest = metricsToRequest( + expectedOtelExportInput as unknown as ExportInput + ); + assert.deepStrictEqual( + convertedRequest.timeSeries.length, + expectedOtelExportConvertedValue.timeSeries.length + ); + for ( + let index = 0; + index < convertedRequest.timeSeries.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. 
assert.deepStrictEqual( - convertedRequest.timeSeries.length, - expectedOtelExportConvertedValue.timeSeries.length + convertedRequest.timeSeries[index], + expectedOtelExportConvertedValue.timeSeries[index] ); - for ( - let index = 0; - index < convertedRequest.timeSeries.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. - assert.deepStrictEqual( - convertedRequest.timeSeries[index], - expectedOtelExportConvertedValue.timeSeries[index] - ); - } - clearTimeout(timeout); - resultCallback({code: 0}); - done(); - } catch (e) { - done(e); } - })(); + clearTimeout(timeout); + resultCallback({code: 0}); + done(); + } catch (e) { + done(e); + } } else { // The test suite will not complete if unanswered callbacks // remain on subsequent export calls. From 7b49f012cf62a127f57ae0b17820a83e13a49fef Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Feb 2025 16:48:28 -0500 Subject: [PATCH 211/289] =?UTF-8?q?Don=E2=80=99t=20export=20the=20data=20t?= =?UTF-8?q?wice?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- system-test/gcp-metrics-handler.ts | 37 +++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 1e1df78c1..94cb6afdd 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -36,22 +36,37 @@ describe('Bigtable/GCPMetricsHandler', () => { export the data. */ const timeout = setTimeout(() => {}, 30000); - const testResultCallback: (result: ExportResult) => void = ( - result: ExportResult - ) => { - try { - clearTimeout(timeout); - assert.deepStrictEqual(result, {code: 0}); - done(); - } catch (error) { - done(error); - } - }; + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exporter ensures we only test the value export receives one time. 
+ */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.deepStrictEqual(result, {code: 0}); + done(); + resultCallback({code: 0}); + } catch (error) { + done(error); + } + } else { + resultCallback({code: 0}); + } + }; + } class MockExporter extends CloudMonitoringExporter { export( metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { + const testResultCallback = getTestResultCallback(resultCallback); super.export(metrics, testResultCallback); } } From b4f7705769efb8d16f99b8d0cdf7c1d8f8a4a25a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 13:48:05 -0500 Subject: [PATCH 212/289] Increase the timeout --- test/metrics-collector/gcp-metrics-handler.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index ccad1b1a4..81d595281 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -34,7 +34,8 @@ import * as assert from 'assert'; import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value ready for sending to the CloudMonitoringExporter', done => { + it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { + this.timeout(600000); (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From 2a3245957e10bfaedfb0fb287726d477a183d6c3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:26:00 -0500 Subject: [PATCH 213/289] Use the PushMetricExporter interface --- src/client-side-metrics/gcp-metrics-handler.ts | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c11286d32..bef66a304 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -19,8 +19,7 @@ import { } from './metrics-handler'; import * as Resources from '@opentelemetry/resources'; import * as ResourceUtil from '@google-cloud/opentelemetry-resource-util'; -import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {View} from '@opentelemetry/sdk-metrics'; +import {PushMetricExporter, View} from '@opentelemetry/sdk-metrics'; const { Aggregation, ExplicitBucketHistogramAggregation, @@ -57,14 +56,12 @@ interface MonitoredResourceData { * This handler records metrics such as operation latency, attempt latency, retry count, and more, * associating them with relevant attributes for detailed analysis in Cloud Monitoring. 
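 *
 * @example
 * // A minimal wiring sketch (hypothetical values; assumes the CloudMonitoringExporter
 * // from ./exporter, or any other PushMetricExporter, is configured for a project the
 * // credentials can write metrics to):
 * const handler = new GCPMetricsHandler(new CloudMonitoringExporter({projectId: 'my-project'}));
 * handler.onAttemptComplete(attemptData); // attemptData: OnAttemptCompleteData
 * handler.onOperationComplete(operationData); // operationData: OnOperationCompleteData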
*/ -export class GCPMetricsHandler - implements IMetricsHandler -{ +export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; private otelMetrics?: Metrics; - private exporter: T; + private exporter: PushMetricExporter; - constructor(exporter: T) { + constructor(exporter: PushMetricExporter) { this.exporter = exporter; } From e5caa9e9e3c48d755a47248e4060b9c10893454e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:31:34 -0500 Subject: [PATCH 214/289] Removed some interfaces that are not used anymore --- .../client-side-metrics-attributes.ts | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/src/client-side-metrics/client-side-metrics-attributes.ts b/src/client-side-metrics/client-side-metrics-attributes.ts index ffc6dfa44..f5fbf911d 100644 --- a/src/client-side-metrics/client-side-metrics-attributes.ts +++ b/src/client-side-metrics/client-side-metrics-attributes.ts @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -import {grpc} from 'google-gax'; - // The backend is expecting true/false and will fail if other values are provided. // export in open telemetry is expecting string value attributes so we don't use boolean // true/false. @@ -22,45 +20,6 @@ export enum StreamingState { UNARY = 'false', } -type IMetricsCollectorData = { - instanceId: string; - table: string; - cluster?: string; - zone?: string; - appProfileId?: string; - methodName: MethodName; - clientUid: string; -}; - -/** - * Attributes associated with the completion of a Bigtable operation. These - * attributes provide context about the Bigtable environment, the completed - * operation, and its final status. They are used for recording metrics such as - * operation latency, first response latency, and retry count. - */ -export type OnOperationCompleteAttributes = { - projectId: string; - metricsCollectorData: IMetricsCollectorData; - clientName: string; - finalOperationStatus: grpc.status; - streamingOperation: StreamingState; -}; - -/** - * Attributes associated with the completion of a single attempt of a Bigtable - * operation. These attributes provide context about the Bigtable environment, - * the specific attempt, its status, and whether the operation was streaming. They - * are used for recording metrics such as attempt latency, server latency, and - * connectivity errors. - */ -export type OnAttemptCompleteAttributes = { - projectId: string; - metricsCollectorData: IMetricsCollectorData; - clientName: string; - attemptStatus: grpc.status; - streamingOperation: StreamingState; -}; - /** * Represents the names of Bigtable methods. These are used as attributes for * metrics, allowing for differentiation of performance by method. From fc114ffa4ab6d012c17393c0285395cf3a8ca123 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:38:30 -0500 Subject: [PATCH 215/289] Update JSdoc --- src/client-side-metrics/gcp-metrics-handler.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index bef66a304..479d7fbc6 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -69,7 +69,10 @@ export class GCPMetricsHandler implements IMetricsHandler { * Initializes the OpenTelemetry metrics instruments if they haven't been already. 
* Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. - * @param {string} [projectId] The Google Cloud project ID. Used for metric export. If not provided, it will attempt to detect it from the environment. + * + * @param {MonitoredResourceData} [data] The data that will be used to set up the monitored resource + * which will be provided to the exporter in every export call. + * */ private initialize(data: MonitoredResourceData) { if (!this.initialized) { From 6fb59441ed1c70ad8e094487d30f5288522b298c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:44:43 -0500 Subject: [PATCH 216/289] Move fake start time and fake end time --- system-test/cloud-monitoring-exporter.ts | 5 ++++- test-common/expected-otel-export-input.ts | 3 +++ test-common/export-input-fixture.ts | 16 ---------------- 3 files changed, 7 insertions(+), 17 deletions(-) delete mode 100644 test-common/export-input-fixture.ts diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 5116e6b8f..96f0ffe53 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -17,7 +17,10 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import {fakeEndTime, fakeStartTime} from '../test-common/export-input-fixture'; +import { + fakeEndTime, + fakeStartTime, +} from '../test-common/expected-otel-export-input'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6f3953e62..c5cf9ef60 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,6 +17,9 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; +export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; +export const fakeEndTime = fakeStartTime + 1000; + export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ diff --git a/test-common/export-input-fixture.ts b/test-common/export-input-fixture.ts deleted file mode 100644 index 43b73980b..000000000 --- a/test-common/export-input-fixture.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; -export const fakeEndTime = fakeStartTime + 1000; From ca6f05e4793a93aae463e8d61a1f71ad9b78bee5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 15:53:05 -0500 Subject: [PATCH 217/289] Remove the TODO --- src/client-side-metrics/exporter.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index dd1104276..c9d359987 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -258,7 +258,6 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - // TODO: Remove casting. const request = metricsToRequest(metrics as unknown as ExportInput); await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest From 4bec216709f79a60fba1bf4e3ae949e857ce90b6 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 24 Feb 2025 16:16:35 -0500 Subject: [PATCH 218/289] Update documentation --- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 479d7fbc6..d1ddae973 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -252,7 +252,7 @@ export class GCPMetricsHandler implements IMetricsHandler { /** * Records metrics for a completed attempt of a Bigtable operation. - * This method records attempt latency, connectivity error count, server latency, and first response latency, + * This method records attempt latency, connectivity error count, server latency, * along with the provided attributes. * @param {OnAttemptCompleteData} data Data related to the completed attempt. */ From bd4b0acf24c03728c61aa1fe4a084bfaede26145 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 10:06:59 -0500 Subject: [PATCH 219/289] Add additional information to the error reported --- src/client-side-metrics/exporter.ts | 3 +-- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- system-test/cloud-monitoring-exporter.ts | 4 +++- system-test/gcp-metrics-handler.ts | 4 +++- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index c9d359987..73dea8e8e 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -266,8 +266,7 @@ export class CloudMonitoringExporter extends MetricExporter { const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { - const exportResult = {code: (error as ServiceError).code as number}; - resultCallback(exportResult); + resultCallback(error as ServiceError); } })(); } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d1ddae973..95a0863df 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -70,7 +70,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. 
* - * @param {MonitoredResourceData} [data] The data that will be used to set up the monitored resource + * @param {MonitoredResourceData} data The data that will be used to set up the monitored resource * which will be provided to the exporter in every export call. * */ diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 96f0ffe53..4b8298d9d 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -37,9 +37,11 @@ describe('Bigtable/CloudMonitoringExporter', () => { result: ExportResult ) => { try { - assert.deepStrictEqual(result, {code: 0}); + assert.strictEqual(result.code, 0); done(); } catch (error) { + // Code isn't 0 so report the original error. + done(result); done(error); } }; diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 94cb6afdd..5a5c83f8f 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -50,10 +50,12 @@ describe('Bigtable/GCPMetricsHandler', () => { exported = true; try { clearTimeout(timeout); - assert.deepStrictEqual(result, {code: 0}); + assert.strictEqual(result.code, 0); done(); resultCallback({code: 0}); } catch (error) { + // Code isn't 0 so report the original error. + done(result); done(error); } } else { From c1916149c0ecef99d907ae9dcb942490a92efdf5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 11:36:59 -0500 Subject: [PATCH 220/289] Move start time and end time --- system-test/cloud-monitoring-exporter.ts | 9 ++++----- test-common/expected-otel-export-input.ts | 3 --- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 4b8298d9d..a99f4c2a1 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -17,18 +17,17 @@ import { CloudMonitoringExporter, ExportResult, } from '../src/client-side-metrics/exporter'; -import { - fakeEndTime, - fakeStartTime, -} from '../test-common/expected-otel-export-input'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe('Bigtable/CloudMonitoringExporter', () => { +describe.only('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { + // TODO: In this test make sure the start time and end time are increasing? 
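+      // (Presumed constraint: Cloud Monitoring rejects intervals whose end time does not
+      // follow the start time, and points that are stale or written out of order, so the
+      // fake start/end times below are kept recent and ordered.)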
+ const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; + const fakeEndTime = fakeStartTime + 1000; // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index c5cf9ef60..6f3953e62 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,9 +17,6 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; -export const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; -export const fakeEndTime = fakeStartTime + 1000; - export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ From 86be1ea6c64cf9dd86fa66173bdf9d32d4628f8e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 11:58:22 -0500 Subject: [PATCH 221/289] Try to use timestamps in order --- system-test/cloud-monitoring-exporter.ts | 8 +++----- test-common/replace-timestamps.ts | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index a99f4c2a1..b1ca3ecfd 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -21,7 +21,7 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {replaceTimestamps} from '../test-common/replace-timestamps'; +import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; describe.only('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { @@ -60,10 +60,8 @@ describe.only('Bigtable/CloudMonitoringExporter', () => { projectId ) ); - replaceTimestamps( - transformedExportInput as unknown as typeof expectedOtelExportInput, - [fakeStartTime, 0], - [fakeEndTime, 0] + addFakeRecentTimestamps( + transformedExportInput as unknown as typeof expectedOtelExportInput ); const exporter = new CloudMonitoringExporter(); exporter.export( diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 43e5c6c77..eedba6148 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -14,6 +14,8 @@ import {expectedOtelExportInput} from './expected-otel-export-input'; +// TODO: Move these methods into their respective modules or inline. + export function replaceTimestamps( request: typeof expectedOtelExportInput, newStartTime: [number, number], @@ -28,3 +30,20 @@ export function replaceTimestamps( }); }); } + +export function addFakeRecentTimestamps( + request: typeof expectedOtelExportInput +) { + // TODO: Reference the error here. 
+ let latestTime = Math.floor(Date.now() / 1000) - 2000; + [...request.scopeMetrics].reverse().forEach(scopeMetric => { + [...scopeMetric.metrics].reverse().forEach(metric => { + [...metric.dataPoints].reverse().forEach(dataPoint => { + dataPoint.endTime = [latestTime, 0]; + latestTime -= 1000; + dataPoint.startTime = [latestTime, 0]; + latestTime -= 1000; + }); + }); + }); +} From 3b0f0812f197ed3c5bf025ba9352fdc70d6b07fa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 12:57:35 -0500 Subject: [PATCH 222/289] Reduce timestamp delay --- test-common/replace-timestamps.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index eedba6148..4652a34bb 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -35,14 +35,14 @@ export function addFakeRecentTimestamps( request: typeof expectedOtelExportInput ) { // TODO: Reference the error here. - let latestTime = Math.floor(Date.now() / 1000) - 2000; + let latestTime = Math.floor(Date.now() / 1000) - 5; [...request.scopeMetrics].reverse().forEach(scopeMetric => { [...scopeMetric.metrics].reverse().forEach(metric => { [...metric.dataPoints].reverse().forEach(dataPoint => { dataPoint.endTime = [latestTime, 0]; - latestTime -= 1000; + latestTime -= 5; dataPoint.startTime = [latestTime, 0]; - latestTime -= 1000; + latestTime -= 5; }); }); }); From 3ebb9ff2171fba37bea60d962cbc12550126553a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 12:58:12 -0500 Subject: [PATCH 223/289] Remove only --- system-test/cloud-monitoring-exporter.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index b1ca3ecfd..29ede051b 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -23,7 +23,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; -describe.only('Bigtable/CloudMonitoringExporter', () => { +describe('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { // TODO: In this test make sure the start time and end time are increasing? 
const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; From cf321314ef4944a86b01035b8c011c4a724cfc84 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:12:34 -0500 Subject: [PATCH 224/289] Inline addFakeRecentTimestamps --- system-test/cloud-monitoring-exporter.ts | 23 +++++++++++++++-------- test-common/replace-timestamps.ts | 17 ----------------- 2 files changed, 15 insertions(+), 25 deletions(-) diff --git a/system-test/cloud-monitoring-exporter.ts b/system-test/cloud-monitoring-exporter.ts index 29ede051b..268ae44a6 100644 --- a/system-test/cloud-monitoring-exporter.ts +++ b/system-test/cloud-monitoring-exporter.ts @@ -21,13 +21,9 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {Bigtable} from '../src'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; -import {addFakeRecentTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/CloudMonitoringExporter', () => { it('Should send an otel exported value to the CloudMonitoringExporter', done => { - // TODO: In this test make sure the start time and end time are increasing? - const fakeStartTime = Math.floor(Date.now() / 1000) - 2000; - const fakeEndTime = fakeStartTime + 1000; // When this test is run, metrics should be visible at the following link: // https://pantheon.corp.google.com/monitoring/metrics-explorer;duration=PT1H?inv=1&invt=Abo9_A&project={projectId} // This test will add metrics so that they are available in Pantheon @@ -59,10 +55,21 @@ describe('Bigtable/CloudMonitoringExporter', () => { /my-project/g, projectId ) - ); - addFakeRecentTimestamps( - transformedExportInput as unknown as typeof expectedOtelExportInput - ); + ) as unknown as typeof expectedOtelExportInput; + { + // This replaces the fake dates in time series with recent dates in the right order. + let latestTime = Math.floor(Date.now() / 1000) - 5; + transformedExportInput.scopeMetrics.reverse().forEach(scopeMetric => { + scopeMetric.metrics.reverse().forEach(metric => { + metric.dataPoints.reverse().forEach(dataPoint => { + dataPoint.endTime = [latestTime, 0]; + latestTime -= 5; + dataPoint.startTime = [latestTime, 0]; + latestTime -= 5; + }); + }); + }); + } const exporter = new CloudMonitoringExporter(); exporter.export( transformedExportInput as unknown as ResourceMetrics, diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 4652a34bb..8b54b1aa5 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -30,20 +30,3 @@ export function replaceTimestamps( }); }); } - -export function addFakeRecentTimestamps( - request: typeof expectedOtelExportInput -) { - // TODO: Reference the error here. 
- let latestTime = Math.floor(Date.now() / 1000) - 5; - [...request.scopeMetrics].reverse().forEach(scopeMetric => { - [...scopeMetric.metrics].reverse().forEach(metric => { - [...metric.dataPoints].reverse().forEach(dataPoint => { - dataPoint.endTime = [latestTime, 0]; - latestTime -= 5; - dataPoint.startTime = [latestTime, 0]; - latestTime -= 5; - }); - }); - }); -} From 78a20d4694983a24e4c83aedfcc2b9d50bf95842 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:22:26 -0500 Subject: [PATCH 225/289] Move replace timestamps into the only file it is used --- test-common/replace-timestamps.ts | 32 ------------------- test/metrics-collector/gcp-metrics-handler.ts | 16 +++++++++- 2 files changed, 15 insertions(+), 33 deletions(-) delete mode 100644 test-common/replace-timestamps.ts diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts deleted file mode 100644 index 8b54b1aa5..000000000 --- a/test-common/replace-timestamps.ts +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import {expectedOtelExportInput} from './expected-otel-export-input'; - -// TODO: Move these methods into their respective modules or inline. - -export function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 81d595281..300419992 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -31,7 +31,21 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; -import {replaceTimestamps} from '../../test-common/replace-timestamps'; + +function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { From 105b58ba6cbdb36b97bc41d338f4db2704b351a8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Tue, 25 Feb 2025 13:26:00 -0500 Subject: [PATCH 226/289] Fix comment --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 5a5c83f8f..7ab9951aa 100644 --- a/system-test/gcp-metrics-handler.ts +++ 
b/system-test/gcp-metrics-handler.ts @@ -39,7 +39,7 @@ describe('Bigtable/GCPMetricsHandler', () => { /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, - exporter ensures we only test the value export receives one time. + exported variable ensures we only test the value export receives one time. */ let exported = false; function getTestResultCallback( From d4022fd1589f6e1eeb9822fab8c9fc5060aa67c5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 10:08:33 -0500 Subject: [PATCH 227/289] Rename the metric types --- src/client-side-metrics/exporter.ts | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 73dea8e8e..13d5cd8ed 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -64,7 +64,7 @@ interface Metric { }[]; } -type OtherMetric = Metric< +type DistributionMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { min?: number; @@ -78,7 +78,7 @@ type OtherMetric = Metric< } >; -type RetryMetric = Metric; +type CounterMetric = Metric; export interface ExportInput { resource: { @@ -99,13 +99,13 @@ export interface ExportInput { name: string; version: string; }; - metrics: (RetryMetric | OtherMetric)[]; + metrics: (CounterMetric | DistributionMetric)[]; }[]; } -function isIntegerMetric( - metric: OtherMetric | RetryMetric -): metric is RetryMetric { +function isCounterMetric( + metric: DistributionMetric | CounterMetric +): metric is CounterMetric { return ( metric.descriptor.name === RETRY_COUNT_NAME || metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT @@ -126,7 +126,7 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isIntegerMetric(metric)) { + if (isCounterMetric(metric)) { for (const dataPoint of metric.dataPoints) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; From 7ea28d2e9355a61333d8a5fbcffa2c10a44e1a17 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:30:44 -0500 Subject: [PATCH 228/289] Generate documentation for the new classes --- src/client-side-metrics/exporter.ts | 139 +++++++++++++++++++++- test-common/expected-otel-export-input.ts | 16 +++ 2 files changed, 154 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 13d5cd8ed..466b53a1e 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -27,6 +27,26 @@ export interface ExportResult { code: number; } +/** + * Attributes associated with the completion of a single attempt of a Bigtable + * operation. These attributes provide context about the specific attempt, + * its status, and the method involved. They are used for recording metrics + * such as attempt latency and connectivity errors. + * + * @property methodName - The name of the Bigtable method that was attempted (e.g., + * 'Bigtable.ReadRows', 'Bigtable.MutateRows'). + * @property clientUid - A unique identifier for the client that initiated the + * attempt. + * @property appProfileId - (Optional) The ID of the application profile used for + * the attempt. + * @property attemptStatus - The status code of the attempt. 
A value of `0` + * typically indicates success (grpc.status.OK), while other values indicate + * different types of errors. + * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. + * Will be "true" or "false" if present. + * @property clientName - The name of the client library making the attempt + * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). + */ interface OnAttemptAttribute { methodName: string; clientUid: string; @@ -36,6 +56,26 @@ interface OnAttemptAttribute { clientName: string; } +/** + * Attributes associated with the completion of a Bigtable operation. These + * attributes provide context about the operation, its final status, and the + * method involved. They are used for recording metrics such as operation + * latency. + * + * @property methodName - The name of the Bigtable method that was performed + * (e.g., 'Bigtable.ReadRows', 'Bigtable.MutateRows'). + * @property clientUid - A unique identifier for the client that initiated the + * operation. + * @property appProfileId - (Optional) The ID of the application profile used for + * the operation. + * @property finalOperationStatus - The final status code of the operation. A + * value of `0` typically indicates success (grpc.status.OK), while other + * values indicate different types of errors. + * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. + * Will be "true" or "false" if present. + * @property clientName - The name of the client library performing the operation + * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). + */ interface OnOperationAttribute { methodName: string; clientUid: string; @@ -45,6 +85,14 @@ interface OnOperationAttribute { clientName: string; } +/** + * Represents a generic metric in the OpenTelemetry format. + * + * This interface describes the structure of a metric, which can represent + * either a counter or a distribution (histogram). It includes the metric's + * descriptor, the type of data it collects, and the actual data points. + * + */ interface Metric { descriptor: { name: string; @@ -64,6 +112,16 @@ interface Metric { }[]; } +/** + * Represents a metric that measures the distribution of values. + * + * Distribution metrics, also known as histograms, are used to track the + * statistical distribution of a set of measurements. They allow you to capture + * not only the count and sum of the measurements but also how they are spread + * across different ranges (buckets). This makes them suitable for tracking + * latencies, sizes, or other metrics where the distribution is important. + * + */ type DistributionMetric = Metric< OnAttemptAttribute | OnOperationAttribute, { @@ -112,6 +170,41 @@ function isCounterMetric( ); } +/** + * Converts OpenTelemetry metrics data into a format suitable for the Google Cloud + * Monitoring API's `createTimeSeries` method. + * + * This function transforms the structured metrics data, including resource and + * metric attributes, data points, and aggregation information, into an object + * that conforms to the expected request format of the Cloud Monitoring API. + * + * @param {ExportInput} exportArgs - The OpenTelemetry metrics data to be converted. This + * object contains resource attributes, scope information, and a list of + * metrics with their associated data points. + * + * @returns An object representing a `CreateTimeSeriesRequest`, ready for sending + * to the Google Cloud Monitoring API. 
This object contains the project name + * and an array of time series data points, formatted for ingestion by + * Cloud Monitoring. + * + * @throws Will throw an error if there are issues converting the data. + * + * @remarks + * The output format is specific to the Cloud Monitoring API and involves + * mapping OpenTelemetry concepts to Cloud Monitoring's data model, including: + * - Mapping resource attributes to resource labels. + * - Mapping metric attributes to metric labels. + * - Handling different metric types (counter, distribution). + * - Converting data points to the correct structure, including start and end + * times, values, and bucket information for distributions. + * + * @example + * const exportInput: ExportInput = { ... }; // Example ExportInput object + * const monitoringRequest = metricsToRequest(exportInput); + * // monitoringRequest can now be used in monitoringClient.createTimeSeries(monitoringRequest) + * + * + */ export function metricsToRequest(exportArgs: ExportInput) { const timeSeriesArray = []; const resourceLabels = { @@ -248,7 +341,51 @@ export function metricsToRequest(exportArgs: ExportInput) { }; } -// TODO: Add test for when the export fails +/** + * A custom OpenTelemetry `MetricExporter` that sends metrics data to Google Cloud + * Monitoring. + * + * This class extends the base `MetricExporter` from `@google-cloud/opentelemetry-cloud-monitoring-exporter` + * and handles the process of converting OpenTelemetry metrics data into the + * format required by the Google Cloud Monitoring API. It uses the + * `MetricServiceClient` to send the data to Google Cloud Monitoring's + * `createTimeSeries` method. + * + * @remarks + * This exporter relies on the `metricsToRequest` function to perform the + * necessary transformation of OpenTelemetry metrics into Cloud Monitoring + * `TimeSeries` data. + * + * The exporter is asynchronous and will not block the calling thread while + * sending metrics. It manages the Google Cloud Monitoring client and handles + * potential errors during the export process. + * + * The class expects the `ResourceMetrics` to have been correctly configured + * and populated with the required resource attributes to correctly identify + * the monitored resource in Cloud Monitoring. + * + * @example + * // Create an instance of the CloudMonitoringExporter + * const exporter = new CloudMonitoringExporter(); + * + * // Use the exporter with a MeterProvider + * const meterProvider = new MeterProvider({ + * resource: new Resource({ + * 'service.name': 'my-service', + * // ... other resource attributes + * }), + * readers: [new PeriodicExportingMetricReader({ + * exporter: exporter, + * exportIntervalMillis: 10000 // Export every 10 seconds + * })] + * }); + * + * // Now start instrumenting your application using the meter + * const meter = meterProvider.getMeter('my-meter'); + * // ... create counters, histograms, etc. + * + * @beta + */ export class CloudMonitoringExporter extends MetricExporter { private monitoringClient = new MetricServiceClient(); diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 6f3953e62..39ea091bb 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -724,6 +724,22 @@ export const expectedOtelExportConvertedValue = { ], }; +/** + * An example of an `ExportInput` object, representing OpenTelemetry metrics + * data in the format expected by the `metricsToRequest` function. 
+ * + * This object demonstrates the structure of the input data, including + * resource attributes, scope information, and a collection of metrics + * (both counter and distribution types) with their associated data points. + * + * @remarks + * This structure is designed to be converted into a Google Cloud Monitoring + * `CreateTimeSeriesRequest` using the `metricsToRequest` function. It + * includes various types of metrics that are sent by the Bigtable client + * library, such as operation latencies, attempt latencies, retry counts, + * and server latencies. + * + */ export const expectedOtelExportInput = { resource: { _attributes: { From 7848643509d3bfef4a55f8f3957af01b8e95d775 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:45:18 -0500 Subject: [PATCH 229/289] Add documentation for monitored resource --- src/client-side-metrics/gcp-metrics-handler.ts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 95a0863df..b4681b3f8 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -43,6 +43,18 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } +/** + * Represents the data associated with a monitored resource in Google Cloud Monitoring. + * + * This interface defines the structure of data that is used to identify and + * describe a specific resource being monitored, such as a Bigtable instance, + * cluster, or table. It is used to construct the `resource` part of a + * `TimeSeries` object in the Cloud Monitoring API. + * + * When an open telemetry instrument is created in the GCPMetricsHandler, all + * recordings to that instrument are expected to have the same + * MonitoredResourceData properties. + */ interface MonitoredResourceData { projectId: string; instanceId: string; From 722917407e6ae255af0017976592e817a5e31b68 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 11:55:03 -0500 Subject: [PATCH 230/289] Generate documentation for the other metrics --- src/client-side-metrics/exporter.ts | 50 +++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 466b53a1e..600c0c8d1 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -136,8 +136,40 @@ type DistributionMetric = Metric< } >; +/** + * Represents a metric that counts the number of occurrences of an event or + * the cumulative value of a quantity over time. + * + * Counter metrics are used to track quantities that increase over time, such + * as the number of requests, errors, or retries. They are always + * non-negative and can only increase or remain constant. + * + */ type CounterMetric = Metric; +/** + * Represents the input data structure for exporting OpenTelemetry metrics. + * + * This interface defines the structure of the object that is passed to the + * `metricsToRequest` function to convert OpenTelemetry metrics into a format + * suitable for the Google Cloud Monitoring API. + * + * It contains information about the monitored resource and an array of + * scope metrics, which include various types of metrics (counters and + * distributions) and their associated data points. + * + * @remarks + * This structure is specifically designed to hold OpenTelemetry metrics data + * as it is exported from the Bigtable client library. 
It represents the data + * before it is transformed into the Cloud Monitoring API's `TimeSeries` + * format. + * + * Each `CounterMetric` and `DistributionMetric` within the `scopeMetrics` + * array represents a different type of measurement, such as retry counts, + * operation latencies, attempt latencies etc. Each metric contains an array of dataPoints + * Each `dataPoint` contains the `attributes`, `startTime`, `endTime` and `value`. + * `value` will be a number for a counter metric and an object for a distribution metric. + */ export interface ExportInput { resource: { _attributes: { @@ -161,6 +193,24 @@ export interface ExportInput { }[]; } +/** + * Type guard function to determine if a given metric is a CounterMetric. + * + * This function checks if a metric is a CounterMetric by inspecting its + * `descriptor.name` property and comparing it against known counter metric + * names. + * + * @param metric - The metric to check. This can be either a + * `DistributionMetric` or a `CounterMetric`. + * @returns `true` if the metric is a `CounterMetric`, `false` otherwise. + * + * @remarks + * This function uses a type guard to narrow down the type of the `metric` + * parameter to `CounterMetric` if it returns `true`. This allows TypeScript + * to perform more precise type checking and provides better code + * completion when working with metrics. + * + */ function isCounterMetric( metric: DistributionMetric | CounterMetric ): metric is CounterMetric { From 7f4e167239356904019778619d72ce6577ab1375 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 12:00:22 -0500 Subject: [PATCH 231/289] Generate documentation for the constructor --- src/client-side-metrics/gcp-metrics-handler.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b4681b3f8..4a53c2bf4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -73,6 +73,17 @@ export class GCPMetricsHandler implements IMetricsHandler { private otelMetrics?: Metrics; private exporter: PushMetricExporter; + /** + * The `GCPMetricsHandler` is responsible for managing and recording + * client-side metrics for Google Cloud Bigtable using OpenTelemetry. It + * handles the creation and configuration of various metric instruments + * (histograms and counters) and exports them to Google Cloud Monitoring + * through the provided `PushMetricExporter`. + * + * @param exporter - The `PushMetricExporter` instance to use for exporting + * metrics to Google Cloud Monitoring. This exporter is responsible for + * sending the collected metrics data to the monitoring backend. The provided exporter must be fully configured, for example the projectId must have been set. 
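+   *
+   * @example
+   * // Minimal construction sketch (illustrative only; assumes the exporter
+   * // resolves its project through application default credentials, and
+   * // `operationData` stands in for a real OnOperationCompleteData value):
+   * const handler = new GCPMetricsHandler(new CloudMonitoringExporter());
+   * handler.onOperationComplete(operationData);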
+ */ constructor(exporter: PushMetricExporter) { this.exporter = exporter; } From c86196aeab47ab69656f57d5cf33f32f96e768b3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Wed, 26 Feb 2025 13:24:15 -0500 Subject: [PATCH 232/289] Get documentation for replaceTimestamps, fixtures --- test-common/expected-otel-export-input.ts | 11 +++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 39ea091bb..10fc26139 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -17,6 +17,17 @@ export const RETRY_COUNT_NAME = export const CONNECTIIVTY_ERROR_COUNT = 'bigtable.googleapis.com/internal/client/connectivity_error_count'; +/** + * An example of the expected output format after converting + * `expectedOtelExportInput` using the `metricsToRequest` function. + * + * This object represents the data structure that is ready to be sent to the + * Google Cloud Monitoring API's `createTimeSeries` method. It demonstrates + * how OpenTelemetry metrics are transformed into the Cloud Monitoring format, + * including the structure of time series data, metric types, resource labels, + * and data point values. + * + */ export const expectedOtelExportConvertedValue = { name: 'projects/my-project', timeSeries: [ diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 300419992..425851e86 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -32,6 +32,15 @@ import { } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. + */ function replaceTimestamps( request: typeof expectedOtelExportInput, newStartTime: [number, number], From d76fa14e3b1f854d0047d1785667a81e1a6448dd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Feb 2025 14:41:30 -0500 Subject: [PATCH 233/289] Reduce the interval time --- src/client-side-metrics/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 4a53c2bf4..8a418b8aa 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -144,7 +144,7 @@ export class GCPMetricsHandler implements IMetricsHandler { new PeriodicExportingMetricReader({ // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by // Cloud Monitoring. 
- exportIntervalMillis: 10_000, + exportIntervalMillis: 1_000, exporter: this.exporter, }), ], From 2f3b4e5f43cffc8c51d1798bd3a11502f88d2df8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 27 Feb 2025 14:47:02 -0500 Subject: [PATCH 234/289] Make view list based on latencies --- src/client-side-metrics/gcp-metrics-handler.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 8a418b8aa..c9a4a1235 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -119,10 +119,9 @@ export class GCPMetricsHandler implements IMetricsHandler { new View({ instrumentName: name, name, - aggregation: - name === 'retry_count' - ? Aggregation.Sum() - : new ExplicitBucketHistogramAggregation(latencyBuckets), + aggregation: name.endsWith('latencies') + ? Aggregation.Sum() + : new ExplicitBucketHistogramAggregation(latencyBuckets), }) ); const meterProvider = new MeterProvider({ From 7a4b33ec32e7dafb65c9d2184c4ba46e30b5e750 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 28 Feb 2025 09:58:35 -0500 Subject: [PATCH 235/289] Add a guard for count --- src/client-side-metrics/exporter.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 600c0c8d1..7bdcf8119 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -367,7 +367,9 @@ export function metricsToRequest(exportArgs: ExportInput) { value: { distributionValue: { count: String(dataPoint.value.count), - mean: dataPoint.value.sum / dataPoint.value.count, + mean: dataPoint.value.count + ? dataPoint.value.sum / dataPoint.value.count + : 0, bucketOptions: { explicitBuckets: { bounds: dataPoint.value.buckets.boundaries, From 5a09c337491695a45818442d97e178ad102f4d93 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:34:08 -0500 Subject: [PATCH 236/289] Invert the for and if --- src/client-side-metrics/exporter.ts | 89 +++++++++++------------------ 1 file changed, 32 insertions(+), 57 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 7bdcf8119..2857cf2a7 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -93,7 +93,7 @@ interface OnOperationAttribute { * descriptor, the type of data it collects, and the actual data points. * */ -interface Metric { +interface Metric { descriptor: { name: string; unit: string; @@ -104,12 +104,25 @@ interface Metric { }; aggregationTemporality?: number; dataPointType?: number; - dataPoints: { - attributes: Attributes; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; + dataPoints: DataPoint[]; +} + +interface DataPoint { + attributes: OnAttemptAttribute | OnOperationAttribute; + startTime: number[]; + endTime: number[]; + value: Value; +} + +interface DistributionValue { + min?: number; + max?: number; + sum: number; + count: number; + buckets: { + boundaries: number[]; + counts: number[]; + }; } /** @@ -122,19 +135,7 @@ interface Metric { * latencies, sizes, or other metrics where the distribution is important. 
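 *
 * @example
 * // Illustrative value of a single distribution data point (numbers are made
 * // up): one 1.5 ms measurement recorded against boundaries [0, 1, 2] lands
 * // in the [1, 2) bucket, so counts has boundaries.length + 1 entries.
 * // { sum: 1.5, count: 1, buckets: { boundaries: [0, 1, 2], counts: [0, 0, 1, 0] } }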
* */ -type DistributionMetric = Metric< - OnAttemptAttribute | OnOperationAttribute, - { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; - } ->; +type DistributionMetric = Metric; /** * Represents a metric that counts the number of occurrences of an event or @@ -145,7 +146,7 @@ type DistributionMetric = Metric< * non-negative and can only increase or remain constant. * */ -type CounterMetric = Metric; +type CounterMetric = Metric; /** * Represents the input data structure for exporting OpenTelemetry metrics. @@ -193,31 +194,8 @@ export interface ExportInput { }[]; } -/** - * Type guard function to determine if a given metric is a CounterMetric. - * - * This function checks if a metric is a CounterMetric by inspecting its - * `descriptor.name` property and comparing it against known counter metric - * names. - * - * @param metric - The metric to check. This can be either a - * `DistributionMetric` or a `CounterMetric`. - * @returns `true` if the metric is a `CounterMetric`, `false` otherwise. - * - * @remarks - * This function uses a type guard to narrow down the type of the `metric` - * parameter to `CounterMetric` if it returns `true`. This allows TypeScript - * to perform more precise type checking and provides better code - * completion when working with metrics. - * - */ -function isCounterMetric( - metric: DistributionMetric | CounterMetric -): metric is CounterMetric { - return ( - metric.descriptor.name === RETRY_COUNT_NAME || - metric.descriptor.name === CONNECTIIVTY_ERROR_COUNT - ); +function isCounterValue(value: DistributionValue | number): value is number { + return typeof value === 'number'; } /** @@ -269,8 +247,9 @@ export function metricsToRequest(exportArgs: ExportInput) { for (const scopeMetrics of exportArgs.scopeMetrics) { for (const metric of scopeMetrics.metrics) { const metricName = metric.descriptor.name; - if (isCounterMetric(metric)) { - for (const dataPoint of metric.dataPoints) { + for (const dataPoint of metric.dataPoints) { + const value = dataPoint.value; + if (isCounterValue(value)) { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; const streaming = allAttributes.streamingOperation; @@ -319,9 +298,7 @@ export function metricsToRequest(exportArgs: ExportInput) { ], }; timeSeriesArray.push(timeSeries); - } - } else { - for (const dataPoint of metric.dataPoints) { + } else { // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; const streaming = allAttributes.streamingOperation; @@ -366,16 +343,14 @@ export function metricsToRequest(exportArgs: ExportInput) { }, value: { distributionValue: { - count: String(dataPoint.value.count), - mean: dataPoint.value.count - ? dataPoint.value.sum / dataPoint.value.count - : 0, + count: String(value.count), + mean: value.count ? 
value.sum / value.count : 0, bucketOptions: { explicitBuckets: { - bounds: dataPoint.value.buckets.boundaries, + bounds: value.buckets.boundaries, }, }, - bucketCounts: dataPoint.value.buckets.counts.map(String), + bucketCounts: value.buckets.counts.map(String), }, }, }, From aa18c1ed9428e14b15854e1895355bd79fc65af9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:48:02 -0500 Subject: [PATCH 237/289] Pull all attributes out --- src/client-side-metrics/exporter.ts | 115 ++++++++++------------------ 1 file changed, 41 insertions(+), 74 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2857cf2a7..8d676f731 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -249,48 +249,47 @@ export function metricsToRequest(exportArgs: ExportInput) { const metricName = metric.descriptor.name; for (const dataPoint of metric.dataPoints) { const value = dataPoint.value; + // Extract attributes to labels based on their intended target (resource or metric) + const allAttributes = dataPoint.attributes; + const streaming = allAttributes.streamingOperation; + const metricLabels = Object.assign( + { + app_profile: allAttributes.appProfileId, + client_name: allAttributes.clientName, + method: allAttributes.methodName, + status: + (allAttributes as OnAttemptAttribute).attemptStatus?.toString() ?? + ( + allAttributes as OnOperationAttribute + ).finalOperationStatus?.toString(), + client_uid: allAttributes.clientUid, + }, + streaming ? {streaming} : null + ); + const metric = { + type: metricName, + labels: metricLabels, + }; + const resource = { + type: exportArgs.resource._syncAttributes['monitored_resource.type'], + labels: resourceLabels, + }; + const interval = { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: dataPoint.startTime[0], + }, + }; if (isCounterValue(value)) { - // Extract attributes to labels based on their intended target (resource or metric) - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: - ( - allAttributes as OnAttemptAttribute - ).attemptStatus?.toString() ?? - ( - allAttributes as OnOperationAttribute - ).finalOperationStatus?.toString(), - client_uid: allAttributes.clientUid, - }, - streaming ? {streaming} : null - ); const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, + metric, + resource, valueType: 'INT64', points: [ { - interval: { - endTime: { - seconds: dataPoint.endTime[0], - }, - startTime: { - seconds: dataPoint.startTime[0], - }, - }, + interval, value: { int64Value: dataPoint.value, }, @@ -300,47 +299,14 @@ export function metricsToRequest(exportArgs: ExportInput) { timeSeriesArray.push(timeSeries); } else { // Extract attributes to labels based on their intended target (resource or metric) - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: - ( - allAttributes as OnAttemptAttribute - ).attemptStatus?.toString() ?? 
- ( - allAttributes as OnOperationAttribute - ).finalOperationStatus?.toString(), - client_uid: allAttributes.clientUid, - }, - streaming ? {streaming} : null - ); const timeSeries = { - metric: { - type: metricName, - labels: metricLabels, - }, - resource: { - type: exportArgs.resource._syncAttributes[ - 'monitored_resource.type' - ], - labels: resourceLabels, - }, + metric, + resource, metricKind: 'CUMULATIVE', valueType: 'DISTRIBUTION', points: [ { - interval: { - endTime: { - seconds: dataPoint.endTime[0], - }, - startTime: { - seconds: dataPoint.startTime[0], - }, - }, + interval, value: { distributionValue: { count: String(value.count), @@ -355,7 +321,8 @@ export function metricsToRequest(exportArgs: ExportInput) { }, }, ], - unit: metric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + unit: + (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified }; timeSeriesArray.push(timeSeries); } From 791e70dcb71cbf067eac351da6f62ef438be3d5c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 13:51:24 -0500 Subject: [PATCH 238/289] Eliminate the need for the type guard --- src/client-side-metrics/exporter.ts | 75 ++++++++++++++--------------- 1 file changed, 36 insertions(+), 39 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8d676f731..03ea30ea3 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -282,50 +282,47 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - if (isCounterValue(value)) { - const timeSeries = { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, + const timeSeries = isCounterValue(value) + ? { + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, + }, }, - }, - ], - }; - timeSeriesArray.push(timeSeries); - } else { - // Extract attributes to labels based on their intended target (resource or metric) - const timeSeries = { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: value.count ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: value.buckets.boundaries, + ], + } + : { + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: value.count ? 
value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: value.buckets.boundaries, + }, }, + bucketCounts: value.buckets.counts.map(String), }, - bucketCounts: value.buckets.counts.map(String), }, }, - }, - ], - unit: - (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); - } + ], + unit: + (metric as unknown as DistributionMetric).descriptor.unit || + 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); } } } From 2e215032afaebf40dcaaa3cebb8b0bbd7a160fdc Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:09:57 -0500 Subject: [PATCH 239/289] Eliminate the data points interface --- src/client-side-metrics/exporter.ts | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 03ea30ea3..686516a23 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -104,14 +104,12 @@ interface Metric { }; aggregationTemporality?: number; dataPointType?: number; - dataPoints: DataPoint[]; -} - -interface DataPoint { - attributes: OnAttemptAttribute | OnOperationAttribute; - startTime: number[]; - endTime: number[]; - value: Value; + dataPoints: { + attributes: OnAttemptAttribute | OnOperationAttribute; + startTime: number[]; + endTime: number[]; + value: Value; + }[]; } interface DistributionValue { @@ -194,6 +192,15 @@ export interface ExportInput { }[]; } +/** + * Type guard function to determine if a given value is a counter value (a number). + * + * This function checks if a value, which could be either a `DistributionValue` + * object or a `number`, is specifically a `number`. This is used to differentiate + * between counter metrics (which have numeric values) and distribution metrics + * (which have more complex, object-based values). + * + */ function isCounterValue(value: DistributionValue | number): value is number { return typeof value === 'number'; } From e936aaa671740b5f8654ca286e5fee61e31ffc98 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:11:30 -0500 Subject: [PATCH 240/289] only --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index e77917d79..a752ce47f 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -21,7 +21,7 @@ import { } from '../../test-common/expected-otel-export-input'; describe('Bigtable/metricsToRequest', () => { - it('Converts an otel request to a request ready for the metric service client', () => { + it.only('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From ab07a580ba71227695dfbf2867a6c9b4652843cb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:12:39 -0500 Subject: [PATCH 241/289] Revert "Eliminate the need for the type guard" This reverts commit 791e70dcb71cbf067eac351da6f62ef438be3d5c. 
--- src/client-side-metrics/exporter.ts | 75 +++++++++++++++-------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 686516a23..6face5806 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -289,47 +289,50 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - const timeSeries = isCounterValue(value) - ? { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, - }, + if (isCounterValue(value)) { + const timeSeries = { + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, }, - ], - } - : { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: value.count ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: value.buckets.boundaries, - }, + }, + ], + }; + timeSeriesArray.push(timeSeries); + } else { + // Extract attributes to labels based on their intended target (resource or metric) + const timeSeries = { + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: value.count ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: value.buckets.boundaries, }, - bucketCounts: value.buckets.counts.map(String), }, + bucketCounts: value.buckets.counts.map(String), }, }, - ], - unit: - (metric as unknown as DistributionMetric).descriptor.unit || - 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); + }, + ], + unit: + (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); + } } } } From 7fe9a4679f57794e8663c743f39dc95ae5fa8367 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:27:35 -0500 Subject: [PATCH 242/289] Pull timeseries into one variable Solve the descriptor issue --- src/client-side-metrics/exporter.ts | 79 ++++++++++++++--------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 6face5806..3b4886ee9 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -252,9 +252,9 @@ export function metricsToRequest(exportArgs: ExportInput) { zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], }; for (const scopeMetrics of exportArgs.scopeMetrics) { - for (const metric of scopeMetrics.metrics) { - const metricName = metric.descriptor.name; - for (const dataPoint of metric.dataPoints) { + for (const scopeMetric of scopeMetrics.metrics) { + const metricName = scopeMetric.descriptor.name; + for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; @@ -289,50 +289,45 @@ export function metricsToRequest(exportArgs: ExportInput) { seconds: dataPoint.startTime[0], }, }; - if (isCounterValue(value)) { - const timeSeries = { - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, + const timeSeries = 
isCounterValue(value) + ? { + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, + }, }, - }, - ], - }; - timeSeriesArray.push(timeSeries); - } else { - // Extract attributes to labels based on their intended target (resource or metric) - const timeSeries = { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: value.count ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: value.buckets.boundaries, + ], + } + : { + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: value.count ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: value.buckets.boundaries, + }, }, + bucketCounts: value.buckets.counts.map(String), }, - bucketCounts: value.buckets.counts.map(String), }, }, - }, - ], - unit: - (metric as unknown as DistributionMetric).descriptor.unit || 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); - } + ], + unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }; + timeSeriesArray.push(timeSeries); } } } From 26b9ca7e9c1580ea542749571c5dd2a9b7271c96 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:35:51 -0500 Subject: [PATCH 243/289] Eliminate an unused import --- src/client-side-metrics/exporter.ts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 3b4886ee9..8fe4e95be 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -18,10 +18,6 @@ import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; import ICreateTimeSeriesRequest = google.monitoring.v3.ICreateTimeSeriesRequest; -import { - CONNECTIIVTY_ERROR_COUNT, - RETRY_COUNT_NAME, -} from '../../test-common/expected-otel-export-input'; export interface ExportResult { code: number; From 29ef6b0ccd977a6e83a6b57f760769bb369ce19a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 14:52:18 -0500 Subject: [PATCH 244/289] Add a comment that explains the usefulness of each metric attribute category --- src/client-side-metrics/exporter.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8fe4e95be..62774c237 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -252,9 +252,14 @@ export function metricsToRequest(exportArgs: ExportInput) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; - // Extract attributes to labels based on their intended target (resource or metric) const allAttributes = dataPoint.attributes; const streaming = allAttributes.streamingOperation; + /* + metricLabels are built from the open telemetry attributes that are set + when a data point is recorded. This means that for one metric there may + be multiple time series' with different attributes, but the resource + labels will always be the same for a particular export call. 
+ */ const metricLabels = Object.assign( { app_profile: allAttributes.appProfileId, From 8fd59f446d13ae765de1344713d449d1a58f086c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:34:59 -0500 Subject: [PATCH 245/289] Remove the gce instance setting --- src/client-side-metrics/gcp-metrics-handler.ts | 1 - test-common/expected-otel-export-input.ts | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c9a4a1235..edb3b6864 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -129,7 +129,6 @@ export class GCPMetricsHandler implements IMetricsHandler { resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 10fc26139..bce9d5121 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -759,7 +759,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', @@ -775,7 +774,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', 'cloud.provider': 'gcp', - 'cloud.platform': 'gce_instance', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', From 183bdb7c8a4552246e6cf02cca0149baa2d28e2e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:38:17 -0500 Subject: [PATCH 246/289] Eliminate the gcp cloud provider setting --- src/client-side-metrics/gcp-metrics-handler.ts | 1 - test-common/expected-otel-export-input.ts | 2 -- 2 files changed, 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index edb3b6864..9ad1c3f78 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -128,7 +128,6 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index bce9d5121..0d19445f3 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -758,7 +758,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', @@ -773,7 +772,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 
'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.provider': 'gcp', 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', From 92059aa8e034a1bbc4c4b40c9eb2d64d9196fa18 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 16:38:32 -0500 Subject: [PATCH 247/289] run all tests --- test/metrics-collector/metricsToRequest.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index a752ce47f..e77917d79 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -21,7 +21,7 @@ import { } from '../../test-common/expected-otel-export-input'; describe('Bigtable/metricsToRequest', () => { - it.only('Converts an otel request to a request ready for the metric service client', () => { + it('Converts an otel request to a request ready for the metric service client', () => { const convertedValue = metricsToRequest(expectedOtelExportInput); assert.deepStrictEqual( convertedValue.timeSeries.length, From af3aa73f5832f5980f356ab29011a97b4375d877 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 17:17:37 -0500 Subject: [PATCH 248/289] Eliminate duplicate project id from monitored resource --- src/client-side-metrics/exporter.ts | 5 +---- src/client-side-metrics/gcp-metrics-handler.ts | 6 ++---- test-common/expected-otel-export-input.ts | 2 -- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 62774c237..2aa1f9816 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -167,9 +167,6 @@ type CounterMetric = Metric; */ export interface ExportInput { resource: { - _attributes: { - 'cloud.resource_manager.project_id': string; - }; _syncAttributes: { 'monitored_resource.type': string; 'monitored_resource.project_id': string; @@ -333,7 +330,7 @@ export function metricsToRequest(exportArgs: ExportInput) { } } return { - name: `projects/${exportArgs.resource._attributes['cloud.resource_manager.project_id']}`, + name: `projects/${exportArgs.resource._syncAttributes['monitored_resource.project_id']}`, timeSeries: timeSeriesArray, }; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 9ad1c3f78..67dadab55 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -128,7 +128,6 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'cloud.resource_manager.project_id': data.projectId, 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': data.projectId, 'monitored_resource.instance_id': data.instanceId, @@ -139,9 +138,8 @@ export class GCPMetricsHandler implements IMetricsHandler { readers: [ // Register the exporter new PeriodicExportingMetricReader({ - // Export metrics every 10 seconds. 5 seconds is the smallest sample period allowed by - // Cloud Monitoring. - exportIntervalMillis: 1_000, + // Export metrics every 60 seconds. 
+ exportIntervalMillis: 60_000, exporter: this.exporter, }), ], diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 0d19445f3..86f4e6ad5 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -758,7 +758,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', 'monitored_resource.instance_id': 'fakeInstanceId', @@ -772,7 +771,6 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'cloud.resource_manager.project_id': 'my-project', 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', 'monitored_resource.instance_id': 'fakeInstanceId', From 422060e0e02433ae68dee3878d8ed17c3edb34f1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 6 Mar 2025 17:22:24 -0500 Subject: [PATCH 249/289] Change the two metric names --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 67dadab55..a21564ecb 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -176,7 +176,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), applicationBlockingLatencies: meter.createHistogram( - 'bigtable.googleapis.com/internal/client/application_blocking_latencies', + 'bigtable.googleapis.com/internal/client/application_latencies', { description: 'The time from when the client receives the response to a request until the application reads the response. This metric is most relevant for ReadRows requests. The start and stop times for this metric depend on the way that you send the read request; see Application blocking latencies timer examples for details.', @@ -217,7 +217,7 @@ export class GCPMetricsHandler implements IMetricsHandler { } ), clientBlockingLatencies: meter.createHistogram( - 'bigtable.googleapis.com/internal/client/client_blocking_latencies', + 'bigtable.googleapis.com/internal/client/throttling_latencies', { description: 'Latencies introduced when the client blocks the sending of more requests to the server because of too many pending requests in a bulk operation.', From f93a7211b38362ab685418dac5ff6c5a4fa6600f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 09:59:48 -0500 Subject: [PATCH 250/289] Extend the timeout so that the exporter has chance to work --- system-test/gcp-metrics-handler.ts | 2 +- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 7ab9951aa..e112f7ec9 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -35,7 +35,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. 
Since done cannot be called multiple times in mocha, diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 425851e86..c22b32ea9 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -65,7 +65,7 @@ describe('Bigtable/GCPMetricsHandler', () => { the test as it is sleeping before the GCPMetricsHandler has a chance to export the data. */ - const timeout = setTimeout(() => {}, 30000); + const timeout = setTimeout(() => {}, 120000); /* The exporter is called every x seconds, but we only want to test the value it receives once. Since done cannot be called multiple times in mocha, From ea230aa1bd5b62443a2be53db11cc285d3de9411 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 11:31:36 -0500 Subject: [PATCH 251/289] Use spread syntax --- .../gcp-metrics-handler.ts | 50 ++++++++----------- 1 file changed, 20 insertions(+), 30 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index a21564ecb..70110fc9a 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -244,28 +244,22 @@ export class GCPMetricsHandler implements IMetricsHandler { cluster: data.metricsCollectorData.cluster, zone: data.metricsCollectorData.zone, }); - this.otelMetrics?.operationLatencies.record(data.operationLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, - streamingOperation: data.streamingOperation, - clientName: data.clientName, - }); - this.otelMetrics?.retryCount.add(data.retryCount, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, - clientName: data.clientName, - }); - this.otelMetrics?.firstResponseLatencies.record(data.firstResponseLatency, { + const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, + }; + this.otelMetrics?.operationLatencies.record(data.operationLatency, { + streamingOperation: data.streamingOperation, + ...commonAttributes, }); + this.otelMetrics?.retryCount.add(data.retryCount, commonAttributes); + this.otelMetrics?.firstResponseLatencies.record( + data.firstResponseLatency, + commonAttributes + ); } /** @@ -282,28 +276,24 @@ export class GCPMetricsHandler implements IMetricsHandler { cluster: data.metricsCollectorData.cluster, zone: data.metricsCollectorData.zone, }); - this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, - streamingOperation: data.streamingOperation, - clientName: data.clientName, - }); - this.otelMetrics?.connectivityErrorCount.add(data.connectivityErrorCount, { + const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, attemptStatus: data.attemptStatus, clientName: 
data.clientName, + }; + this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + streamingOperation: data.streamingOperation, + ...commonAttributes, }); + this.otelMetrics?.connectivityErrorCount.add( + data.connectivityErrorCount, + commonAttributes + ); this.otelMetrics?.serverLatencies.record(data.serverLatency, { - appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, streamingOperation: data.streamingOperation, - clientName: data.clientName, + ...commonAttributes, }); } } From 5ecfb707851c453ed39e3e82c8a75a709cb5b518 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 11:33:53 -0500 Subject: [PATCH 252/289] Changed metric to otel instruments --- src/client-side-metrics/gcp-metrics-handler.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 70110fc9a..56f649b1e 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -70,7 +70,7 @@ interface MonitoredResourceData { */ export class GCPMetricsHandler implements IMetricsHandler { private initialized = false; - private otelMetrics?: Metrics; + private otelInstruments?: Metrics; private exporter: PushMetricExporter; /** @@ -145,7 +145,7 @@ export class GCPMetricsHandler implements IMetricsHandler { ], }); const meter = meterProvider.getMeter('bigtable.googleapis.com'); - this.otelMetrics = { + this.otelInstruments = { operationLatencies: meter.createHistogram( 'bigtable.googleapis.com/internal/client/operation_latencies', { @@ -251,12 +251,12 @@ export class GCPMetricsHandler implements IMetricsHandler { finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, }; - this.otelMetrics?.operationLatencies.record(data.operationLatency, { + this.otelInstruments?.operationLatencies.record(data.operationLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelMetrics?.retryCount.add(data.retryCount, commonAttributes); - this.otelMetrics?.firstResponseLatencies.record( + this.otelInstruments?.retryCount.add(data.retryCount, commonAttributes); + this.otelInstruments?.firstResponseLatencies.record( data.firstResponseLatency, commonAttributes ); @@ -283,15 +283,15 @@ export class GCPMetricsHandler implements IMetricsHandler { attemptStatus: data.attemptStatus, clientName: data.clientName, }; - this.otelMetrics?.attemptLatencies.record(data.attemptLatency, { + this.otelInstruments?.attemptLatencies.record(data.attemptLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelMetrics?.connectivityErrorCount.add( + this.otelInstruments?.connectivityErrorCount.add( data.connectivityErrorCount, commonAttributes ); - this.otelMetrics?.serverLatencies.record(data.serverLatency, { + this.otelInstruments?.serverLatencies.record(data.serverLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); From 303c57c37d273064a79037d9367a59838e89f1bd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 12:01:22 -0500 Subject: [PATCH 253/289] Remove optional on Otel instruments --- .../gcp-metrics-handler.ts | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 
56f649b1e..5550fc90d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -69,7 +69,6 @@ interface MonitoredResourceData { * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ export class GCPMetricsHandler implements IMetricsHandler { - private initialized = false; private otelInstruments?: Metrics; private exporter: PushMetricExporter; @@ -97,9 +96,8 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private initialize(data: MonitoredResourceData) { - if (!this.initialized) { - this.initialized = true; + private initialize(data: MonitoredResourceData): Metrics { + if (!this.otelInstruments) { const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, @@ -229,6 +227,7 @@ export class GCPMetricsHandler implements IMetricsHandler { ), }; } + return this.otelInstruments; } /** @@ -237,7 +236,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - this.initialize({ + const otelInstruments = this.initialize({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -251,12 +250,12 @@ export class GCPMetricsHandler implements IMetricsHandler { finalOperationStatus: data.finalOperationStatus, clientName: data.clientName, }; - this.otelInstruments?.operationLatencies.record(data.operationLatency, { + otelInstruments.operationLatencies.record(data.operationLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelInstruments?.retryCount.add(data.retryCount, commonAttributes); - this.otelInstruments?.firstResponseLatencies.record( + otelInstruments.retryCount.add(data.retryCount, commonAttributes); + otelInstruments?.firstResponseLatencies.record( data.firstResponseLatency, commonAttributes ); @@ -269,7 +268,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - this.initialize({ + const otelInstruments = this.initialize({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -283,15 +282,15 @@ export class GCPMetricsHandler implements IMetricsHandler { attemptStatus: data.attemptStatus, clientName: data.clientName, }; - this.otelInstruments?.attemptLatencies.record(data.attemptLatency, { + otelInstruments.attemptLatencies.record(data.attemptLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); - this.otelInstruments?.connectivityErrorCount.add( + otelInstruments.connectivityErrorCount.add( data.connectivityErrorCount, commonAttributes ); - this.otelInstruments?.serverLatencies.record(data.serverLatency, { + otelInstruments.serverLatencies.record(data.serverLatency, { streamingOperation: data.streamingOperation, ...commonAttributes, }); From a8229612e28d483c707784790caf998ed4621584 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 12:02:02 -0500 Subject: [PATCH 254/289] Rename initialize to getMetrics --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 5550fc90d..c27327a3d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -96,7 +96,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private initialize(data: MonitoredResourceData): Metrics { + private getMetrics(data: MonitoredResourceData): Metrics { if (!this.otelInstruments) { const latencyBuckets = [ 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, @@ -236,7 +236,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - const otelInstruments = this.initialize({ + const otelInstruments = this.getMetrics({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, @@ -268,7 +268,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - const otelInstruments = this.initialize({ + const otelInstruments = this.getMetrics({ projectId: data.projectId, instanceId: data.metricsCollectorData.instanceId, table: data.metricsCollectorData.table, From 31fe18477abb2cb6f570c451eaa5629a000f4b1c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 15:43:22 -0500 Subject: [PATCH 255/289] Pin promisify to version 4 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 903a4b89c..a200f3687 100644 --- a/package.json +++ b/package.json @@ -52,7 +52,7 @@ "@google-cloud/opentelemetry-resource-util": "^2.4.0", "@google-cloud/precise-date": "^4.0.0", "@google-cloud/projectify": "^4.0.0", - "@google-cloud/promisify": "^4.0.0", + "@google-cloud/promisify": "4.0.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/resources": "^1.30.0", "@opentelemetry/sdk-metrics": "^1.30.0", From 30152d610949bb6360082a8270062c32ff3306dd Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 7 Mar 2025 16:07:18 -0500 Subject: [PATCH 256/289] cast to string - compiler errors --- src/instance.ts | 2 +- src/table.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/instance.ts b/src/instance.ts index 7d5569bd5..420d9cbf5 100644 --- a/src/instance.ts +++ b/src/instance.ts @@ -1310,7 +1310,7 @@ Please use the format 'my-instance' or '${bigtable.projectName}/instances/my-ins : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, diff --git a/src/table.ts b/src/table.ts index ea721b3ac..d59eaf0ca 100644 --- a/src/table.ts +++ b/src/table.ts @@ -1042,7 +1042,7 @@ export class Table extends TabularApiSurface { : callback!; if (policy.etag !== null && policy.etag !== undefined) { - (policy.etag as {} as Buffer) = Buffer.from(policy.etag); + (policy.etag as {} as Buffer) = Buffer.from(policy.etag as string); } const reqOpts = { resource: this.name, From 9584c69847316ac21852706ff827203fa852ebe0 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 17:07:42 -0400 Subject: [PATCH 257/289] Change the latency buckets Adjust the test fixtures as well to account for the latency bucket change. 
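As context for the bucket change described above, here is a minimal sketch of how explicit histogram boundaries like these are typically wired into the OpenTelemetry SDK through views. The instrument names and the MeterProvider wiring are illustrative only; the handler's real view list and provider setup live in gcp-metrics-handler.ts, and the boundary values are the ones introduced in the diff that follows.

// Sketch only: one View per latency instrument, using explicit bucket boundaries.
import {
  MeterProvider,
  View,
  ExplicitBucketHistogramAggregation,
} from '@opentelemetry/sdk-metrics';

const latencyBuckets = [
  0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130,
  160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000,
  50000, 100000, 200000, 400000, 800000, 1600000, 3200000,
];

// Names taken from the viewList in this file; only a few are shown here.
const latencyInstruments = [
  'operation_latencies',
  'attempt_latencies',
  'server_latencies',
];

const views = latencyInstruments.map(
  name =>
    new View({
      instrumentName: name,
      aggregation: new ExplicitBucketHistogramAggregation(latencyBuckets),
    })
);

// Passing the views to the MeterProvider makes every matching histogram use
// these boundaries instead of the SDK defaults.
const meterProvider = new MeterProvider({views});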
--- .../gcp-metrics-handler.ts | 7 +- test-common/expected-otel-export-input.ts | 192 +++++++++--------- 2 files changed, 97 insertions(+), 102 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index c27327a3d..46aa23873 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -99,9 +99,10 @@ export class GCPMetricsHandler implements IMetricsHandler { private getMetrics(data: MonitoredResourceData): Metrics { if (!this.otelInstruments) { const latencyBuckets = [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, - 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, - 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000, + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, + 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, + 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, + 50000.0, 100000.0, 200000.0, 400000.0, 800000.0, 1600000.0, 3200000.0, ]; const viewList = [ 'operation_latencies', diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 86f4e6ad5..2a32d423d 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -39,8 +39,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -72,10 +72,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -111,14 +111,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -137,8 +136,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -170,10 +169,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -207,14 +206,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -235,8 +233,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -268,10 +266,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 
0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -305,14 +303,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -403,10 +400,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -441,14 +438,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -468,8 +464,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -501,10 +497,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -528,14 +524,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -566,8 +561,8 @@ export const expectedOtelExportConvertedValue = { client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', - streaming: 'true', client_uid: 'fake-uuid', + streaming: 'true', }, }, resource: { @@ -599,10 +594,10 @@ export const expectedOtelExportConvertedValue = { bucketOptions: { explicitBuckets: { bounds: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], }, }, @@ -626,14 +621,13 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '1', '0', '0', '0', '0', '0', '0', - '1', - '0', '0', '0', '0', @@ -797,10 +791,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 
5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -809,10 +803,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', finalOperationStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -823,15 +817,15 @@ export const expectedOtelExportInput = { sum: 7000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -849,10 +843,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -861,10 +855,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 4, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -875,15 +869,15 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -891,10 +885,10 @@ export const expectedOtelExportInput = { }, { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -905,15 +899,15 @@ export const expectedOtelExportInput = { sum: 2000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -958,10 +952,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -983,15 +977,15 @@ export const expectedOtelExportInput = { sum: 5000, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, ], }, count: 1, @@ -1009,10 +1003,10 @@ export const expectedOtelExportInput = { valueType: 1, advice: { explicitBucketBoundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, - 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, - 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, - 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, + 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, + 5000, 10000, 20000, 50000, 100000, 200000, 400000, 800000, + 1600000, 3200000, ], }, }, @@ -1021,10 +1015,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 4, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1035,15 +1029,15 @@ export const expectedOtelExportInput = { sum: 101, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, ], }, count: 1, @@ -1051,10 +1045,10 @@ export const expectedOtelExportInput = { }, { attributes: { + streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', attemptStatus: 0, - streamingOperation: 'true', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1065,15 +1059,15 @@ export const expectedOtelExportInput = { sum: 103, buckets: { boundaries: [ - 0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, - 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, - 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, - 50000, 100000, + 0, 1, 
2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, + 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, + 2000, 5000, 10000, 20000, 50000, 100000, 200000, 400000, + 800000, 1600000, 3200000, ], counts: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + 0, ], }, count: 1, @@ -1093,7 +1087,6 @@ export const expectedOtelExportInput = { }, aggregationTemporality: 1, dataPointType: 3, - isMonotonic: true, dataPoints: [ { attributes: { @@ -1118,6 +1111,7 @@ export const expectedOtelExportInput = { value: 0, }, ], + isMonotonic: true, }, ], }, From 082c049c6ec3b17ba8cbbed40cef23fbc0910a53 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 13 Mar 2025 17:14:38 -0400 Subject: [PATCH 258/289] Updated the resultCallback comment --- src/client-side-metrics/exporter.ts | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 2aa1f9816..1f31fc0f5 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -393,7 +393,12 @@ export class CloudMonitoringExporter extends MetricExporter { await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); - // {code: 0} is typically the format the callback expects in the super class. + // The resultCallback typically accepts a value equal to {code: x} + // for some value x along with other info. When the code is equal to 0 + // then the operation completed successfully. When the code is not equal + // to 0 then the operation failed. Open telemetry logs errors to the + // console when the resultCallback passes in non-zero code values and + // logs nothing when the code is 0. 
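A minimal sketch of the exporter contract that the comment above describes. Here sendToMonitoring is a hypothetical stand-in for the real createTimeSeries call, and the local ExportResult type mirrors the interface exported from exporter.ts; this is illustrative wiring, not the library's implementation.

import {ResourceMetrics} from '@opentelemetry/sdk-metrics';
import {ServiceError} from 'google-gax';

interface ExportResult {
  code: number; // mirrors ExportResult in src/client-side-metrics/exporter.ts
}

async function exportSketch(
  metrics: ResourceMetrics,
  // Hypothetical sender standing in for MetricServiceClient.createTimeSeries.
  sendToMonitoring: (m: ResourceMetrics) => Promise<void>,
  resultCallback: (result: ExportResult) => void
) {
  try {
    await sendToMonitoring(metrics);
    // A code of 0 tells OpenTelemetry the export succeeded, so nothing is logged.
    resultCallback({code: 0});
  } catch (error) {
    // Any non-zero code is treated as a failed export and logged to the console.
    resultCallback(error as ServiceError);
  }
}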
const exportResult = {code: 0}; resultCallback(exportResult); } catch (error) { From f2e46a4d1355cbfdc929d4089602e22e7c0761f1 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 09:51:02 -0400 Subject: [PATCH 259/289] Change the test description --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index e112f7ec9..7571fcf20 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -28,7 +28,7 @@ import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the CloudMonitoringExporter', done => { + it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From dfcf56eb1839dd19d5ee91276176561ddee09738 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 09:58:21 -0400 Subject: [PATCH 260/289] Add a comment explaining what the test is about --- system-test/gcp-metrics-handler.ts | 75 ++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 7571fcf20..5ba2dbaef 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -99,4 +99,79 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it('Should export a value to two GCPMetricsHandlers', done => { + // This test ensures that when we create two GCPMetricsHandlers much like + // what we would be doing when calling readRows on two separate tables that + // the data doesn't store duplicates in the same place and export twice as + // much data as it should. + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + if (!exported) { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. 
+ done(result); + done(error); + } + } else { + resultCallback({code: 0}); + } + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } + } + })(); + }); }); From 35da5c6b27748508211b11b057ec7301fc6abaa4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 10:32:39 -0400 Subject: [PATCH 261/289] Completed the test for two metrics handlers --- system-test/gcp-metrics-handler.ts | 110 +++++++++++++++++++++++++---- 1 file changed, 96 insertions(+), 14 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 5ba2dbaef..610dee1c9 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -21,11 +21,37 @@ import { } from '../src/client-side-metrics/metrics-handler'; import { CloudMonitoringExporter, + ExportInput, ExportResult, } from '../src/client-side-metrics/exporter'; import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; +import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; + +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. + */ +function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { @@ -99,7 +125,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it('Should export a value to two GCPMetricsHandlers', done => { + it.only('Should export a value to two GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as @@ -116,33 +142,81 @@ describe('Bigtable/GCPMetricsHandler', () => { it receives once. Since done cannot be called multiple times in mocha, exported variable ensures we only test the value export receives one time. 
*/ - let exported = false; + let exportedCount = 0; function getTestResultCallback( - resultCallback: (result: ExportResult) => void + resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (!exported) { - exported = true; + if (exportedCount < 2) { + exportedCount++; try { - clearTimeout(timeout); assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); } catch (error) { // Code isn't 0 so report the original error. done(result); done(error); } + if (exportedCount === 2) { + // We are expecting two calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + resultCallback({code: 0}); } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. resultCallback({code: 0}); } }; } class MockExporter extends CloudMonitoringExporter { export( - metrics: ResourceMetrics, - resultCallback: (result: ExportResult) => void + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void ): void { + try { + // The code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. + + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < + (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ + index + ], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); + } + // The code below uses the test callback to ensure the export was successful. 
const testResultCallback = getTestResultCallback(resultCallback); super.export(metrics, testResultCallback); } @@ -159,11 +233,12 @@ describe('Bigtable/GCPMetricsHandler', () => { }); }); const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const handler2 = new GCPMetricsHandler(new MockExporter({projectId})); const transformedRequestsHandled = JSON.parse( - JSON.stringify(expectedRequestsHandled).replace( - /my-project/g, - projectId - ) + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) ); for (const request of transformedRequestsHandled) { if (request.attemptLatency) { @@ -172,6 +247,13 @@ describe('Bigtable/GCPMetricsHandler', () => { handler.onOperationComplete(request as OnOperationCompleteData); } } + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler2.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler2.onOperationComplete(request as OnOperationCompleteData); + } + } })(); }); }); From b5ae964074158a00111d0edf32a7c79b84c6366b Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 10:33:39 -0400 Subject: [PATCH 262/289] remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 610dee1c9..11deebece 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -125,7 +125,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it.only('Should export a value to two GCPMetricsHandlers', done => { + it('Should export a value to two GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as From b575010f22532f9d940fec4cf27dd44af91f304c Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:06:59 -0400 Subject: [PATCH 263/289] Cast as histogram to make compile error go away --- src/client-side-metrics/exporter.ts | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 1f31fc0f5..b65b5a79b 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -13,7 +13,7 @@ // limitations under the License. import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import {ExponentialHistogram, Histogram, ResourceMetrics} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; @@ -312,13 +312,16 @@ export function metricsToRequest(exportArgs: ExportInput) { value: { distributionValue: { count: String(value.count), - mean: value.count ? value.sum / value.count : 0, + mean: + value.count && value.sum ? 
value.sum / value.count : 0, bucketOptions: { explicitBuckets: { - bounds: value.buckets.boundaries, + bounds: (value as Histogram).buckets.boundaries, }, }, - bucketCounts: value.buckets.counts.map(String), + bucketCounts: (value as Histogram).buckets.counts.map( + String + ), }, }, }, From 822c14e57bb003ab298bceb45682f706f5ebfd42 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:17:19 -0400 Subject: [PATCH 264/289] Move the duplicate copies of replaceTimestamps into one file --- system-test/gcp-metrics-handler.ts | 27 ++----------------- test-common/replace-timestamps.ts | 25 +++++++++++++++++ test/metrics-collector/gcp-metrics-handler.ts | 25 +---------------- 3 files changed, 28 insertions(+), 49 deletions(-) create mode 100644 test-common/replace-timestamps.ts diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 11deebece..fe040a7b0 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -28,33 +28,10 @@ import {Bigtable} from '../src'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; - -/** - * Replaces the timestamp values within an `ExportInput` object with - * standardized test values. - * - * This function is designed for testing purposes to make timestamp comparisons - * in tests more predictable and reliable. It recursively traverses the - * `ExportInput` object, finds all `startTime` and `endTime` properties, and - * replaces their numeric values with standardized test values. - */ -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it('Should export a value to the GCPMetricsHandler', done => { + it.only('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts new file mode 100644 index 000000000..616e496ad --- /dev/null +++ b/test-common/replace-timestamps.ts @@ -0,0 +1,25 @@ +import {expectedOtelExportInput} from './expected-otel-export-input'; + +/** + * Replaces the timestamp values within an `ExportInput` object with + * standardized test values. + * + * This function is designed for testing purposes to make timestamp comparisons + * in tests more predictable and reliable. It recursively traverses the + * `ExportInput` object, finds all `startTime` and `endTime` properties, and + * replaces their numeric values with standardized test values. 
+ */ +export function replaceTimestamps( + request: typeof expectedOtelExportInput, + newStartTime: [number, number], + newEndTime: [number, number] +) { + request.scopeMetrics.forEach(scopeMetric => { + scopeMetric.metrics.forEach(metric => { + metric.dataPoints.forEach(dataPoint => { + dataPoint.startTime = newStartTime; + dataPoint.endTime = newEndTime; + }); + }); + }); +} diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index c22b32ea9..1858bf9a8 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -31,30 +31,7 @@ import { expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; import * as assert from 'assert'; - -/** - * Replaces the timestamp values within an `ExportInput` object with - * standardized test values. - * - * This function is designed for testing purposes to make timestamp comparisons - * in tests more predictable and reliable. It recursively traverses the - * `ExportInput` object, finds all `startTime` and `endTime` properties, and - * replaces their numeric values with standardized test values. - */ -function replaceTimestamps( - request: typeof expectedOtelExportInput, - newStartTime: [number, number], - newEndTime: [number, number] -) { - request.scopeMetrics.forEach(scopeMetric => { - scopeMetric.metrics.forEach(metric => { - metric.dataPoints.forEach(dataPoint => { - dataPoint.startTime = newStartTime; - dataPoint.endTime = newEndTime; - }); - }); - }); -} +import {replaceTimestamps} from '../../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value ready for sending to the CloudMonitoringExporter', function (done) { From 561889c99627189c58ca0491a16934e706e76009 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 11:59:11 -0400 Subject: [PATCH 265/289] Take steps to eliminate the ExportInput interface --- src/client-side-metrics/exporter.ts | 49 +++++++++++++------ system-test/gcp-metrics-handler.ts | 14 +++--- test/metrics-collector/gcp-metrics-handler.ts | 20 +++----- test/metrics-collector/metricsToRequest.ts | 5 +- 4 files changed, 52 insertions(+), 36 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index b65b5a79b..0824b40ec 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -13,7 +13,11 @@ // limitations under the License. import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; -import {ExponentialHistogram, Histogram, ResourceMetrics} from '@opentelemetry/sdk-metrics'; +import { + ExponentialHistogram, + Histogram, + ResourceMetrics, +} from '@opentelemetry/sdk-metrics'; import {ServiceError} from 'google-gax'; import {MetricServiceClient} from '@google-cloud/monitoring'; import {google} from '@google-cloud/monitoring/build/protos/protos'; @@ -194,7 +198,9 @@ export interface ExportInput { * (which have more complex, object-based values). * */ -function isCounterValue(value: DistributionValue | number): value is number { +function isCounterValue( + value: number | Histogram | ExponentialHistogram +): value is number { return typeof value === 'number'; } @@ -206,7 +212,7 @@ function isCounterValue(value: DistributionValue | number): value is number { * metric attributes, data points, and aggregation information, into an object * that conforms to the expected request format of the Cloud Monitoring API. 
* - * @param {ExportInput} exportArgs - The OpenTelemetry metrics data to be converted. This + * @param {ResourceMetrics} exportArgs - The OpenTelemetry metrics data to be converted. This * object contains resource attributes, scope information, and a list of * metrics with their associated data points. * @@ -233,16 +239,27 @@ function isCounterValue(value: DistributionValue | number): value is number { * * */ -export function metricsToRequest(exportArgs: ExportInput) { +export function metricsToRequest(exportArgs: ResourceMetrics) { + type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; + const resourcesWithSyncAttributes = + exportArgs.resource as unknown as WithSyncAttributes; const timeSeriesArray = []; const resourceLabels = { - cluster: exportArgs.resource._syncAttributes['monitored_resource.cluster'], + cluster: + resourcesWithSyncAttributes._syncAttributes['monitored_resource.cluster'], instance: - exportArgs.resource._syncAttributes['monitored_resource.instance_id'], + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.instance_id' + ], project_id: - exportArgs.resource._syncAttributes['monitored_resource.project_id'], - table: exportArgs.resource._syncAttributes['monitored_resource.table'], - zone: exportArgs.resource._syncAttributes['monitored_resource.zone'], + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.project_id' + ], + table: + resourcesWithSyncAttributes._syncAttributes['monitored_resource.table'], + zone: resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.zone' + ], }; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { @@ -263,9 +280,11 @@ export function metricsToRequest(exportArgs: ExportInput) { client_name: allAttributes.clientName, method: allAttributes.methodName, status: - (allAttributes as OnAttemptAttribute).attemptStatus?.toString() ?? ( - allAttributes as OnOperationAttribute + allAttributes as {attemptStatus: number} + ).attemptStatus?.toString() ?? 
+ ( + allAttributes as {finalOperationStatus: number} ).finalOperationStatus?.toString(), client_uid: allAttributes.clientUid, }, @@ -276,7 +295,9 @@ export function metricsToRequest(exportArgs: ExportInput) { labels: metricLabels, }; const resource = { - type: exportArgs.resource._syncAttributes['monitored_resource.type'], + type: resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.type' + ], labels: resourceLabels, }; const interval = { @@ -333,7 +354,7 @@ export function metricsToRequest(exportArgs: ExportInput) { } } return { - name: `projects/${exportArgs.resource._syncAttributes['monitored_resource.project_id']}`, + name: `projects/${resourcesWithSyncAttributes._syncAttributes['monitored_resource.project_id']}`, timeSeries: timeSeriesArray, }; } @@ -392,7 +413,7 @@ export class CloudMonitoringExporter extends MetricExporter { ): void { (async () => { try { - const request = metricsToRequest(metrics as unknown as ExportInput); + const request = metricsToRequest(metrics); await this.monitoringClient.createTimeSeries( request as ICreateTimeSeriesRequest ); diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index fe040a7b0..52ecf28b3 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -21,7 +21,6 @@ import { } from '../src/client-side-metrics/metrics-handler'; import { CloudMonitoringExporter, - ExportInput, ExportResult, } from '../src/client-side-metrics/exporter'; import {Bigtable} from '../src'; @@ -164,27 +163,26 @@ describe('Bigtable/GCPMetricsHandler', () => { // to have artificial times because then sending the data to the // metric service client will fail. Therefore, we must make a copy // of the metrics and use that. - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); replaceTimestamps( parsedExportInput as unknown as typeof expectedOtelExportInput, [123, 789], [456, 789] ); assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length, + parsedExportInput.scopeMetrics[0].metrics.length, expectedOtelExportInput.scopeMetrics[0].metrics.length ); for ( let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics.length; + index < parsedExportInput.scopeMetrics[0].metrics.length; index++ ) { // We need to compare pointwise because mocha truncates to an 8192 character limit. 
assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], + parsedExportInput.scopeMetrics[0].metrics[index], expectedOtelExportInput.scopeMetrics[0].metrics[index] ); } diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index 1858bf9a8..b870a17ac 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -15,7 +15,6 @@ import {describe} from 'mocha'; import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; import { - ExportInput, ExportResult, metricsToRequest, } from '../../src/client-side-metrics/exporter'; @@ -63,24 +62,21 @@ describe('Bigtable/GCPMetricsHandler', () => { [123, 789], [456, 789] ); - const parsedExportInput = JSON.parse(JSON.stringify(metrics)); + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length, + parsedExportInput.scopeMetrics[0].metrics.length, expectedOtelExportInput.scopeMetrics[0].metrics.length ); for ( let index = 0; - index < - (parsedExportInput as ExportInput).scopeMetrics[0].metrics - .length; + index < parsedExportInput.scopeMetrics[0].metrics.length; index++ ) { // We need to compare pointwise because mocha truncates to an 8192 character limit. assert.deepStrictEqual( - (parsedExportInput as ExportInput).scopeMetrics[0].metrics[ - index - ], + parsedExportInput.scopeMetrics[0].metrics[index], expectedOtelExportInput.scopeMetrics[0].metrics[index] ); } @@ -88,9 +84,7 @@ describe('Bigtable/GCPMetricsHandler', () => { JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput ); - const convertedRequest = metricsToRequest( - expectedOtelExportInput as unknown as ExportInput - ); + const convertedRequest = metricsToRequest(metrics); assert.deepStrictEqual( convertedRequest.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length diff --git a/test/metrics-collector/metricsToRequest.ts b/test/metrics-collector/metricsToRequest.ts index e77917d79..8777bba4f 100644 --- a/test/metrics-collector/metricsToRequest.ts +++ b/test/metrics-collector/metricsToRequest.ts @@ -19,10 +19,13 @@ import { expectedOtelExportConvertedValue, expectedOtelExportInput, } from '../../test-common/expected-otel-export-input'; +import {ResourceMetrics} from '@opentelemetry/sdk-metrics'; describe('Bigtable/metricsToRequest', () => { it('Converts an otel request to a request ready for the metric service client', () => { - const convertedValue = metricsToRequest(expectedOtelExportInput); + const convertedValue = metricsToRequest( + expectedOtelExportInput as unknown as ResourceMetrics + ); assert.deepStrictEqual( convertedValue.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length From f423c578e9b8e812feb8f9630fda5ebc975434d3 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:12:08 -0400 Subject: [PATCH 266/289] Add a header --- test-common/replace-timestamps.ts | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/test-common/replace-timestamps.ts b/test-common/replace-timestamps.ts index 616e496ad..07095d906 100644 --- a/test-common/replace-timestamps.ts +++ b/test-common/replace-timestamps.ts @@ -1,3 +1,17 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import {expectedOtelExportInput} from './expected-otel-export-input'; /** From 9076be156cf9e88f76256231cb849011ec955958 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:13:35 -0400 Subject: [PATCH 267/289] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 52ecf28b3..a4e0a3160 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -30,7 +30,7 @@ import {expectedOtelExportInput} from '../test-common/expected-otel-export-input import {replaceTimestamps} from '../test-common/replace-timestamps'; describe('Bigtable/GCPMetricsHandler', () => { - it.only('Should export a value to the GCPMetricsHandler', done => { + it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* We need to create a timeout here because if we don't then mocha shuts down From ead3f5e797b4563c998311d7d5aded08a66f83c4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:23:03 -0400 Subject: [PATCH 268/289] Eliminate ExportInput and dependencies --- src/client-side-metrics/exporter.ts | 162 ---------------------------- 1 file changed, 162 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 0824b40ec..1a04bcc18 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -27,168 +27,6 @@ export interface ExportResult { code: number; } -/** - * Attributes associated with the completion of a single attempt of a Bigtable - * operation. These attributes provide context about the specific attempt, - * its status, and the method involved. They are used for recording metrics - * such as attempt latency and connectivity errors. - * - * @property methodName - The name of the Bigtable method that was attempted (e.g., - * 'Bigtable.ReadRows', 'Bigtable.MutateRows'). - * @property clientUid - A unique identifier for the client that initiated the - * attempt. - * @property appProfileId - (Optional) The ID of the application profile used for - * the attempt. - * @property attemptStatus - The status code of the attempt. A value of `0` - * typically indicates success (grpc.status.OK), while other values indicate - * different types of errors. - * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. - * Will be "true" or "false" if present. - * @property clientName - The name of the client library making the attempt - * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). - */ -interface OnAttemptAttribute { - methodName: string; - clientUid: string; - appProfileId?: string; - attemptStatus: number; - streamingOperation?: string; - clientName: string; -} - -/** - * Attributes associated with the completion of a Bigtable operation. These - * attributes provide context about the operation, its final status, and the - * method involved. They are used for recording metrics such as operation - * latency. 
- * - * @property methodName - The name of the Bigtable method that was performed - * (e.g., 'Bigtable.ReadRows', 'Bigtable.MutateRows'). - * @property clientUid - A unique identifier for the client that initiated the - * operation. - * @property appProfileId - (Optional) The ID of the application profile used for - * the operation. - * @property finalOperationStatus - The final status code of the operation. A - * value of `0` typically indicates success (grpc.status.OK), while other - * values indicate different types of errors. - * @property streamingOperation - (Optional) Indicates if the operation is a streaming operation. - * Will be "true" or "false" if present. - * @property clientName - The name of the client library performing the operation - * (e.g., 'nodejs-bigtable', 'go-bigtable/1.35.0'). - */ -interface OnOperationAttribute { - methodName: string; - clientUid: string; - appProfileId?: string; - finalOperationStatus: number; - streamingOperation?: string; - clientName: string; -} - -/** - * Represents a generic metric in the OpenTelemetry format. - * - * This interface describes the structure of a metric, which can represent - * either a counter or a distribution (histogram). It includes the metric's - * descriptor, the type of data it collects, and the actual data points. - * - */ -interface Metric { - descriptor: { - name: string; - unit: string; - description?: string; - type?: string; - valueType?: number; - advice?: {}; - }; - aggregationTemporality?: number; - dataPointType?: number; - dataPoints: { - attributes: OnAttemptAttribute | OnOperationAttribute; - startTime: number[]; - endTime: number[]; - value: Value; - }[]; -} - -interface DistributionValue { - min?: number; - max?: number; - sum: number; - count: number; - buckets: { - boundaries: number[]; - counts: number[]; - }; -} - -/** - * Represents a metric that measures the distribution of values. - * - * Distribution metrics, also known as histograms, are used to track the - * statistical distribution of a set of measurements. They allow you to capture - * not only the count and sum of the measurements but also how they are spread - * across different ranges (buckets). This makes them suitable for tracking - * latencies, sizes, or other metrics where the distribution is important. - * - */ -type DistributionMetric = Metric; - -/** - * Represents a metric that counts the number of occurrences of an event or - * the cumulative value of a quantity over time. - * - * Counter metrics are used to track quantities that increase over time, such - * as the number of requests, errors, or retries. They are always - * non-negative and can only increase or remain constant. - * - */ -type CounterMetric = Metric; - -/** - * Represents the input data structure for exporting OpenTelemetry metrics. - * - * This interface defines the structure of the object that is passed to the - * `metricsToRequest` function to convert OpenTelemetry metrics into a format - * suitable for the Google Cloud Monitoring API. - * - * It contains information about the monitored resource and an array of - * scope metrics, which include various types of metrics (counters and - * distributions) and their associated data points. - * - * @remarks - * This structure is specifically designed to hold OpenTelemetry metrics data - * as it is exported from the Bigtable client library. It represents the data - * before it is transformed into the Cloud Monitoring API's `TimeSeries` - * format. 
- * - * Each `CounterMetric` and `DistributionMetric` within the `scopeMetrics` - * array represents a different type of measurement, such as retry counts, - * operation latencies, attempt latencies etc. Each metric contains an array of dataPoints - * Each `dataPoint` contains the `attributes`, `startTime`, `endTime` and `value`. - * `value` will be a number for a counter metric and an object for a distribution metric. - */ -export interface ExportInput { - resource: { - _syncAttributes: { - 'monitored_resource.type': string; - 'monitored_resource.project_id': string; - 'monitored_resource.instance_id': string; - 'monitored_resource.table': string; - 'monitored_resource.cluster': string; - 'monitored_resource.zone': string; - }; - }; - scopeMetrics: { - scope: { - name: string; - version: string; - }; - metrics: (CounterMetric | DistributionMetric)[]; - }[]; -} - /** * Type guard function to determine if a given value is a counter value (a number). * From fa6c3fd877ccc34868842c5c12a3a1ce3f3b74af Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 13:36:08 -0400 Subject: [PATCH 269/289] Eliminate constant --- src/client-side-metrics/exporter.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 1a04bcc18..8fe8f1585 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -261,8 +261,7 @@ export class CloudMonitoringExporter extends MetricExporter { // to 0 then the operation failed. Open telemetry logs errors to the // console when the resultCallback passes in non-zero code values and // logs nothing when the code is 0. - const exportResult = {code: 0}; - resultCallback(exportResult); + resultCallback({code: 0}); } catch (error) { resultCallback(error as ServiceError); } From a587d0c97b83b061b20016cf5edb062049284d10 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 14:28:40 -0400 Subject: [PATCH 270/289] Push conversion to status back to metrics collect Also add if statement for timeseries. --- src/client-side-metrics/exporter.ts | 83 +++++++++---------- .../gcp-metrics-handler.ts | 4 +- src/client-side-metrics/metrics-handler.ts | 3 +- .../operation-metrics-collector.ts | 4 +- test-common/expected-otel-export-input.ts | 18 ++-- test-common/metrics-handler-fixture.ts | 6 +- .../metrics-collector/typical-method-call.txt | 6 +- 7 files changed, 59 insertions(+), 65 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 8fe8f1585..c49de82e3 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -117,13 +117,7 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { app_profile: allAttributes.appProfileId, client_name: allAttributes.clientName, method: allAttributes.methodName, - status: - ( - allAttributes as {attemptStatus: number} - ).attemptStatus?.toString() ?? - ( - allAttributes as {finalOperationStatus: number} - ).finalOperationStatus?.toString(), + status: allAttributes.status, client_uid: allAttributes.clientUid, }, streaming ? {streaming} : null @@ -146,48 +140,49 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { seconds: dataPoint.startTime[0], }, }; - const timeSeries = isCounterValue(value) - ? 
{ - metric, - resource, - valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, - }, + if (isCounterValue(value)) { + timeSeriesArray.push({ + metric, + resource, + valueType: 'INT64', + points: [ + { + interval, + value: { + int64Value: dataPoint.value, }, - ], - } - : { - metric, - resource, - metricKind: 'CUMULATIVE', - valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: - value.count && value.sum ? value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: (value as Histogram).buckets.boundaries, - }, + }, + ], + }); + } else { + timeSeriesArray.push({ + metric, + resource, + metricKind: 'CUMULATIVE', + valueType: 'DISTRIBUTION', + points: [ + { + interval, + value: { + distributionValue: { + count: String(value.count), + mean: + value.count && value.sum ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: (value as Histogram).buckets.boundaries, }, - bucketCounts: (value as Histogram).buckets.counts.map( - String - ), }, + bucketCounts: (value as Histogram).buckets.counts.map( + String + ), }, }, - ], - unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified - }; - timeSeriesArray.push(timeSeries); + }, + ], + unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified + }); + } } } } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 46aa23873..16fdfa9c2 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -248,7 +248,7 @@ export class GCPMetricsHandler implements IMetricsHandler { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, - finalOperationStatus: data.finalOperationStatus, + status: data.status, clientName: data.clientName, }; otelInstruments.operationLatencies.record(data.operationLatency, { @@ -280,7 +280,7 @@ export class GCPMetricsHandler implements IMetricsHandler { appProfileId: data.metricsCollectorData.appProfileId, methodName: data.metricsCollectorData.methodName, clientUid: data.metricsCollectorData.clientUid, - attemptStatus: data.attemptStatus, + status: data.status, clientName: data.clientName, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 9b26176fb..7aad1bd7f 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -36,20 +36,19 @@ interface StandardData { metricsCollectorData: IMetricsCollectorData; clientName: string; streamingOperation: StreamingState; + status: string; } export interface OnOperationCompleteData extends StandardData { firstResponseLatency?: number; operationLatency: number; retryCount?: number; - finalOperationStatus: grpc.status; } export interface OnAttemptCompleteData extends StandardData { attemptLatency: number; serverLatency?: number; connectivityErrorCount: number; - attemptStatus: grpc.status; } /** diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 4fd24b4d9..12f405bc1 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -155,7 +155,7 @@ export class 
OperationMetricsCollector { serverLatency: this.serverTime ?? undefined, connectivityErrorCount: this.connectivityErrorCount, streamingOperation: this.streamingOperation, - attemptStatus, + status: attemptStatus.toString(), clientName: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, @@ -224,7 +224,7 @@ export class OperationMetricsCollector { this.metricsHandlers.forEach(metricsHandler => { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete({ - finalOperationStatus: finalOperationStatus, + status: finalOperationStatus.toString(), streamingOperation: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), clientName: `nodejs-bigtable/${version}`, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 2a32d423d..d822001dc 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -806,7 +806,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -858,7 +858,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -888,7 +888,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -932,7 +932,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -966,7 +966,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - finalOperationStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1018,7 +1018,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1048,7 +1048,7 @@ export const expectedOtelExportInput = { streamingOperation: 'true', methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1092,7 +1092,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', }, startTime: [123, 789], @@ -1103,7 +1103,7 @@ export const expectedOtelExportInput = { attributes: { methodName: 'Bigtable.ReadRows', clientUid: 'fake-uuid', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', }, startTime: [123, 789], diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index a49a91158..3bfe4f490 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -18,7 +18,7 @@ export const expectedRequestsHandled = [ serverLatency: 101, connectivityErrorCount: 0, streamingOperation: 'true', - attemptStatus: 4, + status: '4', clientName: 'nodejs-bigtable', metricsCollectorData: { appProfileId: 
undefined, @@ -36,7 +36,7 @@ export const expectedRequestsHandled = [ serverLatency: 103, connectivityErrorCount: 0, streamingOperation: 'true', - attemptStatus: 0, + status: '0', clientName: 'nodejs-bigtable', metricsCollectorData: { appProfileId: undefined, @@ -50,7 +50,7 @@ export const expectedRequestsHandled = [ projectId: 'my-project', }, { - finalOperationStatus: 0, + status: '0', streamingOperation: 'true', metricsCollectorData: { appProfileId: undefined, diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index 576a66102..da43e899b 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -11,7 +11,7 @@ getDate call returns 3000 ms 8. A transient error occurs. getDate call returns 4000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","attemptStatus":4,"clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","status":"4","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. @@ -24,7 +24,7 @@ getDate call returns 6000 ms 16. Stream ends, operation completes getDate call returns 7000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","attemptStatus":0,"clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","status":"0","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} getDate call returns 8000 ms Recording parameters for onOperationComplete: -{"finalOperationStatus":0,"streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} +{"status":"0","streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} From 311d55581bfd113bccd2b5e18894322810831048 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 15:44:03 -0400 Subject: [PATCH 271/289] Change property names to match MetricServiceClient expectation --- 
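A note on this rename: the camelCase attribute keys (methodName, clientUid, clientName, streamingOperation) become the snake_case label names that the MetricServiceClient expects on a time series, so metricsToRequest can pass dataPoint.attributes through as metric labels without a translation step. Below is a minimal illustrative sketch of the resulting label shape, with values borrowed from the test fixtures in this series; the variable names are placeholders, not an excerpt of exporter.ts.

  // Attributes are now recorded directly under the Cloud Monitoring label names.
  const attributes = {
    method: 'Bigtable.ReadRows',    // previously methodName
    client_uid: 'fake-uuid',        // previously clientUid
    client_name: 'nodejs-bigtable', // previously clientName
    status: '0',                    // previously attemptStatus / finalOperationStatus
    streaming: 'true',              // previously streamingOperation
  };

  // In metricsToRequest the labels for each data point can then be used as-is:
  const metric = {
    type: 'bigtable.googleapis.com/internal/client/attempt_latencies',
    labels: attributes,
  };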
src/client-side-metrics/exporter.ts | 20 +---- .../gcp-metrics-handler.ts | 18 ++--- src/client-side-metrics/metrics-handler.ts | 8 +- .../operation-metrics-collector.ts | 29 ++++--- test-common/expected-otel-export-input.ts | 81 +++++++++---------- test-common/metrics-handler-fixture.ts | 27 +++---- test-common/test-metrics-handler.ts | 4 +- .../metrics-collector/typical-method-call.txt | 6 +- 8 files changed, 87 insertions(+), 106 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index c49de82e3..ad985ec90 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -104,27 +104,9 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; - const allAttributes = dataPoint.attributes; - const streaming = allAttributes.streamingOperation; - /* - metricLabels are built from the open telemetry attributes that are set - when a data point is recorded. This means that for one metric there may - be multiple time series' with different attributes, but the resource - labels will always be the same for a particular export call. - */ - const metricLabels = Object.assign( - { - app_profile: allAttributes.appProfileId, - client_name: allAttributes.clientName, - method: allAttributes.methodName, - status: allAttributes.status, - client_uid: allAttributes.clientUid, - }, - streaming ? {streaming} : null - ); const metric = { type: metricName, - labels: metricLabels, + labels: dataPoint.attributes, }; const resource = { type: resourcesWithSyncAttributes._syncAttributes[ diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 16fdfa9c2..69753b7c4 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -246,13 +246,13 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, + method: data.metricsCollectorData.method, + client_uid: data.metricsCollectorData.client_uid, status: data.status, - clientName: data.clientName, + client_name: data.client_name, }; otelInstruments.operationLatencies.record(data.operationLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, ...commonAttributes, }); otelInstruments.retryCount.add(data.retryCount, commonAttributes); @@ -278,13 +278,13 @@ export class GCPMetricsHandler implements IMetricsHandler { }); const commonAttributes = { appProfileId: data.metricsCollectorData.appProfileId, - methodName: data.metricsCollectorData.methodName, - clientUid: data.metricsCollectorData.clientUid, + method: data.metricsCollectorData.method, + client_uid: data.metricsCollectorData.client_uid, status: data.status, - clientName: data.clientName, + client_name: data.client_name, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, ...commonAttributes, }); otelInstruments.connectivityErrorCount.add( @@ -292,7 +292,7 @@ export class GCPMetricsHandler implements IMetricsHandler { commonAttributes ); otelInstruments.serverLatencies.record(data.serverLatency, { - streamingOperation: data.streamingOperation, + streaming: data.streaming, 
...commonAttributes, }); } diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index 7aad1bd7f..d7fb0af64 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -27,15 +27,15 @@ type IMetricsCollectorData = { cluster?: string; zone?: string; appProfileId?: string; - methodName: MethodName; - clientUid: string; + method: MethodName; + client_uid: string; }; interface StandardData { projectId: string; metricsCollectorData: IMetricsCollectorData; - clientName: string; - streamingOperation: StreamingState; + client_name: string; + streaming: StreamingState; status: string; } diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 12f405bc1..6c5e86232 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -105,15 +105,18 @@ export class OperationMetricsCollector { } private getMetricsCollectorData() { - return { - instanceId: this.tabularApiSurface.instance.id, - table: this.tabularApiSurface.id, - cluster: this.cluster, - zone: this.zone, - appProfileId: this.tabularApiSurface.bigtable.appProfileId, - methodName: this.methodName, - clientUid: this.tabularApiSurface.bigtable.clientUid, - }; + const appProfileId = this.tabularApiSurface.bigtable.appProfileId; + return Object.assign( + { + instanceId: this.tabularApiSurface.instance.id, + table: this.tabularApiSurface.id, + cluster: this.cluster, + zone: this.zone, + method: this.methodName, + client_uid: this.tabularApiSurface.bigtable.clientUid, + }, + appProfileId ? {app_profile_id: appProfileId} : {} + ); } /** @@ -154,9 +157,9 @@ export class OperationMetricsCollector { attemptLatency: totalTime, serverLatency: this.serverTime ?? 
undefined, connectivityErrorCount: this.connectivityErrorCount, - streamingOperation: this.streamingOperation, + streaming: this.streamingOperation, status: attemptStatus.toString(), - clientName: `nodejs-bigtable/${version}`, + client_name: `nodejs-bigtable/${version}`, metricsCollectorData: this.getMetricsCollectorData(), projectId, }); @@ -225,9 +228,9 @@ export class OperationMetricsCollector { if (metricsHandler.onOperationComplete) { metricsHandler.onOperationComplete({ status: finalOperationStatus.toString(), - streamingOperation: this.streamingOperation, + streaming: this.streamingOperation, metricsCollectorData: this.getMetricsCollectorData(), - clientName: `nodejs-bigtable/${version}`, + client_name: `nodejs-bigtable/${version}`, projectId, operationLatency: totalTime, retryCount: this.attemptCount - 1, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index d822001dc..e15833a01 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -35,7 +35,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { - app_profile: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -132,7 +131,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -229,7 +228,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -326,7 +325,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -364,7 +363,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -460,7 +459,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -557,7 +556,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -654,7 +653,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -692,7 +691,7 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile: undefined, + app_profile_id: undefined, client_name: 
'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -803,11 +802,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -855,11 +854,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -885,11 +884,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -930,10 +929,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -964,10 +963,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1015,11 +1014,11 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1045,11 +1044,11 @@ export const expectedOtelExportInput = { }, { attributes: { - streamingOperation: 'true', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + streaming: 'true', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1090,10 +1089,10 @@ export const expectedOtelExportInput = { dataPoints: [ { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], @@ -1101,10 +1100,10 @@ export const expectedOtelExportInput = { }, { attributes: { - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', }, startTime: [123, 789], endTime: [456, 789], diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 3bfe4f490..5e947677e 100644 --- a/test-common/metrics-handler-fixture.ts +++ 
b/test-common/metrics-handler-fixture.ts @@ -17,17 +17,16 @@ export const expectedRequestsHandled = [ attemptLatency: 2000, serverLatency: 101, connectivityErrorCount: 0, - streamingOperation: 'true', + streaming: 'true', status: '4', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, projectId: 'my-project', }, @@ -35,33 +34,31 @@ export const expectedRequestsHandled = [ attemptLatency: 2000, serverLatency: 103, connectivityErrorCount: 0, - streamingOperation: 'true', + streaming: 'true', status: '0', - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, projectId: 'my-project', }, { status: '0', - streamingOperation: 'true', + streaming: 'true', metricsCollectorData: { - appProfileId: undefined, instanceId: 'fakeInstanceId', table: 'fakeTableId', cluster: 'fake-cluster3', zone: 'us-west1-c', - methodName: 'Bigtable.ReadRows', - clientUid: 'fake-uuid', + method: 'Bigtable.ReadRows', + client_uid: 'fake-uuid', }, - clientName: 'nodejs-bigtable', + client_name: 'nodejs-bigtable', projectId: 'my-project', operationLatency: 7000, retryCount: 1, diff --git a/test-common/test-metrics-handler.ts b/test-common/test-metrics-handler.ts index 1734cf18f..61257913f 100644 --- a/test-common/test-metrics-handler.ts +++ b/test-common/test-metrics-handler.ts @@ -35,7 +35,7 @@ export class TestMetricsHandler implements IMetricsHandler { */ onOperationComplete(data: OnOperationCompleteData) { this.requestsHandled.push(data); - data.clientName = 'nodejs-bigtable'; + data.client_name = 'nodejs-bigtable'; this.messages.value += 'Recording parameters for onOperationComplete:\n'; this.messages.value += `${JSON.stringify(data)}\n`; } @@ -46,7 +46,7 @@ export class TestMetricsHandler implements IMetricsHandler { */ onAttemptComplete(data: OnAttemptCompleteData) { this.requestsHandled.push(data); - data.clientName = 'nodejs-bigtable'; + data.client_name = 'nodejs-bigtable'; this.messages.value += 'Recording parameters for onAttemptComplete:\n'; this.messages.value += `${JSON.stringify(data)}\n`; } diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index da43e899b..bc6de7ad7 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -11,7 +11,7 @@ getDate call returns 3000 ms 8. A transient error occurs. 
getDate call returns 4000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streamingOperation":"true","status":"4","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":101,"connectivityErrorCount":0,"streaming":"true","status":"4","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} 9. After a timeout, the second attempt is made. getDate call returns 5000 ms 10. Client receives status information. @@ -24,7 +24,7 @@ getDate call returns 6000 ms 16. Stream ends, operation completes getDate call returns 7000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streamingOperation":"true","status":"0","clientName":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"projectId":"my-project"} +{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} getDate call returns 8000 ms Recording parameters for onOperationComplete: -{"status":"0","streamingOperation":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","methodName":"Bigtable.ReadRows","clientUid":"fake-uuid"},"clientName":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} +{"status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"client_name":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} From 117473b2bfb956204f1619b0391dd526134bb368 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 16:37:51 -0400 Subject: [PATCH 272/289] Solve the app_profile_id issue --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 2 +- test-common/expected-otel-export-input.ts | 8 -------- test/metrics-collector/gcp-metrics-handler.ts | 2 +- 4 files changed, 4 insertions(+), 12 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 69753b7c4..d80a8344d 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -245,7 +245,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - appProfileId: data.metricsCollectorData.appProfileId, + app_profile_id: data.metricsCollectorData.app_profile_id, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: 
data.status, @@ -277,7 +277,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - appProfileId: data.metricsCollectorData.appProfileId, + app_profile_id: data.metricsCollectorData.app_profile_id, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index d7fb0af64..e4cb29758 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -26,7 +26,7 @@ type IMetricsCollectorData = { table: string; cluster?: string; zone?: string; - appProfileId?: string; + app_profile_id?: string; method: MethodName; client_uid: string; }; diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index e15833a01..2d7b43401 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -131,7 +131,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -228,7 +227,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -325,7 +323,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -363,7 +360,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -459,7 +455,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -556,7 +551,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', @@ -653,7 +647,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '4', @@ -691,7 +684,6 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - app_profile_id: undefined, client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', status: '0', diff --git a/test/metrics-collector/gcp-metrics-handler.ts b/test/metrics-collector/gcp-metrics-handler.ts index b870a17ac..f4f81eacd 100644 --- a/test/metrics-collector/gcp-metrics-handler.ts +++ b/test/metrics-collector/gcp-metrics-handler.ts @@ -84,7 +84,7 @@ describe('Bigtable/GCPMetricsHandler', () => { JSON.parse(JSON.stringify(metrics)), expectedOtelExportInput ); - const convertedRequest = 
metricsToRequest(metrics); + const convertedRequest = metricsToRequest(parsedExportInput); assert.deepStrictEqual( convertedRequest.timeSeries.length, expectedOtelExportConvertedValue.timeSeries.length From e4154af705bf7d4a6e6304e17434610c5b26af46 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 14 Mar 2025 17:03:12 -0400 Subject: [PATCH 273/289] It is actually app_profile not app_profile_id --- src/client-side-metrics/gcp-metrics-handler.ts | 4 ++-- src/client-side-metrics/metrics-handler.ts | 2 +- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index d80a8344d..1855b88e0 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -245,7 +245,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - app_profile_id: data.metricsCollectorData.app_profile_id, + app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, @@ -277,7 +277,7 @@ export class GCPMetricsHandler implements IMetricsHandler { zone: data.metricsCollectorData.zone, }); const commonAttributes = { - app_profile_id: data.metricsCollectorData.app_profile_id, + app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, diff --git a/src/client-side-metrics/metrics-handler.ts b/src/client-side-metrics/metrics-handler.ts index e4cb29758..e69b951b8 100644 --- a/src/client-side-metrics/metrics-handler.ts +++ b/src/client-side-metrics/metrics-handler.ts @@ -26,7 +26,7 @@ type IMetricsCollectorData = { table: string; cluster?: string; zone?: string; - app_profile_id?: string; + app_profile?: string; method: MethodName; client_uid: string; }; diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 6c5e86232..d25538a31 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -115,7 +115,7 @@ export class OperationMetricsCollector { method: this.methodName, client_uid: this.tabularApiSurface.bigtable.clientUid, }, - appProfileId ? {app_profile_id: appProfileId} : {} + appProfileId ? 
{app_profile: appProfileId} : {} ); } From 0ccec5b0dea7c74d886bc03185f23541760ffd86 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:13:32 -0400 Subject: [PATCH 274/289] Add guards that stop the export call --- system-test/gcp-metrics-handler.ts | 150 +++++++++++++++-------------- 1 file changed, 76 insertions(+), 74 deletions(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index a4e0a3160..65470c706 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -29,7 +29,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe('Bigtable/GCPMetricsHandler', () => { +describe.only('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* @@ -48,20 +48,16 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (!exported) { - exported = true; - try { - clearTimeout(timeout); - assert.strictEqual(result.code, 0); - done(); - resultCallback({code: 0}); - } catch (error) { - // Code isn't 0 so report the original error. - done(result); - done(error); - } - } else { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); } }; } @@ -71,7 +67,11 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ): void { const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); + if (!exported) { + super.export(metrics, testResultCallback); + } else { + resultCallback({code: 0}); + } } } @@ -123,29 +123,23 @@ describe('Bigtable/GCPMetricsHandler', () => { resultCallback: (result: ExportResult) => void ) { return (result: ExportResult) => { - if (exportedCount < 2) { - exportedCount++; - try { - assert.strictEqual(result.code, 0); - } catch (error) { - // Code isn't 0 so report the original error. - done(result); - done(error); - } - if (exportedCount === 2) { - // We are expecting two calls to an exporter. One for each - // metrics handler. - clearTimeout(timeout); - done(); - } - resultCallback({code: 0}); - } else { - // After the test is complete the periodic exporter may still be - // running in which case we don't want to do any checks. We just - // want to call the resultCallback so that there are no hanging - // threads. - resultCallback({code: 0}); + exportedCount++; + try { + assert.strictEqual(result.code, 0); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); } + if (exportedCount === 2) { + // We are expecting two calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + // The resultCallback needs to be called to end the exporter operation + // so that the test shuts down in mocha. + resultCallback({code: 0}); }; } class MockExporter extends CloudMonitoringExporter { @@ -153,47 +147,55 @@ describe('Bigtable/GCPMetricsHandler', () => { metrics: ResourceMetrics, resultCallback: (result: ExportResult) => void ): void { - try { - // The code block ensures the metrics are correct. Mainly, the metrics - // shouldn't contain two copies of the data. It should only contain - // one. 
- - // For this test since we are still writing a time series with - // metrics variable we don't want to modify the metrics variable - // to have artificial times because then sending the data to the - // metric service client will fail. Therefore, we must make a copy - // of the metrics and use that. - const parsedExportInput: ResourceMetrics = JSON.parse( - JSON.stringify(metrics) - ); - replaceTimestamps( - parsedExportInput as unknown as typeof expectedOtelExportInput, - [123, 789], - [456, 789] - ); - assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics.length, - expectedOtelExportInput.scopeMetrics[0].metrics.length - ); - for ( - let index = 0; - index < parsedExportInput.scopeMetrics[0].metrics.length; - index++ - ) { - // We need to compare pointwise because mocha truncates to an 8192 character limit. + if (exportedCount < 2) { + try { + // This code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. + // + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); assert.deepStrictEqual( - parsedExportInput.scopeMetrics[0].metrics[index], - expectedOtelExportInput.scopeMetrics[0].metrics[index] + parsedExportInput.scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length ); + for ( + let index = 0; + index < parsedExportInput.scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics[index], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); } - } catch (e) { - // The error needs to be caught so it can be reported to the mocha - // test runner. - done(e); + // The code below uses the test callback to ensure the export was successful. + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. + resultCallback({code: 0}); } - // The code below uses the test callback to ensure the export was successful. 
- const testResultCallback = getTestResultCallback(resultCallback); - super.export(metrics, testResultCallback); } } From 9f94b9e25ae749c87c08f71ead3098216914a495 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:16:55 -0400 Subject: [PATCH 275/289] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 65470c706..4097d3216 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -29,7 +29,7 @@ import * as assert from 'assert'; import {expectedOtelExportInput} from '../test-common/expected-otel-export-input'; import {replaceTimestamps} from '../test-common/replace-timestamps'; -describe.only('Bigtable/GCPMetricsHandler', () => { +describe('Bigtable/GCPMetricsHandler', () => { it('Should export a value to the GCPMetricsHandler', done => { (async () => { /* From 7e76c39953de2e81460d72a0ec0b03675f1aaafb Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:44:21 -0400 Subject: [PATCH 276/289] Add a new test for ten metrics handlers --- system-test/gcp-metrics-handler.ts | 127 +++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 4097d3216..b159537dc 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -233,4 +233,131 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it.only('Should export a value to ten GCPMetricsHandlers', done => { + // This test ensures that when we create two GCPMetricsHandlers much like + // what we would be doing when calling readRows on two separate tables that + // the data doesn't store duplicates in the same place and export twice as + // much data as it should. + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exportedCount = 0; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + exportedCount++; + try { + assert.strictEqual(result.code, 0); + } catch (error) { + // Code isn't 0 so report the original error. + done(result); + done(error); + } + if (exportedCount === 10) { + // We are expecting ten calls to an exporter. One for each + // metrics handler. + clearTimeout(timeout); + done(); + } + // The resultCallback needs to be called to end the exporter operation + // so that the test shuts down in mocha. + resultCallback({code: 0}); + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + if (exportedCount < 10) { + try { + // This code block ensures the metrics are correct. Mainly, the metrics + // shouldn't contain two copies of the data. It should only contain + // one. 
+ // + // For this test since we are still writing a time series with + // metrics variable we don't want to modify the metrics variable + // to have artificial times because then sending the data to the + // metric service client will fail. Therefore, we must make a copy + // of the metrics and use that. + const parsedExportInput: ResourceMetrics = JSON.parse( + JSON.stringify(metrics) + ); + replaceTimestamps( + parsedExportInput as unknown as typeof expectedOtelExportInput, + [123, 789], + [456, 789] + ); + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics.length, + expectedOtelExportInput.scopeMetrics[0].metrics.length + ); + for ( + let index = 0; + index < parsedExportInput.scopeMetrics[0].metrics.length; + index++ + ) { + // We need to compare pointwise because mocha truncates to an 8192 character limit. + assert.deepStrictEqual( + parsedExportInput.scopeMetrics[0].metrics[index], + expectedOtelExportInput.scopeMetrics[0].metrics[index] + ); + } + } catch (e) { + // The error needs to be caught so it can be reported to the mocha + // test runner. + done(e); + } + // The code below uses the test callback to ensure the export was successful. + const testResultCallback = getTestResultCallback(resultCallback); + super.export(metrics, testResultCallback); + } else { + // After the test is complete the periodic exporter may still be + // running in which case we don't want to do any checks. We just + // want to call the resultCallback so that there are no hanging + // threads. + resultCallback({code: 0}); + } + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + const handlers = []; + for (let i = 0; i < 10; i++) { + handlers.push(new GCPMetricsHandler(new MockExporter({projectId}))); + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handlers[i].onAttemptComplete(request as OnAttemptCompleteData); + } else { + handlers[i].onOperationComplete(request as OnOperationCompleteData); + } + } + } + })(); + }); }); From bb64fbe7cfc5d9f8d2f1e002ef4c36b65742f0c8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 10:49:16 -0400 Subject: [PATCH 277/289] Remove only --- system-test/gcp-metrics-handler.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index b159537dc..12cd9ef9c 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -233,7 +233,7 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); - it.only('Should export a value to ten GCPMetricsHandlers', done => { + it('Should export a value to ten GCPMetricsHandlers', done => { // This test ensures that when we create two GCPMetricsHandlers much like // what we would be doing when calling readRows on two separate tables that // the data doesn't store duplicates in the same place and export twice as From 27f5bcdbfe71f00dbd049718957892806371d219 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 13:21:42 -0400 Subject: [PATCH 278/289] Do not pass data through the Resource object anymore --- src/client-side-metrics/exporter.ts | 47 ++++++++++--------- .../gcp-metrics-handler.ts | 37 ++++++--------- 
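This patch removes instance, table, cluster, and zone from the OpenTelemetry Resource; only monitored_resource.project_id stays there, and the exporter now reads the per-operation identity from each data point's attributes when it builds the monitored resource for a time series. A rough sketch of that mapping follows, using fixture values and placeholder variable names as an assumption for illustration rather than the exporter code itself.

  // Each data point now carries its own monitored-resource identity.
  const dataPointAttributes = {
    instanceId: 'fakeInstanceId',
    table: 'fakeTableId',
    cluster: 'fake-cluster3',
    zone: 'us-west1-c',
    method: 'Bigtable.ReadRows',
    client_uid: 'fake-uuid',
    status: '0',
    client_name: 'nodejs-bigtable',
  };

  // The exporter combines those attributes with the project id kept on the Resource.
  const resource = {
    type: 'bigtable_client_raw',
    labels: {
      project_id: 'my-project', // taken from monitored_resource.project_id
      instance: dataPointAttributes.instanceId,
      table: dataPointAttributes.table,
      cluster: dataPointAttributes.cluster,
      zone: dataPointAttributes.zone,
    },
  };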
test-common/expected-otel-export-input.ts | 46 ++++++++++++++---- 3 files changed, 76 insertions(+), 54 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index ad985ec90..a7b351fdb 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -81,37 +81,40 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { type WithSyncAttributes = {_syncAttributes: {[index: string]: string}}; const resourcesWithSyncAttributes = exportArgs.resource as unknown as WithSyncAttributes; + const projectId = + resourcesWithSyncAttributes._syncAttributes[ + 'monitored_resource.project_id' + ]; const timeSeriesArray = []; - const resourceLabels = { - cluster: - resourcesWithSyncAttributes._syncAttributes['monitored_resource.cluster'], - instance: - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.instance_id' - ], - project_id: - resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.project_id' - ], - table: - resourcesWithSyncAttributes._syncAttributes['monitored_resource.table'], - zone: resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.zone' - ], - }; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { const value = dataPoint.value; + const resourceLabels = { + cluster: dataPoint.attributes.cluster, + instance: dataPoint.attributes.instanceId, + project_id: projectId, + table: dataPoint.attributes.table, + zone: dataPoint.attributes.zone, + }; + const streaming = dataPoint.attributes.streaming; + const app_profile = dataPoint.attributes.app_profile; const metric = { type: metricName, - labels: dataPoint.attributes, + labels: Object.assign( + { + method: dataPoint.attributes.method, + client_uid: dataPoint.attributes.client_uid, + status: dataPoint.attributes.status, + client_name: dataPoint.attributes.client_name, + }, + streaming ? {streaming} : null, + app_profile ? {app_profile} : null + ), }; const resource = { - type: resourcesWithSyncAttributes._syncAttributes[ - 'monitored_resource.type' - ], + type: 'bigtable_client_raw', labels: resourceLabels, }; const interval = { @@ -169,7 +172,7 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { } } return { - name: `projects/${resourcesWithSyncAttributes._syncAttributes['monitored_resource.project_id']}`, + name: `projects/${projectId}`, timeSeries: timeSeriesArray, }; } diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 1855b88e0..b73347422 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -92,11 +92,13 @@ export class GCPMetricsHandler implements IMetricsHandler { * Creates and registers metric instruments (histograms and counters) for various Bigtable client metrics. * Sets up a MeterProvider and configures a PeriodicExportingMetricReader for exporting metrics to Cloud Monitoring. * - * @param {MonitoredResourceData} data The data that will be used to set up the monitored resource * which will be provided to the exporter in every export call. * */ - private getMetrics(data: MonitoredResourceData): Metrics { + private getMetrics(projectId: string): Metrics { + // The projectId is needed per metrics handler because when the exporter is + // used it provides the project id for the name of the time series exported. + // ie. 
name: `projects/${....['monitored_resource.project_id']}`, if (!this.otelInstruments) { const latencyBuckets = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, @@ -127,12 +129,7 @@ export class GCPMetricsHandler implements IMetricsHandler { views: viewList, resource: new Resources.Resource({ 'service.name': 'Cloud Bigtable Table', - 'monitored_resource.type': 'bigtable_client_raw', - 'monitored_resource.project_id': data.projectId, - 'monitored_resource.instance_id': data.instanceId, - 'monitored_resource.table': data.table, - 'monitored_resource.cluster': data.cluster, - 'monitored_resource.zone': data.zone, + 'monitored_resource.project_id': projectId, }).merge(new ResourceUtil.GcpDetectorSync().detect()), readers: [ // Register the exporter @@ -237,19 +234,17 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnOperationCompleteData} data Data related to the completed operation. */ onOperationComplete(data: OnOperationCompleteData) { - const otelInstruments = this.getMetrics({ - projectId: data.projectId, - instanceId: data.metricsCollectorData.instanceId, - table: data.metricsCollectorData.table, - cluster: data.metricsCollectorData.cluster, - zone: data.metricsCollectorData.zone, - }); + const otelInstruments = this.getMetrics(data.projectId); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.operationLatencies.record(data.operationLatency, { streaming: data.streaming, @@ -269,19 +264,17 @@ export class GCPMetricsHandler implements IMetricsHandler { * @param {OnAttemptCompleteData} data Data related to the completed attempt. 
*/ onAttemptComplete(data: OnAttemptCompleteData) { - const otelInstruments = this.getMetrics({ - projectId: data.projectId, - instanceId: data.metricsCollectorData.instanceId, - table: data.metricsCollectorData.table, - cluster: data.metricsCollectorData.cluster, - zone: data.metricsCollectorData.zone, - }); + const otelInstruments = this.getMetrics(data.projectId); const commonAttributes = { app_profile: data.metricsCollectorData.app_profile, method: data.metricsCollectorData.method, client_uid: data.metricsCollectorData.client_uid, status: data.status, client_name: data.client_name, + instanceId: data.metricsCollectorData.instanceId, + table: data.metricsCollectorData.table, + cluster: data.metricsCollectorData.cluster, + zone: data.metricsCollectorData.zone, }; otelInstruments.attemptLatencies.record(data.attemptLatency, { streaming: data.streaming, diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index 2d7b43401..d9855ed7f 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -743,12 +743,7 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', - 'monitored_resource.instance_id': 'fakeInstanceId', - 'monitored_resource.table': 'fakeTableId', - 'monitored_resource.cluster': 'fake-cluster3', - 'monitored_resource.zone': 'us-west1-c', }, asyncAttributesPending: false, _syncAttributes: { @@ -756,12 +751,7 @@ export const expectedOtelExportInput = { 'telemetry.sdk.language': 'nodejs', 'telemetry.sdk.name': 'opentelemetry', 'telemetry.sdk.version': '1.30.1', - 'monitored_resource.type': 'bigtable_client_raw', 'monitored_resource.project_id': 'my-project', - 'monitored_resource.instance_id': 'fakeInstanceId', - 'monitored_resource.table': 'fakeTableId', - 'monitored_resource.cluster': 'fake-cluster3', - 'monitored_resource.zone': 'us-west1-c', }, _asyncAttributesPromise: {}, }, @@ -799,6 +789,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -851,6 +845,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -881,6 +879,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -925,6 +927,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -959,6 +965,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1011,6 +1021,10 @@ 
export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1041,6 +1055,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1085,6 +1103,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '4', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], @@ -1096,6 +1118,10 @@ export const expectedOtelExportInput = { client_uid: 'fake-uuid', status: '0', client_name: 'nodejs-bigtable', + instanceId: 'fakeInstanceId', + table: 'fakeTableId', + cluster: 'fake-cluster3', + zone: 'us-west1-c', }, startTime: [123, 789], endTime: [456, 789], From d1a292fd3e149dfcb095ace09261c2f83ce2e6f4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 13:51:27 -0400 Subject: [PATCH 279/289] Add a test for writing duplicate points to MH --- system-test/gcp-metrics-handler.ts | 73 ++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/system-test/gcp-metrics-handler.ts b/system-test/gcp-metrics-handler.ts index 12cd9ef9c..f67e5cf39 100644 --- a/system-test/gcp-metrics-handler.ts +++ b/system-test/gcp-metrics-handler.ts @@ -360,4 +360,77 @@ describe('Bigtable/GCPMetricsHandler', () => { } })(); }); + it('Should write two duplicate points inserted into the metrics handler', done => { + (async () => { + /* + We need to create a timeout here because if we don't then mocha shuts down + the test as it is sleeping before the GCPMetricsHandler has a chance to + export the data. + */ + const timeout = setTimeout(() => {}, 120000); + /* + The exporter is called every x seconds, but we only want to test the value + it receives once. Since done cannot be called multiple times in mocha, + exported variable ensures we only test the value export receives one time. + */ + let exported = false; + function getTestResultCallback( + resultCallback: (result: ExportResult) => void + ) { + return (result: ExportResult) => { + exported = true; + try { + clearTimeout(timeout); + assert.strictEqual(result.code, 0); + done(); + resultCallback({code: 0}); + } catch (error) { + // Code isn't 0 so report the original error. 
+ done(result); + done(error); + } + }; + } + class MockExporter extends CloudMonitoringExporter { + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void + ): void { + const testResultCallback = getTestResultCallback(resultCallback); + if (!exported) { + super.export(metrics, testResultCallback); + } else { + resultCallback({code: 0}); + } + } + } + + const bigtable = new Bigtable(); + const projectId: string = await new Promise((resolve, reject) => { + bigtable.getProjectId_((err, projectId) => { + if (err) { + reject(err); + } else { + resolve(projectId as string); + } + }); + }); + const handler = new GCPMetricsHandler(new MockExporter({projectId})); + const transformedRequestsHandled = JSON.parse( + JSON.stringify(expectedRequestsHandled).replace( + /my-project/g, + projectId + ) + ); + for (let i = 0; i < 2; i++) { + for (const request of transformedRequestsHandled) { + if (request.attemptLatency) { + handler.onAttemptComplete(request as OnAttemptCompleteData); + } else { + handler.onOperationComplete(request as OnOperationCompleteData); + } + } + } + })(); + }); }); From 23a4d397a59b8cf512e9c9331a3793cd0fb8227e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 14:02:56 -0400 Subject: [PATCH 280/289] Eliminate interface --- .../gcp-metrics-handler.ts | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index b73347422..2c250aa1a 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -43,26 +43,6 @@ interface Metrics { clientBlockingLatencies: typeof Histogram; } -/** - * Represents the data associated with a monitored resource in Google Cloud Monitoring. - * - * This interface defines the structure of data that is used to identify and - * describe a specific resource being monitored, such as a Bigtable instance, - * cluster, or table. It is used to construct the `resource` part of a - * `TimeSeries` object in the Cloud Monitoring API. - * - * When an open telemetry instrument is created in the GCPMetricsHandler, all - * recordings to that instrument are expected to have the same - * MonitoredResourceData properties. - */ -interface MonitoredResourceData { - projectId: string; - instanceId: string; - table: string; - cluster?: string; - zone?: string; -} - /** * A metrics handler implementation that uses OpenTelemetry to export metrics to Google Cloud Monitoring. 
* This handler records metrics such as operation latency, attempt latency, retry count, and more, From 55dbd8f59aa60945dfc9c9cf41e35360aa27d78e Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Mon, 17 Mar 2025 14:32:28 -0400 Subject: [PATCH 281/289] Set connectivity error count to 1 (not increment) --- src/client-side-metrics/operation-metrics-collector.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index d25538a31..5c5c2ef3d 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -271,7 +271,7 @@ export class OperationMetricsCollector { : parseInt(matchedDuration[1]); } } else { - this.connectivityErrorCount++; + this.connectivityErrorCount = 1; } } From 3b4f54ce19abce969653cbc3660a9e79a5c66ca9 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Thu, 20 Mar 2025 17:30:33 -0400 Subject: [PATCH 282/289] Latency improvement serverTime and connectivityErrorCount should only be read once. --- .../operation-metrics-collector.ts | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index 5c5c2ef3d..eabb52e80 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -253,25 +253,28 @@ export class OperationMetricsCollector { internalRepr: Map; options: {}; }) { - const mappedEntries = new Map( - Array.from(metadata.internalRepr.entries(), ([key, value]) => [ - key, - value.toString(), - ]) - ); - const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; - const SERVER_TIMING_KEY = 'server-timing'; - const durationValues = mappedEntries.get(SERVER_TIMING_KEY); - const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); - if (matchedDuration && matchedDuration[1]) { - if (!this.serverTimeRead) { - this.serverTimeRead = true; - this.serverTime = isNaN(parseInt(matchedDuration[1])) - ? null - : parseInt(matchedDuration[1]); + if (!this.serverTimeRead && this.connectivityErrorCount < 1) { + // Check serverTimeRead, connectivityErrorCount here to reduce latency. + const mappedEntries = new Map( + Array.from(metadata.internalRepr.entries(), ([key, value]) => [ + key, + value.toString(), + ]) + ); + const SERVER_TIMING_REGEX = /.*gfet4t7;\s*dur=(\d+\.?\d*).*/; + const SERVER_TIMING_KEY = 'server-timing'; + const durationValues = mappedEntries.get(SERVER_TIMING_KEY); + const matchedDuration = durationValues?.match(SERVER_TIMING_REGEX); + if (matchedDuration && matchedDuration[1]) { + if (!this.serverTimeRead) { + this.serverTimeRead = true; + this.serverTime = isNaN(parseInt(matchedDuration[1])) + ? 
null + : parseInt(matchedDuration[1]); + } + } else { + this.connectivityErrorCount = 1; } - } else { - this.connectivityErrorCount = 1; } } From 1566d1fcd377457f7920a4d62ff44b14b9cd12d8 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 10:06:28 -0400 Subject: [PATCH 283/289] Add latency optimizations and update fixtures --- .../operation-metrics-collector.ts | 42 +++++++++++-------- test-common/metrics-handler-fixture.ts | 6 +-- .../metrics-collector/typical-method-call.txt | 9 ++-- 3 files changed, 32 insertions(+), 25 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index eabb52e80..e673ed3bd 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -196,7 +196,8 @@ export class OperationMetricsCollector { onResponse(projectId: string) { if ( this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET && + !this.firstResponseLatency ) { this.state = MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; @@ -285,22 +286,29 @@ export class OperationMetricsCollector { onStatusMetadataReceived(status: { metadata: {internalRepr: Map; options: {}}; }) { - const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; - const mappedValue = status.metadata.internalRepr.get( - INSTANCE_INFORMATION_KEY - ) as Buffer[]; - const decodedValue = ResponseParams.decode( - mappedValue[0], - mappedValue[0].length - ); - if (decodedValue && (decodedValue as unknown as {zoneId: string}).zoneId) { - this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; - } - if ( - decodedValue && - (decodedValue as unknown as {clusterId: string}).clusterId - ) { - this.cluster = (decodedValue as unknown as {clusterId: string}).clusterId; + if (!this.zone || !this.cluster) { + const INSTANCE_INFORMATION_KEY = 'x-goog-ext-425905942-bin'; + const mappedValue = status.metadata.internalRepr.get( + INSTANCE_INFORMATION_KEY + ) as Buffer[]; + const decodedValue = ResponseParams.decode( + mappedValue[0], + mappedValue[0].length + ); + if ( + decodedValue && + (decodedValue as unknown as {zoneId: string}).zoneId + ) { + this.zone = (decodedValue as unknown as {zoneId: string}).zoneId; + } + if ( + decodedValue && + (decodedValue as unknown as {clusterId: string}).clusterId + ) { + this.cluster = ( + decodedValue as unknown as {clusterId: string} + ).clusterId; + } } } } diff --git a/test-common/metrics-handler-fixture.ts b/test-common/metrics-handler-fixture.ts index 5e947677e..0aa8ef26d 100644 --- a/test-common/metrics-handler-fixture.ts +++ b/test-common/metrics-handler-fixture.ts @@ -31,7 +31,7 @@ export const expectedRequestsHandled = [ projectId: 'my-project', }, { - attemptLatency: 2000, + attemptLatency: 1000, serverLatency: 103, connectivityErrorCount: 0, streaming: 'true', @@ -60,8 +60,8 @@ export const expectedRequestsHandled = [ }, client_name: 'nodejs-bigtable', projectId: 'my-project', - operationLatency: 7000, + operationLatency: 6000, retryCount: 1, - firstResponseLatency: 5000, + firstResponseLatency: 2000, }, ]; diff --git a/test/metrics-collector/typical-method-call.txt b/test/metrics-collector/typical-method-call.txt index bc6de7ad7..a47124ab3 100644 --- a/test/metrics-collector/typical-method-call.txt +++ b/test/metrics-collector/typical-method-call.txt @@ -17,14 +17,13 @@ getDate call returns 5000 
ms 10. Client receives status information. 11. Client receives metadata. 12. Client receives third row. -getDate call returns 6000 ms 13. Client receives metadata. 14. Client receives fourth row. 15. User reads row 1 16. Stream ends, operation completes -getDate call returns 7000 ms +getDate call returns 6000 ms Recording parameters for onAttemptComplete: -{"attemptLatency":2000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} -getDate call returns 8000 ms +{"attemptLatency":1000,"serverLatency":103,"connectivityErrorCount":0,"streaming":"true","status":"0","client_name":"nodejs-bigtable","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"projectId":"my-project"} +getDate call returns 7000 ms Recording parameters for onOperationComplete: -{"status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"client_name":"nodejs-bigtable","projectId":"my-project","operationLatency":7000,"retryCount":1,"firstResponseLatency":5000} +{"status":"0","streaming":"true","metricsCollectorData":{"instanceId":"fakeInstanceId","table":"fakeTableId","cluster":"fake-cluster3","zone":"us-west1-c","method":"Bigtable.ReadRows","client_uid":"fake-uuid"},"client_name":"nodejs-bigtable","projectId":"my-project","operationLatency":6000,"retryCount":1,"firstResponseLatency":2000} From cab2a95adb0d0dceb89fa741648dc7385be4aa40 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 10:28:17 -0400 Subject: [PATCH 284/289] Update fixtures with new latency measurements The optimization means some code gets skipped therefore lower latency in the tests --- test-common/expected-otel-export-input.ts | 68 +++++++++++------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/test-common/expected-otel-export-input.ts b/test-common/expected-otel-export-input.ts index d9855ed7f..bb0795641 100644 --- a/test-common/expected-otel-export-input.ts +++ b/test-common/expected-otel-export-input.ts @@ -35,10 +35,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/operation_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', streaming: 'true', }, }, @@ -67,7 +67,7 @@ export const expectedOtelExportConvertedValue = { value: { distributionValue: { count: '1', - mean: 7000, + mean: 6000, bucketOptions: { explicitBuckets: { bounds: [ @@ -131,10 +131,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '4', client_uid: 'fake-uuid', + status: '4', + client_name: 'nodejs-bigtable', streaming: 'true', }, }, @@ -227,10 +227,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/attempt_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 
'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', streaming: 'true', }, }, @@ -259,7 +259,7 @@ export const expectedOtelExportConvertedValue = { value: { distributionValue: { count: '1', - mean: 2000, + mean: 1000, bucketOptions: { explicitBuckets: { bounds: [ @@ -299,7 +299,6 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', - '0', '1', '0', '0', @@ -312,6 +311,7 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '0', ], }, }, @@ -323,10 +323,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/retry_count', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', }, }, resource: { @@ -360,10 +360,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/first_response_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', }, }, resource: { @@ -391,7 +391,7 @@ export const expectedOtelExportConvertedValue = { value: { distributionValue: { count: '1', - mean: 5000, + mean: 2000, bucketOptions: { explicitBuckets: { bounds: [ @@ -432,7 +432,6 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', - '0', '1', '0', '0', @@ -444,6 +443,7 @@ export const expectedOtelExportConvertedValue = { '0', '0', '0', + '0', ], }, }, @@ -455,10 +455,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '4', client_uid: 'fake-uuid', + status: '4', + client_name: 'nodejs-bigtable', streaming: 'true', }, }, @@ -551,10 +551,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/server_latencies', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', streaming: 'true', }, }, @@ -647,10 +647,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '4', client_uid: 'fake-uuid', + status: '4', + client_name: 'nodejs-bigtable', }, }, resource: { @@ -684,10 +684,10 @@ export const expectedOtelExportConvertedValue = { metric: { type: 'bigtable.googleapis.com/internal/client/connectivity_error_count', labels: { - client_name: 'nodejs-bigtable', method: 'Bigtable.ReadRows', - status: '0', client_uid: 'fake-uuid', + status: '0', + client_name: 'nodejs-bigtable', }, }, resource: { @@ -797,9 +797,9 @@ export const expectedOtelExportInput = { startTime: [123, 789], endTime: [456, 789], value: { - min: 7000, - max: 7000, - sum: 7000, + min: 6000, + max: 6000, + sum: 6000, buckets: { boundaries: [ 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, @@ -887,9 +887,9 @@ export const expectedOtelExportInput = { startTime: [123, 789], endTime: [456, 789], value: { - min: 2000, - max: 2000, - sum: 2000, + min: 1000, + max: 1000, + sum: 1000, buckets: { boundaries: [ 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, @@ -899,7 +899,7 @@ export const expectedOtelExportInput = { ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], }, @@ -973,9 +973,9 @@ export const expectedOtelExportInput = { startTime: [123, 789], endTime: [456, 789], value: { - min: 5000, - max: 5000, - sum: 5000, + min: 2000, + max: 2000, + sum: 2000, buckets: { boundaries: [ 0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, @@ -985,7 +985,7 @@ export const expectedOtelExportInput = { ], counts: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], }, From 29c78cc74656e897b00b7fa00c0b234148bfb7fa Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 13:53:32 -0400 Subject: [PATCH 285/289] Check for first response latency first --- .../operation-metrics-collector.ts | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/client-side-metrics/operation-metrics-collector.ts b/src/client-side-metrics/operation-metrics-collector.ts index e673ed3bd..920bcb347 100644 --- a/src/client-side-metrics/operation-metrics-collector.ts +++ b/src/client-side-metrics/operation-metrics-collector.ts @@ -194,17 +194,19 @@ export class OperationMetricsCollector { * Called when the first response is received. Records first response latencies. */ onResponse(projectId: string) { - if ( - this.state === - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET && - !this.firstResponseLatency - ) { - this.state = - MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; - const endTime = new Date(); - if (projectId && this.operationStartTime) { - this.firstResponseLatency = - endTime.getTime() - this.operationStartTime.getTime(); + if (!this.firstResponseLatency) { + // Check firstResponseLatency first to improve latency for calls with many rows + if ( + this.state === + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_NO_ROWS_YET + ) { + this.state = + MetricsCollectorState.OPERATION_STARTED_ATTEMPT_IN_PROGRESS_SOME_ROWS_RECEIVED; + const endTime = new Date(); + if (projectId && this.operationStartTime) { + this.firstResponseLatency = + endTime.getTime() - this.operationStartTime.getTime(); + } } } } From 28c81e0c8cf8ce354234035a17a6e67a5a32e5d5 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 16:45:28 -0400 Subject: [PATCH 286/289] Break metricsToRequest down into smaller fns --- src/client-side-metrics/exporter.ts | 133 +++++++++++++++++----------- 1 file changed, 81 insertions(+), 52 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index a7b351fdb..6024a3e2e 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -14,6 +14,7 @@ import {MetricExporter} from '@google-cloud/opentelemetry-cloud-monitoring-exporter'; import { + DataPoint, ExponentialHistogram, Histogram, ResourceMetrics, @@ -37,9 +38,82 @@ export interface ExportResult { * */ function isCounterValue( - value: number | Histogram | ExponentialHistogram -): value is number { - return typeof value === 'number'; + dataPoint: + | DataPoint + | DataPoint + | DataPoint +): dataPoint is DataPoint { + return typeof dataPoint.value === 'number'; +} + +function getInterval( + dataPoint: + | DataPoint + | DataPoint + | DataPoint +) { + return { + endTime: { + seconds: dataPoint.endTime[0], + }, + startTime: { + seconds: 
dataPoint.startTime[0], + }, + }; +} + +function getDistributionPoints( + dataPoint: DataPoint | DataPoint +) { + const value = dataPoint.value; + return [ + { + interval: getInterval(dataPoint), + value: { + distributionValue: { + count: String(value.count), + mean: value.count && value.sum ? value.sum / value.count : 0, + bucketOptions: { + explicitBuckets: { + bounds: (value as Histogram).buckets.boundaries, + }, + }, + bucketCounts: (value as Histogram).buckets.counts.map(String), + }, + }, + }, + ]; +} + +function getIntegerPoints(dataPoint: DataPoint) { + return [ + { + interval: getInterval(dataPoint), + value: { + int64Value: dataPoint.value, + }, + }, + ]; +} + +function getResource( + projectId: string, + dataPoint: + | DataPoint + | DataPoint + | DataPoint +) { + const resourceLabels = { + cluster: dataPoint.attributes.cluster, + instance: dataPoint.attributes.instanceId, + project_id: projectId, + table: dataPoint.attributes.table, + zone: dataPoint.attributes.zone, + }; + return { + type: 'bigtable_client_raw', + labels: resourceLabels, + }; } /** @@ -90,14 +164,6 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { for (const scopeMetric of scopeMetrics.metrics) { const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { - const value = dataPoint.value; - const resourceLabels = { - cluster: dataPoint.attributes.cluster, - instance: dataPoint.attributes.instanceId, - project_id: projectId, - table: dataPoint.attributes.table, - zone: dataPoint.attributes.zone, - }; const streaming = dataPoint.attributes.streaming; const app_profile = dataPoint.attributes.app_profile; const metric = { @@ -113,31 +179,13 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { app_profile ? {app_profile} : null ), }; - const resource = { - type: 'bigtable_client_raw', - labels: resourceLabels, - }; - const interval = { - endTime: { - seconds: dataPoint.endTime[0], - }, - startTime: { - seconds: dataPoint.startTime[0], - }, - }; - if (isCounterValue(value)) { + const resource = getResource(projectId, dataPoint); + if (isCounterValue(dataPoint)) { timeSeriesArray.push({ metric, resource, valueType: 'INT64', - points: [ - { - interval, - value: { - int64Value: dataPoint.value, - }, - }, - ], + points: getIntegerPoints(dataPoint), }); } else { timeSeriesArray.push({ @@ -145,26 +193,7 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { resource, metricKind: 'CUMULATIVE', valueType: 'DISTRIBUTION', - points: [ - { - interval, - value: { - distributionValue: { - count: String(value.count), - mean: - value.count && value.sum ? 
value.sum / value.count : 0, - bucketOptions: { - explicitBuckets: { - bounds: (value as Histogram).buckets.boundaries, - }, - }, - bucketCounts: (value as Histogram).buckets.counts.map( - String - ), - }, - }, - }, - ], + points: getDistributionPoints(dataPoint), unit: scopeMetric.descriptor.unit || 'ms', // Default to 'ms' if no unit is specified }); } From 84fcc86a2668841c4a6cf701c82f2acb18a566e4 Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 17:19:41 -0400 Subject: [PATCH 287/289] Refactor the metricToRequest function --- src/client-side-metrics/exporter.ts | 73 ++++++++++++++++++++++------- 1 file changed, 57 insertions(+), 16 deletions(-) diff --git a/src/client-side-metrics/exporter.ts b/src/client-side-metrics/exporter.ts index 6024a3e2e..2b7172026 100644 --- a/src/client-side-metrics/exporter.ts +++ b/src/client-side-metrics/exporter.ts @@ -62,6 +62,14 @@ function getInterval( }; } +/** + * This function gets the timeseries data points for metrics that are + * represented as distributions on the backend. These data points are part of a + * timeseries object that is recorded to Google Cloud Monitoring. + * + * @param {DataPoint} dataPoint The datapoint containing the data we wish to + * send to the Google Cloud Monitoring dashboard + */ function getDistributionPoints( dataPoint: DataPoint | DataPoint ) { @@ -85,6 +93,14 @@ function getDistributionPoints( ]; } +/** + * This function gets the timeseries data points for metrics that are + * represented as integers on the backend. These data points are part of a + * timeseries object that is recorded to Google Cloud Monitoring. + * + * @param {DataPoint} dataPoint The datapoint containing the data we wish to + * send to the Google Cloud Monitoring dashboard + */ function getIntegerPoints(dataPoint: DataPoint) { return [ { @@ -96,6 +112,14 @@ function getIntegerPoints(dataPoint: DataPoint) { ]; } +/** + * getResource gets the resource object which is used for building the timeseries + * object that will be sent to Google Cloud Monitoring dashboard + * + * @param {string} metricName The backend name of the metric that we want to record + * @param {DataPoint} dataPoint The datapoint containing the data we wish to + * send to the Google Cloud Monitoring dashboard + */ function getResource( projectId: string, dataPoint: @@ -116,6 +140,38 @@ function getResource( }; } +/** + * getMetric gets the metric object which is used for building the timeseries + * object that will be sent to Google Cloud Monitoring dashboard + * + * @param {string} metricName The backend name of the metric that we want to record + * @param {DataPoint} dataPoint The datapoint containing the data we wish to + * send to the Google Cloud Monitoring dashboard + */ +function getMetric( + metricName: string, + dataPoint: + | DataPoint + | DataPoint + | DataPoint +) { + const streaming = dataPoint.attributes.streaming; + const app_profile = dataPoint.attributes.app_profile; + return { + type: metricName, + labels: Object.assign( + { + method: dataPoint.attributes.method, + client_uid: dataPoint.attributes.client_uid, + status: dataPoint.attributes.status, + client_name: dataPoint.attributes.client_name, + }, + streaming ? {streaming} : null, + app_profile ? {app_profile} : null + ), + }; +} + /** * Converts OpenTelemetry metrics data into a format suitable for the Google Cloud * Monitoring API's `createTimeSeries` method. 
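For orientation before the hunk below, here is a rough sketch of the single time series entry that the decomposed helpers (getMetric, getResource, and getIntegerPoints or getDistributionPoints) are expected to assemble for one distribution-valued data point. The label and resource values are placeholders borrowed from the test fixtures earlier in this series, the bucket arrays are truncated, and the object is written as a plain literal instead of calling the helpers, so treat it as an illustration rather than repository code.

// Illustrative sketch only: the approximate shape metricsToRequest pushes onto
// timeSeriesArray for a histogram point, with placeholder fixture values.
const exampleTimeSeriesEntry = {
  metric: {
    type: 'bigtable.googleapis.com/internal/client/attempt_latencies',
    labels: {
      method: 'Bigtable.ReadRows',
      client_uid: 'fake-uuid',
      status: '0',
      client_name: 'nodejs-bigtable',
      streaming: 'true',
    },
  },
  resource: {
    type: 'bigtable_client_raw',
    labels: {
      cluster: 'fake-cluster3',
      instance: 'fakeInstanceId',
      project_id: 'my-project',
      table: 'fakeTableId',
      zone: 'us-west1-c',
    },
  },
  metricKind: 'CUMULATIVE',
  valueType: 'DISTRIBUTION',
  points: [
    {
      interval: {startTime: {seconds: 123}, endTime: {seconds: 456}},
      value: {
        distributionValue: {
          count: '1',
          mean: 1000, // value.sum / value.count when both are non-zero
          // Bounds and counts truncated here; counts has one entry per bucket.
          bucketOptions: {explicitBuckets: {bounds: [0, 1, 2, 3, 4, 5]}},
          bucketCounts: ['0', '0', '0', '0', '0', '0', '1'],
        },
      },
    },
  ],
  unit: 'ms',
};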
@@ -162,23 +218,8 @@ export function metricsToRequest(exportArgs: ResourceMetrics) { const timeSeriesArray = []; for (const scopeMetrics of exportArgs.scopeMetrics) { for (const scopeMetric of scopeMetrics.metrics) { - const metricName = scopeMetric.descriptor.name; for (const dataPoint of scopeMetric.dataPoints) { - const streaming = dataPoint.attributes.streaming; - const app_profile = dataPoint.attributes.app_profile; - const metric = { - type: metricName, - labels: Object.assign( - { - method: dataPoint.attributes.method, - client_uid: dataPoint.attributes.client_uid, - status: dataPoint.attributes.status, - client_name: dataPoint.attributes.client_name, - }, - streaming ? {streaming} : null, - app_profile ? {app_profile} : null - ), - }; + const metric = getMetric(scopeMetric.descriptor.name, dataPoint); const resource = getResource(projectId, dataPoint); if (isCounterValue(dataPoint)) { timeSeriesArray.push({ From 20220223100bcfe10538d91b0a601a6d1d36fb8a Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 17:24:03 -0400 Subject: [PATCH 288/289] Rename interface --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 2c250aa1a..9c26d7800 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -32,7 +32,7 @@ const { * A collection of OpenTelemetry metric instruments used to record * Bigtable client-side metrics. */ -interface Metrics { +interface MetricsInstruments { operationLatencies: typeof Histogram; attemptLatencies: typeof Histogram; retryCount: typeof Histogram; @@ -49,7 +49,7 @@ interface Metrics { * associating them with relevant attributes for detailed analysis in Cloud Monitoring. */ export class GCPMetricsHandler implements IMetricsHandler { - private otelInstruments?: Metrics; + private otelInstruments?: MetricsInstruments; private exporter: PushMetricExporter; /** @@ -75,7 +75,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private getMetrics(projectId: string): Metrics { + private getMetrics(projectId: string): MetricsInstruments { // The projectId is needed per metrics handler because when the exporter is // used it provides the project id for the name of the time series exported. // ie. name: `projects/${....['monitored_resource.project_id']}`, From d7b37dbadfc1138caa534fb8a84dfdba7196b28f Mon Sep 17 00:00:00 2001 From: Daniel Bruce Date: Fri, 21 Mar 2025 17:25:14 -0400 Subject: [PATCH 289/289] Rename to getInstruments --- src/client-side-metrics/gcp-metrics-handler.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client-side-metrics/gcp-metrics-handler.ts b/src/client-side-metrics/gcp-metrics-handler.ts index 9c26d7800..4d7bfc532 100644 --- a/src/client-side-metrics/gcp-metrics-handler.ts +++ b/src/client-side-metrics/gcp-metrics-handler.ts @@ -75,7 +75,7 @@ export class GCPMetricsHandler implements IMetricsHandler { * which will be provided to the exporter in every export call. * */ - private getMetrics(projectId: string): MetricsInstruments { + private getInstruments(projectId: string): MetricsInstruments { // The projectId is needed per metrics handler because when the exporter is // used it provides the project id for the name of the time series exported. // ie. 
name: `projects/${....['monitored_resource.project_id']}`,
@@ -214,7 +214,7 @@ export class GCPMetricsHandler implements IMetricsHandler {
    * @param {OnOperationCompleteData} data Data related to the completed operation.
    */
   onOperationComplete(data: OnOperationCompleteData) {
-    const otelInstruments = this.getMetrics(data.projectId);
+    const otelInstruments = this.getInstruments(data.projectId);
     const commonAttributes = {
       app_profile: data.metricsCollectorData.app_profile,
       method: data.metricsCollectorData.method,
@@ -244,7 +244,7 @@ export class GCPMetricsHandler implements IMetricsHandler {
    * @param {OnAttemptCompleteData} data Data related to the completed attempt.
    */
   onAttemptComplete(data: OnAttemptCompleteData) {
-    const otelInstruments = this.getMetrics(data.projectId);
+    const otelInstruments = this.getInstruments(data.projectId);
     const commonAttributes = {
       app_profile: data.metricsCollectorData.app_profile,
       method: data.metricsCollectorData.method,
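
As a closing illustration, a minimal usage sketch of the renamed pieces. The import paths and the OnAttemptCompleteData export location are assumptions, not something these patches spell out, and the recorded values are reused from the metrics-handler fixture; this is a sketch of how a caller might drive the handler, not repository code.

// Minimal sketch, assuming these import paths exist in the package layout.
import {GCPMetricsHandler} from '../src/client-side-metrics/gcp-metrics-handler';
import {CloudMonitoringExporter} from '../src/client-side-metrics/exporter';
import {OnAttemptCompleteData} from '../src/client-side-metrics/metrics-handler';

// The handler is constructed with a push exporter, mirroring the system test's
// `new GCPMetricsHandler(new MockExporter({projectId}))` pattern.
const handler = new GCPMetricsHandler(
  new CloudMonitoringExporter({projectId: 'my-project'})
);

// Based on the private otelInstruments field, the instruments appear to be
// created once per handler by getInstruments(projectId) and then reused, so
// repeated recordings avoid re-registering OpenTelemetry instruments.
const attempt = {
  attemptLatency: 1000,
  serverLatency: 103,
  connectivityErrorCount: 0,
  streaming: 'true',
  status: '0',
  client_name: 'nodejs-bigtable',
  metricsCollectorData: {
    instanceId: 'fakeInstanceId',
    table: 'fakeTableId',
    cluster: 'fake-cluster3',
    zone: 'us-west1-c',
    method: 'Bigtable.ReadRows',
    client_uid: 'fake-uuid',
  },
  projectId: 'my-project',
};
handler.onAttemptComplete(attempt as OnAttemptCompleteData);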