diff --git a/src/backup.ts b/src/backup.ts
index 75ca35fc9..650fbae6c 100644
--- a/src/backup.ts
+++ b/src/backup.ts
@@ -37,6 +37,15 @@ import {
 } from './cluster';
 import {CallOptions, LROperation, Operation, ServiceError} from 'google-gax';
 import {Instance} from './instance';
+import {ClusterUtils} from './utils/cluster';
+
+export type CopyBackupResponse = GenericBackupPromise<Operation>;
+export type CopyBackupCallback = GenericBackupCallback<Operation>;
+export interface CopyBackupConfig extends ModifiableBackupFields {
+  cluster: Cluster;
+  gaxOptions?: CallOptions;
+  id: string;
+}
 
 type IEmpty = google.protobuf.IEmpty;
 export type IBackup = google.bigtable.admin.v2.IBackup;
@@ -59,6 +68,7 @@ export interface GenericBackupCallback<T> {
     apiResponse?: T | null
   ): void;
 }
+export type GenericBackupPromise<T> = [Backup, T];
 
 export type DeleteBackupCallback = (
   err: ServiceError | null,
@@ -243,6 +253,50 @@ Please use the format 'my-backup' or '${cluster.name}/backups/my-backup'.`);
     });
   }
 
+  /**
+   * When this backup object represents a backup that has already been created,
+   * copy will copy this created backup to the location and with the settings
+   * specified by the config parameter. After running this function the original
+   * backup will exist as well as a second backup matching the parameters given
+   * by the config argument.
+   *
+   * @param {CopyBackupConfig} [config] The config that specifies all of the
+   * information about the destination backup which is the new backup that gets
+   * created as a result of calling copy.
+   * @param {CopyBackupCallback} [callback] The callback function that passes an
+   * error or results back to the user.
+   */
+  copy(config: CopyBackupConfig, callback: CopyBackupCallback): void;
+  copy(config: CopyBackupConfig): Promise<CopyBackupResponse>;
+  copy(
+    config: CopyBackupConfig,
+    callback?: CopyBackupCallback
+  ): void | Promise<CopyBackupResponse> {
+    const reqOpts = {
+      parent: config.cluster.name,
+      backupId: config.id,
+      sourceBackup: `${this.cluster.name}/backups/${this.id}`,
+      expireTime: config.expireTime,
+    };
+    ClusterUtils.formatBackupExpiryTime(reqOpts);
+    this.bigtable.request(
+      {
+        client: 'BigtableTableAdminClient',
+        method: 'copyBackup',
+        reqOpts,
+        gaxOpts: config.gaxOptions,
+      },
+      (err, ...args) => {
+        if (err) {
+          callback!(err, undefined, ...args);
+          return;
+        }
+        // Second argument is a backup for the new backup id
+        callback!(null, config.cluster.backup(config.id), ...args);
+      }
+    );
+  }
+
   create(config: CreateBackupConfig, callback?: CreateBackupCallback): void;
   create(config: CreateBackupConfig): Promise<CreateBackupResponse>;
   /**
diff --git a/src/cluster.ts b/src/cluster.ts
index 22d0c8697..3fb99d806 100644
--- a/src/cluster.ts
+++ b/src/cluster.ts
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-import {PreciseDate} from '@google-cloud/precise-date';
 import {promisifyAll} from '@google-cloud/promisify';
 import {CallOptions, LROperation, Operation, ServiceError} from 'google-gax';
 
@@ -314,11 +313,7 @@ Please use the format 'my-cluster' or '${instance.name}/clusters/my-cluster'.`);
       },
     };
 
-    if (reqOpts.backup.expireTime instanceof Date) {
-      reqOpts.backup.expireTime = new PreciseDate(
-        reqOpts.backup.expireTime
-      ).toStruct();
-    }
+    ClusterUtils.formatBackupExpiryTime(reqOpts.backup);
 
     delete reqOpts.backup.table;
     delete reqOpts.backup.gaxOptions;
diff --git a/src/utils/cluster.ts b/src/utils/cluster.ts
index f5f3594bd..3f66a5e31 100644
--- a/src/utils/cluster.ts
+++ b/src/utils/cluster.ts
@@ -20,6 +20,8 @@ import {
   SetClusterMetadataOptions,
 } from '../cluster';
 import {google} from '../../protos/protos';
+import type {ModifiableBackupFields} from '../backup';
+import {PreciseDate} from '@google-cloud/precise-date';
 
 export class ClusterUtils {
   static noConfigError =
@@ -163,4 +165,10 @@ export class ClusterUtils {
       updateMask: {paths: this.getUpdateMask(metadata)},
     };
   }
+
+  static formatBackupExpiryTime(backup: ModifiableBackupFields) {
+    if (backup.expireTime instanceof Date) {
+      backup.expireTime = new PreciseDate(backup.expireTime).toStruct();
+    }
+  }
 }
diff --git a/system-test/bigtable.ts b/system-test/bigtable.ts
index 8ace09947..10e70e6bc 100644
--- a/system-test/bigtable.ts
+++ b/system-test/bigtable.ts
@@ -18,8 +18,15 @@ import * as assert from 'assert';
 import {beforeEach, afterEach, describe, it, before, after} from 'mocha';
 import Q from 'p-queue';
 
-import {Backup, Bigtable, Instance} from '../src';
+import {
+  Backup,
+  BackupTimestamp,
+  Bigtable,
+  Instance,
+  InstanceOptions,
+} from '../src';
 import {AppProfile} from '../src/app-profile.js';
+import {CopyBackupConfig} from '../src/backup.js';
 import {Cluster} from '../src/cluster.js';
 import {Family} from '../src/family.js';
 import {Row} from '../src/row.js';
@@ -1414,6 +1421,296 @@ describe('Bigtable', () => {
 
       Object.keys(policy).forEach(key => assert(key in updatedPolicy));
     });
+    describe('copying backups', () => {
+      // The server requires the copy backup time to be sufficiently ahead of
+      // the create backup time to avoid an error.
+      // Set it to 308 hours ahead
+      const sourceExpireTimeMilliseconds =
+        PreciseDate.now() + (8 + 300) * 60 * 60 * 1000;
+      const sourceExpireTime = new PreciseDate(sourceExpireTimeMilliseconds);
+      // 608 hours ahead of now, 300 hours ahead of sourceExpireTimeMilliseconds
+      const copyExpireTimeMilliseconds =
+        PreciseDate.now() + (8 + 600) * 60 * 60 * 1000;
+      const copyExpireTime = new PreciseDate(copyExpireTimeMilliseconds);
+
+      /*
+        This function checks that when a backup is copied using the provided
+        config that a new backup is created on the instance.
+       */
+      async function testCopyBackup(
+        backup: Backup,
+        config: CopyBackupConfig,
+        instance: Instance
+      ) {
+        // Get a list of backup ids before the copy
+        const [backupsBeforeCopy] = await instance.getBackups();
+        const backupIdsBeforeCopy = backupsBeforeCopy.map(backup => backup.id);
+        // Copy the backup
+        const [newBackup, operation] = await backup.copy(config);
+        try {
+          assert.strictEqual(config.id, newBackup.id);
+          await operation.promise();
+          const id = config.id;
+          const backupPath = `${config.cluster.name}/backups/${id}`;
+          {
+            // Ensure that the backup specified by the config and id match the backup name for the operation returned by the server.
+            // the split/map/join functions replace the project name with the {{projectId}} string
+            assert(operation);
+            assert(operation.metadata);
+            assert.strictEqual(
+              operation.metadata.name
+                .split('/')
+                .map((item, index) => (index === 1 ? '{{projectId}}' : item))
+                .join('/'),
+              backupPath
+                .split('/')
+                .map((item, index) => (index === 1 ? '{{projectId}}' : item))
+                .join('/')
+            );
+          }
+          // Check that there is now one more backup
+          const [backupsAfterCopy] = await instance.getBackups();
+          const newBackups = backupsAfterCopy.filter(
+            backup => !backupIdsBeforeCopy.includes(backup.id)
+          );
+          assert.strictEqual(newBackups.length, 1);
+          const [fetchedNewBackup] = newBackups;
+          // Ensure the fetched backup matches the config
+          assert.strictEqual(fetchedNewBackup.id, id);
+          assert.strictEqual(fetchedNewBackup.name, backupPath);
+          // Delete the copied backup
+        } finally {
+          await config.cluster.backup(newBackup.id).delete();
+        }
+      }
+
+      describe('should create backup of a table and copy it in the same cluster', () => {
+        async function testWithExpiryTimes(
+          sourceTestExpireTime: BackupTimestamp,
+          copyTestExpireTime: BackupTimestamp
+        ) {
+          const [backup, op] = await TABLE.createBackup(generateId('backup'), {
+            expireTime: sourceTestExpireTime,
+          });
+          try {
+            {
+              await op.promise();
+              // Check expiry time for running operation.
+              await backup.getMetadata();
+              assert.deepStrictEqual(backup.expireDate, sourceExpireTime);
+            }
+            await testCopyBackup(
+              backup,
+              {
+                cluster: backup.cluster,
+                id: generateId('backup'),
+                expireTime: copyTestExpireTime,
+              },
+              INSTANCE
+            );
+          } finally {
+            await backup.delete();
+          }
+        }
+        it('should copy to the same cluster with precise date expiry times', async () => {
+          await testWithExpiryTimes(sourceExpireTime, copyExpireTime);
+        });
+        it('should copy to the same cluster with timestamp expiry times', async () => {
+          // Calling toStruct converts times to a timestamp object.
+          // For example: sourceExpireTime.toStruct() = {seconds: 1706659851, nanos: 981000000}
+          await testWithExpiryTimes(
+            sourceExpireTime.toStruct(),
+            copyExpireTime.toStruct()
+          );
+        });
+        it('should copy to the same cluster with date expiry times', async () => {
+          await testWithExpiryTimes(
+            new Date(sourceExpireTimeMilliseconds),
+            new Date(copyExpireTimeMilliseconds)
+          );
+        });
+      });
+      it('should create backup of a table and copy it on another cluster of another instance', async () => {
+        const [backup, op] = await TABLE.createBackup(generateId('backup'), {
+          expireTime: sourceExpireTime,
+        });
+        try {
+          {
+            await op.promise();
+            // Check the expiry time.
+            await backup.getMetadata();
+            assert.deepStrictEqual(backup.expireDate, sourceExpireTime);
+          }
+          // Create another instance
+          const instance = bigtable.instance(generateId('instance'));
+          const destinationClusterId = generateId('cluster');
+          {
+            // Create production instance with given options
+            const instanceOptions: InstanceOptions = {
+              clusters: [
+                {
+                  id: destinationClusterId,
+                  nodes: 3,
+                  location: 'us-central1-f',
+                  storage: 'ssd',
+                },
+              ],
+              labels: {'prod-label': 'prod-label'},
+              type: 'production',
+            };
+            const [, operation] = await instance.create(instanceOptions);
+            await operation.promise();
+          }
+          // Create the copy and test the copied backup
+          await testCopyBackup(
+            backup,
+            {
+              cluster: new Cluster(instance, destinationClusterId),
+              id: generateId('backup'),
+              expireTime: copyExpireTime,
+            },
+            instance
+          );
+          await instance.delete();
+        } finally {
+          await backup.delete();
+        }
+      });
+      it('should create backup of a table and copy it on another cluster of the same instance', async () => {
+        const [backup, op] = await TABLE.createBackup(generateId('backup'), {
+          expireTime: sourceExpireTime,
+        });
+        try {
+          {
+            await op.promise();
+            // Check the expiry time.
+            await backup.getMetadata();
+            assert.deepStrictEqual(backup.expireDate, sourceExpireTime);
+          }
+          const destinationClusterId = generateId('cluster');
+          {
+            // Create destination cluster with given options
+            const [, operation] = await INSTANCE.cluster(
+              destinationClusterId
+            ).create({
+              location: 'us-central1-b',
+              nodes: 3,
+            });
+            await operation.promise();
+          }
+          // Create the copy and test the copied backup
+          await testCopyBackup(
+            backup,
+            {
+              cluster: new Cluster(INSTANCE, destinationClusterId),
+              id: generateId('backup'),
+              expireTime: copyExpireTime,
+            },
+            INSTANCE
+          );
+        } finally {
+          await backup.delete();
+        }
+      });
+      it('should create backup of a table and copy it on another project', async () => {
+        const [backup, op] = await TABLE.createBackup(generateId('backup'), {
+          expireTime: sourceExpireTime,
+        });
+        try {
+          {
+            await op.promise();
+            // Check the expiry time.
+            await backup.getMetadata();
+            assert.deepStrictEqual(backup.expireDate, sourceExpireTime);
+          }
+          // Create client, instance, cluster for second project
+          const bigtableSecondaryProject = new Bigtable(
+            process.env.GCLOUD_PROJECT2
+              ? {projectId: process.env.GCLOUD_PROJECT2}
+              : {}
+          );
+          const secondInstance = bigtableSecondaryProject.instance(
+            generateId('instance')
+          );
+          const destinationClusterId = generateId('cluster');
+          {
+            // Create production instance with given options
+            const instanceOptions: InstanceOptions = {
+              clusters: [
+                {
+                  id: destinationClusterId,
+                  nodes: 3,
+                  location: 'us-central1-f',
+                  storage: 'ssd',
+                },
+              ],
+              labels: {'prod-label': 'prod-label'},
+              type: 'production',
+            };
+            const [, operation] = await secondInstance.create(instanceOptions);
+            await operation.promise();
+          }
+          // Create the copy and test the copied backup
+          await testCopyBackup(
+            backup,
+            {
+              cluster: new Cluster(secondInstance, destinationClusterId),
+              id: generateId('backup'),
+              expireTime: copyExpireTime,
+            },
+            secondInstance
+          );
+          await secondInstance.delete();
+        } finally {
+          await backup.delete();
+        }
+      });
+      it('should restore a copied backup', async () => {
+        const backupId = generateId('backup');
+        const table = INSTANCE.table('old-table');
+        {
+          // Create a table and insert data into it.
+          await table.create();
+          await table.createFamily('follows');
+          await table.insert([
+            {
+              key: 'some-data-to-copy-key',
+              data: {
+                follows: {
+                  copyData: 'data-to-copy',
+                },
+              },
+            },
+          ]);
+        }
+        // Create the backup
+        const [backup, createBackupOperation] = await table.createBackup(
+          backupId,
+          {
+            expireTime: sourceExpireTime,
+          }
+        );
+        try {
+          await createBackupOperation.promise();
+          // Copy the backup
+          const config = {
+            cluster: backup.cluster,
+            id: generateId('backup'),
+            expireTime: copyExpireTime,
+          };
+          const [newBackup, copyOperation] = await backup.copy(config);
+          await copyOperation.promise();
+          // Restore a table from the copied backup
+          const [newTable, restoreOperation] =
+            await newBackup.restore('new-table');
+          await restoreOperation.promise();
+          const rows = await newTable.getRows();
+          assert.deepStrictEqual(rows[0][0].id, 'some-data-to-copy-key');
+        } finally {
+          await backup.delete();
+        }
+      });
+    });
   });
 });
 
diff --git a/test/backup.ts b/test/backup.ts
index fa3fec846..e12be3b9b 100644
--- a/test/backup.ts
+++ b/test/backup.ts
@@ -25,8 +25,10 @@ import * as backupTypes from '../src/backup';
 import * as instanceTypes from '../src/instance';
 import * as sinon from 'sinon';
 
-import {Bigtable} from '../src';
+import {Bigtable, RequestOptions} from '../src';
 import {Table} from '../src/table';
+import {generateId} from '../system-test/common';
+import {Backup} from '../src/backup';
 
 let promisified = false;
 const fakePromisify = Object.assign({}, promisify, {
@@ -223,6 +225,66 @@ describe('Bigtable/Backup', () => {
     });
   });
 
+  describe('copy', () => {
+    beforeEach(() => {
+      backup.bigtable.request = (
+        config: RequestOptions,
+        callback: (err: ServiceError | null, res: RequestOptions) => void
+      ) => {
+        callback(null, config);
+      };
+    });
+
+    it('should correctly copy backup from the cluster to a custom project', done => {
+      const destinationProjectId = generateId('project');
+      const bigtable = new Bigtable({projectId: destinationProjectId});
+      const backupId = generateId('backup');
+      const newBackupId = generateId('backup');
+      const backup = new Backup(CLUSTER, backupId);
+      const destinationInstanceId = generateId('instance');
+      const destinationClusterId = generateId('cluster');
+      const instance = new FakeInstance(bigtable, destinationInstanceId);
+      // In callback, config is object received in request function so must be
+      // of type any so that this test can compile and so that asserts can test
+      // its properties.
+      backup.copy(
+        {
+          cluster: new clusterTypes.Cluster(instance, destinationClusterId),
+          id: newBackupId,
+          expireTime: new PreciseDate(177),
+          gaxOptions: {
+            timeout: 139,
+          },
+        },
+        (
+          err?: ServiceError | Error | null,
+          backup?: Backup | null,
+          config?: any
+        ) => {
+          assert.strictEqual(
+            backup?.name,
+            `projects/${destinationProjectId}/instances/${destinationInstanceId}/clusters/${destinationClusterId}/backups/${newBackupId}`
+          );
+          assert.strictEqual(config?.client, 'BigtableTableAdminClient');
+          assert.strictEqual(config?.method, 'copyBackup');
+          assert.deepStrictEqual(config?.reqOpts, {
+            parent: `projects/${destinationProjectId}/instances/${destinationInstanceId}/clusters/${destinationClusterId}`,
+            backupId: newBackupId,
+            sourceBackup: `a/b/c/d/backups/${backupId}`,
+            expireTime: {
+              seconds: 0,
+              nanos: 177000000,
+            },
+          });
+          assert.deepStrictEqual(config?.gaxOpts, {
+            timeout: 139,
+          });
+          done();
+        }
+      );
+    });
+  });
+
   describe('delete', () => {
     it('should make the correct request', done => {
       // eslint-disable-next-line @typescript-eslint/no-explicit-any