Skip to content

Commit

Permalink
Add a benchmark to monitor performance for large dataset indexing (#9012
Browse files Browse the repository at this point in the history
)
  • Loading branch information
hmaarrfk authored May 7, 2024
1 parent dcf2ac4 commit 4e9d557
Showing 1 changed file with 10 additions and 0 deletions.
10 changes: 10 additions & 0 deletions asv_bench/benchmarks/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
nt = 500

basic_indexes = {
"1scalar": {"x": 0},
"1slice": {"x": slice(0, 3)},
"1slice-1scalar": {"x": 0, "y": slice(None, None, 3)},
"2slicess-1scalar": {"x": slice(3, -3, 3), "y": 1, "t": slice(None, -3, 3)},
Expand Down Expand Up @@ -74,6 +75,10 @@ def setup(self, key):
"x_coords": ("x", np.linspace(1.1, 2.1, nx)),
},
)
# Benchmark how indexing is slowed down by adding many scalar variables
# to the dataset
# https://github.com/pydata/xarray/pull/9003
self.ds_large = self.ds.merge({f"extra_var{i}": i for i in range(400)})


class Indexing(Base):
Expand All @@ -89,6 +94,11 @@ def time_indexing_outer(self, key):
def time_indexing_vectorized(self, key):
    """Benchmark vectorized (pointwise) indexing followed by an eager load."""
    indexer = vectorized_indexes[key]
    self.ds.isel(**indexer).load()

@parameterized(["key"], [list(basic_indexes.keys())])
def time_indexing_basic_ds_large(self, key):
    """Benchmark basic indexing on a dataset padded with many scalar variables.

    See https://github.com/pydata/xarray/pull/9003.
    """
    indexer = basic_indexes[key]
    self.ds_large.isel(**indexer).load()


class Assignment(Base):
@parameterized(["key"], [list(basic_indexes.keys())])
Expand Down

0 comments on commit 4e9d557

Please # to comment.