diff --git a/tests/unit/dataloader/test_tf_dataloader.py b/tests/unit/dataloader/test_tf_dataloader.py
index fce6d5de..10443897 100644
--- a/tests/unit/dataloader/test_tf_dataloader.py
+++ b/tests/unit/dataloader/test_tf_dataloader.py
@@ -26,7 +26,7 @@
 import pytest
 from sklearn.metrics import roc_auc_score
 
-from merlin.core.compat import HAS_GPU, cupy
+from merlin.core.compat import HAS_GPU, cudf, cupy
 from merlin.core.dispatch import make_df, random_uniform
 from merlin.io import Dataset
 from merlin.schema import Tags
@@ -357,7 +357,7 @@ def add_sample_weight(features, labels, sample_weight_col_name="sample_weight"):
 # TODO: include parts_per_chunk test
 @pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.06])
 @pytest.mark.parametrize("batch_size", [1, 10, 100])
-@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU else [True])
+@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU and cudf else [True])
 def test_tensorflow_dataloader(
     tmpdir,
     cpu,
@@ -633,7 +633,7 @@ def test_horovod_multigpu(tmpdir):
 
 
 @pytest.mark.parametrize("batch_size", [1000])
-@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU else [True])
+@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU and cudf else [True])
 def test_dataloader_schema(tmpdir, dataset, batch_size, cpu):
     with tf_loader(
         dataset,
diff --git a/tests/unit/dataloader/test_torch_dataloader.py b/tests/unit/dataloader/test_torch_dataloader.py
index 9a364332..753dd8ec 100644
--- a/tests/unit/dataloader/test_torch_dataloader.py
+++ b/tests/unit/dataloader/test_torch_dataloader.py
@@ -23,7 +23,7 @@
 from conftest import assert_eq
 
 from merlin.core import dispatch
-from merlin.core.compat import HAS_GPU
+from merlin.core.compat import HAS_GPU, cudf
 from merlin.core.dispatch import make_df
 from merlin.io import Dataset
 from merlin.schema import Tags
@@ -150,7 +150,7 @@ def test_torch_drp_reset(tmpdir, batch_size, drop_last, num_rows):
         # Each column has only one unique value
         # We test that each value in chunk (output of dataloader)
         # is equal to every value in dataframe
-        if dispatch.HAS_GPU:
+        if cudf and isinstance(df, cudf.DataFrame):
             assert (
                 np.expand_dims(chunk[0][col].cpu().numpy(), 1) == df[col].values_host
             ).all()
@@ -224,7 +224,7 @@ def test_gpu_file_iterator_ds(df, dataset, batch):
 
 @pytest.mark.parametrize("part_mem_fraction", [0.001, 0.06])
 @pytest.mark.parametrize("batch_size", [1000])
-@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU else [True])
+@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU and cudf else [True])
 def test_dataloader_break(dataset, batch_size, part_mem_fraction, cpu):
     dataloader = torch_loader(
         dataset,
@@ -257,7 +257,7 @@ def test_dataloader_break(dataset, batch_size, part_mem_fraction, cpu):
 
 @pytest.mark.parametrize("part_mem_fraction", [0.001, 0.06])
 @pytest.mark.parametrize("batch_size", [1000])
-@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU else [True])
+@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU and cudf else [True])
 def test_dataloader(df, dataset, batch_size, part_mem_fraction, cpu):
     dataloader = torch_loader(
         dataset,
@@ -336,7 +336,7 @@ def test_mh_support(multihot_dataset):
 
 
 @pytest.mark.parametrize("batch_size", [1000])
-@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU else [True])
+@pytest.mark.parametrize("cpu", [False, True] if HAS_GPU and cudf else [True])
 def test_dataloader_schema(df, dataset, batch_size, cpu):
     with torch_loader(
         dataset,