Skip to content

Commit 0ec64fe

Browse files
authored
[tests] fix broken xformers tests (#9206)
* fix xformers tests
* remove unnecessary modifications to cogvideox tests
* update
1 parent 5090b09 commit 0ec64fe

File tree

4 files changed

+28
-0
lines changed

4 files changed

+28
-0
lines changed

tests/pipelines/animatediff/test_animatediff_controlnet.py

+8
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
)
2121
from diffusers.models.attention import FreeNoiseTransformerBlock
2222
from diffusers.utils import logging
23+
from diffusers.utils.import_utils import is_xformers_available
2324
from diffusers.utils.testing_utils import torch_device
2425

2526
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -329,6 +330,13 @@ def test_prompt_embeds(self):
329330
inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
330331
pipe(**inputs)
331332

333+
@unittest.skipIf(
334+
torch_device != "cuda" or not is_xformers_available(),
335+
reason="XFormers attention is only available with CUDA and `xformers` installed",
336+
)
337+
def test_xformers_attention_forwardGenerator_pass(self):
338+
super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
339+
332340
def test_free_init(self):
333341
components = self.get_dummy_components()
334342
pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components)

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

+8
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
UNetMotionModel,
2020
)
2121
from diffusers.utils import logging
22+
from diffusers.utils.import_utils import is_xformers_available
2223
from diffusers.utils.testing_utils import torch_device
2324

2425
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
@@ -393,6 +394,13 @@ def test_prompt_embeds(self):
393394
inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device)
394395
pipe(**inputs)
395396

397+
@unittest.skipIf(
398+
torch_device != "cuda" or not is_xformers_available(),
399+
reason="XFormers attention is only available with CUDA and `xformers` installed",
400+
)
401+
def test_xformers_attention_forwardGenerator_pass(self):
402+
super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
403+
396404
def test_free_init(self):
397405
components = self.get_dummy_components()
398406
pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components)

tests/pipelines/cogvideox/test_cogvideox.py

+4
Original file line numberDiff line numberDiff line change
@@ -275,6 +275,10 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):
275275
"VAE tiling should not affect the inference results",
276276
)
277277

278+
@unittest.skip("xformers attention processor does not exist for CogVideoX")
279+
def test_xformers_attention_forwardGenerator_pass(self):
280+
pass
281+
278282

279283
@slow
280284
@require_torch_gpu

tests/pipelines/latte/test_latte.py

+8
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
LattePipeline,
2929
LatteTransformer3DModel,
3030
)
31+
from diffusers.utils.import_utils import is_xformers_available
3132
from diffusers.utils.testing_utils import (
3233
enable_full_determinism,
3334
numpy_cosine_similarity_distance,
@@ -256,6 +257,13 @@ def test_save_load_optional_components(self):
256257
max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
257258
self.assertLess(max_diff, 1.0)
258259

260+
@unittest.skipIf(
261+
torch_device != "cuda" or not is_xformers_available(),
262+
reason="XFormers attention is only available with CUDA and `xformers` installed",
263+
)
264+
def test_xformers_attention_forwardGenerator_pass(self):
265+
super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
266+
259267

260268
@slow
261269
@require_torch_gpu

0 commit comments

Comments (0)