
Commit 35fada4

enable unidiffuser test cases on xpu (#11444)
* enable unidiffuser cases on XPU
* fix a typo
* fix style

Signed-off-by: Yao Matrix <[email protected]>
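The recurring pattern in this commit is swapping CUDA-only test utilities for device-agnostic ones so the same tests run on Intel XPU. As a rough sketch of what a helper like backend_empty_cache has to do (the real implementation lives in diffusers.utils.testing_utils; the dispatch below is an assumption for illustration, not the library's code):

import torch

def backend_empty_cache_sketch(device: str) -> None:
    # Dispatch cache clearing on the device string instead of assuming CUDA.
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu" and hasattr(torch, "xpu"):
        torch.xpu.empty_cache()
    # CPU and other backends have nothing to clear in this sketch.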
1 parent fbe2fe5 commit 35fada4

File tree

2 files changed: +13 −13 lines changed


tests/pipelines/test_pipelines_common.py

+5 −5
@@ -1485,8 +1485,8 @@ def test_to_device(self):
         model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
         self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()

@@ -1677,11 +1677,11 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
 
         pipe.set_progress_bar_config(disable=None)
 
-        pipe.enable_model_cpu_offload(device=torch_device)
+        pipe.enable_model_cpu_offload()
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]
 
-        pipe.enable_model_cpu_offload(device=torch_device)
+        pipe.enable_model_cpu_offload()
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload_twice = pipe(**inputs)[0]

@@ -2226,7 +2226,7 @@ def create_pipe():
 
         def enable_group_offload_on_component(pipe, group_offloading_kwargs):
            # We intentionally don't test VAE's here. This is because some tests enable tiling on the VAE. If
-           # tiling is enabled and a forward pass is run, when cuda streams are used, the execution order of
+           # tiling is enabled and a forward pass is run, when accelerator streams are used, the execution order of
            # the layers is not traced correctly. This causes errors. For apply group offloading to VAE, a
            # warmup forward pass (even with dummy small inputs) is recommended.
            for component_name in [
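The first hunk drops the CUDA-specific variable name; the second drops an explicit device argument, presumably because enable_model_cpu_offload can resolve the available accelerator on its own (an inference from this diff, not verified against the library). Both rely on torch_device already resolving to whatever accelerator the host exposes. A hypothetical re-derivation of that resolution (the real constant comes from diffusers.utils.testing_utils and covers more backends than shown here):

import torch

# Hypothetical re-derivation of torch_device, for illustration only.
if torch.cuda.is_available():
    torch_device = "cuda"
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    torch_device = "xpu"
else:
    torch_device = "cpu"

# With the device resolved once, test code stays backend-neutral, e.g.
# output_device = pipe(**self.get_dummy_inputs(torch_device))[0]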

tests/pipelines/unidiffuser/test_unidiffuser.py

+8 −8
@@ -22,13 +22,13 @@
     UniDiffuserTextDecoder,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     load_image,
     nightly,
     require_torch_2,
     require_torch_accelerator,
-    require_torch_gpu,
     run_test_in_subprocess,
     torch_device,
 )
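require_torch_gpu skips a test unless CUDA is present; require_torch_accelerator only requires some supported accelerator, which is what lets these suites run on XPU. A hypothetical re-implementation of the broader check (the real decorator is in diffusers.utils.testing_utils; this sketch is an assumption):

import unittest

import torch

def require_torch_accelerator_sketch(test_case):
    # Skip unless some supported accelerator is present, not only CUDA.
    has_accelerator = torch.cuda.is_available() or (
        hasattr(torch, "xpu") and torch.xpu.is_available()
    )
    return unittest.skipUnless(has_accelerator, "test requires an accelerator")(test_case)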
@@ -577,24 +577,24 @@ def test_unidiffuser_default_img2text_v1_fp16(self):
         assert text[0][: len(expected_text_prefix)] == expected_text_prefix
 
     @unittest.skip(
-        "Test not supported becauseit has a bunch of direct configs at init and also, this pipeline isn't used that much now."
+        "Test not supported because it has a bunch of direct configs at init and also, this pipeline isn't used that much now."
     )
     def test_encode_prompt_works_in_isolation():
         pass
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class UniDiffuserPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, seed=0, generate_latents=False):
         generator = torch.manual_seed(seed)
@@ -705,17 +705,17 @@ def test_unidiffuser_compile(self, seed=0):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class UniDiffuserPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, seed=0, generate_latents=False):
         generator = torch.manual_seed(seed)
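Both test classes now share identical setUp/tearDown housekeeping. If more suites adopt the pattern, it could be factored into a small shared base class; the sketch below is a hypothetical refactor, not something this commit does:

import gc
import unittest

from diffusers.utils.testing_utils import backend_empty_cache, torch_device

class AcceleratorMemoryTestCase(unittest.TestCase):
    # Hypothetical shared base class: collect garbage and clear the
    # accelerator cache around every test, regardless of backend.
    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)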
