From f175a87a7683eed4a71578f2fdd58ac484e62b63 Mon Sep 17 00:00:00 2001
From: Piotr Papierkowski
Date: Wed, 24 Sep 2025 18:28:24 +0300
Subject: [PATCH 1/4] Add exception to float->int conversion test case

Downcasting float32 values beyond the target type's value range is
undefined behaviour according to the torch documentation.
---
 test/xpu/test_tensor_creation_ops_xpu.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/test/xpu/test_tensor_creation_ops_xpu.py b/test/xpu/test_tensor_creation_ops_xpu.py
index 95a30716ea..ff7a9077ee 100644
--- a/test/xpu/test_tensor_creation_ops_xpu.py
+++ b/test/xpu/test_tensor_creation_ops_xpu.py
@@ -1226,8 +1226,10 @@ def test_float_to_int_conversion_finite(self, device, dtype):
         vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2, max)
         refs = None
         if self.device_type == "cuda" or self.device_type == "xpu":
-            if torch.version.hip:
+            if torch.version.hip or torch.version.xpu:
                 # HIP min float -> int64 conversion is divergent
+                # XPU min float -> int8 conversion is divergent
+                # XPU min float -> int16 conversion is divergentt
                 vals = (-2, -1.5, -0.5, 0, 0.5, 1.5, 2)
             else:
                 vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2)
@@ -3686,9 +3688,11 @@ def test_logspace_special_steps(self, device, dtype):
 
     @dtypes(*all_types_and(torch.bfloat16))
     @dtypesIfCUDA(
-        *integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64)
-        if TEST_WITH_ROCM
-        else all_types_and(torch.half, torch.bfloat16)
+        *(
+            integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64)
+            if TEST_WITH_ROCM
+            else all_types_and(torch.half, torch.bfloat16)
+        )
     )
     def test_logspace(self, device, dtype):
         _from = random.random()

From f84569cff38341ae1c5f66e087b70132d45e8752 Mon Sep 17 00:00:00 2001
From: Piotr Papierkowski <140634144+ppapierk@users.noreply.github.com>
Date: Wed, 24 Sep 2025 17:35:59 +0200
Subject: [PATCH 2/4] Update test/xpu/test_tensor_creation_ops_xpu.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 test/xpu/test_tensor_creation_ops_xpu.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/xpu/test_tensor_creation_ops_xpu.py b/test/xpu/test_tensor_creation_ops_xpu.py
index ff7a9077ee..875c58d82f 100644
--- a/test/xpu/test_tensor_creation_ops_xpu.py
+++ b/test/xpu/test_tensor_creation_ops_xpu.py
@@ -1229,7 +1229,7 @@ def test_float_to_int_conversion_finite(self, device, dtype):
             if torch.version.hip or torch.version.xpu:
                 # HIP min float -> int64 conversion is divergent
                 # XPU min float -> int8 conversion is divergent
-                # XPU min float -> int16 conversion is divergentt
+                # XPU min float -> int16 conversion is divergent
                 vals = (-2, -1.5, -0.5, 0, 0.5, 1.5, 2)
             else:
                 vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2)

From 6ce2fb2212886472f8b9677a1e535c88eec85dbb Mon Sep 17 00:00:00 2001
From: Piotr Papierkowski
Date: Thu, 25 Sep 2025 13:05:01 +0300
Subject: [PATCH 3/4] .
---
 test/xpu/test_tensor_creation_ops_xpu.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/test/xpu/test_tensor_creation_ops_xpu.py b/test/xpu/test_tensor_creation_ops_xpu.py
index 875c58d82f..4a62ffe88c 100644
--- a/test/xpu/test_tensor_creation_ops_xpu.py
+++ b/test/xpu/test_tensor_creation_ops_xpu.py
@@ -3688,11 +3688,9 @@ def test_logspace_special_steps(self, device, dtype):
 
     @dtypes(*all_types_and(torch.bfloat16))
     @dtypesIfCUDA(
-        *(
-            integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64)
-            if TEST_WITH_ROCM
-            else all_types_and(torch.half, torch.bfloat16)
-        )
+        *integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64)
+        if TEST_WITH_ROCM
+        else all_types_and(torch.half, torch.bfloat16)
     )
     def test_logspace(self, device, dtype):
         _from = random.random()

From 842ac71f6597c8dbaf7b759c862f485e922d9544 Mon Sep 17 00:00:00 2001
From: Piotr Papierkowski
Date: Wed, 15 Oct 2025 11:23:40 +0300
Subject: [PATCH 4/4] Test unskipped

---
 test/xpu/skip_list_common.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 10eebf6a94..4c751fc26e 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -729,9 +729,6 @@
     "nn/test_dropout_xpu.py": None,
     "test_dataloader_xpu.py": None,
     "test_tensor_creation_ops_xpu.py": (
-        # CPU only (vs Numpy). CUDA skips these cases since non-deterministic results are outputed for inf and nan.
-        "test_float_to_int_conversion_finite_xpu_int8",
-        "test_float_to_int_conversion_finite_xpu_int16",
        # Dispatch issue. It is a composite operator. But it is implemented by
        # DispatchStub. XPU doesn't support DispatchStub.
        "test_kaiser_window_xpu",
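
Note on the behaviour patch 1/4 works around (illustration only, not part of the
patch series): casting a float32 value that lies outside the target integer
type's range is undefined behaviour in PyTorch, so CPU, CUDA/HIP, and XPU
backends may legitimately disagree on the result. A minimal sketch, assuming
only the public torch API; the "xpu" device in the commented-out comparison is
an assumption about the available hardware:

```python
import torch

# float32 min (~ -3.4e38) is far outside the range of every integer dtype
# below, so the result of casting it is undefined and may differ between
# CPU, CUDA/HIP, and XPU backends.
fmin = torch.finfo(torch.float32).min

x = torch.tensor([fmin, -2.0, -0.5, 0.0, 0.5, 2.0], dtype=torch.float32)

for dtype in (torch.int8, torch.int16, torch.int64):
    # CPU reference values; the in-range elements convert deterministically.
    print(dtype, x.to(dtype).tolist())
    # On an accelerator, only the out-of-range element may diverge, e.g.:
    # if torch.xpu.is_available():
    #     print(dtype, x.to("xpu").to(dtype).cpu().tolist())
```

This is why the test drops `min` from `vals` on HIP and XPU instead of
asserting a specific wrapped value, and why patch 4/4 can unskip the int8 and
int16 cases once the test no longer exercises the undefined conversion.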