[llvm] [X86] Baseline test for "invalid operand order for fp16 vector comparison" issue (PR #159786)

via llvm-commits llvm-commits@lists.llvm.org
Fri Sep 19 07:55:18 PDT 2025


llvmbot wrote:


@llvm/pr-subscribers-backend-x86

Author: None (azwolski)

Despite the difference in the order of the `fcmp` operands (`%lhs, %rhs` vs. `%rhs, %lhs`), the generated assembly remains the same for each pair of test functions.

This is a baseline test for https://github.com/llvm/llvm-project/issues/159723#event-19795488422
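For context, a minimal scalar sketch (hypothetical functions, not part of the patch) of why the two operand orders are not interchangeable: for any pair of distinct, non-NaN values the two compares below must produce opposite results, so they cannot legally lower to identical machine code.

```llvm
; Illustrative only: with %a = 2.0 and %b = 1.0, @ogt_ab returns true
; while @ogt_ba returns false.
define i1 @ogt_ab(half %a, half %b) {
  %c = fcmp ogt half %a, %b    ; true when %a > %b
  ret i1 %c
}

define i1 @ogt_ba(half %a, half %b) {
  %c = fcmp ogt half %b, %a    ; true when %b > %a
  ret i1 %c
}
```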

---
Full diff: https://github.com/llvm/llvm-project/pull/159786.diff


1 File Affected:

- (added) llvm/test/CodeGen/X86/pr159723.ll (+131) 


``````````diff
diff --git a/llvm/test/CodeGen/X86/pr159723.ll b/llvm/test/CodeGen/X86/pr159723.ll
new file mode 100644
index 0000000000000..f11a058d97c63
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr159723.ll
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 -mattr=+avx512vl -O2 | FileCheck %s
+
+declare <8 x half> @test_call_8()
+
+declare <16 x half> @test_call_16()
+
+declare <32 x half> @test_call_32()
+
+define <8 x i1> @test_cmp_v8half_ogt(<8 x half> %rhs, <8 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v8half_ogt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vpsllw $15, %xmm1, %xmm0
+; CHECK-NEXT:    vpmovw2m %xmm0, %k1
+; CHECK-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; CHECK-NEXT:    callq test_call_8@PLT
+; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; CHECK-NEXT:    vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 {%k1} # 16-byte Folded Reload
+; CHECK-NEXT:    vpmovm2w %k0, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
+    %lhs = call <8 x half> @test_call_8()
+    %comp = fcmp ogt <8 x half> %lhs, %rhs
+    %res = and <8 x i1> %comp, %mask
+    ret <8 x i1> %res
+}
+
+define <8 x i1> @test_cmp_v8half_ogt_rev(<8 x half> %rhs, <8 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v8half_ogt_rev:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vpsllw $15, %xmm1, %xmm0
+; CHECK-NEXT:    vpmovw2m %xmm0, %k1
+; CHECK-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; CHECK-NEXT:    callq test_call_8@PLT
+; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; CHECK-NEXT:    vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %k0 {%k1} # 16-byte Folded Reload
+; CHECK-NEXT:    vpmovm2w %k0, %xmm0
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    retq
+    %lhs = call <8 x half> @test_call_8()
+    %comp = fcmp ogt <8 x half> %rhs, %lhs
+    %res = and <8 x i1> %comp, %mask
+    ret <8 x i1> %res
+}
+
+
+define <16 x i1> @test_cmp_v16half_olt(<16 x half> %rhs, <16 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v16half_olt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $56, %rsp
+; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vpsllw $7, %xmm1, %xmm0
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; CHECK-NEXT:    callq test_call_16@PLT
+; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; CHECK-NEXT:    vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 {%k1} # 32-byte Folded Reload
+; CHECK-NEXT:    vpmovm2b %k0, %xmm0
+; CHECK-NEXT:    addq $56, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+    %lhs = call <16 x half> @test_call_16()
+    %comp = fcmp olt <16 x half> %lhs, %rhs
+    %res = and <16 x i1> %comp, %mask
+    ret <16 x i1> %res
+}
+
+define <16 x i1> @test_cmp_v16half_olt_rev(<16 x half> %rhs, <16 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v16half_olt_rev:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $56, %rsp
+; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vpsllw $7, %xmm1, %xmm0
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; CHECK-NEXT:    callq test_call_16@PLT
+; CHECK-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
+; CHECK-NEXT:    vcmpltph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %k0 {%k1} # 32-byte Folded Reload
+; CHECK-NEXT:    vpmovm2b %k0, %xmm0
+; CHECK-NEXT:    addq $56, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+    %lhs = call <16 x half> @test_call_16()
+    %comp = fcmp olt <16 x half> %rhs, %lhs
+    %res = and <16 x i1> %comp, %mask
+    ret <16 x i1> %res
+}
+
+define <32 x i1> @test_cmp_v32half_oge(<32 x half> %rhs, <32 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v32half_oge:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $88, %rsp
+; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT:    vpsllw $7, %ymm1, %ymm0
+; CHECK-NEXT:    vpmovb2m %ymm0, %k1
+; CHECK-NEXT:    kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT:    callq test_call_32@PLT
+; CHECK-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
+; CHECK-NEXT:    vcmpleph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 {%k1} # 64-byte Folded Reload
+; CHECK-NEXT:    vpmovm2b %k0, %ymm0
+; CHECK-NEXT:    addq $88, %rsp
+; CHECK-NEXT:    retq
+    %lhs = call <32 x half> @test_call_32()
+    %comp = fcmp oge <32 x half> %lhs, %rhs
+    %res = and <32 x i1> %comp, %mask
+    ret <32 x i1> %res
+}
+
+define <32 x i1> @test_cmp_v32half_oge_rev(<32 x half> %rhs, <32 x i1> %mask) nounwind {
+; CHECK-LABEL: test_cmp_v32half_oge_rev:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subq $88, %rsp
+; CHECK-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; CHECK-NEXT:    vpsllw $7, %ymm1, %ymm0
+; CHECK-NEXT:    vpmovb2m %ymm0, %k1
+; CHECK-NEXT:    kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; CHECK-NEXT:    callq test_call_32@PLT
+; CHECK-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
+; CHECK-NEXT:    vcmpleph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %k0 {%k1} # 64-byte Folded Reload
+; CHECK-NEXT:    vpmovm2b %k0, %ymm0
+; CHECK-NEXT:    addq $88, %rsp
+; CHECK-NEXT:    retq
+    %lhs = call <32 x half> @test_call_32()
+    %comp = fcmp oge <32 x half> %rhs, %lhs
+    %res = and <32 x i1> %comp, %mask
+    ret <32 x i1> %res
+}

``````````
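Note that within each pair the autogenerated CHECK lines above are identical apart from the label: for example, both `test_cmp_v8half_ogt` and `test_cmp_v8half_ogt_rev` fold the reloaded spill into the same `vcmpltph` with the same operand order. That identical output is the symptom this baseline captures; once the underlying issue is fixed, one compare in each pair would presumably switch its predicate or operand order.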

https://github.com/llvm/llvm-project/pull/159786


More information about the llvm-commits mailing list