[llvm] 05a57fd - [X86] Add tests showing failure to simplify ssubsat/usubsat to sub

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat May 6 07:55:21 PDT 2023


Author: Simon Pilgrim
Date: 2023-05-06T15:55:04+01:00
New Revision: 05a57fd18cb1117127f48ee9f63d42631f5725b9

URL: https://github.com/llvm/llvm-project/commit/05a57fd18cb1117127f48ee9f63d42631f5725b9
DIFF: https://github.com/llvm/llvm-project/commit/05a57fd18cb1117127f48ee9f63d42631f5725b9.diff

LOG: [X86] Add tests showing failure to simplify ssubsat/usubsat to sub

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-sub-ssat.ll
    llvm/test/CodeGen/X86/combine-sub-usat.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-sub-ssat.ll b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
index b1dcd56a1367..15c39dd1b666 100644
--- a/llvm/test/CodeGen/X86/combine-sub-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
@@ -115,6 +115,45 @@ define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
   ret <8 x i16> %1
 }
 
+; fold (ssub_sat x, y) -> (sub x, y) iff no overflow
+define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
+; CHECK-LABEL: combine_no_overflow_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sarl $16, %edi
+; CHECK-NEXT:    shrl $16, %esi
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl %esi, %edi
+; CHECK-NEXT:    setns %al
+; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    cmovnol %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = ashr i32 %a0, 16
+  %2 = lshr i32 %a1, 16
+  %3 = call i32 @llvm.ssub.sat.i32(i32 %1, i32 %2)
+  ret i32 %3
+}
+
+define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_no_overflow_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psraw $10, %xmm0
+; SSE-NEXT:    psrlw $10, %xmm1
+; SSE-NEXT:    psubsw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_no_overflow_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsraw $10, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
+; AVX-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = ashr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %3 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
+  ret <8 x i16> %3
+}
+
 ; fold (ssub_sat (shuffle x, u, m), (shuffle y, u, m)) -> (shuffle (ssub_sat x, y), u, m)
 define <8 x i16> @combine_shuffle_shuffle_v8i16(<8 x i16> %x0, <8 x i16> %y0) {
 ; SSE-LABEL: combine_shuffle_shuffle_v8i16:

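The ssat tests above are constructed so that the saturating subtract can never actually clamp: the arithmetic shift bounds the left operand and the logical shift bounds the right one, so the true difference always fits in the result type and ssub_sat is equivalent to a plain sub. A minimal sketch of the scalar case showing the intended end state (illustrative IR only, not part of this commit; the function name is made up):

define i32 @ssub_sat_no_overflow_sketch(i32 %a0, i32 %a1) {
  ; After the shifts, %lhs is in [-32768, 32767] and %rhs is in
  ; [0, 65535], so %lhs - %rhs lies in [-98303, 32767] and never
  ; reaches the i32 saturation limits.
  %lhs = ashr i32 %a0, 16
  %rhs = lshr i32 %a1, 16
  ; llvm.ssub.sat.i32(%lhs, %rhs) therefore yields the same value
  ; as the plain subtract below.
  %sub = sub i32 %lhs, %rhs
  ret i32 %sub
}

The v8i16 variant follows the same argument element-wise: shifting each lane by 10 keeps every per-lane difference inside the i16 range, so the psubsw in the current output could become a plain psubw once such a combine exists.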
diff --git a/llvm/test/CodeGen/X86/combine-sub-usat.ll b/llvm/test/CodeGen/X86/combine-sub-usat.ll
index bda30651c7a1..2e36ffc388d4 100644
--- a/llvm/test/CodeGen/X86/combine-sub-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-usat.ll
@@ -116,6 +116,42 @@ define <8 x i16> @combine_self_v8i16(<8 x i16> %a0) {
   ret <8 x i16> %1
 }
 
+; fold (usub_sat x, y) -> (sub x, y) iff no overflow
+define i32 @combine_no_overflow_i32(i32 %a0, i32 %a1) {
+; CHECK-LABEL: combine_no_overflow_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrl $16, %edi
+; CHECK-NEXT:    shrl $16, %esi
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    cmovael %edi, %eax
+; CHECK-NEXT:    retq
+  %1 = lshr i32 %a0, 16
+  %2 = lshr i32 %a1, 16
+  %3 = call i32 @llvm.usub.sat.i32(i32 %1, i32 %2)
+  ret i32 %3
+}
+
+define <8 x i16> @combine_no_overflow_v8i16(<8 x i16> %a0, <8 x i16> %a1) {
+; SSE-LABEL: combine_no_overflow_v8i16:
+; SSE:       # %bb.0:
+; SSE-NEXT:    psrlw $10, %xmm0
+; SSE-NEXT:    psrlw $10, %xmm1
+; SSE-NEXT:    psubusw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_no_overflow_v8i16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsrlw $10, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $10, %xmm1, %xmm1
+; AVX-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = lshr <8 x i16> %a0, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %2 = lshr <8 x i16> %a1, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
+  %3 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %1, <8 x i16> %2)
+  ret <8 x i16> %3
+}
+
 ; FIXME: fold (trunc (usub_sat zext(x), y)) -> usub_sat(x, trunc(umin(y,satlimit)))
 define i16 @combine_trunc_i32_i16(i16 %a0, i32 %a1) {
 ; CHECK-LABEL: combine_trunc_i32_i16:

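For usub_sat the same "iff no overflow" comment applies, but here no overflow means the subtraction cannot borrow, i.e. the right operand must be provably unsigned-less-than-or-equal to the left one; only then is usub_sat(x, y) the same value as sub(x, y). A minimal sketch of a pair of operands where known bits make this provable (illustrative IR only, not drawn from the tests above; the function name is made up):

define i32 @usub_sat_no_borrow_sketch(i32 %a, i32 %b) {
  ; %lhs has its low 16 bits forced to one, so it is at least 65535;
  ; %rhs has its high 16 bits cleared, so it is at most 65535.
  ; The saturating subtract can therefore never clamp to zero.
  %lhs = or i32 %a, 65535
  %rhs = lshr i32 %b, 16
  ; llvm.usub.sat.i32(%lhs, %rhs) is equal to the plain subtract below.
  %sub = sub i32 %lhs, %rhs
  ret i32 %sub
}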