[llvm] af03324 - [ARM] Disable sign extended SSAT pattern recognition.

David Green via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 22 06:08:09 PST 2021


Author: David Green
Date: 2021-01-22T14:07:48Z
New Revision: af0332498405b3a4074cef09845bbacfd4fd594f

URL: https://github.com/llvm/llvm-project/commit/af0332498405b3a4074cef09845bbacfd4fd594f
DIFF: https://github.com/llvm/llvm-project/commit/af0332498405b3a4074cef09845bbacfd4fd594f.diff

LOG: [ARM] Disable sign extended SSAT pattern recognition.

I may have given bad advice, and skipping sext_inreg when matching SSAT
patterns is not valid on its own. It at least needs to sext_inreg the
input again, and as far as I can tell the transform is still only valid
based on demanded bits. For the moment, disable that part of the combine,
hopefully reimplementing it more correctly in the future.
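
(For illustration only, not part of the commit: the sketch below models why
looking through the sext_inreg is unsound on its own. The i8 tests clamp the
sign-extended low byte of the incoming value, whereas an SSAT emitted on the
raw 32-bit register also saturates on whatever the upper bits happen to hold.
The register value used here is an arbitrary example.)

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Model of what the IR in sat_base_8bit asks for: clamp the sign-extended
// low 8 bits of the incoming register to the signed 6-bit range [-32, 31].
static int32_t clamp_low_byte(uint32_t reg) {
  int32_t v = static_cast<int8_t>(reg & 0xff); // the sext_inreg step
  return std::min(std::max(v, -32), 31);
}

// Model of "ssat r0, #6, r0" applied to the raw register: the full 32-bit
// value, upper bits included, is saturated to the same range.
static int32_t ssat6_full_register(uint32_t reg) {
  return std::min(std::max(static_cast<int32_t>(reg), -32), 31);
}

int main() {
  uint32_t reg = 0xFFFFFF05u; // upper bits are not a sign extension of 0x05
  std::printf("%d\n", clamp_low_byte(reg));      // prints 5
  std::printf("%d\n", ssat6_full_register(reg)); // prints -32
}

The two only agree when the input is already properly sign-extended, which is
why the peeled combine at least needs to re-apply the sext_inreg or prove it
redundant via demanded bits.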

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/test/CodeGen/ARM/ssat.ll
    llvm/test/CodeGen/ARM/usat.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 949d2ffc1714..f6f8597f3a69 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -5062,12 +5062,6 @@ static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) {
   SDValue V1Tmp = V1;
   SDValue V2Tmp = V2;
 
-  if (V1.getOpcode() == ISD::SIGN_EXTEND_INREG &&
-      V2.getOpcode() == ISD::SIGN_EXTEND_INREG) {
-    V1Tmp = V1.getOperand(0);
-    V2Tmp = V2.getOperand(0);
-  }
-
   // Check that the registers and the constants match a max(min()) or min(max())
   // pattern
   if (V1Tmp != TrueVal1 || V2Tmp != TrueVal2 || K1 != FalseVal1 ||

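(Again purely illustrative, and not necessarily the reimplementation hinted at
in the log message: one conservative way to reinstate the peeling removed
above would be to look through the sext_inreg pair only when SelectionDAG can
already prove it is a no-op. The helper name below is hypothetical; the
fragment assumes the usual ARMISelLowering.cpp includes and would sit next to
LowerSaturatingConditional rather than compile on its own.)

// Hypothetical helper: return true only when both sext_inreg nodes are
// provably redundant, i.e. their inputs already carry enough sign bits,
// so comparing the unextended operands gives the same result.
static bool canPeelMatchingSExtInReg(SDValue V1, SDValue V2,
                                     SelectionDAG &DAG) {
  if (V1.getOpcode() != ISD::SIGN_EXTEND_INREG ||
      V2.getOpcode() != ISD::SIGN_EXTEND_INREG ||
      V1.getOperand(1) != V2.getOperand(1))
    return false;
  EVT ExtVT = cast<VTSDNode>(V1.getOperand(1))->getVT();
  // sext_inreg(x, ExtVT) == x iff x has at least Width - ExtBits + 1 sign bits.
  unsigned Needed =
      V1.getScalarValueSizeInBits() - ExtVT.getScalarSizeInBits() + 1;
  return DAG.ComputeNumSignBits(V1.getOperand(0)) >= Needed &&
         DAG.ComputeNumSignBits(V2.getOperand(0)) >= Needed;
}

With such a guard, V1Tmp/V2Tmp would only be replaced by the unextended
operands when the check passes; the unconditional version removed here had no
such protection.
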
diff --git a/llvm/test/CodeGen/ARM/ssat.ll b/llvm/test/CodeGen/ARM/ssat.ll
index 9d9758b0515d..fb3c17710b75 100644
--- a/llvm/test/CodeGen/ARM/ssat.ll
+++ b/llvm/test/CodeGen/ARM/ssat.ll
@@ -68,7 +68,15 @@ define i16 @sat_base_16bit(i16 %x) #0 {
 ;
 ; V6T2-LABEL: sat_base_16bit:
 ; V6T2:       @ %bb.0: @ %entry
-; V6T2-NEXT:    ssat r0, #12, r0
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    movw r2, #2047
+; V6T2-NEXT:    cmp r1, r2
+; V6T2-NEXT:    movlt r2, r0
+; V6T2-NEXT:    movw r0, #63488
+; V6T2-NEXT:    sxth r1, r2
+; V6T2-NEXT:    movt r0, #65535
+; V6T2-NEXT:    cmn r1, #2048
+; V6T2-NEXT:    movgt r0, r2
 ; V6T2-NEXT:    bx lr
 entry:
   %0 = icmp slt i16 %x, 2047
@@ -95,7 +103,12 @@ define i8 @sat_base_8bit(i8 %x) #0 {
 ;
 ; V6T2-LABEL: sat_base_8bit:
 ; V6T2:       @ %bb.0: @ %entry
-; V6T2-NEXT:    ssat r0, #6, r0
+; V6T2-NEXT:    sxtb r1, r0
+; V6T2-NEXT:    cmp r1, #31
+; V6T2-NEXT:    movge r0, #31
+; V6T2-NEXT:    sxtb r1, r0
+; V6T2-NEXT:    cmn r1, #32
+; V6T2-NEXT:    mvnle r0, #31
 ; V6T2-NEXT:    bx lr
 entry:
   %0 = icmp slt i8 %x, 31
@@ -547,7 +560,12 @@ define void @extended(i32 %xx, i16 signext %y, i8* nocapture %z) {
 ; V6T2-LABEL: extended:
 ; V6T2:       @ %bb.0: @ %entry
 ; V6T2-NEXT:    add r0, r1, r0, lsr #16
-; V6T2-NEXT:    ssat r0, #8, r0
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmp r1, #127
+; V6T2-NEXT:    movge r0, #127
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmn r1, #128
+; V6T2-NEXT:    mvnle r0, #127
 ; V6T2-NEXT:    strb r0, [r2]
 ; V6T2-NEXT:    bx lr
 entry:
@@ -582,7 +600,12 @@ define i32 @formulated_valid(i32 %a) {
 ;
 ; V6T2-LABEL: formulated_valid:
 ; V6T2:       @ %bb.0:
-; V6T2-NEXT:    ssat r0, #8, r0
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmp r1, #127
+; V6T2-NEXT:    movge r0, #127
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmn r1, #128
+; V6T2-NEXT:    mvnle r0, #127
 ; V6T2-NEXT:    uxth r0, r0
 ; V6T2-NEXT:    bx lr
   %x1 = trunc i32 %a to i16
@@ -613,7 +636,12 @@ define i32 @formulated_invalid(i32 %a) {
 ;
 ; V6T2-LABEL: formulated_invalid:
 ; V6T2:       @ %bb.0:
-; V6T2-NEXT:    ssat r0, #8, r0
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmp r1, #127
+; V6T2-NEXT:    movge r0, #127
+; V6T2-NEXT:    sxth r1, r0
+; V6T2-NEXT:    cmn r1, #128
+; V6T2-NEXT:    mvnle r0, #127
 ; V6T2-NEXT:    bic r0, r0, #-16777216
 ; V6T2-NEXT:    bx lr
   %x1 = trunc i32 %a to i16

diff --git a/llvm/test/CodeGen/ARM/usat.ll b/llvm/test/CodeGen/ARM/usat.ll
index ab508fc0e032..dd0eca823a50 100644
--- a/llvm/test/CodeGen/ARM/usat.ll
+++ b/llvm/test/CodeGen/ARM/usat.ll
@@ -67,12 +67,27 @@ define i16 @unsigned_sat_base_16bit(i16 %x) #0 {
 ;
 ; V6-LABEL: unsigned_sat_base_16bit:
 ; V6:       @ %bb.0: @ %entry
-; V6-NEXT:    usat r0, #11, r0
+; V6-NEXT:    mov r1, #255
+; V6-NEXT:    sxth r2, r0
+; V6-NEXT:    orr r1, r1, #1792
+; V6-NEXT:    cmp r2, r1
+; V6-NEXT:    movlt r1, r0
+; V6-NEXT:    sxth r0, r1
+; V6-NEXT:    cmp r0, #0
+; V6-NEXT:    movle r1, #0
+; V6-NEXT:    mov r0, r1
 ; V6-NEXT:    bx lr
 ;
 ; V6T2-LABEL: unsigned_sat_base_16bit:
 ; V6T2:       @ %bb.0: @ %entry
-; V6T2-NEXT:    usat r0, #11, r0
+; V6T2-NEXT:    sxth r2, r0
+; V6T2-NEXT:    movw r1, #2047
+; V6T2-NEXT:    cmp r2, r1
+; V6T2-NEXT:    movlt r1, r0
+; V6T2-NEXT:    sxth r0, r1
+; V6T2-NEXT:    cmp r0, #0
+; V6T2-NEXT:    movle r1, #0
+; V6T2-NEXT:    mov r0, r1
 ; V6T2-NEXT:    bx lr
 entry:
   %0 = icmp slt i16 %x, 2047
@@ -99,12 +114,22 @@ define i8 @unsigned_sat_base_8bit(i8 %x) #0 {
 ;
 ; V6-LABEL: unsigned_sat_base_8bit:
 ; V6:       @ %bb.0: @ %entry
-; V6-NEXT:    usat r0, #5, r0
+; V6-NEXT:    sxtb r1, r0
+; V6-NEXT:    cmp r1, #31
+; V6-NEXT:    movge r0, #31
+; V6-NEXT:    sxtb r1, r0
+; V6-NEXT:    cmp r1, #0
+; V6-NEXT:    movle r0, #0
 ; V6-NEXT:    bx lr
 ;
 ; V6T2-LABEL: unsigned_sat_base_8bit:
 ; V6T2:       @ %bb.0: @ %entry
-; V6T2-NEXT:    usat r0, #5, r0
+; V6T2-NEXT:    sxtb r1, r0
+; V6T2-NEXT:    cmp r1, #31
+; V6T2-NEXT:    movge r0, #31
+; V6T2-NEXT:    sxtb r1, r0
+; V6T2-NEXT:    cmp r1, #0
+; V6T2-NEXT:    movle r0, #0
 ; V6T2-NEXT:    bx lr
 entry:
   %0 = icmp slt i8 %x, 31


        

