[llvm] f4515ab - Revert "[CodeGen][AArch64] Ensure isSExtCheaperThanZExt returns true for negative constants"

David Sherwood via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 18 00:41:33 PST 2022


Author: David Sherwood
Date: 2022-01-18T08:40:20Z
New Revision: f4515ab858ec3ac87ee63aacdf29647bc64b0de4

URL: https://github.com/llvm/llvm-project/commit/f4515ab858ec3ac87ee63aacdf29647bc64b0de4
DIFF: https://github.com/llvm/llvm-project/commit/f4515ab858ec3ac87ee63aacdf29647bc64b0de4.diff

LOG: Revert "[CodeGen][AArch64] Ensure isSExtCheaperThanZExt returns true for negative constants"

This reverts commit 197f3c0deb76951315118ef13937b67ea9cbd5aa.

Reverting after miscompilation errors discovered with ffmpeg.
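
For context: the reverted change made AArch64's isSExtCheaperThanZExt return
true when the value being extended is a negative constant, steering type
legalization toward sign-extension. Whether a narrow constant is sign- or
zero-extended is observable in the wide bits; a toy C++ sketch (illustrative
only, not LLVM code) of the distinction:

    #include <cstdint>
    #include <cstdio>

    // The i8 bit pattern 0xFF widens to different 32-bit values
    // depending on which extension is chosen.
    int main() {
      int8_t narrow = -1;                            // bit pattern 0xFF
      uint32_t zext = static_cast<uint8_t>(narrow);  // 0x000000ff
      uint32_t sext = narrow;                        // 0xffffffff
      std::printf("zext=%#010x sext=%#010x\n", (unsigned)zext, (unsigned)sext);
      return 0;
    }

The AArch64 test updates below show the same effect at the ISel level: narrow
constants are once again materialized as zero-extended immediates (e.g.
mov w0, #254) rather than sign-extended ones (mov w0, #-2).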

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/AArch64/funnel-shift.ll
    llvm/test/CodeGen/AArch64/reduce-and.ll
    llvm/test/CodeGen/AArch64/sve-vector-splat.ll
    llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
    llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 3d67f0c0ca296..0497847e74316 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2647,9 +2647,9 @@ class TargetLoweringBase {
                       getApproximateEVTForLLT(ToTy, DL, Ctx));
   }
 
-  /// Return true if sign-extension of value \p V from FromTy to ToTy is
-  /// cheaper than zero-extension, where \p V can be SDValue() if unknown.
-  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy, SDValue V) const {
+  /// Return true if sign-extension from FromTy to ToTy is cheaper than
+  /// zero-extension.
+  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
     return false;
   }
 

diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 8053f1d7e5d57..747f4e4fdecca 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -7004,7 +7004,7 @@ bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
   // matching the argument extension instead.
   Instruction::CastOps ExtType = Instruction::ZExt;
   // Some targets prefer SExt over ZExt.
-  if (TLI->isSExtCheaperThanZExt(OldVT, RegType, SDValue()))
+  if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
     ExtType = Instruction::SExt;
 
   if (auto *Arg = dyn_cast<Argument>(Cond)) {

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index da1dd65f02511..8c7b90b6cd336 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1704,7 +1704,7 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &LHS, SDValue &RHS,
   SDValue OpL = GetPromotedInteger(LHS);
   SDValue OpR = GetPromotedInteger(RHS);
 
-  if (TLI.isSExtCheaperThanZExt(LHS.getValueType(), OpL.getValueType(), LHS)) {
+  if (TLI.isSExtCheaperThanZExt(LHS.getValueType(), OpL.getValueType())) {
     // The target would prefer to promote the comparison operand with sign
     // extension. Honor that unless the promoted values are already zero
     // extended.

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 39ea913ef2e68..b62399ed04e62 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -283,7 +283,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
     EVT OldVT = Op.getValueType();
     SDLoc DL(Op);
     Op = GetPromotedInteger(Op);
-    if (TLI.isSExtCheaperThanZExt(OldVT, Op.getValueType(), Op))
+    if (TLI.isSExtCheaperThanZExt(OldVT, Op.getValueType()))
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), Op,
                          DAG.getValueType(OldVT));
     return DAG.getZeroExtendInReg(Op, DL, OldVT);

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 0c2d04b74f204..2200f144ab067 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4765,7 +4765,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                          C->isTargetOpcode(), C->isOpaque());
     case ISD::ANY_EXTEND:
       // Some targets like RISCV prefer to sign extend some types.
-      if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT, Operand))
+      if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
         return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
                            C->isTargetOpcode(), C->isOpaque());
       return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index ea6a7e16bcdbd..b683f33c3c815 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3873,7 +3873,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
     } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
                !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
-                                      OpVT, N0.getOperand(1))) {
+                                      OpVT)) {
       EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
       unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
       EVT ExtDstTy = N0.getValueType();

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index a0eb1f93e08f8..9841a4c048632 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1138,14 +1138,6 @@ class AArch64TargetLowering : public TargetLowering {
 
   bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
                                               LLT Ty2) const override;
-
-  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT, SDValue V) const override {
-    if (!V || SrcVT.getScalarType() == MVT::i1)
-      return false;
-    if (ConstantSDNode *C = isConstOrConstSplat(V))
-      return C->getAPIntValue().isNegative();
-    return false;
-  }
 };
 
 namespace AArch64 {

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4d6627346ae2a..39ecf0a3dbedf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1198,8 +1198,7 @@ bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
   return TargetLowering::isZExtFree(Val, VT2);
 }
 
-bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT,
-                                                SDValue V) const {
+bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index ee9ac0062d5b6..cd423c9ecca09 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -326,7 +326,7 @@ class RISCVTargetLowering : public TargetLowering {
   bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
   bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
-  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT, SDValue V) const override;
+  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
   bool hasAndNotCompare(SDValue Y) const override;

diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index da1b107e65d4b..51dc7ce2d061d 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -93,7 +93,7 @@ declare i7 @llvm.fshl.i7(i7, i7, i7)
 define i7 @fshl_i7_const_fold() {
 ; CHECK-LABEL: fshl_i7_const_fold:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-61
+; CHECK-NEXT:    mov w0, #67
 ; CHECK-NEXT:    ret
   %f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2)
   ret i7 %f
@@ -102,7 +102,7 @@ define i7 @fshl_i7_const_fold() {
 define i8 @fshl_i8_const_fold_overshift_1() {
 ; CHECK-LABEL: fshl_i8_const_fold_overshift_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-128
+; CHECK-NEXT:    mov w0, #128
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 15)
   ret i8 %f
@@ -164,7 +164,7 @@ define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) {
 define i8 @fshl_i8_const_fold() {
 ; CHECK-LABEL: fshl_i8_const_fold:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-128
+; CHECK-NEXT:    mov w0, #128
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
   ret i8 %f
@@ -241,7 +241,7 @@ define i7 @fshr_i7_const_fold() {
 define i8 @fshr_i8_const_fold_overshift_1() {
 ; CHECK-LABEL: fshr_i8_const_fold_overshift_1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-2
+; CHECK-NEXT:    mov w0, #254
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 15)
   ret i8 %f
@@ -250,7 +250,7 @@ define i8 @fshr_i8_const_fold_overshift_1() {
 define i8 @fshr_i8_const_fold_overshift_2() {
 ; CHECK-LABEL: fshr_i8_const_fold_overshift_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-31
+; CHECK-NEXT:    mov w0, #225
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 15, i8 15, i8 11)
   ret i8 %f
@@ -259,7 +259,7 @@ define i8 @fshr_i8_const_fold_overshift_2() {
 define i8 @fshr_i8_const_fold_overshift_3() {
 ; CHECK-LABEL: fshr_i8_const_fold_overshift_3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-1
+; CHECK-NEXT:    mov w0, #255
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 0, i8 255, i8 8)
   ret i8 %f
@@ -303,7 +303,7 @@ define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) {
 define i8 @fshr_i8_const_fold() {
 ; CHECK-LABEL: fshr_i8_const_fold:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-2
+; CHECK-NEXT:    mov w0, #254
 ; CHECK-NEXT:    ret
   %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
   ret i8 %f

diff --git a/llvm/test/CodeGen/AArch64/reduce-and.ll b/llvm/test/CodeGen/AArch64/reduce-and.ll
index a4d2f6576f0af..60546cf128af4 100644
--- a/llvm/test/CodeGen/AArch64/reduce-and.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-and.ll
@@ -223,7 +223,8 @@ define i8 @test_redand_v3i8(<3 x i8> %a) {
 ; CHECK-LABEL: test_redand_v3i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, w1
-; CHECK-NEXT:    and w0, w8, w2
+; CHECK-NEXT:    and w8, w8, w2
+; CHECK-NEXT:    and w0, w8, #0xff
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: test_redand_v3i8:

diff --git a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
index 7f9307d37f6d1..58821738f3a3a 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-splat.ll
@@ -119,7 +119,8 @@ define <vscale x 8 x i8> @sve_splat_8xi8(i8 %val) {
 define <vscale x 8 x i8> @sve_splat_8xi8_imm() {
 ; CHECK-LABEL: sve_splat_8xi8_imm:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov w8, #255
+; CHECK-NEXT:    mov z0.h, w8
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 8 x i8> undef, i8 -1, i32 0
   %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
@@ -150,7 +151,8 @@ define <vscale x 4 x i16> @sve_splat_4xi16(i16 %val) {
 define <vscale x 4 x i16> @sve_splat_4xi16_imm() {
 ; CHECK-LABEL: sve_splat_4xi16_imm:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov w8, #65535
+; CHECK-NEXT:    mov z0.s, w8
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 4 x i16> undef, i16 -1, i32 0
   %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
@@ -171,7 +173,8 @@ define <vscale x 2 x i32> @sve_splat_2xi32(i32 %val) {
 define <vscale x 2 x i32> @sve_splat_2xi32_imm() {
 ; CHECK-LABEL: sve_splat_2xi32_imm:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    mov z0.d, x8
 ; CHECK-NEXT:    ret
   %ins = insertelement <vscale x 2 x i32> undef, i32 -1, i32 0
   %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
index 0c85c14badfb8..607f5dd3dc772 100644
--- a/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
+++ b/llvm/test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll
@@ -29,7 +29,11 @@ define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind {
 define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind {
 ; CHECK-LABEL: out_v2i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    movi d3, #0x0000ff000000ff
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    eor v2.8b, v2.8b, v3.8b
+; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
   %mx = and <2 x i8> %x, %mask
   %notmask = xor <2 x i8> %mask, <i8 -1, i8 -1>
@@ -57,7 +61,11 @@ define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwin
 define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
 ; CHECK-LABEL: out_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    movi d3, #0xff00ff00ff00ff
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    eor v2.8b, v2.8b, v3.8b
+; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
   %mx = and <4 x i8> %x, %mask
   %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 -1, i8 -1>
@@ -69,7 +77,11 @@ define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
 define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
 ; CHECK-LABEL: out_v4i8_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    movi d3, #0xff00ff00ff00ff
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    eor v2.8b, v2.8b, v3.8b
+; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
   %mx = and <4 x i8> %x, %mask
   %notmask = xor <4 x i8> %mask, <i8 -1, i8 -1, i8 undef, i8 -1>
@@ -81,7 +93,11 @@ define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwi
 define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind {
 ; CHECK-LABEL: out_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    movi d3, #0x00ffff0000ffff
+; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
+; CHECK-NEXT:    eor v2.8b, v2.8b, v3.8b
+; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
 ; CHECK-NEXT:    ret
   %mx = and <2 x i16> %x, %mask
   %notmask = xor <2 x i16> %mask, <i16 -1, i16 -1>

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
index 03f0f04062aad..fc0f3a10f5b16 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -86,7 +86,8 @@ define i8 @test_v3i8(<3 x i8> %a) nounwind {
 ; CHECK-LABEL: test_v3i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, w1
-; CHECK-NEXT:    and w0, w8, w2
+; CHECK-NEXT:    and w8, w8, w2
+; CHECK-NEXT:    and w0, w8, #0xff
 ; CHECK-NEXT:    ret
   %b = call i8 @llvm.vector.reduce.and.v3i8(<3 x i8> %a)
   ret i8 %b
