[llvm-branch-commits] [llvm] e24a859 - [SelectionDAG][RISCV] Remove code for handling too small shift type from SimplifyDemandedBits.

Tom Stellard via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue May 16 20:02:19 PDT 2023


Author: Craig Topper
Date: 2023-05-16T20:01:54-07:00
New Revision: e24a8596a81b904db37d114f91200d7b5cf89683

URL: https://github.com/llvm/llvm-project/commit/e24a8596a81b904db37d114f91200d7b5cf89683
DIFF: https://github.com/llvm/llvm-project/commit/e24a8596a81b904db37d114f91200d7b5cf89683.diff

LOG: [SelectionDAG][RISCV] Remove code for handling too small shift type from SimplifyDemandedBits.

This code detected that the type returned from getShiftAmountTy was
too small to hold the constant shift amount. But it used the full
type size instead of the scalar type size, causing it to crash for
scalable vectors.
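
For illustration only (not part of the patch): a minimal C++ sketch of
the mismatch described above, assuming the usual SelectionDAG headers
and an LLVMContext named Ctx are in scope; the concrete type and shift
amount are made up.

  // <vscale x 4 x i32> as an EVT.
  EVT VT = EVT::getVectorVT(Ctx, MVT::i32, 4, /*IsScalable=*/true);

  // The scalar size is a plain integer: 32 bits per element.
  unsigned ScalarBits = VT.getScalarSizeInBits();

  // The full size is a scalable TypeSize ("vscale x 128" bits). The removed
  // isIntN() check passed such a TypeSize where an unsigned was expected;
  // the implicit conversion to a fixed integer asserts for scalable types.
  bool Fits = APInt(64, /*ShAmt=*/3).isIntN(VT.getSizeInBits());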

This code was necessary when getShiftAmountTy would always return
the target's preferred shift amount type for scalars, even when the
shifted type was an illegal type larger than the target supports. For
vectors, getShiftAmountTy has always returned the vector type.

Fortunately, getShiftAmountTy was fixed a while ago to detect when the
target's preferred scalar shift amount type is not large enough for
the shifted type. So we can delete this code.
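
For reference (not from this patch), the fix in getShiftAmountTy that
the log refers to is roughly of the following shape; the helper name,
the fallback type, and the exact comparison are written from memory
and may differ from the in-tree implementation.

  #include "llvm/CodeGen/TargetLowering.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/Support/MathExtras.h"
  using namespace llvm;

  // Sketch, not the verbatim implementation.
  static EVT getShiftAmountTySketch(const TargetLowering &TLI, EVT LHSTy,
                                    const DataLayout &DL) {
    // Vectors: the shift amount type is the vector type itself.
    if (LHSTy.isVector())
      return LHSTy;

    // Scalars: start from the target's preferred shift amount type.
    MVT ShiftVT = TLI.getScalarShiftAmountTy(DL, LHSTy);

    // If LHSTy is an illegal scalar so wide that the preferred type cannot
    // represent every in-range shift amount, fall back to a wider type.
    if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
      ShiftVT = MVT::i32; // fallback choice is illustrative
    return ShiftVT;
  }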

Switched to getShiftAmountConstant to further simplify the code.
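
As a before/after sketch (names taken from the diff below), the change
reduces to:

  // Before: compute the shift amount type by hand, then build the constant.
  SDValue OldAmt = TLO.DAG.getConstant(ShAmt, dl, ShTy);
  // After: let SelectionDAG pick the right shift amount type for InnerVT.
  SDValue NewAmt = TLO.DAG.getShiftAmountConstant(ShAmt, InnerVT, dl);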

Fixes PR61561.

(cherry picked from commit a37df84f99ebe68c3e9cc533ffd3952fb22d1f38)

Added: 
    llvm/test/CodeGen/RISCV/rvv/pr61561.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 8d4c8802f71ce..3de4efb5ba22b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1724,12 +1724,9 @@ bool TargetLowering::SimplifyDemandedBits(
         unsigned InnerBits = InnerVT.getScalarSizeInBits();
         if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
             isTypeDesirableForOp(ISD::SHL, InnerVT)) {
-          EVT ShTy = getShiftAmountTy(InnerVT, DL);
-          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
-            ShTy = InnerVT;
-          SDValue NarrowShl =
-              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
-                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
+          SDValue NarrowShl = TLO.DAG.getNode(
+              ISD::SHL, dl, InnerVT, InnerOp,
+              TLO.DAG.getShiftAmountConstant(ShAmt, InnerVT, dl));
           return TLO.CombineTo(
               Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
         }

diff --git a/llvm/test/CodeGen/RISCV/rvv/pr61561.ll b/llvm/test/CodeGen/RISCV/rvv/pr61561.ll
new file mode 100644
index 0000000000000..1478e8bfd3c65
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pr61561.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define <vscale x 4 x i8> @foo(ptr %p) {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1re16.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsll.vi v8, v8, 3
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    li a0, 248
+; CHECK-NEXT:    vand.vx v8, v10, a0
+; CHECK-NEXT:    lui a0, 4
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    lui a0, 1
+; CHECK-NEXT:    addiw a0, a0, -361
+; CHECK-NEXT:    vmacc.vx v10, a0, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 15
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %i13 = load <vscale x 4 x i16>, ptr %p, align 2
+  %i14 = zext <vscale x 4 x i16> %i13 to <vscale x 4 x i32>
+  %i15 = shl nuw nsw <vscale x 4 x i32> %i14, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i16 = and <vscale x 4 x i32> %i15, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 248, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i17 = mul nuw nsw <vscale x 4 x i32> %i16, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3735, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i18 = add nuw nsw <vscale x 4 x i32> %i17, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 16384, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i21 = lshr <vscale x 4 x i32> %i18, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 15, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %i22 = trunc <vscale x 4 x i32> %i21 to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %i22
+}

More information about the llvm-branch-commits mailing list