[llvm] a39eadc - [DAGCombiner] Teach combineShiftToMULH to handle constant and const splat vector.

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 2 05:05:42 PDT 2021


Author: jacquesguan
Date: 2021-11-02T12:04:23Z
New Revision: a39eadcf1609db66a42fa00d55da6e1ff734e1b3

URL: https://github.com/llvm/llvm-project/commit/a39eadcf1609db66a42fa00d55da6e1ff734e1b3
DIFF: https://github.com/llvm/llvm-project/commit/a39eadcf1609db66a42fa00d55da6e1ff734e1b3.diff

LOG: [DAGCombiner] Teach combineShiftToMULH to handle constant and const splat vector.

Fold (srl (mul (zext i32:$a to i64), i64:c), 32) -> (mulhu $a, c)
if c can be truncated to i32 without loss, and likewise fold the
sign-extended form to mulhs. The constant c may be a scalar constant
or a constant splat vector.
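
A minimal C++ sketch (not part of the patch) of the arithmetic identity the
zero-extended case relies on, written with plain integers instead of
SelectionDAG nodes; the helper name mulhu32 is made up for illustration:

#include <cassert>
#include <cstdint>

// High 32 bits of the full 64-bit product of two unsigned 32-bit values,
// i.e. what an ISD::MULHU node computes.
static uint32_t mulhu32(uint32_t A, uint32_t B) {
  return (uint32_t)(((uint64_t)A * (uint64_t)B) >> 32);
}

int main() {
  uint32_t A = 0x9abcdef0u;
  uint64_t C = 0xfffffff9u; // i32 -7 zero-extended to i64; fits in 32 bits

  assert(C <= UINT32_MAX);  // the "can be truncated without loss" condition

  // (srl (mul (zext i32:A to i64), i64:C), 32)
  uint64_t Wide = ((uint64_t)A * C) >> 32;

  // equals (zext (mulhu A, (trunc C)) to i64)
  assert(Wide == mulhu32(A, (uint32_t)C));
  return 0;
}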

Reviewed By: frasercrmck, craig.topper, RKSimon

Differential Revision: https://reviews.llvm.org/D108129

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a6f2328d2cc2f..d1114a9802e32 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -8552,25 +8552,42 @@ static SDValue combineShiftToMULH(SDNode *N, SelectionDAG &DAG,
   // Both operands must be equivalent extend nodes.
   SDValue LeftOp = ShiftOperand.getOperand(0);
   SDValue RightOp = ShiftOperand.getOperand(1);
+
   bool IsSignExt = LeftOp.getOpcode() == ISD::SIGN_EXTEND;
   bool IsZeroExt = LeftOp.getOpcode() == ISD::ZERO_EXTEND;
 
-  if ((!(IsSignExt || IsZeroExt)) || LeftOp.getOpcode() != RightOp.getOpcode())
+  if (!IsSignExt && !IsZeroExt)
     return SDValue();
 
+  EVT NarrowVT = LeftOp.getOperand(0).getValueType();
+  unsigned NarrowVTSize = NarrowVT.getScalarSizeInBits();
+
+  SDValue MulhRightOp;
+  if (ConstantSDNode *Constant = isConstOrConstSplat(RightOp)) {
+    unsigned ActiveBits = IsSignExt
+                              ? Constant->getAPIntValue().getMinSignedBits()
+                              : Constant->getAPIntValue().getActiveBits();
+    if (ActiveBits > NarrowVTSize)
+      return SDValue();
+    MulhRightOp = DAG.getConstant(
+        Constant->getAPIntValue().trunc(NarrowVT.getScalarSizeInBits()), DL,
+        NarrowVT);
+  } else {
+    if (LeftOp.getOpcode() != RightOp.getOpcode())
+      return SDValue();
+    // Check that the two extend nodes are the same type.
+    if (NarrowVT != RightOp.getOperand(0).getValueType())
+      return SDValue();
+    MulhRightOp = RightOp.getOperand(0);
+  }
+
   EVT WideVT = LeftOp.getValueType();
   // Proceed with the transformation if the wide types match.
   assert((WideVT == RightOp.getValueType()) &&
          "Cannot have a multiply node with two 
diff erent operand types.");
 
-  EVT NarrowVT = LeftOp.getOperand(0).getValueType();
-  // Check that the two extend nodes are the same type.
-  if (NarrowVT != RightOp.getOperand(0).getValueType())
-    return SDValue();
-
   // Proceed with the transformation if the wide type is twice as large
   // as the narrow type.
-  unsigned NarrowVTSize = NarrowVT.getScalarSizeInBits();
   if (WideVT.getScalarSizeInBits() != 2 * NarrowVTSize)
     return SDValue();
 
@@ -8589,8 +8606,8 @@ static SDValue combineShiftToMULH(SDNode *N, SelectionDAG &DAG,
   if (!TLI.isOperationLegalOrCustom(MulhOpcode, NarrowVT))
     return SDValue();
 
-  SDValue Result = DAG.getNode(MulhOpcode, DL, NarrowVT, LeftOp.getOperand(0),
-                               RightOp.getOperand(0));
+  SDValue Result =
+      DAG.getNode(MulhOpcode, DL, NarrowVT, LeftOp.getOperand(0), MulhRightOp);
   return (N->getOpcode() == ISD::SRA ? DAG.getSExtOrTrunc(Result, DL, WideVT)
                                      : DAG.getZExtOrTrunc(Result, DL, WideVT));
 }
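
A small standalone sketch of how the new ActiveBits check decides whether a
wide constant can be narrowed: for sign-extended operands it uses
getMinSignedBits(), for zero-extended operands getActiveBits(), and the fold
bails out when the count exceeds the narrow element width. This assumes a
build against LLVM's Support library; fitsInNarrow is an illustrative helper,
not code from the patch.

#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

// Mirrors the check added above: return true if the wide constant C can be
// truncated to NarrowBits without changing the multiply result.
static bool fitsInNarrow(const APInt &C, unsigned NarrowBits, bool IsSignExt) {
  unsigned ActiveBits = IsSignExt ? C.getMinSignedBits() : C.getActiveBits();
  return ActiveBits <= NarrowBits;
}

int main() {
  // -7 as an i64 splat constant needs only 4 significant signed bits, so the
  // sign-extended (mulhs) case may truncate it to i32.
  APInt MinusSeven(64, -7, /*isSigned=*/true);
  assert(fitsInNarrow(MinusSeven, 32, /*IsSignExt=*/true));

  // 2^32 + 1 needs 33 active bits, so it cannot be narrowed to i32.
  APInt TooWide = APInt(64, 1).shl(32) + 1;
  assert(!fitsInNarrow(TooWide, 32, /*IsSignExt=*/false));
  return 0;
}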

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
index dd25bbc4c9cbc..19d5141d78105 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 ; Test that the prepareSREMEqFold optimization doesn't crash on scalable
 ; vector types.
@@ -60,17 +60,21 @@ define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
 }
 
 define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
-; CHECK-LABEL: vmulh_vi_nxv1i32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    addi a0, zero, -7
-; CHECK-NEXT:    vmul.vx v8, v9, a0
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulh_vi_nxv1i32_0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vmulh.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulh_vi_nxv1i32_0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    addi a0, a0, -7
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
   %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
@@ -86,13 +90,9 @@ define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv1i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vsll.vi v8, v9, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %head1 = insertelement <vscale x 1 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
@@ -141,17 +141,21 @@ define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
 }
 
 define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
-; CHECK-LABEL: vmulh_vi_nxv2i32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    addi a0, zero, -7
-; CHECK-NEXT:    vmul.vx v8, v10, a0
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v10, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulh_vi_nxv2i32_0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT:    vmulh.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulh_vi_nxv2i32_0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    addi a0, a0, -7
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
   %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
@@ -167,13 +171,9 @@ define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv2i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsll.vi v8, v10, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v10, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %head1 = insertelement <vscale x 2 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
@@ -222,17 +222,21 @@ define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
 }
 
 define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
-; CHECK-LABEL: vmulh_vi_nxv4i32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    addi a0, zero, -7
-; CHECK-NEXT:    vmul.vx v8, v12, a0
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v12, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulh_vi_nxv4i32_0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vmulh.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulh_vi_nxv4i32_0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    addi a0, a0, -7
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV64-NEXT:    vmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
@@ -248,13 +252,9 @@ define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv4i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vsll.vi v8, v12, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v12, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %head1 = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
@@ -303,17 +303,21 @@ define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
 }
 
 define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
-; CHECK-LABEL: vmulh_vi_nxv8i32_0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    addi a0, zero, -7
-; CHECK-NEXT:    vmul.vx v8, v16, a0
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v16, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulh_vi_nxv8i32_0:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a0, zero, -7
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT:    vmulh.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulh_vi_nxv8i32_0:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 1
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    addi a0, a0, -7
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV64-NEXT:    vmulh.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
@@ -329,13 +333,9 @@ define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vmulh_vi_nxv8i32_1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v16, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    ret
   %head1 = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
index b2cfd5b78576e..ec06dbffbcf93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll
@@ -39,35 +39,18 @@ define <vscale x 1 x i32> @vmulhu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
 define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv1i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    addi a0, zero, -7
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vzext.vf2 v10, v8
-; RV32-NEXT:    vmul.vv v8, v10, v9
-; RV32-NEXT:    addi a0, zero, 32
-; RV32-NEXT:    vsrl.vx v8, v8, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv1i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV64-NEXT:    vzext.vf2 v9, v8
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
-; RV64-NEXT:    vmul.vx v8, v9, a0
-; RV64-NEXT:    addi a0, zero, 32
-; RV64-NEXT:    vsrl.vx v8, v8, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v8, 0
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
@@ -82,16 +65,18 @@ define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 }
 
 define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
-; CHECK-LABEL: vmulhu_vi_nxv1i32_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vsll.vi v8, v9, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulhu_vi_nxv1i32_1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vsrl.vi v8, v8, 28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulhu_vi_nxv1i32_1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 1 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
   %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
@@ -141,35 +126,18 @@ define <vscale x 2 x i32> @vmulhu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
 define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv2i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    addi a0, zero, -7
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vzext.vf2 v12, v8
-; RV32-NEXT:    vmul.vv v8, v12, v10
-; RV32-NEXT:    addi a0, zero, 32
-; RV32-NEXT:    vsrl.vx v10, v8, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v10, 0
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv2i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV64-NEXT:    vzext.vf2 v10, v8
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
-; RV64-NEXT:    vmul.vx v8, v10, a0
-; RV64-NEXT:    addi a0, zero, 32
-; RV64-NEXT:    vsrl.vx v10, v8, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v10, 0
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
@@ -184,16 +152,18 @@ define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 }
 
 define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
-; CHECK-LABEL: vmulhu_vi_nxv2i32_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vsll.vi v8, v10, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v10, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulhu_vi_nxv2i32_1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vsrl.vi v8, v8, 28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulhu_vi_nxv2i32_1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 2 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
   %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
@@ -243,35 +213,18 @@ define <vscale x 4 x i32> @vmulhu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
 define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv4i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    addi a0, zero, -7
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vzext.vf2 v16, v8
-; RV32-NEXT:    vmul.vv v8, v16, v12
-; RV32-NEXT:    addi a0, zero, 32
-; RV32-NEXT:    vsrl.vx v12, v8, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v12, 0
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv4i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV64-NEXT:    vzext.vf2 v12, v8
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
-; RV64-NEXT:    vmul.vx v8, v12, a0
-; RV64-NEXT:    addi a0, zero, 32
-; RV64-NEXT:    vsrl.vx v12, v8, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v12, 0
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
@@ -286,16 +239,18 @@ define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 }
 
 define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
-; CHECK-LABEL: vmulhu_vi_nxv4i32_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    vsll.vi v8, v12, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v12, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulhu_vi_nxv4i32_1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vsrl.vi v8, v8, 28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulhu_vi_nxv4i32_1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 4 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
   %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
@@ -345,35 +300,18 @@ define <vscale x 8 x i32> @vmulhu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
 define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; RV32-LABEL: vmulhu_vi_nxv8i32_0:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    addi a0, zero, -7
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vzext.vf2 v24, v8
-; RV32-NEXT:    vmul.vv v8, v24, v16
-; RV32-NEXT:    addi a0, zero, 32
-; RV32-NEXT:    vsrl.vx v16, v8, a0
-; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT:    vnsrl.wi v8, v16, 0
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT:    vmulhu.vx v8, v8, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vmulhu_vi_nxv8i32_0:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV64-NEXT:    vzext.vf2 v16, v8
 ; RV64-NEXT:    addi a0, zero, 1
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    addi a0, a0, -7
-; RV64-NEXT:    vmul.vx v8, v16, a0
-; RV64-NEXT:    addi a0, zero, 32
-; RV64-NEXT:    vsrl.vx v16, v8, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64-NEXT:    vnsrl.wi v8, v16, 0
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
 ; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
   %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
@@ -388,16 +326,18 @@ define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 }
 
 define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
-; CHECK-LABEL: vmulhu_vi_nxv8i32_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v16, v8
-; CHECK-NEXT:    vsll.vi v8, v16, 4
-; CHECK-NEXT:    addi a0, zero, 32
-; CHECK-NEXT:    vsrl.vx v16, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vmulhu_vi_nxv8i32_1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; RV32-NEXT:    vsrl.vi v8, v8, 28
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vmulhu_vi_nxv8i32_1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi a0, zero, 16
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV64-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-NEXT:    ret
   %head1 = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
   %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
   %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>


        

