[llvm] [RISCV] Transform build_vector((binop X_i, C_i)..) to binop (build_ve… (PR #67358)

via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 25 11:52:56 PDT 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v


…ctor, build_vector)

If we have a build_vector of identical binops, we'd prefer to have a single vector binop in most cases.  We do need to make sure that the two build_vectors aren't more difficult to materialize than the original build_vector.  To start with, let's restrict ourselves to the case where one build_vector is a fully constant vector.
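
To make the shape concrete, here is a minimal IR sketch of the pattern this combine targets (hypothetical, but modeled on the add_constant_rhs tests below; the function name and lane constants are illustrative):

```llvm
define <4 x i32> @build_of_binops(i32 %a, i32 %b, i32 %c, i32 %d) {
  ; Every lane is (add X_i, C_i) with a per-lane constant C_i.
  %e0 = add i32 %a, 23
  %e1 = add i32 %b, 25
  %e2 = add i32 %c, 1
  %e3 = add i32 %d, 2355
  ; The insertelement chain is what becomes the build_vector node.
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
```

With the combine, the DAG becomes add (build_vector %a, %b, %c, %d), (build_vector 23, 25, 1, 2355), so lowering emits a single vadd.vv against a constant-pool vector instead of four scalar adds feeding the slide sequence.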

Note that we don't need to worry about speculation safety here.  We are not speculating any of the lanes, so none of the usual concerns (e.g. division by zero) apply.

I'll highlight that the constant build_vector heuristic is just one we could choose here.  We just need some way to be reasonably sure that the cost of the two build_vectors isn't going to outweigh the savings from forming the binop.  I'm open to alternate heuristics here - both more restrictive and more permissive.
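
As a contrasting sketch (again hypothetical): the current heuristic bails as soon as a lane's rhs isn't a ConstantSDNode/ConstantFPSDNode, even when the rhs build_vector would be cheap (e.g. a splat of one scalar).  That is the kind of case a more permissive heuristic could pick up; whether some other combine already handles it is a separate question.

```llvm
define <4 x i32> @build_of_binops_splat_rhs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %x) {
  ; Each lane adds the same non-constant %x, so the rhs build_vector
  ; would be a splat, but this combine bails because the rhs operands
  ; are not constants.
  %e0 = add i32 %a, %x
  %e1 = add i32 %b, %x
  %e2 = add i32 %c, %x
  %e3 = add i32 %d, %x
  %v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %e1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %e2, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %e3, i32 3
  ret <4 x i32> %v3
}
```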

As noted in comments, we can extend this in a number of ways.  I decided to start small as a) that helps keep things understandable in review and b) it covers my actual motivating case.

---

Patch is 49.88 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/67358.diff


5 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+56-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll (+122-289) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll (+151-362) 
- (modified) llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll (+13-68) 
- (modified) llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll (+27-28) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 686350de29883aa..c40e940f1b8aeae 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1295,7 +1295,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
-                         ISD::CONCAT_VECTORS});
+                         ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS});
   if (Subtarget.hasVendorXTHeadMemPair())
     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
   if (Subtarget.useRVVForFixedLengthVectors())
@@ -13337,6 +13337,57 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
   return tryFoldSelectIntoOp(N, DAG, FalseVal, TrueVal, /*Swapped*/true);
 }
 
+/// If we have a build_vector where each lane is binop X, C, where C
+/// is a constant (but not necessarily the same constant on all lanes),
+/// form binop (build_vector x1, x2, ...), (build_vector c1, c2, c3, ...).
+/// We assume that materializing a constant build vector will be no more
+/// expensive than performing O(n) binops.
+static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
+                                            const RISCVSubtarget &Subtarget,
+                                            const RISCVTargetLowering &TLI) {
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+
+  assert(!VT.isScalableVector() && "unexpected build vector");
+
+  if (VT.getVectorNumElements() == 1)
+    return SDValue();
+
+  const unsigned Opcode = N->op_begin()->getNode()->getOpcode();
+  if (!TLI.isBinOp(Opcode))
+    return SDValue();
+
+  if (!TLI.isOperationLegalOrCustom(Opcode, VT) ||
+      !TLI.isTypeLegal(VT))
+    return SDValue();
+
+  SmallVector<SDValue> LHSOps;
+  SmallVector<SDValue> RHSOps;
+  for (SDValue Op : N->ops()) {
+    if (Op.isUndef()) {
+      LHSOps.push_back(Op);
+      RHSOps.push_back(Op);
+      continue;
+    }
+
+    // TODO: We can handle operations which have a neutral rhs value
+    // (e.g. x + 0, a * 1 or a << 0), but we then have to keep track
+    // of profit in a more explicit manner.
+    if (Op.getOpcode() != Opcode || !Op.hasOneUse())
+      return SDValue();
+
+    LHSOps.push_back(Op.getOperand(0));
+    if (!isa<ConstantSDNode>(Op.getOperand(1)) &&
+        !isa<ConstantFPSDNode>(Op.getOperand(1)))
+      return SDValue();
+    RHSOps.push_back(Op.getOperand(1));
+  }
+
+  return DAG.getNode(Opcode, DL, VT,
+                     DAG.getBuildVector(VT, DL, LHSOps),
+                     DAG.getBuildVector(VT, DL, RHSOps));
+}
+
 // If we're concatenating a series of vector loads like
 // concat_vectors (load v4i8, p+0), (load v4i8, p+n), (load v4i8, p+n*2) ...
 // Then we can turn this into a strided load by widening the vector elements
@@ -14399,6 +14450,10 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
       return Gather;
     break;
   }
+  case ISD::BUILD_VECTOR:
+    if (SDValue V = performBUILD_VECTORCombine(N, DAG, Subtarget, *this))
+      return V;
+    break;
   case ISD::CONCAT_VECTORS:
     if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, *this))
       return V;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
index 37a43c3550a5282..9e4584eb17ff9a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
@@ -3,33 +3,18 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: add_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 23
-; RV32-NEXT:    addi a1, a1, 25
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    addi a3, a3, 2047
-; RV32-NEXT:    addi a3, a3, 308
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: add_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addiw a0, a0, 23
-; RV64-NEXT:    addiw a1, a1, 25
-; RV64-NEXT:    addiw a2, a2, 1
-; RV64-NEXT:    addiw a3, a3, 2047
-; RV64-NEXT:    addiw a3, a3, 308
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: add_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
   %e0 = add i32 %a, 23
   %e1 = add i32 %b, 25
   %e2 = add i32 %c, 1
@@ -42,49 +27,22 @@ define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 }
 
 define <8 x i32> @add_constant_rhs_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
-; RV32-LABEL: add_constant_rhs_8xi32:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 23
-; RV32-NEXT:    addi a1, a1, 25
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    addi a3, a3, 2047
-; RV32-NEXT:    addi a3, a3, 308
-; RV32-NEXT:    addi a4, a4, 23
-; RV32-NEXT:    addi a5, a5, 23
-; RV32-NEXT:    addi a6, a6, 22
-; RV32-NEXT:    addi a7, a7, 23
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    vslide1down.vx v8, v8, a4
-; RV32-NEXT:    vslide1down.vx v8, v8, a5
-; RV32-NEXT:    vslide1down.vx v8, v8, a6
-; RV32-NEXT:    vslide1down.vx v8, v8, a7
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: add_constant_rhs_8xi32:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addiw a0, a0, 23
-; RV64-NEXT:    addiw a1, a1, 25
-; RV64-NEXT:    addiw a2, a2, 1
-; RV64-NEXT:    addiw a3, a3, 2047
-; RV64-NEXT:    addiw a3, a3, 308
-; RV64-NEXT:    addiw a4, a4, 23
-; RV64-NEXT:    addiw a5, a5, 23
-; RV64-NEXT:    addiw a6, a6, 22
-; RV64-NEXT:    addiw a7, a7, 23
-; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    vslide1down.vx v8, v8, a4
-; RV64-NEXT:    vslide1down.vx v8, v8, a5
-; RV64-NEXT:    vslide1down.vx v8, v8, a6
-; RV64-NEXT:    vslide1down.vx v8, v8, a7
-; RV64-NEXT:    ret
+; CHECK-LABEL: add_constant_rhs_8xi32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vslide1down.vx v8, v8, a4
+; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI1_0)
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a5
+; CHECK-NEXT:    vslide1down.vx v8, v8, a6
+; CHECK-NEXT:    vslide1down.vx v8, v8, a7
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
   %e0 = add i32 %a, 23
   %e1 = add i32 %b, 25
   %e2 = add i32 %c, 1
@@ -106,33 +64,18 @@ define <8 x i32> @add_constant_rhs_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e,
 
 
 define <4 x i32> @sub_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: sub_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -23
-; RV32-NEXT:    addi a1, a1, -25
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    addi a3, a3, -2048
-; RV32-NEXT:    addi a3, a3, -307
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: sub_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addiw a0, a0, -23
-; RV64-NEXT:    addiw a1, a1, -25
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    addiw a3, a3, -2048
-; RV64-NEXT:    addiw a3, a3, -307
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: sub_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
   %e0 = sub i32 %a, 23
   %e1 = sub i32 %b, 25
   %e2 = sub i32 %c, 1
@@ -145,41 +88,18 @@ define <4 x i32> @sub_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 }
 
 define <4 x i32> @mul_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: mul_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    li a4, 23
-; RV32-NEXT:    mul a0, a0, a4
-; RV32-NEXT:    li a4, 25
-; RV32-NEXT:    mul a1, a1, a4
-; RV32-NEXT:    li a4, 27
-; RV32-NEXT:    mul a2, a2, a4
-; RV32-NEXT:    lui a4, 1
-; RV32-NEXT:    addi a4, a4, -1741
-; RV32-NEXT:    mul a3, a3, a4
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: mul_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a4, 23
-; RV64-NEXT:    mulw a0, a0, a4
-; RV64-NEXT:    li a4, 25
-; RV64-NEXT:    mulw a1, a1, a4
-; RV64-NEXT:    li a4, 27
-; RV64-NEXT:    mulw a2, a2, a4
-; RV64-NEXT:    lui a4, 1
-; RV64-NEXT:    addiw a4, a4, -1741
-; RV64-NEXT:    mulw a3, a3, a4
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: mul_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI3_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vmul.vv v8, v8, v9
+; CHECK-NEXT:    ret
   %e0 = mul i32 %a, 23
   %e1 = mul i32 %b, 25
   %e2 = mul i32 %c, 27
@@ -192,60 +112,30 @@ define <4 x i32> @mul_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 }
 
 define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: udiv_constant_rhs:
-; RV32:       # %bb.0:
-; RV32-NEXT:    lui a4, 729444
-; RV32-NEXT:    addi a4, a4, 713
-; RV32-NEXT:    mulhu a0, a0, a4
-; RV32-NEXT:    srli a0, a0, 4
-; RV32-NEXT:    lui a4, 335544
-; RV32-NEXT:    addi a4, a4, 1311
-; RV32-NEXT:    mulhu a1, a1, a4
-; RV32-NEXT:    srli a1, a1, 3
-; RV32-NEXT:    lui a4, 93703
-; RV32-NEXT:    addi a4, a4, -1899
-; RV32-NEXT:    mulhu a4, a3, a4
-; RV32-NEXT:    sub a3, a3, a4
-; RV32-NEXT:    srli a3, a3, 1
-; RV32-NEXT:    add a3, a3, a4
-; RV32-NEXT:    srli a3, a3, 7
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: udiv_constant_rhs:
-; RV64:       # %bb.0:
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    lui a4, 729444
-; RV64-NEXT:    addiw a4, a4, 713
-; RV64-NEXT:    slli a4, a4, 32
-; RV64-NEXT:    mulhu a0, a0, a4
-; RV64-NEXT:    srli a0, a0, 36
-; RV64-NEXT:    slli a1, a1, 32
-; RV64-NEXT:    lui a4, 335544
-; RV64-NEXT:    addiw a4, a4, 1311
-; RV64-NEXT:    slli a4, a4, 32
-; RV64-NEXT:    mulhu a1, a1, a4
-; RV64-NEXT:    srli a1, a1, 35
-; RV64-NEXT:    slli a4, a3, 32
-; RV64-NEXT:    lui a5, 93703
-; RV64-NEXT:    addiw a5, a5, -1899
-; RV64-NEXT:    slli a5, a5, 32
-; RV64-NEXT:    mulhu a4, a4, a5
-; RV64-NEXT:    srli a4, a4, 32
-; RV64-NEXT:    subw a3, a3, a4
-; RV64-NEXT:    srliw a3, a3, 1
-; RV64-NEXT:    add a3, a3, a4
-; RV64-NEXT:    srli a3, a3, 7
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: udiv_constant_rhs:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vmulhu.vv v9, v8, v9
+; CHECK-NEXT:    vsub.vv v10, v8, v9
+; CHECK-NEXT:    vmv.v.i v11, 0
+; CHECK-NEXT:    lui a0, 524288
+; CHECK-NEXT:    vslide1down.vx v11, v11, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI4_1)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_1)
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vmulhu.vv v10, v10, v11
+; CHECK-NEXT:    vadd.vv v9, v10, v9
+; CHECK-NEXT:    vmv.v.i v0, 4
+; CHECK-NEXT:    vsrl.vv v9, v9, v12
+; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
+; CHECK-NEXT:    ret
   %e0 = udiv i32 %a, 23
   %e1 = udiv i32 %b, 25
   %e2 = udiv i32 %c, 1
@@ -261,21 +151,15 @@ define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
 define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
 ; CHECK-LABEL: fadd_constant_rhs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 269184
-; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    fadd.s fa4, fa0, fa5
-; CHECK-NEXT:    lui a0, 269440
-; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    fadd.s fa1, fa1, fa0
-; CHECK-NEXT:    lui a0, 262144
-; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    fadd.s fa2, fa2, fa0
-; CHECK-NEXT:    fadd.s fa5, fa3, fa5
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %e0 = fadd float %a, 23.0
   %e1 = fadd float %b, 25.0
@@ -291,21 +175,15 @@ define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
 define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
 ; CHECK-LABEL: fdiv_constant_rhs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 269184
-; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    fdiv.s fa4, fa0, fa5
-; CHECK-NEXT:    lui a0, 269440
-; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    fdiv.s fa1, fa1, fa0
-; CHECK-NEXT:    lui a0, 266752
-; CHECK-NEXT:    fmv.w.x fa0, a0
-; CHECK-NEXT:    fdiv.s fa2, fa2, fa0
-; CHECK-NEXT:    fdiv.s fa5, fa3, fa5
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa4
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, fa1
 ; CHECK-NEXT:    vfslide1down.vf v8, v8, fa2
-; CHECK-NEXT:    vfslide1down.vf v8, v8, fa5
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa3
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %e0 = fdiv float %a, 23.0
   %e1 = fdiv float %b, 25.0
@@ -319,31 +197,16 @@ define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
 }
 
 define <4 x i32> @add_constant_rhs_splat(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: add_constant_rhs_splat:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 23
-; RV32-NEXT:    addi a1, a1, 23
-; RV32-NEXT:    addi a2, a2, 23
-; RV32-NEXT:    addi a3, a3, 23
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: add_constant_rhs_splat:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addiw a0, a0, 23
-; RV64-NEXT:    addiw a1, a1, 23
-; RV64-NEXT:    addiw a2, a2, 23
-; RV64-NEXT:    addiw a3, a3, 23
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: add_constant_rhs_splat:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    li a0, 23
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
   %e0 = add i32 %a, 23
   %e1 = add i32 %b, 23
   %e2 = add i32 %c, 23
@@ -458,33 +321,18 @@ define <4 x i32> @add_constant_rhs_identity2(i32 %a, i32 %b, i32 %c, i32 %d) {
 }
 
 define <4 x i32> @add_constant_rhs_inverse(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: add_constant_rhs_inverse:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    addi a1, a1, 25
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    addi a3, a3, 2047
-; RV32-NEXT:    addi a3, a3, 308
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: add_constant_rhs_inverse:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addiw a0, a0, -1
-; RV64-NEXT:    addiw a1, a1, 25
-; RV64-NEXT:    addiw a2, a2, 1
-; RV64-NEXT:    addiw a3, a3, 2047
-; RV64-NEXT:    addiw a3, a3, 308
-; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-NEXT:    ret
+; CHECK-LABEL: add_constant_rhs_inverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, %hi(.LCPI11_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI11_0)
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vslide1down.vx v8, v8, a2
+; CHECK-NEXT:    vslide1down.vx v8, v8, a3
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
   %e0 = sub i32 %a, 1
   %e1 = add i32 %b, 25
   %e2 = add i32 %c, 1
@@ -497,33 +345,18 @@ define <4 x i32> @add_constant_rhs_inverse(i32 %a, i32 %b, i32 %c, i32 %d) {
 }
 
 define <4 x i32> @add_constant_rhs_commute(i32 %a, i32 %b, i32 %c, i32 %d) {
-; RV32-LABEL: add_constant_rhs_commute:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 23
-; RV32-NEXT:    addi a1, a1, 25
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    addi a3, a3, 2047
-; RV32-NEXT:    addi a3, a3, 308
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NE...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/67358


More information about the llvm-commits mailing list