[llvm] [RISCV] fold trunc_vl (srl_vl (vwaddu X, Y), splat 1) -> vaaddu X, Y (PR #76550)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 4 07:33:01 PST 2024


https://github.com/sun-jacobi updated https://github.com/llvm/llvm-project/pull/76550
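
For reference, a minimal sketch of the IR shape the new TRUNCATE combine
recognizes (the function name below is only illustrative; the autogenerated
tests added in the second patch exercise the same pattern): both operands are
zero-extended, added in the wider type, shifted right by one, and truncated
back, i.e. the unsigned floor-average, which can be selected to vaaddu with
vxrm set to round-down.

  define <8 x i8> @avg_floor_u8(<8 x i8> %x, <8 x i8> %y) {
    ; widen both operands so the add cannot overflow
    %xe = zext <8 x i8> %x to <8 x i16>
    %ye = zext <8 x i8> %y to <8 x i16>
    %sum = add nuw nsw <8 x i16> %xe, %ye
    ; halve and narrow back: floor((x + y) / 2)
    %avg = lshr <8 x i16> %sum, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
    %res = trunc <8 x i16> %avg to <8 x i8>
    ret <8 x i8> %res
  }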

>From 73947743e193fb3b6148a184df3e4a59cb69475a Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Fri, 5 Jan 2024 00:23:30 +0900
Subject: [PATCH 1/2] [RISCV][ISel] Implement combineUnsignedAvgFloor.

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 97 +++++++++++++++++--
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |  4 +
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 24 +++++
 3 files changed, 119 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 27bb69dc9868c8..5fb1b9bfcfb74f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -859,6 +859,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
 
+      setOperationAction(ISD::AVGFLOORU, VT, Custom);
+
       // Splice
       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
 
@@ -1177,6 +1179,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
                            VT, Custom);
 
+        setOperationAction(ISD::AVGFLOORU, VT, Custom);
+
         setOperationAction(
             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
 
@@ -1375,7 +1379,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
   setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                        ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
-                       ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});
+                       ISD::TRUNCATE, ISD::OR, ISD::XOR, ISD::SETCC,
+                       ISD::SELECT});
   if (Subtarget.is64Bit())
     setTargetDAGCombine(ISD::SRA);
 
@@ -1385,9 +1390,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtZbb())
     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
 
-  if (Subtarget.hasStdExtZbs() && Subtarget.is64Bit())
-    setTargetDAGCombine(ISD::TRUNCATE);
-
   if (Subtarget.hasStdExtZbkb())
     setTargetDAGCombine(ISD::BITREVERSE);
   if (Subtarget.hasStdExtZfhminOrZhinxmin())
@@ -5501,6 +5503,8 @@ static unsigned getRISCVVLOp(SDValue Op) {
   VP_CASE(CTLZ)       // VP_CTLZ
   VP_CASE(CTTZ)       // VP_CTTZ
   VP_CASE(CTPOP)      // VP_CTPOP
+  case ISD::AVGFLOORU:
+    return RISCVISD::UAVGADD_VL;
   case ISD::CTLZ_ZERO_UNDEF:
   case ISD::VP_CTLZ_ZERO_UNDEF:
     return RISCVISD::CTLZ_VL;
@@ -5563,7 +5567,7 @@ static bool hasMergeOp(unsigned Opcode) {
          Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
          "not a RISC-V target specific op");
   static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
-                    125 &&
+                    126 &&
                 RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
                         ISD::FIRST_TARGET_STRICTFP_OPCODE ==
                     21 &&
@@ -5589,7 +5593,7 @@ static bool hasMaskOp(unsigned Opcode) {
          Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
          "not a RISC-V target specific op");
   static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
-                    125 &&
+                    126 &&
                 RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
                         ISD::FIRST_TARGET_STRICTFP_OPCODE ==
                     21 &&
@@ -6438,6 +6442,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
            "Unexpected custom legalisation");
     return SDValue();
+  case ISD::AVGFLOORU:
+    return lowerUnsignedAvgFloor(Op, DAG);
   case ISD::FADD:
   case ISD::FSUB:
   case ISD::FMUL:
@@ -10298,6 +10304,36 @@ SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
 }
 
+// Lower a vector ISD::AVGFLOORU(X, Y) node to RISCVISD::UAVGADD_VL.
+SDValue RISCVTargetLowering::lowerUnsignedAvgFloor(SDValue Op,
+                                                   SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  assert((Op.getOpcode() == ISD::AVGFLOORU) &&
+         "Opcode should be ISD::AVGFLOORU");
+
+  MVT VT = Op.getSimpleValueType();
+  SDValue X = Op.getOperand(0);
+  SDValue Y = Op.getOperand(1);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
+    Y = convertToScalableVector(ContainerVT, Y, DAG, Subtarget);
+  }
+
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
+
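+  // vxrm encoding 0b10 selects round-down (rdn), so the averaging add
+  // computes floor((X + Y) / 2), matching ISD::AVGFLOORU semantics.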
+  SDValue RM = DAG.getTargetConstant(0b10, DL, Subtarget.getXLenVT());
+  SDValue Result = DAG.getNode(RISCVISD::UAVGADD_VL, DL, ContainerVT,
+                               {X, Y, DAG.getUNDEF(ContainerVT), Mask, VL, RM});
+
+  if (VT.isFixedLengthVector())
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+
+  return Result;
+}
+
 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
 // * Operands of each node are assumed to be in the same order.
 // * The EVL operand is promoted from i32 to i64 on RV64.
@@ -12357,6 +12393,51 @@ static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {
                      N0.getOperand(0));
 }
 
+static SDValue combineUnsignedAvgFloor(SDNode *N, SelectionDAG &DAG,
+                                       const RISCVSubtarget &Subtarget) {
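+  // Recognize (trunc (srl (add (zext X), (zext Y)), 1)) where both
+  // zero-extends come from the truncated type, and rewrite it to
+  // (avgflooru X, Y) so it can be lowered to RISCVISD::UAVGADD_VL.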
+
+  if (!Subtarget.hasVInstructions())
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+  if (!VT.isVector() || !VT.isInteger())
+    return SDValue();
+
+  assert(N->getOpcode() == ISD::TRUNCATE && "Opcode should be ISD::TRUNCATE");
+
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  SDValue Srl = N->getOperand(0);
+
+  // (lshr X, 1)
+  if (!Srl.hasOneUse() || Srl.getOpcode() != ISD::SRL ||
+      !isOneOrOneSplat(Srl->getOperand(1)))
+    return SDValue();
+
+  SDValue WidenedAdd = Srl.getOperand(0);
+
+  if (!WidenedAdd.hasOneUse() || WidenedAdd.getOpcode() != ISD::ADD)
+    return SDValue();
+
+  SDValue N0 = WidenedAdd.getOperand(0);
+  SDValue N1 = WidenedAdd.getOperand(1);
+
+  auto IsZext = [&](SDValue V) {
+    if (V.getOpcode() != ISD::ZERO_EXTEND)
+      return false;
+
+    return V.getOperand(0).getValueType() == VT;
+  };
+
+  if (!IsZext(N0) || !IsZext(N1))
+    return SDValue();
+
+  SDLoc DL(N);
+  return DAG.getNode(ISD::AVGFLOORU, DL, VT, N0->getOperand(0),
+                     N1->getOperand(0));
+}
+
 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
   if (SDValue V = combineAddOfBooleanXor(N, DAG))
@@ -12490,6 +12571,9 @@ static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
 
 static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
+  if (SDValue V = combineUnsignedAvgFloor(N, DAG, Subtarget))
+    return V;
+
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
@@ -18619,6 +18703,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(SMAX_VL)
   NODE_NAME_CASE(UMIN_VL)
   NODE_NAME_CASE(UMAX_VL)
+  NODE_NAME_CASE(UAVGADD_VL)
   NODE_NAME_CASE(BITREVERSE_VL)
   NODE_NAME_CASE(BSWAP_VL)
   NODE_NAME_CASE(CTLZ_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 58ed611efc83d1..911b2fcf2aec05 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -252,6 +252,9 @@ enum NodeType : unsigned {
   UADDSAT_VL,
   SSUBSAT_VL,
   USUBSAT_VL,
+
+  // Averaging adds of unsigned integers.
+  UAVGADD_VL,
 
   MULHS_VL,
   MULHU_VL,
@@ -903,6 +906,7 @@ class RISCVTargetLowering : public TargetLowering {
   SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                             SelectionDAG &DAG) const;
   SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerUnsignedAvgFloor(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 5b50a4a78c018b..570bca5ca49086 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -57,6 +57,15 @@ def SDT_RISCVCopySign_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                                 SDTCisSameNumEltsAs<0, 4>,
                                                 SDTCisVT<5, XLenVT>]>;
 
+def SDT_RISCVIntBinOp_RM_VL : SDTypeProfile<1, 6, [SDTCisSameAs<0, 1>,
+                                                   SDTCisSameAs<0, 2>,
+                                                   SDTCisVec<0>, SDTCisInt<0>,
+                                                   SDTCisSameAs<0, 3>,
+                                                   SDTCVecEltisVT<4, i1>,
+                                                   SDTCisSameNumEltsAs<0, 4>,
+                                                   SDTCisVT<5, XLenVT>,
+                                                   SDTCisVT<6, XLenVT>]>; // Rounding Mode
+
 def riscv_vmv_v_v_vl : SDNode<"RISCVISD::VMV_V_V_VL",
                               SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                    SDTCisSameAs<0, 1>,
@@ -115,6 +124,7 @@ def riscv_saddsat_vl   : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [S
 def riscv_uaddsat_vl   : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_ssubsat_vl   : SDNode<"RISCVISD::SSUBSAT_VL", SDT_RISCVIntBinOp_VL>;
 def riscv_usubsat_vl   : SDNode<"RISCVISD::USUBSAT_VL", SDT_RISCVIntBinOp_VL>;
+def riscv_uavgadd_vl   : SDNode<"RISCVISD::UAVGADD_VL", SDT_RISCVIntBinOp_RM_VL, [SDNPCommutative]>;
 
 def riscv_fadd_vl  : SDNode<"RISCVISD::FADD_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative]>;
 def riscv_fsub_vl  : SDNode<"RISCVISD::FSUB_VL",  SDT_RISCVFPBinOp_VL>;
@@ -2338,6 +2348,20 @@ defm : VPatBinaryVL_VV_VX_VI<riscv_uaddsat_vl, "PseudoVSADDU">;
 defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
 defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
 
+// 12.2. Vector Single-Width Averaging Add and Subtract
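+// Select riscv_uavgadd_vl onto the masked vaaddu pseudo; the rounding mode is
+// carried as a target immediate and later materialized as a vxrm write
+// (csrwi vxrm, 2 in the tests below).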
+foreach vti = AllIntegerVectors in {
+  let Predicates = GetVTypePredicates<vti>.Predicates in {
+    def : Pat<(riscv_uavgadd_vl (vti.Vector vti.RegClass:$rs1),
+                                (vti.Vector vti.RegClass:$rs2),
+                                vti.RegClass:$merge, (vti.Mask V0), VLOpFrag,
+                                (XLenVT timm:$rounding_mode)),
+              (!cast<Instruction>("PseudoVAADDU_VV_"# vti.LMul.MX#"_MASK")
+                   vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
+                   (vti.Mask V0), (XLenVT timm:$rounding_mode),
+                   GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+  }
+}
+
 // 12.5. Vector Narrowing Fixed-Point Clip Instructions
 class VPatTruncSatClipMaxMinBase<string inst,
                                  VTypeInfo vti,

>From e6b47bff4da74cf90a410ba5274732910d96dc87 Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Fri, 5 Jan 2024 00:32:47 +0900
Subject: [PATCH 2/2] [RISCV][ISel] add vaadd autogen test.

---
 llvm/test/CodeGen/RISCV/rvv/vaadd-autogen.ll | 336 +++++++++++++++++++
 1 file changed, 336 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vaadd-autogen.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-autogen.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-autogen.ll
new file mode 100644
index 00000000000000..9fe865146c0eaf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-autogen.ll
@@ -0,0 +1,336 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+
+define <8 x i8> @vaaddu_i8(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xv = load <8 x i8>, ptr %x, align 2
+  %yv = load <8 x i8>, ptr %y, align 2
+  %xzv = zext <8 x i8> %xv to <8 x i16>
+  %yzv = zext <8 x i8> %yv to <8 x i16>
+  %add = add nuw nsw <8 x i16> %xzv, %yzv
+  %div = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %ret = trunc <8 x i16> %div to <8 x i8>
+  ret <8 x i8> %ret
+}
+
+
+define <8 x i8> @vaaddu_i8_arg(<8 x i8> %x, <8 x i8> %y) {
+; CHECK-LABEL: vaaddu_i8_arg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xz = zext <8 x i8> %x to <8 x i16>
+  %yz = zext <8 x i8> %y to <8 x i16>
+  %add = add nuw nsw <8 x i16> %xz, %yz
+  %div = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %ret = trunc <8 x i16> %div to <8 x i8>
+  ret <8 x i8> %ret
+}
+
+define <8 x i8> @vaaddu_i8_sext(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    vwadd.vv v10, v8, v9
+; CHECK-NEXT:    vnsrl.wi v8, v10, 1
+; CHECK-NEXT:    ret
+  %xv = load <8 x i8>, ptr %x, align 2
+  %yv = load <8 x i8>, ptr %y, align 2
+  %xzv = sext <8 x i8> %xv to <8 x i16>
+  %yzv = sext <8 x i8> %yv to <8 x i16>
+  %add = add nuw nsw <8 x i16> %xzv, %yzv
+  %div = lshr <8 x i16> %add, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %ret = trunc <8 x i16> %div to <8 x i8>
+  ret <8 x i8> %ret
+}
+
+define <8 x i8> @vaaddu_i8_zexti32(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_zexti32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xv = load <8 x i8>, ptr %x, align 2
+  %yv = load <8 x i8>, ptr %y, align 2
+  %xzv = zext <8 x i8> %xv to <8 x i32>
+  %yzv = zext <8 x i8> %yv to <8 x i32>
+  %add = add nuw nsw <8 x i32> %xzv, %yzv
+  %div = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %ret = trunc <8 x i32> %div to <8 x i8>
+  ret <8 x i8> %ret
+}
+
+define <8 x i8> @vaaddu_i8_lshr2(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_lshr2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    vwaddu.vv v10, v8, v9
+; CHECK-NEXT:    vnsrl.wi v8, v10, 2
+; CHECK-NEXT:    ret
+  %xv = load <8 x i8>, ptr %x, align 2
+  %yv = load <8 x i8>, ptr %y, align 2
+  %xzv = zext <8 x i8> %xv to <8 x i16>
+  %yzv = zext <8 x i8> %yv to <8 x i16>
+  %add = add nuw nsw <8 x i16> %xzv, %yzv
+  %div = lshr <8 x i16> %add, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+  %ret = trunc <8 x i16> %div to <8 x i8>
+  ret <8 x i8> %ret
+}
+
+define <8 x i16> @vaaddu_i16(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vle16.v v9, (a1)
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xv = load <8 x i16>, ptr %x, align 2
+  %yv = load <8 x i16>, ptr %y, align 2
+  %xzv = zext <8 x i16> %xv to <8 x i32>
+  %yzv = zext <8 x i16> %yv to <8 x i32>
+  %add = add nuw nsw <8 x i32> %xzv, %yzv
+  %div = lshr <8 x i32> %add, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %ret = trunc <8 x i32> %div to <8 x i16>
+  ret <8 x i16> %ret
+}
+
+
+define <8 x i32> @vaaddu_i32(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v10, (a1)
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %xv = load <8 x i32>, ptr %x, align 2
+  %yv = load <8 x i32>, ptr %y, align 2
+  %xzv = zext <8 x i32> %xv to <8 x i64>
+  %yzv = zext <8 x i32> %yv to <8 x i64>
+  %add = add nuw nsw <8 x i64> %xzv, %yzv
+  %div = lshr <8 x i64> %add, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %ret = trunc <8 x i64> %div to <8 x i32>
+  ret <8 x i32> %ret
+}
+
+define <8 x i64> @vaaddu_i64(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 64
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v12, (a1)
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %xv = load <8 x i64>, ptr %x, align 2
+  %yv = load <8 x i64>, ptr %y, align 2
+  %xzv = zext <8 x i64> %xv to <8 x i128>
+  %yzv = zext <8 x i64> %yv to <8 x i128>
+  %add = add nuw nsw <8 x i128> %xzv, %yzv
+  %div = lshr <8 x i128> %add, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1>
+  %ret = trunc <8 x i128> %div to <8 x i64>
+  ret <8 x i64> %ret
+}
+
+
+define <8 x i1> @vaaddu_i1(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    vlm.v v8, (a1)
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v10, v8
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %xv = load <8 x i1>, ptr %x, align 2
+  %yv = load <8 x i1>, ptr %y, align 2
+  %xzv = zext <8 x i1> %xv to <8 x i8>
+  %yzv = zext <8 x i1> %yv to <8 x i8>
+  %add = add nuw nsw <8 x i8> %xzv, %yzv
+  %div = lshr <8 x i8> %add, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %ret = trunc <8 x i8> %div to <8 x i1>
+  ret <8 x i1> %ret
+}
+
+
+define <vscale x 8 x i8> @vaaddu_i8_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v9, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i8>, ptr %x, align 2
+  %yv = load <vscale x 8 x i8>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i8> %xv to <vscale x 8 x i16>
+  %yzv = zext <vscale x 8 x i8> %yv to <vscale x 8 x i16>
+  %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i16> %add, %splat
+  %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ret
+}
+
+
+define <vscale x 8 x i8> @vaaddu_i8_sext_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_sext_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v9, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwadd.vv v10, v8, v9
+; CHECK-NEXT:    vnsrl.wi v8, v10, 1
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i8>, ptr %x, align 2
+  %yv = load <vscale x 8 x i8>, ptr %y, align 2
+  %xzv = sext <vscale x 8 x i8> %xv to <vscale x 8 x i16>
+  %yzv = sext <vscale x 8 x i8> %yv to <vscale x 8 x i16>
+  %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i16> %add, %splat
+  %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ret
+}
+
+define <vscale x 8 x i8> @vaaddu_i8_zexti32_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_zexti32_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v9, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i8>, ptr %x, align 2
+  %yv = load <vscale x 8 x i8>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i8> %xv to <vscale x 8 x i32>
+  %yzv = zext <vscale x 8 x i8> %yv to <vscale x 8 x i32>
+  %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i32> %add, %splat
+  %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ret
+}
+
+define <vscale x 8 x i8> @vaaddu_i8_lshr2_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i8_lshr2_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v8, (a0)
+; CHECK-NEXT:    vl1r.v v9, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vwaddu.vv v10, v8, v9
+; CHECK-NEXT:    vnsrl.wi v8, v10, 2
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i8>, ptr %x, align 2
+  %yv = load <vscale x 8 x i8>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i8> %xv to <vscale x 8 x i16>
+  %yzv = zext <vscale x 8 x i8> %yv to <vscale x 8 x i16>
+  %add = add nuw nsw <vscale x 8 x i16> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %one, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i16> %add, %splat
+  %ret = trunc <vscale x 8 x i16> %div to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %ret
+}
+
+define <vscale x 8 x i16> @vaaddu_i16_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i16_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re16.v v8, (a0)
+; CHECK-NEXT:    vl2re16.v v10, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i16>, ptr %x, align 2
+  %yv = load <vscale x 8 x i16>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i16> %xv to <vscale x 8 x i32>
+  %yzv = zext <vscale x 8 x i16> %yv to <vscale x 8 x i32>
+  %add = add nuw nsw <vscale x 8 x i32> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %one, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i32> %add, %splat
+  %ret = trunc <vscale x 8 x i32> %div to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %ret
+}
+
+
+define <vscale x 8 x i32> @vaaddu_i32_vscale(ptr %x, ptr %y) {
+; CHECK-LABEL: vaaddu_i32_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4r.v v8, (a0)
+; CHECK-NEXT:    vl4r.v v12, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i32>, ptr %x, align 2
+  %yv = load <vscale x 8 x i32>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i32> %xv to <vscale x 8 x i64>
+  %yzv = zext <vscale x 8 x i32> %yv to <vscale x 8 x i64>
+  %add = add nuw nsw <vscale x 8 x i64> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i64> poison, i64 1, i64 0
+  %splat = shufflevector <vscale x 8 x i64> %one, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i64> %add, %splat
+  %ret = trunc <vscale x 8 x i64> %div to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %ret
+}
+
+define <vscale x 8 x i64> @vaaddu_i64_vscale(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: vaaddu_i64_vscale:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v8, (a0)
+; CHECK-NEXT:    vl8r.v v16, (a1)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaaddu.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %xv = load <vscale x 8 x i64>, ptr %x, align 2
+  %yv = load <vscale x 8 x i64>, ptr %y, align 2
+  %xzv = zext <vscale x 8 x i64> %xv to <vscale x 8 x i128>
+  %yzv = zext <vscale x 8 x i64> %yv to <vscale x 8 x i128>
+  %add = add nuw nsw <vscale x 8 x i128> %xzv, %yzv
+  %one = insertelement <vscale x 8 x i128> poison, i128 1, i128 0
+  %splat = shufflevector <vscale x 8 x i128> %one, <vscale x 8 x i128> poison, <vscale x 8 x i32> zeroinitializer
+  %div = lshr <vscale x 8 x i128> %add, %splat
+  %ret = trunc <vscale x 8 x i128> %div to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %ret
+}


